// net.cpp
  1. #include <iostream>
  2. #include "Enclave_u.h"
  3. #include "net.hpp"
  4. // The command type byte values
  5. #define COMMAND_EPOCH 0x00
  6. #define COMMAND_MESSAGE 0x01
  7. #define COMMAND_CHUNK 0x02
  8. NetIO *g_netio = NULL;
  9. NodeIO::NodeIO(tcp::socket &&socket) : sock(std::move(socket))
  10. {
  11. }
  12. uint8_t *NodeIO::request_frame()
  13. {
  14. if (frames_available.empty()) {
  15. // Allocate a new frame. Note that this memory will (at this
  16. // time) never get deallocated. In theory, we could deallocate
  17. // it in return_frame, but if a certain number of frames were
  18. // allocated here, it means we had that much data in flight
  19. // (queued but not accepted for sending by the OS), and we're
  20. // likely to need that much again. Subsequent messages will
  21. // _reuse_ the allocated data, though, so the used memory won't
  22. // grow forever, and will be limited to the amount of in-flight
  23. // data needed.
  24. return new uint8_t[MAXCHUNKSIZE];
  25. }
  26. // Copy the pointer to the frame out of the deque and remove it from
  27. // the deque. Note this is _not_ taking the address of the element
  28. // *in* the deque (and then popping it, which would invalidate that
  29. // pointer).
  30. frame_deque_lock.lock();
  31. uint8_t *frame = frames_available.back();
  32. frames_available.pop_back();
  33. frame_deque_lock.unlock();
  34. return frame;
  35. }
  36. void NodeIO::return_frame(uint8_t *frame)
  37. {
  38. if (!frame) return;
  39. // We push the frame back on to the end of the deque so that it will
  40. // be the next one used. This may lead to better cache behaviour?
  41. frame_deque_lock.lock();
  42. frames_available.push_back(frame);
  43. frame_deque_lock.unlock();
  44. }
  45. void NodeIO::send_header_data(uint64_t header, uint8_t *data, size_t len)
  46. {
  47. commands_deque_lock.lock();
  48. commands_inflight.push_back({header, data, len});
  49. if (commands_inflight.size() == 1) {
  50. async_send_commands();
  51. }
  52. commands_deque_lock.unlock();
  53. }
  54. void NodeIO::async_send_commands()
  55. {
  56. std::vector<boost::asio::const_buffer> tosend;
  57. CommandTuple *commandp = &(commands_inflight.front());
  58. tosend.push_back(boost::asio::buffer(&(std::get<0>(*commandp)), 5));
  59. if (std::get<1>(*commandp) != NULL && std::get<2>(*commandp) > 0) {
  60. tosend.push_back(boost::asio::buffer(std::get<1>(*commandp),
  61. std::get<2>(*commandp)));
  62. }
  63. boost::asio::async_write(sock, tosend,
  64. [this, commandp](boost::system::error_code, std::size_t){
  65. // When the write completes, pop the command from the deque
  66. // (which should now be in the front)
  67. commands_deque_lock.lock();
  68. assert(!commands_inflight.empty() &&
  69. &(commands_inflight.front()) == commandp);
  70. uint8_t *data = std::get<1>(*commandp);
  71. commands_inflight.pop_front();
  72. if (commands_inflight.size() > 0) {
  73. async_send_commands();
  74. }
  75. // And return the frame
  76. return_frame(data);
  77. commands_deque_lock.unlock();
  78. });
  79. }
  80. void NodeIO::send_epoch(uint32_t epoch_num)
  81. {
  82. uint64_t header = (uint64_t(epoch_num) << 8) + COMMAND_EPOCH;
  83. send_header_data(header, NULL, 0);
  84. }
  85. void NodeIO::send_message_header(uint32_t tot_message_len)
  86. {
  87. uint64_t header = (uint64_t(tot_message_len) << 8) + COMMAND_MESSAGE;
  88. send_header_data(header, NULL, 0);
  89. // If we're sending a new message header, we have to have finished
  90. // sending the previous message.
  91. assert(chunksize_inflight == msgsize_inflight);
  92. msgsize_inflight = tot_message_len;
  93. chunksize_inflight = 0;
  94. }
  95. bool NodeIO::send_chunk(uint8_t *data, uint32_t chunk_len)
  96. {
  97. assert(chunk_len <= MAXCHUNKSIZE);
  98. uint64_t header = (uint64_t(chunk_len) << 8) + COMMAND_CHUNK;
  99. send_header_data(header, data, chunk_len);
  100. chunksize_inflight += chunk_len;
  101. assert(chunksize_inflight <= msgsize_inflight);
  102. return (chunksize_inflight < msgsize_inflight);
  103. }
  104. void NodeIO::recv_commands(
  105. std::function<void(boost::system::error_code)> error_cb,
  106. std::function<void(uint32_t)> epoch_cb,
  107. std::function<void(uint32_t)> message_cb,
  108. std::function<void(uint8_t*,uint32_t)> chunk_cb)
  109. {
  110. // Asynchronously read the header
  111. receive_header = 0;
  112. boost::asio::async_read(sock, boost::asio::buffer(&receive_header, 5),
  113. [this, error_cb, epoch_cb, message_cb, chunk_cb]
  114. (boost::system::error_code ec, std::size_t) {
  115. if (ec) {
  116. error_cb(ec);
  117. return;
  118. }
  119. if ((receive_header & 0xff) == COMMAND_EPOCH) {
  120. epoch_cb(uint32_t(receive_header >> 8));
  121. recv_commands(error_cb, epoch_cb, message_cb, chunk_cb);
  122. } else if ((receive_header & 0xff) == COMMAND_MESSAGE) {
  123. assert(recv_msgsize_inflight == recv_chunksize_inflight);
  124. recv_msgsize_inflight = uint32_t(receive_header >> 8);
  125. recv_chunksize_inflight = 0;
  126. message_cb(recv_msgsize_inflight);
  127. recv_commands(error_cb, epoch_cb, message_cb, chunk_cb);
  128. } else if ((receive_header & 0xff) == COMMAND_CHUNK) {
  129. uint32_t this_chunk_size = uint32_t(receive_header >> 8);
  130. assert(recv_chunksize_inflight + this_chunk_size <=
  131. recv_msgsize_inflight);
  132. recv_chunksize_inflight += this_chunk_size;
  133. boost::asio::async_read(sock, boost::asio::buffer(
  134. receive_frame, this_chunk_size),
  135. [this, error_cb, epoch_cb, message_cb, chunk_cb,
  136. this_chunk_size]
  137. (boost::system::error_code ecc, std::size_t) {
  138. if (ecc) {
  139. error_cb(ecc);
  140. return;
  141. }
  142. chunk_cb(receive_frame, this_chunk_size);
  143. recv_commands(error_cb, epoch_cb,
  144. message_cb, chunk_cb);
  145. });
  146. } else {
  147. error_cb(boost::system::errc::make_error_code(
  148. boost::system::errc::errc_t::invalid_argument));
  149. }
  150. });
  151. }
  152. NetIO::NetIO(boost::asio::io_context &io_context, const Config &config)
  153. : conf(config), myconf(config.nodes[config.my_node_num])
  154. {
  155. num_nodes = conf.nodes.size();
  156. nodeios.resize(num_nodes);
  157. me = conf.my_node_num;
  158. // Node number n will accept connections from nodes 0, ..., n-1 and
  159. // make connections to nodes n+1, ..., num_nodes-1. This is all
  160. // single threaded, but it doesn't deadlock because node 0 isn't
  161. // waiting for any incoming connections, so it immediately makes
  162. // outgoing connections. When it connects to node 1, that node
  163. // accepts its (only) incoming connection, and then starts making
  164. // its outgoing connections, etc.
  165. tcp::resolver resolver(io_context);
  166. tcp::acceptor acceptor(io_context,
  167. resolver.resolve(myconf.listenhost, myconf.listenport)->endpoint());
  168. for(size_t i=0; i<me; ++i) {
  169. #ifdef VERBOSE_NET
  170. std::cerr << "Accepting number " << i << "\n";
  171. #endif
  172. tcp::socket nodesock = acceptor.accept();
  173. #ifdef VERBOSE_NET
  174. std::cerr << "Accepted number " << i << "\n";
  175. #endif
  176. // Read 2 bytes from the socket, which will be the
  177. // connecting node's node number
  178. unsigned short node_num;
  179. boost::asio::read(nodesock,
  180. boost::asio::buffer(&node_num, sizeof(node_num)));
  181. if (node_num >= num_nodes) {
  182. std::cerr << "Received bad node number\n";
  183. } else {
  184. nodeios[node_num].emplace(std::move(nodesock));
  185. #ifdef VERBOSE_NET
  186. std::cerr << "Received connection from " <<
  187. config.nodes[node_num].name << "\n";
  188. #endif
  189. }
  190. }
  191. for(size_t i=me+1; i<num_nodes; ++i) {
  192. boost::system::error_code err;
  193. tcp::socket nodesock(io_context);
  194. while(1) {
  195. #ifdef VERBOSE_NET
  196. std::cerr << "Connecting to " << config.nodes[i].name << "...\n";
  197. #endif
  198. boost::asio::connect(nodesock,
  199. resolver.resolve(config.nodes[i].listenhost,
  200. config.nodes[i].listenport), err);
  201. if (!err) break;
  202. std::cerr << "Connection to " << config.nodes[i].name <<
  203. " refused, will retry.\n";
  204. sleep(1);
  205. }
  206. // Write 2 bytes to the socket to tell the peer node our node
  207. // number
  208. unsigned short node_num = (unsigned short)me;
  209. boost::asio::write(nodesock,
  210. boost::asio::buffer(&node_num, sizeof(node_num)));
  211. nodeios[i].emplace(std::move(nodesock));
  212. #ifdef VERBOSE_NET
  213. std::cerr << "Connected to " << config.nodes[i].name << "\n";
  214. #endif
  215. }
  216. }
  217. /* The enclave calls this to inform the untrusted app that there's a new
  218. * messaage to send. The return value is the frame the enclave should
  219. * use to store the first (encrypted) chunk of this message. */
  220. uint8_t *ocall_message(nodenum_t node_num, uint32_t message_len)
  221. {
  222. assert(g_netio != NULL);
  223. NodeIO &node = g_netio->node(node_num);
  224. node.send_message_header(message_len);
  225. return node.request_frame();
  226. }
  227. /* The enclave calls this to inform the untrusted app that there's a new
  228. * chunk to send. The return value is the frame the enclave should use
  229. * to store the next (encrypted) chunk of this message, or NULL if this
  230. * was the last chunk. */
  231. uint8_t *ocall_chunk(nodenum_t node_num, uint8_t *chunkdata,
  232. uint32_t chunklen)
  233. {
  234. assert(g_netio != NULL);
  235. NodeIO &node = g_netio->node(node_num);
  236. bool morechunks = node.send_chunk(chunkdata, chunklen);
  237. if (morechunks) {
  238. return node.request_frame();
  239. }
  240. return NULL;
  241. }