net.cpp

#include <iostream>
#include <cassert>      // for assert()
#include <unistd.h>     // for sleep()
#include "Enclave_u.h"
#include "net.hpp"

// The command type byte values
#define COMMAND_EPOCH 0x00
#define COMMAND_MESSAGE 0x01
#define COMMAND_CHUNK 0x02

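// Each command is built as a uint64_t whose low byte is one of the
// command type bytes above and whose next 32 bits are a command-specific
// value (the epoch number, the total message length, or the chunk
// length, respectively).  The low 5 bytes of that value are sent as the
// on-wire header; for COMMAND_CHUNK, the chunk payload of the given
// length follows the header.
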
NetIO *g_netio = NULL;

NodeIO::NodeIO(tcp::socket &&socket) : sock(std::move(socket))
{
}

uint8_t *NodeIO::request_frame()
{
    if (frames_available.empty()) {
        // Allocate a new frame. Note that this memory will (at this
        // time) never get deallocated. In theory, we could deallocate
        // it in return_frame, but if a certain number of frames were
        // allocated here, it means we had that much data in flight
        // (queued but not accepted for sending by the OS), and we're
        // likely to need that much again. Subsequent messages will
        // _reuse_ the allocated data, though, so the used memory won't
        // grow forever, and will be limited to the amount of in-flight
        // data needed.
        return new uint8_t[MAXCHUNKSIZE];
    }
    // Copy the pointer to the frame out of the deque and remove it from
    // the deque. Note this is _not_ taking the address of the element
    // *in* the deque (and then popping it, which would invalidate that
    // pointer).
    frame_deque_lock.lock();
    uint8_t *frame = frames_available.back();
    frames_available.pop_back();
    frame_deque_lock.unlock();
    return frame;
}

void NodeIO::return_frame(uint8_t *frame)
{
    if (!frame) return;
    // We push the frame back on to the end of the deque so that it will
    // be the next one used. This may lead to better cache behaviour?
    frame_deque_lock.lock();
    frames_available.push_back(frame);
    frame_deque_lock.unlock();
}

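// Frame lifecycle: a sender obtains a frame with request_frame(), fills
// it with (encrypted) chunk data, and passes it to send_chunk().  The
// frame is handed back to the pool by the completion handler in
// async_send_commands() once the asynchronous write of that chunk has
// finished, so a frame must not be reused by the caller after it has
// been passed to send_chunk().
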
void NodeIO::send_header_data(uint64_t header, uint8_t *data, size_t len)
{
    commands_deque_lock.lock();
    commands_inflight.push_back({header, data, len});
    if (commands_inflight.size() == 1) {
        async_send_commands();
    }
    commands_deque_lock.unlock();
}

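// Start (or continue) the asynchronous write chain.  This is called
// with commands_deque_lock held and with at least one command in
// commands_inflight: it writes the command at the front of the deque,
// and its completion handler pops that command and chains the next
// write if more commands have been queued in the meantime.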
void NodeIO::async_send_commands()
{
    std::vector<boost::asio::const_buffer> tosend;
    CommandTuple *commandp = &(commands_inflight.front());
    tosend.push_back(boost::asio::buffer(&(std::get<0>(*commandp)), 5));
    if (std::get<1>(*commandp) != NULL && std::get<2>(*commandp) > 0) {
        tosend.push_back(boost::asio::buffer(std::get<1>(*commandp),
            std::get<2>(*commandp)));
    }
    boost::asio::async_write(sock, tosend,
        [this, commandp](boost::system::error_code, std::size_t){
            // When the write completes, pop the command from the deque
            // (it should now be at the front)
            commands_deque_lock.lock();
            assert(!commands_inflight.empty() &&
                &(commands_inflight.front()) == commandp);
            uint8_t *data = std::get<1>(*commandp);
            commands_inflight.pop_front();
            if (commands_inflight.size() > 0) {
                async_send_commands();
            }
            // And return the frame to the pool
            return_frame(data);
            commands_deque_lock.unlock();
        });
}

void NodeIO::send_epoch(uint32_t epoch_num)
{
    uint64_t header = (uint64_t(epoch_num) << 8) + COMMAND_EPOCH;
    send_header_data(header, NULL, 0);
}

void NodeIO::send_message_header(uint32_t tot_message_len)
{
    uint64_t header = (uint64_t(tot_message_len) << 8) + COMMAND_MESSAGE;
    send_header_data(header, NULL, 0);
    // If we're sending a new message header, we have to have finished
    // sending the previous message.
    assert(chunksize_inflight == msgsize_inflight);
    msgsize_inflight = tot_message_len;
    chunksize_inflight = 0;
}

void NodeIO::send_chunk(uint8_t *data, uint32_t chunk_len)
{
    assert(chunk_len <= MAXCHUNKSIZE);
    uint64_t header = (uint64_t(chunk_len) << 8) + COMMAND_CHUNK;
    send_header_data(header, data, chunk_len);
    chunksize_inflight += chunk_len;
    assert(chunksize_inflight <= msgsize_inflight);
}

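/* Illustrative sketch (not part of this file) of how a caller might
 * stream a message to a peer using the two calls above.  It assumes the
 * caller can reach this NodeIO and its frame pool directly, and that
 * the data is already encrypted; the name example_send is hypothetical.
 *
 * static void example_send(NodeIO &node, const uint8_t *msg, uint32_t tot_len)
 * {
 *     node.send_message_header(tot_len);
 *     uint32_t sent = 0;
 *     while (sent < tot_len) {
 *         uint32_t this_chunk = tot_len - sent;
 *         if (this_chunk > MAXCHUNKSIZE) this_chunk = MAXCHUNKSIZE;
 *         // Obtain a frame, copy this chunk into it, and queue it; the
 *         // frame goes back to the pool in the write completion
 *         // handler, so don't touch it again after send_chunk().
 *         uint8_t *frame = node.request_frame();
 *         memcpy(frame, msg + sent, this_chunk);
 *         node.send_chunk(frame, this_chunk);
 *         sent += this_chunk;
 *     }
 * }
 */
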
void NodeIO::recv_commands(
    std::function<void(boost::system::error_code)> error_cb,
    std::function<void(uint32_t)> epoch_cb,
    std::function<void(uint32_t)> message_cb,
    std::function<void(uint8_t*,uint32_t)> chunk_cb)
{
    // Asynchronously read the header
    receive_header = 0;
    boost::asio::async_read(sock, boost::asio::buffer(&receive_header, 5),
        [this, error_cb, epoch_cb, message_cb, chunk_cb]
        (boost::system::error_code ec, std::size_t) {
            if (ec) {
                error_cb(ec);
                return;
            }
            if ((receive_header & 0xff) == COMMAND_EPOCH) {
                epoch_cb(uint32_t(receive_header >> 8));
                recv_commands(error_cb, epoch_cb, message_cb, chunk_cb);
            } else if ((receive_header & 0xff) == COMMAND_MESSAGE) {
                assert(recv_msgsize_inflight == recv_chunksize_inflight);
                recv_msgsize_inflight = uint32_t(receive_header >> 8);
                recv_chunksize_inflight = 0;
                message_cb(recv_msgsize_inflight);
                recv_commands(error_cb, epoch_cb, message_cb, chunk_cb);
            } else if ((receive_header & 0xff) == COMMAND_CHUNK) {
                uint32_t this_chunk_size = uint32_t(receive_header >> 8);
                assert(recv_chunksize_inflight + this_chunk_size <=
                    recv_msgsize_inflight);
                recv_chunksize_inflight += this_chunk_size;
                boost::asio::async_read(sock, boost::asio::buffer(
                    receive_frame, this_chunk_size),
                    [this, error_cb, epoch_cb, message_cb, chunk_cb,
                        this_chunk_size]
                    (boost::system::error_code ecc, std::size_t) {
                        if (ecc) {
                            error_cb(ecc);
                            return;
                        }
                        chunk_cb(receive_frame, this_chunk_size);
                        recv_commands(error_cb, epoch_cb,
                            message_cb, chunk_cb);
                    });
            } else {
                error_cb(boost::system::errc::make_error_code(
                    boost::system::errc::errc_t::invalid_argument));
            }
        });
}

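/* Illustrative sketch (not part of this file) of starting the receive
 * loop on a NodeIO.  recv_commands() re-arms itself from its own
 * completion handlers, so it typically only needs to be called once per
 * connection before io_context.run().  The lambdas are placeholders.
 *
 * node.recv_commands(
 *     [](boost::system::error_code ec) {
 *         std::cerr << "recv error: " << ec.message() << "\n";
 *     },
 *     [](uint32_t epoch_num) { // an epoch marker arrived
 *     },
 *     [](uint32_t msg_len) { // a new message of msg_len bytes is starting
 *     },
 *     [](uint8_t *chunk, uint32_t chunk_len) { // one chunk of that message
 *     });
 */
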
NetIO::NetIO(boost::asio::io_context &io_context, const Config &config)
    : conf(config), myconf(config.nodes[config.my_node_num])
{
    num_nodes = conf.nodes.size();
    nodeios.resize(num_nodes);
    me = conf.my_node_num;

    // Node number n will accept connections from nodes 0, ..., n-1 and
    // make connections to nodes n+1, ..., num_nodes-1. This is all
    // single threaded, but it doesn't deadlock because node 0 isn't
    // waiting for any incoming connections, so it immediately makes
    // outgoing connections. When it connects to node 1, that node
    // accepts its (only) incoming connection, and then starts making
    // its outgoing connections, etc.
    tcp::resolver resolver(io_context);
    tcp::acceptor acceptor(io_context,
        resolver.resolve(myconf.listenhost, myconf.listenport)->endpoint());

    for (size_t i = 0; i < me; ++i) {
#ifdef VERBOSE_NET
        std::cerr << "Accepting number " << i << "\n";
#endif
        tcp::socket nodesock = acceptor.accept();
#ifdef VERBOSE_NET
        std::cerr << "Accepted number " << i << "\n";
#endif
        // Read 2 bytes from the socket, which will be the
        // connecting node's node number
        unsigned short node_num;
        boost::asio::read(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        if (node_num >= num_nodes) {
            std::cerr << "Received bad node number\n";
        } else {
            nodeios[node_num].emplace(std::move(nodesock));
#ifdef VERBOSE_NET
            std::cerr << "Received connection from " <<
                config.nodes[node_num].name << "\n";
#endif
        }
    }

    for (size_t i = me+1; i < num_nodes; ++i) {
        boost::system::error_code err;
        tcp::socket nodesock(io_context);
        while (1) {
#ifdef VERBOSE_NET
            std::cerr << "Connecting to " << config.nodes[i].name << "...\n";
#endif
            boost::asio::connect(nodesock,
                resolver.resolve(config.nodes[i].listenhost,
                    config.nodes[i].listenport), err);
            if (!err) break;
            std::cerr << "Connection to " << config.nodes[i].name <<
                " refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket to tell the peer node our node
        // number
        unsigned short node_num = (unsigned short)me;
        boost::asio::write(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        nodeios[i].emplace(std::move(nodesock));
#ifdef VERBOSE_NET
        std::cerr << "Connected to " << config.nodes[i].name << "\n";
#endif
    }
}

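/* Illustrative sketch (not part of this file) of bringing the network
 * layer up.  It assumes a Config has been populated elsewhere (nodes,
 * my_node_num); load_config is a hypothetical helper.  The NetIO
 * constructor blocks until the full mesh of connections is established,
 * and io_context.run() then drives all of the asynchronous sends and
 * receives registered above.
 *
 * boost::asio::io_context io_context;
 * Config config = load_config(...);
 * g_netio = new NetIO(io_context, config);
 * // ... call recv_commands on each peer's NodeIO, then:
 * io_context.run();
 */
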
/* The enclave calls this to inform the untrusted app that there's a new
 * message to send. The return value is the frame the enclave should
 * use to store the first (encrypted) chunk of this message. */
uint8_t *ocall_message(nodenum_t node_num, uint32_t message_len)
{
    return NULL;
}

/* The enclave calls this to inform the untrusted app that there's a new
 * chunk to send. The return value is the frame the enclave should use
 * to store the next (encrypted) chunk of this message, or NULL if this
 * was the last chunk. */
uint8_t *ocall_chunk(nodenum_t node_num, const uint8_t *chunkdata,
    uint32_t chunklen)
{
    return NULL;
}

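/* Note: both ocall_message and ocall_chunk are currently stubs that
 * return NULL.  One plausible wiring (an illustrative sketch only; it
 * assumes a hypothetical NetIO accessor that returns the NodeIO for
 * node_num) would be for ocall_message to look up that NodeIO, call
 * send_message_header(message_len), and return a frame from
 * request_frame(); ocall_chunk would pass the filled frame to
 * send_chunk() and return another frame for the next chunk, or NULL
 * once the whole message has been queued. */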