// net.cpp -- untrusted-side network I/O between nodes
  1. #include <iostream>
  2. #include "Enclave_u.h"
  3. #include "Untrusted.hpp"
  4. #include "net.hpp"
  5. // The command type byte values
  6. #define COMMAND_EPOCH 0x00
  7. #define COMMAND_MESSAGE 0x01
  8. #define COMMAND_CHUNK 0x02
  9. NetIO *g_netio = NULL;
  10. NodeIO::NodeIO(tcp::socket &&socket, nodenum_t nodenum) :
  11. sock(std::move(socket)), node_num(nodenum), bytes_sent(0)
  12. {
  13. }
  14. uint8_t *NodeIO::request_frame()
  15. {
  16. if (frames_available.empty()) {
  17. // Allocate a new frame. Note that this memory will (at this
  18. // time) never get deallocated. In theory, we could deallocate
  19. // it in return_frame, but if a certain number of frames were
  20. // allocated here, it means we had that much data in flight
  21. // (queued but not accepted for sending by the OS), and we're
  22. // likely to need that much again. Subsequent messages will
  23. // _reuse_ the allocated data, though, so the used memory won't
  24. // grow forever, and will be limited to the amount of in-flight
  25. // data needed.
  26. return new uint8_t[FRAME_SIZE];
  27. }
  28. // Copy the pointer to the frame out of the deque and remove it from
  29. // the deque. Note this is _not_ taking the address of the element
  30. // *in* the deque (and then popping it, which would invalidate that
  31. // pointer).
  32. frame_deque_lock.lock();
  33. uint8_t *frame = frames_available.back();
  34. frames_available.pop_back();
  35. frame_deque_lock.unlock();
  36. return frame;
  37. }
  38. void NodeIO::return_frame(uint8_t *frame)
  39. {
  40. if (!frame) return;
  41. // We push the frame back on to the end of the deque so that it will
  42. // be the next one used. This may lead to better cache behaviour?
  43. frame_deque_lock.lock();
  44. frames_available.push_back(frame);
  45. frame_deque_lock.unlock();
  46. }
  47. void NodeIO::send_header_data(uint64_t header, uint8_t *data, size_t len)
  48. {
  49. commands_deque_lock.lock();
  50. commands_inflight.push_back({header, data, len});
  51. if (commands_inflight.size() == 1) {
  52. async_send_commands();
  53. }
  54. commands_deque_lock.unlock();
  55. }
  56. void NodeIO::async_send_commands()
  57. {
  58. std::vector<boost::asio::const_buffer> tosend;
  59. CommandTuple *commandp = &(commands_inflight.front());
  60. tosend.push_back(boost::asio::buffer(&(std::get<0>(*commandp)), 5));
  61. if (std::get<1>(*commandp) != NULL && std::get<2>(*commandp) > 0) {
  62. tosend.push_back(boost::asio::buffer(std::get<1>(*commandp),
  63. std::get<2>(*commandp)));
  64. }
  65. boost::asio::async_write(sock, tosend,
  66. [this, commandp](boost::system::error_code, std::size_t){
  67. // When the write completes, pop the command from the deque
  68. // (which should now be in the front)
  69. commands_deque_lock.lock();
  70. assert(!commands_inflight.empty() &&
  71. &(commands_inflight.front()) == commandp);
  72. uint8_t *data = std::get<1>(*commandp);
  73. commands_inflight.pop_front();
  74. bytes_sent += std::get<2>(*commandp);
  75. if (commands_inflight.size() > 0) {
  76. async_send_commands();
  77. }
  78. // And return the frame
  79. return_frame(data);
  80. commands_deque_lock.unlock();
  81. });
  82. }
  83. void NodeIO::send_epoch(uint32_t epoch_num)
  84. {
  85. uint64_t header = (uint64_t(epoch_num) << 8) + COMMAND_EPOCH;
  86. send_header_data(header, NULL, 0);
  87. }
  88. void NodeIO::send_message_header(uint32_t tot_message_len)
  89. {
  90. uint64_t header = (uint64_t(tot_message_len) << 8) + COMMAND_MESSAGE;
  91. send_header_data(header, NULL, 0);
  92. // If we're sending a new message header, we have to have finished
  93. // sending the previous message.
  94. assert(chunksize_inflight == msgsize_inflight);
  95. msgsize_inflight = tot_message_len;
  96. chunksize_inflight = 0;
  97. }
  98. bool NodeIO::send_chunk(uint8_t *data, uint32_t chunk_len)
  99. {
  100. assert(chunk_len <= FRAME_SIZE);
  101. uint64_t header = (uint64_t(chunk_len) << 8) + COMMAND_CHUNK;
  102. send_header_data(header, data, chunk_len);
  103. chunksize_inflight += chunk_len;
  104. assert(chunksize_inflight <= msgsize_inflight);
  105. return (chunksize_inflight < msgsize_inflight);
  106. }
  107. void NodeIO::recv_commands(
  108. std::function<void(boost::system::error_code)> error_cb,
  109. std::function<void(uint32_t)> epoch_cb)
  110. {
  111. // Asynchronously read the header
  112. receive_header = 0;
  113. boost::asio::async_read(sock, boost::asio::buffer(&receive_header, 5),
  114. [this, error_cb, epoch_cb]
  115. (boost::system::error_code ec, std::size_t) {
  116. if (ec) {
  117. error_cb(ec);
  118. return;
  119. }
  120. if ((receive_header & 0xff) == COMMAND_EPOCH) {
  121. epoch_cb(uint32_t(receive_header >> 8));
  122. recv_commands(error_cb, epoch_cb);
  123. } else if ((receive_header & 0xff) == COMMAND_MESSAGE) {
  124. assert(recv_msgsize_inflight == recv_chunksize_inflight);
  125. recv_msgsize_inflight = uint32_t(receive_header >> 8);
  126. recv_chunksize_inflight = 0;
  127. if (ecall_message(node_num, recv_msgsize_inflight)) {
  128. recv_commands(error_cb, epoch_cb);
  129. } else {
  130. printf("ecall_message failed\n");
  131. }
  132. } else if ((receive_header & 0xff) == COMMAND_CHUNK) {
  133. uint32_t this_chunk_size = uint32_t(receive_header >> 8);
  134. assert(recv_chunksize_inflight + this_chunk_size <=
  135. recv_msgsize_inflight);
  136. recv_chunksize_inflight += this_chunk_size;
  137. boost::asio::async_read(sock, boost::asio::buffer(
  138. receive_frame, this_chunk_size),
  139. [this, error_cb, epoch_cb, this_chunk_size]
  140. (boost::system::error_code ecc, std::size_t) {
  141. if (ecc) {
  142. error_cb(ecc);
  143. return;
  144. }
  145. if (ecall_chunk(node_num, receive_frame,
  146. this_chunk_size)) {
  147. recv_commands(error_cb, epoch_cb);
  148. } else {
  149. printf("ecall_chunk failed\n");
  150. }
  151. });
  152. } else {
  153. error_cb(boost::system::errc::make_error_code(
  154. boost::system::errc::errc_t::invalid_argument));
  155. }
  156. });
  157. }
// Establish the full mesh of TCP connections between all nodes.
// Node number n will accept connections from nodes 0, ..., n-1 and
// make connections to nodes n+1, ..., num_nodes-1. This is all
// single threaded, but it doesn't deadlock because node 0 isn't
// waiting for any incoming connections, so it immediately makes
// outgoing connections. When it connects to node 1, that node
// accepts its (only) incoming connection, and then starts making
// its outgoing connections, etc.
NetIO::NetIO(boost::asio::io_context &io_context, const Config &config)
    : context(io_context), conf(config),
    myconf(config.nodes[config.my_node_num])
{
    num_nodes = nodenum_t(conf.nodes.size());
    nodeios.resize(num_nodes);
    me = conf.my_node_num;
    tcp::resolver resolver(io_context);
    // Listen on our configured host/port for the lower-numbered nodes
    tcp::acceptor acceptor(io_context,
        resolver.resolve(myconf.listenhost, myconf.listenport)->endpoint());
    // Phase 1: accept one connection from each node numbered below us
    for(size_t i=0; i<me; ++i) {
#ifdef VERBOSE_NET
        std::cerr << "Accepting number " << i << "\n";
#endif
        tcp::socket nodesock = acceptor.accept();
#ifdef VERBOSE_NET
        std::cerr << "Accepted number " << i << "\n";
#endif
        // Read 2 bytes from the socket, which will be the
        // connecting node's node number
        unsigned short node_num;
        boost::asio::read(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        if (node_num >= num_nodes) {
            // Bad peer identification; the socket is simply dropped
            std::cerr << "Received bad node number\n";
        } else {
            nodeios[node_num].emplace(std::move(nodesock), node_num);
#ifdef VERBOSE_NET
            std::cerr << "Received connection from " <<
                config.nodes[node_num].name << "\n";
#endif
        }
    }
    // Phase 2: connect to each node numbered above us, retrying once a
    // second until that peer is up and listening
    for(size_t i=me+1; i<num_nodes; ++i) {
        boost::system::error_code err;
        tcp::socket nodesock(io_context);
        while(1) {
#ifdef VERBOSE_NET
            std::cerr << "Connecting to " << config.nodes[i].name << "...\n";
#endif
            boost::asio::connect(nodesock,
                resolver.resolve(config.nodes[i].listenhost,
                    config.nodes[i].listenport), err);
            if (!err) break;
            std::cerr << "Connection to " << config.nodes[i].name <<
                " refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket to tell the peer node our node
        // number
        // NOTE(review): this assumes sizeof(nodenum_t) == 2, matching
        // the `unsigned short` read on the accepting side -- confirm
        // against the nodenum_t typedef
        nodenum_t node_num = (nodenum_t)me;
        boost::asio::write(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        nodeios[i].emplace(std::move(nodesock), i);
#ifdef VERBOSE_NET
        std::cerr << "Connected to " << config.nodes[i].name << "\n";
#endif
    }
}
  224. void NetIO::recv_commands(
  225. std::function<void(boost::system::error_code)> error_cb,
  226. std::function<void(uint32_t)> epoch_cb)
  227. {
  228. for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
  229. if (node_num == me) continue;
  230. NodeIO &n = node(node_num);
  231. n.recv_commands(error_cb, epoch_cb);
  232. }
  233. }
  234. void NetIO::close()
  235. {
  236. for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
  237. if (node_num == me) continue;
  238. NodeIO &n = node(node_num);
  239. n.close();
  240. }
  241. }
  242. /* The enclave calls this to inform the untrusted app that there's a new
  243. * messaage to send. The return value is the frame the enclave should
  244. * use to store the first (encrypted) chunk of this message. */
  245. uint8_t *ocall_message(nodenum_t node_num, uint32_t message_len)
  246. {
  247. assert(g_netio != NULL);
  248. NodeIO &node = g_netio->node(node_num);
  249. node.send_message_header(message_len);
  250. return node.request_frame();
  251. }
  252. /* The enclave calls this to inform the untrusted app that there's a new
  253. * chunk to send. The return value is the frame the enclave should use
  254. * to store the next (encrypted) chunk of this message, or NULL if this
  255. * was the last chunk. */
  256. uint8_t *ocall_chunk(nodenum_t node_num, uint8_t *chunkdata,
  257. uint32_t chunklen)
  258. {
  259. assert(g_netio != NULL);
  260. NodeIO &node = g_netio->node(node_num);
  261. bool morechunks = node.send_chunk(chunkdata, chunklen);
  262. if (morechunks) {
  263. return node.request_frame();
  264. }
  265. return NULL;
  266. }