net.cpp

#include <iostream>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <vector>
#include "Enclave_u.h"
#include "Untrusted.hpp"
#include "net.hpp"

// The command type byte values
#define COMMAND_EPOCH   0x00
#define COMMAND_MESSAGE 0x01
#define COMMAND_CHUNK   0x02
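
// Commands on the node-to-node links are framed as a 5-byte header: a
// 1-byte command type (one of the values above) followed by a 32-bit
// value whose meaning depends on the command (the epoch number for
// COMMAND_EPOCH, the total message length for COMMAND_MESSAGE, or the
// chunk length for COMMAND_CHUNK).  A COMMAND_CHUNK header is followed
// by that many bytes of chunk data.  The header is built in the low
// bytes of a uint64_t (value << 8 | command) and the first 5 bytes of
// that word are sent, so the on-wire layout assumes a little-endian
// host.
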
#define VERBOSE_NET

NetIO *g_netio = NULL;

NodeIO::NodeIO(tcp::socket &&socket, nodenum_t nodenum) :
    sock(std::move(socket)), node_num(nodenum)
{
}
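
// Obtain a frame buffer of FRAME_SIZE bytes to hold outgoing data,
// reusing a previously returned frame if one is available.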
uint8_t *NodeIO::request_frame()
{
    // Take the lock so that the emptiness check and the pop below are
    // atomic with respect to other threads using the frame pool.
    frame_deque_lock.lock();
    if (frames_available.empty()) {
        frame_deque_lock.unlock();
        // Allocate a new frame. Note that this memory will (at this
        // time) never get deallocated. In theory, we could deallocate
        // it in return_frame, but if a certain number of frames were
        // allocated here, it means we had that much data in flight
        // (queued but not accepted for sending by the OS), and we're
        // likely to need that much again. Subsequent messages will
        // _reuse_ the allocated data, though, so the used memory won't
        // grow forever, and will be limited to the amount of in-flight
        // data needed.
        return new uint8_t[FRAME_SIZE];
    }
    // Copy the pointer to the frame out of the deque and remove it from
    // the deque. Note this is _not_ taking the address of the element
    // *in* the deque (and then popping it, which would invalidate that
    // pointer).
    uint8_t *frame = frames_available.back();
    frames_available.pop_back();
    frame_deque_lock.unlock();
    return frame;
}
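
// Return a frame buffer to the pool once the data it holds has been
// sent.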
void NodeIO::return_frame(uint8_t *frame)
{
    if (!frame) return;
    // We push the frame back on to the end of the deque so that it will
    // be the next one used. This may lead to better cache behaviour?
    frame_deque_lock.lock();
    frames_available.push_back(frame);
    frame_deque_lock.unlock();
}
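
// Queue a 5-byte command header, optionally followed by len bytes of
// data, for asynchronous transmission to this node. If this is the
// only queued command, kick off the asynchronous write chain.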
void NodeIO::send_header_data(uint64_t header, uint8_t *data, size_t len)
{
    commands_deque_lock.lock();
    commands_inflight.push_back({header, data, len});
    if (commands_inflight.size() == 1) {
        async_send_commands();
    }
    commands_deque_lock.unlock();
}
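
// Asynchronously write the command at the front of the queue. The
// completion handler pops the command, returns its frame (if any) to
// the pool, and starts the next write if more commands are queued.
// Must be called with commands_deque_lock held.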
void NodeIO::async_send_commands()
{
    std::vector<boost::asio::const_buffer> tosend;
    CommandTuple *commandp = &(commands_inflight.front());
    tosend.push_back(boost::asio::buffer(&(std::get<0>(*commandp)), 5));
    if (std::get<1>(*commandp) != NULL && std::get<2>(*commandp) > 0) {
        tosend.push_back(boost::asio::buffer(std::get<1>(*commandp),
            std::get<2>(*commandp)));
    }
    boost::asio::async_write(sock, tosend,
        [this, commandp](boost::system::error_code, std::size_t){
            // When the write completes, pop the command from the deque
            // (it should still be at the front)
            commands_deque_lock.lock();
            assert(!commands_inflight.empty() &&
                &(commands_inflight.front()) == commandp);
            uint8_t *data = std::get<1>(*commandp);
            commands_inflight.pop_front();
            if (commands_inflight.size() > 0) {
                async_send_commands();
            }
            // And return the frame
            return_frame(data);
            commands_deque_lock.unlock();
        });
}
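
// Send an epoch-change command to this node.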
void NodeIO::send_epoch(uint32_t epoch_num)
{
    uint64_t header = (uint64_t(epoch_num) << 8) + COMMAND_EPOCH;
    send_header_data(header, NULL, 0);
}
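
// Send a message header announcing a message of tot_message_len bytes,
// which will follow in one or more chunks.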
void NodeIO::send_message_header(uint32_t tot_message_len)
{
    uint64_t header = (uint64_t(tot_message_len) << 8) + COMMAND_MESSAGE;
    send_header_data(header, NULL, 0);
    // If we're sending a new message header, we have to have finished
    // sending the previous message.
    assert(chunksize_inflight == msgsize_inflight);
    msgsize_inflight = tot_message_len;
    chunksize_inflight = 0;
}
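
// Send one chunk (of at most FRAME_SIZE bytes) of the current message.
// Returns true if more chunks of this message remain to be sent.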
bool NodeIO::send_chunk(uint8_t *data, uint32_t chunk_len)
{
    assert(chunk_len <= FRAME_SIZE);
    uint64_t header = (uint64_t(chunk_len) << 8) + COMMAND_CHUNK;
    send_header_data(header, data, chunk_len);
    chunksize_inflight += chunk_len;
    assert(chunksize_inflight <= msgsize_inflight);
    return (chunksize_inflight < msgsize_inflight);
}
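
// Asynchronously receive commands from this node. Each 5-byte header
// is dispatched on its command byte: epochs are reported via epoch_cb,
// message headers are passed to the enclave with ecall_message, and
// chunk payloads are read into receive_frame and passed to the enclave
// with ecall_chunk. Errors (including unknown commands) are reported
// via error_cb.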
void NodeIO::recv_commands(
    std::function<void(boost::system::error_code)> error_cb,
    std::function<void(uint32_t)> epoch_cb)
{
    // Asynchronously read the header
    receive_header = 0;
    boost::asio::async_read(sock, boost::asio::buffer(&receive_header, 5),
        [this, error_cb, epoch_cb]
                (boost::system::error_code ec, std::size_t) {
            if (ec) {
                error_cb(ec);
                return;
            }
            if ((receive_header & 0xff) == COMMAND_EPOCH) {
                epoch_cb(uint32_t(receive_header >> 8));
                recv_commands(error_cb, epoch_cb);
            } else if ((receive_header & 0xff) == COMMAND_MESSAGE) {
                assert(recv_msgsize_inflight == recv_chunksize_inflight);
                recv_msgsize_inflight = uint32_t(receive_header >> 8);
                recv_chunksize_inflight = 0;
                if (ecall_message(node_num, recv_msgsize_inflight)) {
                    recv_commands(error_cb, epoch_cb);
                } else {
                    printf("ecall_message failed\n");
                }
            } else if ((receive_header & 0xff) == COMMAND_CHUNK) {
                uint32_t this_chunk_size = uint32_t(receive_header >> 8);
                assert(recv_chunksize_inflight + this_chunk_size <=
                    recv_msgsize_inflight);
                recv_chunksize_inflight += this_chunk_size;
                boost::asio::async_read(sock,
                    boost::asio::buffer(receive_frame, this_chunk_size),
                    [this, error_cb, epoch_cb, this_chunk_size]
                            (boost::system::error_code ecc, std::size_t) {
                        if (ecc) {
                            error_cb(ecc);
                            return;
                        }
                        if (ecall_chunk(node_num, receive_frame,
                                this_chunk_size)) {
                            recv_commands(error_cb, epoch_cb);
                        } else {
                            printf("ecall_chunk failed\n");
                        }
                    });
            } else {
                error_cb(boost::system::errc::make_error_code(
                    boost::system::errc::errc_t::invalid_argument));
            }
        });
}

/*
Handler for connections accepted from clients: reads the command header
and then either the client's authentication token or a message bundle.
*/
void NetIO::handle_async_clients(std::shared_ptr<tcp::socket> csocket,
    const boost::system::error_code& error, size_t auth_size,
    size_t msgbundle_size)
{
    if (!error) {
#ifdef VERBOSE_NET
        printf("Accept handler success\n");
#endif
        // Read the header (1 uint64_t) from the socket and extract the
        // client ID
        uint64_t header;
        clientid_t cid;
        boost::asio::read(*csocket,
            boost::asio::buffer(&header, sizeof(uint64_t)));
        if ((header & 0xff) == CLIENT_AUTHENTICATE) {
            // Read the authentication token into its own buffer; it may
            // be larger than the header variable
            std::vector<uint8_t> authtoken(auth_size);
            boost::asio::read(*csocket,
                boost::asio::buffer(authtoken.data(), auth_size));
        } else if ((header & 0xff) == CLIENT_MESSAGE_BUNDLE) {
            unsigned char *msgbundle = (unsigned char*) malloc(msgbundle_size);
            cid = (clientid_t)(header >> 8);
            // Read the message_bundle
            boost::asio::read(*csocket,
                boost::asio::buffer(msgbundle, msgbundle_size));
            // Ingest the message_bundle
            if (!ecall_ingest_msgbundle(cid, msgbundle,
                    apiparams.m_priv_out)) {
                printf("ecall_ingest_msgbundle failed\n");
            }
            free(msgbundle);
        }
        start_accept(auth_size, msgbundle_size);
    } else {
        printf("Accept handler failed\n");
    }
}

/*
Asynchronously accept client connections.
*/
void NetIO::start_accept(size_t auth_size, size_t msgbundle_size)
{
    std::shared_ptr<tcp::socket> csocket(new tcp::socket(io_context_));
#ifdef VERBOSE_NET
    std::cout << "Accepting on " << myconf.clistenhost << ":" <<
        myconf.clistenport << "\n";
#endif
    client_acceptor->async_accept(*csocket,
        boost::bind(&NetIO::handle_async_clients, this, csocket,
            boost::asio::placeholders::error, auth_size, msgbundle_size));
}
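
// Set up the network: establish the pairwise connections to all of the
// other nodes and, if this node has the ingestion role, start accepting
// client connections.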
NetIO::NetIO(boost::asio::io_context &io_context, const Config &config)
    : io_context_(io_context), conf(config),
      myconf(config.nodes[config.my_node_num])
{
    num_nodes = nodenum_t(conf.nodes.size());
    nodeios.resize(num_nodes);
    me = conf.my_node_num;

    // Node number n will accept connections from nodes 0, ..., n-1 and
    // make connections to nodes n+1, ..., num_nodes-1. This is all
    // single threaded, but it doesn't deadlock because node 0 isn't
    // waiting for any incoming connections, so it immediately makes
    // outgoing connections. When it connects to node 1, that node
    // accepts its (only) incoming connection, and then starts making
    // its outgoing connections, etc.
    tcp::resolver resolver(io_context);
    tcp::acceptor acceptor(io_context,
        resolver.resolve(myconf.listenhost, myconf.listenport)->endpoint());
    for (size_t i = 0; i < me; ++i) {
#ifdef VERBOSE_NET
        std::cerr << "Accepting number " << i << "\n";
#endif
        tcp::socket nodesock = acceptor.accept();
#ifdef VERBOSE_NET
        std::cerr << "Accepted number " << i << "\n";
#endif
        // Read 2 bytes from the socket, which will be the
        // connecting node's node number
        unsigned short node_num;
        boost::asio::read(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        if (node_num >= num_nodes) {
            std::cerr << "Received bad node number\n";
        } else {
            nodeios[node_num].emplace(std::move(nodesock), node_num);
#ifdef VERBOSE_NET
            std::cerr << "Received connection from " <<
                config.nodes[node_num].name << "\n";
#endif
        }
    }
    for (size_t i = me + 1; i < num_nodes; ++i) {
        boost::system::error_code err;
        tcp::socket nodesock(io_context);
        while (1) {
#ifdef VERBOSE_NET
            std::cerr << "Connecting to " << config.nodes[i].name << "...\n";
#endif
            boost::asio::connect(nodesock,
                resolver.resolve(config.nodes[i].listenhost,
                    config.nodes[i].listenport), err);
            if (!err) break;
            std::cerr << "Connection to " << config.nodes[i].name <<
                " refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket to tell the peer node our node
        // number
        nodenum_t node_num = (nodenum_t)me;
        boost::asio::write(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        nodeios[i].emplace(std::move(nodesock), i);
#ifdef VERBOSE_NET
        std::cerr << "Connected to " << config.nodes[i].name << "\n";
#endif
    }
    if (myconf.roles & ROLE_INGESTION) {
        client_acceptor = std::shared_ptr<tcp::acceptor>(
            new tcp::acceptor(io_context,
                resolver.resolve(this->myconf.clistenhost,
                    this->myconf.clistenport)->endpoint()));
        size_t auth_size, msgbundle_size;
        auth_size = SGX_AESGCM_MAC_SIZE;
        msgbundle_size = SGX_AESGCM_IV_SIZE +
            (apiparams.m_priv_out * apiparams.msg_size) + SGX_AESGCM_MAC_SIZE;
        start_accept(auth_size, msgbundle_size);
    }
}
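
// Start the asynchronous receive loop on the connection to each other
// node.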
void NetIO::recv_commands(
    std::function<void(boost::system::error_code)> error_cb,
    std::function<void(uint32_t)> epoch_cb)
{
    for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
        if (node_num == me) continue;
        NodeIO &n = node(node_num);
        n.recv_commands(error_cb, epoch_cb);
    }
}
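
// Close the connections to all of the other nodes.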
void NetIO::close()
{
    for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
        if (node_num == me) continue;
        NodeIO &n = node(node_num);
        n.close();
    }
}

/* The enclave calls this to inform the untrusted app that there's a new
 * message to send. The return value is the frame the enclave should
 * use to store the first (encrypted) chunk of this message. */
uint8_t *ocall_message(nodenum_t node_num, uint32_t message_len)
{
    assert(g_netio != NULL);
    NodeIO &node = g_netio->node(node_num);
    node.send_message_header(message_len);
    return node.request_frame();
}

/* The enclave calls this to inform the untrusted app that there's a new
 * chunk to send. The return value is the frame the enclave should use
 * to store the next (encrypted) chunk of this message, or NULL if this
 * was the last chunk. */
uint8_t *ocall_chunk(nodenum_t node_num, uint8_t *chunkdata,
    uint32_t chunklen)
{
    assert(g_netio != NULL);
    NodeIO &node = g_netio->node(node_num);
    bool morechunks = node.send_chunk(chunkdata, chunklen);
    if (morechunks) {
        return node.request_frame();
    }
    return NULL;
}