// net.cpp
#include <iostream>
#include <mutex>
#include <vector>

#include "Enclave_u.h"
#include "Untrusted.hpp"
#include "net.hpp"
// The command type byte values: each wire command starts with one of
// these in its low-order byte, with the command's argument (epoch
// number, message length, or chunk length) packed into the bytes above.
#define COMMAND_EPOCH 0x00
#define COMMAND_MESSAGE 0x01
#define COMMAND_CHUNK 0x02

// Global singleton set elsewhere; the ocall_* entry points below use it
// to find the NodeIO for a given node number.
NetIO *g_netio = NULL;
// Construct a NodeIO that takes ownership of an already-connected
// socket to the peer node identified by nodenum.
NodeIO::NodeIO(tcp::socket &&socket, nodenum_t nodenum) :
    sock(std::move(socket)), node_num(nodenum)
{
}
  14. uint8_t *NodeIO::request_frame()
  15. {
  16. if (frames_available.empty()) {
  17. // Allocate a new frame. Note that this memory will (at this
  18. // time) never get deallocated. In theory, we could deallocate
  19. // it in return_frame, but if a certain number of frames were
  20. // allocated here, it means we had that much data in flight
  21. // (queued but not accepted for sending by the OS), and we're
  22. // likely to need that much again. Subsequent messages will
  23. // _reuse_ the allocated data, though, so the used memory won't
  24. // grow forever, and will be limited to the amount of in-flight
  25. // data needed.
  26. return new uint8_t[FRAME_SIZE];
  27. }
  28. // Copy the pointer to the frame out of the deque and remove it from
  29. // the deque. Note this is _not_ taking the address of the element
  30. // *in* the deque (and then popping it, which would invalidate that
  31. // pointer).
  32. frame_deque_lock.lock();
  33. uint8_t *frame = frames_available.back();
  34. frames_available.pop_back();
  35. frame_deque_lock.unlock();
  36. return frame;
  37. }
  38. void NodeIO::return_frame(uint8_t *frame)
  39. {
  40. if (!frame) return;
  41. // We push the frame back on to the end of the deque so that it will
  42. // be the next one used. This may lead to better cache behaviour?
  43. frame_deque_lock.lock();
  44. frames_available.push_back(frame);
  45. frame_deque_lock.unlock();
  46. }
  47. void NodeIO::send_header_data(uint64_t header, uint8_t *data, size_t len)
  48. {
  49. commands_deque_lock.lock();
  50. commands_inflight.push_back({header, data, len});
  51. if (commands_inflight.size() == 1) {
  52. async_send_commands();
  53. }
  54. commands_deque_lock.unlock();
  55. }
// Start an async write of the command at the front of commands_inflight.
// Must be called with commands_deque_lock held (both call sites hold it).
// NOTE(review): only 5 of the header's 8 bytes are sent — 1 command byte
// plus a 4-byte argument — which assumes a little-endian host so that
// &header points at the low-order byte; confirm all nodes share
// endianness.  commandp is a pointer to an element *inside* the deque:
// std::deque never reallocates existing elements on push_back/pop_front,
// so the pointer stays valid until this element itself is popped.
void NodeIO::async_send_commands()
{
    std::vector<boost::asio::const_buffer> tosend;
    CommandTuple *commandp = &(commands_inflight.front());
    // First buffer: the 5 header bytes; second (optional): the payload.
    tosend.push_back(boost::asio::buffer(&(std::get<0>(*commandp)), 5));
    if (std::get<1>(*commandp) != NULL && std::get<2>(*commandp) > 0) {
        tosend.push_back(boost::asio::buffer(std::get<1>(*commandp),
            std::get<2>(*commandp)));
    }
    boost::asio::async_write(sock, tosend,
        [this, commandp](boost::system::error_code, std::size_t){
            // When the write completes, pop the command from the deque
            // (which should now be in the front)
            commands_deque_lock.lock();
            assert(!commands_inflight.empty() &&
                &(commands_inflight.front()) == commandp);
            // Grab the payload pointer before popping invalidates commandp.
            uint8_t *data = std::get<1>(*commandp);
            commands_inflight.pop_front();
            // Chain the next queued command, if any, while still
            // holding the lock.
            if (commands_inflight.size() > 0) {
                async_send_commands();
            }
            // And return the frame
            return_frame(data);
            commands_deque_lock.unlock();
        });
}
  82. void NodeIO::send_epoch(uint32_t epoch_num)
  83. {
  84. uint64_t header = (uint64_t(epoch_num) << 8) + COMMAND_EPOCH;
  85. send_header_data(header, NULL, 0);
  86. }
  87. void NodeIO::send_message_header(uint32_t tot_message_len)
  88. {
  89. uint64_t header = (uint64_t(tot_message_len) << 8) + COMMAND_MESSAGE;
  90. send_header_data(header, NULL, 0);
  91. // If we're sending a new message header, we have to have finished
  92. // sending the previous message.
  93. assert(chunksize_inflight == msgsize_inflight);
  94. msgsize_inflight = tot_message_len;
  95. chunksize_inflight = 0;
  96. }
  97. bool NodeIO::send_chunk(uint8_t *data, uint32_t chunk_len)
  98. {
  99. assert(chunk_len <= FRAME_SIZE);
  100. uint64_t header = (uint64_t(chunk_len) << 8) + COMMAND_CHUNK;
  101. send_header_data(header, data, chunk_len);
  102. chunksize_inflight += chunk_len;
  103. assert(chunksize_inflight <= msgsize_inflight);
  104. return (chunksize_inflight < msgsize_inflight);
  105. }
// Receive loop for one peer node: asynchronously read a 5-byte command
// header, dispatch on the command byte, then re-arm by calling
// ourselves again.  error_cb is invoked on any socket error; epoch_cb
// on each COMMAND_EPOCH.  MESSAGE and CHUNK commands are forwarded into
// the enclave via ecall_message/ecall_chunk, and the loop deliberately
// stops (no re-arm) if an ecall reports failure.
void NodeIO::recv_commands(
    std::function<void(boost::system::error_code)> error_cb,
    std::function<void(uint32_t)> epoch_cb)
{
    // Asynchronously read the header
    // Zero it first: only 5 of receive_header's 8 bytes are filled by
    // the read (assumes little-endian layout, matching the sender).
    receive_header = 0;
    boost::asio::async_read(sock, boost::asio::buffer(&receive_header, 5),
        [this, error_cb, epoch_cb]
        (boost::system::error_code ec, std::size_t) {
            if (ec) {
                error_cb(ec);
                return;
            }
            // Low byte is the command type; the upper 4 bytes carry its
            // 32-bit argument.
            if ((receive_header & 0xff) == COMMAND_EPOCH) {
                epoch_cb(uint32_t(receive_header >> 8));
                recv_commands(error_cb, epoch_cb);
            } else if ((receive_header & 0xff) == COMMAND_MESSAGE) {
                // A new message may only start once the previous one
                // was fully received.
                assert(recv_msgsize_inflight == recv_chunksize_inflight);
                recv_msgsize_inflight = uint32_t(receive_header >> 8);
                recv_chunksize_inflight = 0;
                if (ecall_message(node_num, recv_msgsize_inflight)) {
                    recv_commands(error_cb, epoch_cb);
                } else {
                    printf("ecall_message failed\n");
                }
            } else if ((receive_header & 0xff) == COMMAND_CHUNK) {
                uint32_t this_chunk_size = uint32_t(receive_header >> 8);
                // The sender asserts the same bound on its side.
                assert(recv_chunksize_inflight + this_chunk_size <=
                    recv_msgsize_inflight);
                recv_chunksize_inflight += this_chunk_size;
                // Read the chunk body into our receive frame, then hand
                // it to the enclave before re-arming the header read.
                boost::asio::async_read(sock, boost::asio::buffer(
                    receive_frame, this_chunk_size),
                    [this, error_cb, epoch_cb, this_chunk_size]
                    (boost::system::error_code ecc, std::size_t) {
                        if (ecc) {
                            error_cb(ecc);
                            return;
                        }
                        if (ecall_chunk(node_num, receive_frame,
                                this_chunk_size)) {
                            recv_commands(error_cb, epoch_cb);
                        } else {
                            printf("ecall_chunk failed\n");
                        }
                    });
            } else {
                // Unknown command byte: surface it as invalid_argument.
                error_cb(boost::system::errc::make_error_code(
                    boost::system::errc::errc_t::invalid_argument));
            }
        });
}
  157. #ifdef VERBOSE_NET
  158. void displayMessage(unsigned char *msg, uint16_t msg_size) {
  159. clientid_t sid, rid;
  160. unsigned char *ptr = msg;
  161. sid = *((clientid_t*) ptr);
  162. ptr+=sizeof(sid);
  163. rid = *((clientid_t*) ptr);
  164. uint16_t srpair_size = sizeof(sid)*2;
  165. printf("Sender ID: %d, Receiver ID: %d, Token: N/A\n", sid, rid );
  166. printf("Message: ");
  167. for(int j = 0; j<msg_size - srpair_size; j++) {
  168. printf("%x", (*ptr));
  169. ptr++;
  170. }
  171. printf("\n");
  172. }
  173. void displayMessageBundle(unsigned char *bundle, uint16_t priv_out, uint16_t msg_size) {
  174. unsigned char *ptr = bundle;
  175. /*
  176. // Header is already parsed on this end
  177. uint64_t header = *((uint64_t*) ptr);
  178. ptr+=sizeof(uint64_t);
  179. */
  180. for(int i=0; i<priv_out; i++) {
  181. displayMessage(ptr, msg_size);
  182. printf("\n");
  183. ptr+=msg_size;
  184. }
  185. }
  186. #endif
  187. /*
  188. Handler for received client messages.
  189. */
  190. void NetIO::handle_async_clients(std::shared_ptr<tcp::socket> csocket,
  191. const boost::system::error_code& error, size_t auth_size,
  192. size_t msgbundle_size)
  193. {
  194. if(!error) {
  195. #ifdef VERBOSE_NET
  196. printf("Accept handler success\n");
  197. #endif
  198. // Read header (1 uint64_t) from the socket and extract the client ID
  199. size_t header;
  200. clientid_t cid;
  201. boost::asio::read(*csocket,
  202. boost::asio::buffer(&header, sizeof(uint64_t)));
  203. if((header & 0xff) == CLIENT_AUTHENTICATE) {
  204. // Read the authentication token
  205. boost::asio::read(*csocket,
  206. boost::asio::buffer(&header, auth_size));
  207. } else if ((header & 0xff) == CLIENT_MESSAGE_BUNDLE) {
  208. unsigned char *msgbundle = (unsigned char*) malloc(msgbundle_size);
  209. cid = (clientid_t)(header >> 8);
  210. // Read the message_bundle
  211. boost::asio::read(*csocket,
  212. boost::asio::buffer(msgbundle, msgbundle_size));
  213. #ifdef VERBOSE_NET
  214. displayMessageBundle(msgbundle, apiparams.m_priv_out, apiparams.msg_size);
  215. #endif
  216. //Ingest the message_bundle
  217. bool ret = ecall_ingest_msgbundle(cid, msgbundle, apiparams.m_priv_out);
  218. free(msgbundle);
  219. }
  220. /*
  221. This should read a MESSAGES_DROP_OFF packet of fixed length
  222. from a client.
  223. Send this packet over to ingestion processing:
  224. - Decrypt the packet with the correct key for that client id
  225. - Verify private channel token
  226. - Buffer the message for route
  227. */
  228. start_accept(auth_size, msgbundle_size);
  229. } else {
  230. printf("Accept handler failed\n");
  231. }
  232. }
  233. /*
  234. Asynchronously accept client connections
  235. */
  236. void NetIO::start_accept(size_t auth_size, size_t msgbundle_size)
  237. {
  238. std::shared_ptr<tcp::socket> csocket(new tcp::socket(io_context_));
  239. #ifdef VERBOSE_NET
  240. std::cout << "Accepting on " << myconf.clistenhost << ":" << myconf.clistenport << "\n";
  241. #endif
  242. client_acceptor->async_accept(*csocket,
  243. boost::bind(&NetIO::handle_async_clients, this, csocket,
  244. boost::asio::placeholders::error, auth_size, msgbundle_size));
  245. }
// Construct the inter-node network layer: listen for connections from
// lower-numbered nodes, connect out to higher-numbered ones (retrying
// until they are up), and — if this node has the ingestion role — start
// accepting client connections as well.
NetIO::NetIO(boost::asio::io_context &io_context, const Config &config)
    : io_context_(io_context), conf(config),
    myconf(config.nodes[config.my_node_num])
{
    num_nodes = nodenum_t(conf.nodes.size());
    nodeios.resize(num_nodes);
    me = conf.my_node_num;
    // Node number n will accept connections from nodes 0, ..., n-1 and
    // make connections to nodes n+1, ..., num_nodes-1. This is all
    // single threaded, but it doesn't deadlock because node 0 isn't
    // waiting for any incoming connections, so it immediately makes
    // outgoing connections. When it connects to node 1, that node
    // accepts its (only) incoming connection, and then starts making
    // its outgoing connections, etc.
    tcp::resolver resolver(io_context);
    tcp::acceptor acceptor(io_context,
        resolver.resolve(myconf.listenhost, myconf.listenport)->endpoint());
    // Phase 1: accept one connection from each lower-numbered node.
    for(size_t i=0; i<me; ++i) {
#ifdef VERBOSE_NET
        std::cerr << "Accepting number " << i << "\n";
#endif
        tcp::socket nodesock = acceptor.accept();
#ifdef VERBOSE_NET
        std::cerr << "Accepted number " << i << "\n";
#endif
        // Read 2 bytes from the socket, which will be the
        // connecting node's node number
        unsigned short node_num;
        boost::asio::read(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        if (node_num >= num_nodes) {
            // NOTE(review): the bad connection is dropped but the loop
            // slot is consumed — a misbehaving peer leaves one expected
            // node unconnected.
            std::cerr << "Received bad node number\n";
        } else {
            nodeios[node_num].emplace(std::move(nodesock), node_num);
#ifdef VERBOSE_NET
            std::cerr << "Received connection from " <<
                config.nodes[node_num].name << "\n";
#endif
        }
    }
    // Phase 2: connect to each higher-numbered node, retrying once per
    // second until the peer's listener is up.
    for(size_t i=me+1; i<num_nodes; ++i) {
        boost::system::error_code err;
        tcp::socket nodesock(io_context);
        while(1) {
#ifdef VERBOSE_NET
            std::cerr << "Connecting to " << config.nodes[i].name << "...\n";
#endif
            boost::asio::connect(nodesock,
                resolver.resolve(config.nodes[i].listenhost,
                    config.nodes[i].listenport), err);
            if (!err) break;
            std::cerr << "Connection to " << config.nodes[i].name <<
                " refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket to tell the peer node our node
        // number
        nodenum_t node_num = (nodenum_t)me;
        boost::asio::write(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        nodeios[i].emplace(std::move(nodesock), i);
#ifdef VERBOSE_NET
        std::cerr << "Connected to " << config.nodes[i].name << "\n";
#endif
    }
    // Phase 3: ingestion nodes also listen for client traffic.
    if(myconf.roles & ROLE_INGESTION) {
        client_acceptor = std::shared_ptr<tcp::acceptor>(
            new tcp::acceptor(io_context,
                resolver.resolve(this->myconf.clistenhost,
                    this->myconf.clistenport)->endpoint()));
        size_t auth_size, msgbundle_size;
        // Fixed sizes for a client auth token and a full message bundle
        // (payload plus AES-GCM MAC).
        auth_size = SGX_AESGCM_MAC_SIZE;
        msgbundle_size = (apiparams.m_priv_out * apiparams.msg_size) + SGX_AESGCM_MAC_SIZE;
        start_accept(auth_size, msgbundle_size);
    }
}
  322. void NetIO::recv_commands(
  323. std::function<void(boost::system::error_code)> error_cb,
  324. std::function<void(uint32_t)> epoch_cb)
  325. {
  326. for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
  327. if (node_num == me) continue;
  328. NodeIO &n = node(node_num);
  329. n.recv_commands(error_cb, epoch_cb);
  330. }
  331. }
  332. void NetIO::close()
  333. {
  334. for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
  335. if (node_num == me) continue;
  336. NodeIO &n = node(node_num);
  337. n.close();
  338. }
  339. }
  340. /* The enclave calls this to inform the untrusted app that there's a new
  341. * messaage to send. The return value is the frame the enclave should
  342. * use to store the first (encrypted) chunk of this message. */
  343. uint8_t *ocall_message(nodenum_t node_num, uint32_t message_len)
  344. {
  345. assert(g_netio != NULL);
  346. NodeIO &node = g_netio->node(node_num);
  347. node.send_message_header(message_len);
  348. return node.request_frame();
  349. }
  350. /* The enclave calls this to inform the untrusted app that there's a new
  351. * chunk to send. The return value is the frame the enclave should use
  352. * to store the next (encrypted) chunk of this message, or NULL if this
  353. * was the last chunk. */
  354. uint8_t *ocall_chunk(nodenum_t node_num, uint8_t *chunkdata,
  355. uint32_t chunklen)
  356. {
  357. assert(g_netio != NULL);
  358. NodeIO &node = g_netio->node(node_num);
  359. bool morechunks = node.send_chunk(chunkdata, chunklen);
  360. if (morechunks) {
  361. return node.request_frame();
  362. }
  363. return NULL;
  364. }