// net.cpp — untrusted network I/O: inter-node command framing and
// client ingestion, bridging boost::asio sockets to the enclave.
  1. #include <iostream>
  2. #include "Enclave_u.h"
  3. #include "Untrusted.hpp"
  4. #include "net.hpp"
// The command type byte values (the low byte of each 5-byte wire header)
#define COMMAND_EPOCH 0x00
#define COMMAND_MESSAGE 0x01
#define COMMAND_CHUNK 0x02
// Log node connection setup progress to stderr
#define VERBOSE_NET
// #define DEBUG_NET_CLIENTS
// Global NetIO singleton used by the ocall_* entry points at the bottom
// of this file; presumably assigned during startup elsewhere — not
// visible in this chunk.
NetIO *g_netio = NULL;
// Take ownership of an already-established socket to the peer node
// with the given node number.
NodeIO::NodeIO(tcp::socket &&socket, nodenum_t nodenum) :
    sock(std::move(socket)), node_num(nodenum)
{
}
  16. uint8_t *NodeIO::request_frame()
  17. {
  18. if (frames_available.empty()) {
  19. // Allocate a new frame. Note that this memory will (at this
  20. // time) never get deallocated. In theory, we could deallocate
  21. // it in return_frame, but if a certain number of frames were
  22. // allocated here, it means we had that much data in flight
  23. // (queued but not accepted for sending by the OS), and we're
  24. // likely to need that much again. Subsequent messages will
  25. // _reuse_ the allocated data, though, so the used memory won't
  26. // grow forever, and will be limited to the amount of in-flight
  27. // data needed.
  28. return new uint8_t[FRAME_SIZE];
  29. }
  30. // Copy the pointer to the frame out of the deque and remove it from
  31. // the deque. Note this is _not_ taking the address of the element
  32. // *in* the deque (and then popping it, which would invalidate that
  33. // pointer).
  34. frame_deque_lock.lock();
  35. uint8_t *frame = frames_available.back();
  36. frames_available.pop_back();
  37. frame_deque_lock.unlock();
  38. return frame;
  39. }
  40. void NodeIO::return_frame(uint8_t *frame)
  41. {
  42. if (!frame) return;
  43. // We push the frame back on to the end of the deque so that it will
  44. // be the next one used. This may lead to better cache behaviour?
  45. frame_deque_lock.lock();
  46. frames_available.push_back(frame);
  47. frame_deque_lock.unlock();
  48. }
  49. void NodeIO::send_header_data(uint64_t header, uint8_t *data, size_t len)
  50. {
  51. commands_deque_lock.lock();
  52. commands_inflight.push_back({header, data, len});
  53. if (commands_inflight.size() == 1) {
  54. async_send_commands();
  55. }
  56. commands_deque_lock.unlock();
  57. }
// Start an async_write of the command at the front of
// commands_inflight.  The caller must hold commands_deque_lock (both
// send_header_data and the completion handler below do).  The chain
// continues until the deque drains.
void NodeIO::async_send_commands()
{
    std::vector<boost::asio::const_buffer> tosend;
    // Pointer to the front element; deque push_back/pop_front of OTHER
    // elements does not invalidate it, and this element stays at the
    // front until our completion handler pops it.
    CommandTuple *commandp = &(commands_inflight.front());
    // Wire header is the low 5 bytes of the uint64_t: 1 command byte +
    // 4 bytes of command-specific data.
    // NOTE(review): sending the low 5 bytes of a uint64_t this way
    // assumes a little-endian host — confirm.
    tosend.push_back(boost::asio::buffer(&(std::get<0>(*commandp)), 5));
    if (std::get<1>(*commandp) != NULL && std::get<2>(*commandp) > 0) {
        tosend.push_back(boost::asio::buffer(std::get<1>(*commandp),
            std::get<2>(*commandp)));
    }
    // NOTE(review): the write's error_code is ignored here — a failed
    // write still pops the command and continues the chain.
    boost::asio::async_write(sock, tosend,
        [this, commandp](boost::system::error_code, std::size_t){
        // When the write completes, pop the command from the deque
        // (which should now be in the front)
        commands_deque_lock.lock();
        assert(!commands_inflight.empty() &&
            &(commands_inflight.front()) == commandp);
        // Grab the payload pointer before the pop invalidates commandp
        uint8_t *data = std::get<1>(*commandp);
        commands_inflight.pop_front();
        if (commands_inflight.size() > 0) {
            async_send_commands();
        }
        // And return the frame
        return_frame(data);
        commands_deque_lock.unlock();
    });
}
  84. void NodeIO::send_epoch(uint32_t epoch_num)
  85. {
  86. uint64_t header = (uint64_t(epoch_num) << 8) + COMMAND_EPOCH;
  87. send_header_data(header, NULL, 0);
  88. }
  89. void NodeIO::send_message_header(uint32_t tot_message_len)
  90. {
  91. uint64_t header = (uint64_t(tot_message_len) << 8) + COMMAND_MESSAGE;
  92. send_header_data(header, NULL, 0);
  93. // If we're sending a new message header, we have to have finished
  94. // sending the previous message.
  95. assert(chunksize_inflight == msgsize_inflight);
  96. msgsize_inflight = tot_message_len;
  97. chunksize_inflight = 0;
  98. }
  99. bool NodeIO::send_chunk(uint8_t *data, uint32_t chunk_len)
  100. {
  101. assert(chunk_len <= FRAME_SIZE);
  102. uint64_t header = (uint64_t(chunk_len) << 8) + COMMAND_CHUNK;
  103. send_header_data(header, data, chunk_len);
  104. chunksize_inflight += chunk_len;
  105. assert(chunksize_inflight <= msgsize_inflight);
  106. return (chunksize_inflight < msgsize_inflight);
  107. }
// Asynchronously receive and dispatch commands from this node, looping
// (by re-invoking itself) until an error occurs or an ecall fails.
// error_cb is invoked on any read error or unknown command byte;
// epoch_cb is invoked with the epoch number for each EPOCH command.
void NodeIO::recv_commands(
    std::function<void(boost::system::error_code)> error_cb,
    std::function<void(uint32_t)> epoch_cb)
{
    // Asynchronously read the header
    receive_header = 0;
    // 5 wire bytes land in the low bytes of the uint64_t.
    // NOTE(review): this assumes a little-endian host — confirm.
    boost::asio::async_read(sock, boost::asio::buffer(&receive_header, 5),
        [this, error_cb, epoch_cb]
        (boost::system::error_code ec, std::size_t) {
        if (ec) {
            error_cb(ec);
            return;
        }
        // Low byte = command type; upper 4 bytes = command payload
        if ((receive_header & 0xff) == COMMAND_EPOCH) {
            epoch_cb(uint32_t(receive_header >> 8));
            recv_commands(error_cb, epoch_cb);
        } else if ((receive_header & 0xff) == COMMAND_MESSAGE) {
            // A new message may only start once the previous one has
            // been fully received
            assert(recv_msgsize_inflight == recv_chunksize_inflight);
            recv_msgsize_inflight = uint32_t(receive_header >> 8);
            recv_chunksize_inflight = 0;
            // Tell the enclave a message of this size is arriving
            if (ecall_message(node_num, recv_msgsize_inflight)) {
                recv_commands(error_cb, epoch_cb);
            } else {
                // NOTE(review): receiving from this node stops here on
                // ecall failure; only a message is printed
                printf("ecall_message failed\n");
            }
        } else if ((receive_header & 0xff) == COMMAND_CHUNK) {
            uint32_t this_chunk_size = uint32_t(receive_header >> 8);
            // The chunk must fit in the remainder of the announced
            // message
            assert(recv_chunksize_inflight + this_chunk_size <=
                recv_msgsize_inflight);
            recv_chunksize_inflight += this_chunk_size;
            // Read the chunk body into receive_frame, then hand it to
            // the enclave
            boost::asio::async_read(sock, boost::asio::buffer(
                receive_frame, this_chunk_size),
                [this, error_cb, epoch_cb, this_chunk_size]
                (boost::system::error_code ecc, std::size_t) {
                if (ecc) {
                    error_cb(ecc);
                    return;
                }
                if (ecall_chunk(node_num, receive_frame,
                        this_chunk_size)) {
                    recv_commands(error_cb, epoch_cb);
                } else {
                    printf("ecall_chunk failed\n");
                }
            });
        } else {
            // Unknown command byte: surface it as invalid_argument
            error_cb(boost::system::errc::make_error_code(
                boost::system::errc::errc_t::invalid_argument));
        }
    });
}
  159. /*
  160. Receive clients dropped off messages, i.e. a CLIENT_MESSAGE_BUNDLE
  161. */
  162. void NetIO::receive_msgbundle(tcp::socket* csocket, clientid_t c_simid)
  163. {
  164. unsigned char *msgbundle = (unsigned char*) malloc(msgbundle_size);
  165. boost::asio::async_read(*csocket, boost::asio::buffer(msgbundle, msgbundle_size),
  166. [this, csocket, msgbundle, c_simid]
  167. (boost::system::error_code ec, std::size_t) {
  168. if (ec) {
  169. if(ec == boost::asio::error::eof) {
  170. // Client connection terminated so we delete this socket
  171. delete(csocket);
  172. }
  173. else {
  174. printf("Error %s\n", ec.message().c_str());
  175. }
  176. return;
  177. }
  178. //Ingest the message_bundle
  179. bool ret = ecall_ingest_msgbundle(c_simid, msgbundle, apiparams.m_priv_out);
  180. free(msgbundle);
  181. // Continue to async receive client message bundles
  182. receive_msgbundle(csocket, c_simid);
  183. });
  184. }
  185. /*
  186. Handle new client connections.
  187. New clients always send a CLIENT_AUTHENTICATE message, and then followed
  188. by their CLIENT_MESSAGE_BUNDLE every epoch.
  189. */
  190. void NetIO::authenticate_new_client(tcp::socket* csocket,
  191. const boost::system::error_code& error)
  192. {
  193. if(error) {
  194. printf("Accept handler failed\n");
  195. return;
  196. }
  197. #ifdef DEBUG_NET_CLIENTS
  198. printf("Accept handler success\n");
  199. #endif
  200. unsigned char* auth_message = (unsigned char*) malloc(auth_size);
  201. boost::asio::async_read(*csocket, boost::asio::buffer(auth_message, auth_size),
  202. [this, csocket, auth_message]
  203. (boost::system::error_code ec, std::size_t) {
  204. if (ec) {
  205. if(ec == boost::asio::error::eof) {
  206. // Client connection terminated so we delete this socket
  207. delete(csocket);
  208. } else {
  209. printf("Error %s\n", ec.message().c_str());
  210. }
  211. return;
  212. }
  213. else {
  214. clientid_t c_simid = *((clientid_t *)(auth_message));
  215. // Read the authentication token
  216. unsigned char *auth_ptr = auth_message + sizeof(clientid_t);
  217. bool ret = ecall_authenticate(c_simid, auth_ptr);
  218. free(auth_message);
  219. // Receive client message bundles on this socket
  220. // for client sim_id c_simid
  221. if(ret) {
  222. receive_msgbundle(csocket, c_simid);
  223. } else{
  224. delete(csocket);
  225. }
  226. }
  227. });
  228. start_accept();
  229. }
  230. /*
  231. Asynchronously accept new client connections
  232. */
  233. void NetIO::start_accept()
  234. {
  235. tcp::socket *csocket = new tcp::socket(io_context());
  236. #ifdef DEBUG_NET_CLIENTS
  237. std::cout << "Accepting on " << myconf.clistenhost << ":" << myconf.clistenport << "\n";
  238. #endif
  239. client_acceptor->async_accept(*csocket,
  240. boost::bind(&NetIO::authenticate_new_client, this, csocket,
  241. boost::asio::placeholders::error));
  242. }
// Construct the network layer: establish a full mesh of node-to-node
// connections (blocking, in node-number order) and, if this node has
// the ingestion role, start accepting client connections.
NetIO::NetIO(boost::asio::io_context &io_context, const Config &config)
    : context(io_context), conf(config),
      myconf(config.nodes[config.my_node_num])
{
    num_nodes = nodenum_t(conf.nodes.size());
    nodeios.resize(num_nodes);
    me = conf.my_node_num;
    // Node number n will accept connections from nodes 0, ..., n-1 and
    // make connections to nodes n+1, ..., num_nodes-1. This is all
    // single threaded, but it doesn't deadlock because node 0 isn't
    // waiting for any incoming connections, so it immediately makes
    // outgoing connections. When it connects to node 1, that node
    // accepts its (only) incoming connection, and then starts making
    // its outgoing connections, etc.
    tcp::resolver resolver(io_context);
    tcp::acceptor acceptor(io_context,
        resolver.resolve(myconf.listenhost, myconf.listenport)->endpoint());
    // Accept one (blocking) connection from each lower-numbered node
    for (size_t i = 0; i < me; ++i) {
#ifdef VERBOSE_NET
        std::cerr << "Accepting number " << i << "\n";
#endif
        tcp::socket nodesock = acceptor.accept();
#ifdef VERBOSE_NET
        std::cerr << "Accepted number " << i << "\n";
#endif
        // Read 2 bytes from the socket, which will be the
        // connecting node's node number
        unsigned short node_num;
        boost::asio::read(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        if (node_num >= num_nodes) {
            // NOTE(review): the bad connection is dropped but the loop
            // slot is consumed — node i's connection is then never
            // accepted. Confirm this is acceptable for a trusted setup.
            std::cerr << "Received bad node number\n";
        } else {
            nodeios[node_num].emplace(std::move(nodesock), node_num);
#ifdef VERBOSE_NET
            std::cerr << "Received connection from " <<
                config.nodes[node_num].name << "\n";
#endif
        }
    }
    // Connect (with retry) to each higher-numbered node
    for (size_t i = me + 1; i < num_nodes; ++i) {
        boost::system::error_code err;
        tcp::socket nodesock(io_context);
        while (1) {
#ifdef VERBOSE_NET
            std::cerr << "Connecting to " << config.nodes[i].name << "...\n";
#endif
            boost::asio::connect(nodesock,
                resolver.resolve(config.nodes[i].listenhost,
                    config.nodes[i].listenport), err);
            if (!err) break;
            std::cerr << "Connection to " << config.nodes[i].name <<
                " refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket to tell the peer node our node
        // number
        nodenum_t node_num = (nodenum_t)me;
        boost::asio::write(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        nodeios[i].emplace(std::move(nodesock), i);
#ifdef VERBOSE_NET
        std::cerr << "Connected to " << config.nodes[i].name << "\n";
#endif
    }
    // Only ingestion nodes listen for client connections
    if (myconf.roles & ROLE_INGESTION) {
        client_acceptor = std::shared_ptr<tcp::acceptor>(
            new tcp::acceptor(io_context,
                resolver.resolve(this->myconf.clistenhost,
                    this->myconf.clistenport)->endpoint()));
        // Client auth message: sim id + token material
        auth_size = sizeof(clientid_t) + sizeof(unsigned long) + SGX_AESGCM_KEY_SIZE;
        // Bundle: IV + (messages-per-epoch * message size) + MAC
        msgbundle_size = SGX_AESGCM_IV_SIZE +
            (apiparams.m_priv_out * apiparams.msg_size) + SGX_AESGCM_MAC_SIZE;
        start_accept();
    }
}
  319. void NetIO::recv_commands(
  320. std::function<void(boost::system::error_code)> error_cb,
  321. std::function<void(uint32_t)> epoch_cb)
  322. {
  323. for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
  324. if (node_num == me) continue;
  325. NodeIO &n = node(node_num);
  326. n.recv_commands(error_cb, epoch_cb);
  327. }
  328. }
  329. void NetIO::close()
  330. {
  331. for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
  332. if (node_num == me) continue;
  333. NodeIO &n = node(node_num);
  334. n.close();
  335. }
  336. }
  337. /* The enclave calls this to inform the untrusted app that there's a new
  338. * messaage to send. The return value is the frame the enclave should
  339. * use to store the first (encrypted) chunk of this message. */
  340. uint8_t *ocall_message(nodenum_t node_num, uint32_t message_len)
  341. {
  342. assert(g_netio != NULL);
  343. NodeIO &node = g_netio->node(node_num);
  344. node.send_message_header(message_len);
  345. return node.request_frame();
  346. }
  347. /* The enclave calls this to inform the untrusted app that there's a new
  348. * chunk to send. The return value is the frame the enclave should use
  349. * to store the next (encrypted) chunk of this message, or NULL if this
  350. * was the last chunk. */
  351. uint8_t *ocall_chunk(nodenum_t node_num, uint8_t *chunkdata,
  352. uint32_t chunklen)
  353. {
  354. assert(g_netio != NULL);
  355. NodeIO &node = g_netio->node(node_num);
  356. bool morechunks = node.send_chunk(chunkdata, chunklen);
  357. if (morechunks) {
  358. return node.request_frame();
  359. }
  360. return NULL;
  361. }