// net.cpp — inter-node and client network I/O for the untrusted app
#include <iostream>
#include <mutex>
#include <utility>
#include "Enclave_u.h"
#include "Untrusted.hpp"
#include "net.hpp"
  5. // The command type byte values
  6. #define COMMAND_EPOCH 0x00
  7. #define COMMAND_MESSAGE 0x01
  8. #define COMMAND_CHUNK 0x02
  9. #define VERBOSE_NET
  10. // #define DEBUG_NET_CLIENTS
  11. #define CEILDIV(x,y) (((x)+(y)-1)/(y))
  12. NetIO *g_netio = NULL;
  13. size_t client_count = 0;
// Construct a NodeIO wrapping an already-connected socket to the peer
// node numbered `nodenum`. All send/receive in-flight size trackers and
// the sent-byte counter start at zero.
NodeIO::NodeIO(tcp::socket &&socket, nodenum_t nodenum) :
    sock(std::move(socket)), node_num(nodenum), msgsize_inflight(0),
    chunksize_inflight(0), recv_msgsize_inflight(0),
    recv_chunksize_inflight(0), bytes_sent(0)
{
}
  20. uint8_t *NodeIO::request_frame()
  21. {
  22. if (frames_available.empty()) {
  23. // Allocate a new frame. Note that this memory will (at this
  24. // time) never get deallocated. In theory, we could deallocate
  25. // it in return_frame, but if a certain number of frames were
  26. // allocated here, it means we had that much data in flight
  27. // (queued but not accepted for sending by the OS), and we're
  28. // likely to need that much again. Subsequent messages will
  29. // _reuse_ the allocated data, though, so the used memory won't
  30. // grow forever, and will be limited to the amount of in-flight
  31. // data needed.
  32. return new uint8_t[FRAME_SIZE];
  33. }
  34. // Copy the pointer to the frame out of the deque and remove it from
  35. // the deque. Note this is _not_ taking the address of the element
  36. // *in* the deque (and then popping it, which would invalidate that
  37. // pointer).
  38. frame_deque_lock.lock();
  39. uint8_t *frame = frames_available.back();
  40. frames_available.pop_back();
  41. frame_deque_lock.unlock();
  42. return frame;
  43. }
  44. void NodeIO::return_frame(uint8_t *frame)
  45. {
  46. if (!frame) return;
  47. // We push the frame back on to the end of the deque so that it will
  48. // be the next one used. This may lead to better cache behaviour?
  49. frame_deque_lock.lock();
  50. frames_available.push_back(frame);
  51. frame_deque_lock.unlock();
  52. }
  53. void NodeIO::send_header_data(uint64_t header, uint8_t *data, size_t len)
  54. {
  55. commands_deque_lock.lock();
  56. commands_inflight.push_back({header, data, len});
  57. if (commands_inflight.size() == 1) {
  58. async_send_commands();
  59. }
  60. commands_deque_lock.unlock();
  61. }
// Start the asynchronous write of the command at the front of
// commands_inflight. Called (with commands_deque_lock held) either by
// send_header_data when the queue becomes non-empty, or by the previous
// write's completion handler — so at most one async_write per node is
// outstanding, which preserves command ordering on the wire.
void NodeIO::async_send_commands()
{
    std::vector<boost::asio::const_buffer> tosend;
    // std::deque never relocates existing elements on push_back or on a
    // pop at the other end, so this pointer stays valid while further
    // commands are queued behind it.
    CommandTuple *commandp = &(commands_inflight.front());
    // Only the low 5 bytes of the 8-byte header go on the wire: the
    // command byte plus a 4-byte length/epoch value.
    // NOTE(review): this presumes a little-endian host — confirm.
    tosend.push_back(boost::asio::buffer(&(std::get<0>(*commandp)), 5));
    if (std::get<1>(*commandp) != NULL && std::get<2>(*commandp) > 0) {
        tosend.push_back(boost::asio::buffer(std::get<1>(*commandp),
            std::get<2>(*commandp)));
    }
    boost::asio::async_write(sock, tosend,
        [this, commandp](boost::system::error_code, std::size_t){
            // When the write completes, pop the command from the deque
            // (which should now be in the front)
            commands_deque_lock.lock();
            assert(!commands_inflight.empty() &&
                &(commands_inflight.front()) == commandp);
            bytes_sent = bytes_sent + 5 + std::get<2>(*commandp);
            // Grab the payload pointer before pop_front invalidates
            // commandp
            uint8_t *data = std::get<1>(*commandp);
            commands_inflight.pop_front();
            if (commands_inflight.size() > 0) {
                async_send_commands();
            }
            // And return the frame
            return_frame(data);
            commands_deque_lock.unlock();
        });
}
  89. void NodeIO::send_epoch(uint32_t epoch_num)
  90. {
  91. uint64_t header = (uint64_t(epoch_num) << 8) + COMMAND_EPOCH;
  92. send_header_data(header, NULL, 0);
  93. }
// Queue a message header announcing a tot_message_len-byte message to
// this node, and record that size so send_chunk can track when the
// message has been fully queued.
void NodeIO::send_message_header(uint32_t tot_message_len)
{
    uint64_t header = (uint64_t(tot_message_len) << 8) + COMMAND_MESSAGE;
    send_header_data(header, NULL, 0);
    // If we're sending a new message header, we have to have finished
    // sending the previous message.
    assert(chunksize_inflight == msgsize_inflight);
    msgsize_inflight = tot_message_len;
    chunksize_inflight = 0;
#ifdef TRACE_SOCKIO
    struct timeval now;
    gettimeofday(&now, NULL);
    printf("%lu.%06lu: RTE queueing %u bytes to %s\n", now.tv_sec,
        now.tv_usec, msgsize_inflight,
        g_netio->config().nodes[node_num].name.c_str());
    // A zero-length message has no chunks, so it is already complete
    // the moment its header is queued
    if (msgsize_inflight == 0) {
        printf("%lu.%06lu: RTE queued %u bytes to %s\n", now.tv_sec,
            now.tv_usec, msgsize_inflight,
            g_netio->config().nodes[node_num].name.c_str());
    }
#endif
}
// Queue one chunk (at most FRAME_SIZE bytes) of the current outgoing
// message. `data` must be a frame from request_frame(); it is handed
// back to the pool by the write completion handler. Returns true while
// more chunks are still needed to complete the message, false when this
// was the last one.
bool NodeIO::send_chunk(uint8_t *data, uint32_t chunk_len)
{
    assert(chunk_len <= FRAME_SIZE);
    uint64_t header = (uint64_t(chunk_len) << 8) + COMMAND_CHUNK;
    send_header_data(header, data, chunk_len);
    chunksize_inflight += chunk_len;
    assert(chunksize_inflight <= msgsize_inflight);
#ifdef TRACE_SOCKIO
    if (msgsize_inflight == chunksize_inflight) {
        struct timeval now;
        gettimeofday(&now, NULL);
        printf("%lu.%06lu: RTE queued %u bytes to %s\n", now.tv_sec,
            now.tv_usec, msgsize_inflight,
            g_netio->config().nodes[node_num].name.c_str());
    }
#endif
    return (chunksize_inflight < msgsize_inflight);
}
// Read and dispatch commands from this node's socket; each completion
// handler re-arms the next read, so the loop runs until an error or a
// failed ecall. error_cb is invoked on any socket error or on an
// unrecognized command byte; epoch_cb receives the epoch number of each
// epoch command. Message and chunk commands are forwarded into the
// enclave via ecall_message / ecall_chunk.
void NodeIO::recv_commands(
    std::function<void(boost::system::error_code)> error_cb,
    std::function<void(uint32_t)> epoch_cb)
{
    // Asynchronously read the header
    receive_header = 0;
    boost::asio::async_read(sock, boost::asio::buffer(&receive_header, 5),
        [this, error_cb, epoch_cb]
        (boost::system::error_code ec, std::size_t) {
        if (ec) {
            error_cb(ec);
            return;
        }
        // Low byte is the command type; the upper four received bytes
        // carry the command's argument (epoch number or byte count)
        if ((receive_header & 0xff) == COMMAND_EPOCH) {
            epoch_cb(uint32_t(receive_header >> 8));
            recv_commands(error_cb, epoch_cb);
        } else if ((receive_header & 0xff) == COMMAND_MESSAGE) {
            // A new message may only start once the previous one has
            // been fully received
            assert(recv_msgsize_inflight == recv_chunksize_inflight);
            recv_msgsize_inflight = uint32_t(receive_header >> 8);
            recv_chunksize_inflight = 0;
#ifdef TRACE_SOCKIO
            struct timeval now;
            gettimeofday(&now, NULL);
            printf("%lu.%06lu: RTE receiving %u bytes from %s\n", now.tv_sec,
                now.tv_usec, recv_msgsize_inflight,
                g_netio->config().nodes[node_num].name.c_str());
            if (recv_msgsize_inflight == 0) {
                printf("%lu.%06lu: RTE received %u bytes from %s\n", now.tv_sec,
                    now.tv_usec, recv_msgsize_inflight,
                    g_netio->config().nodes[node_num].name.c_str());
            }
#endif
            if (ecall_message(node_num, recv_msgsize_inflight)) {
                recv_commands(error_cb, epoch_cb);
            } else {
                // The enclave rejected the message; stop reading from
                // this node (no further read is armed)
                printf("ecall_message failed\n");
            }
        } else if ((receive_header & 0xff) == COMMAND_CHUNK) {
            uint32_t this_chunk_size = uint32_t(receive_header >> 8);
            assert(recv_chunksize_inflight + this_chunk_size <=
                recv_msgsize_inflight);
            recv_chunksize_inflight += this_chunk_size;
            // Read the chunk body into our receive frame, then hand it
            // to the enclave
            boost::asio::async_read(sock, boost::asio::buffer(
                receive_frame, this_chunk_size),
                [this, error_cb, epoch_cb, this_chunk_size]
                (boost::system::error_code ecc, std::size_t) {
                if (ecc) {
                    error_cb(ecc);
                    return;
                }
#ifdef TRACE_SOCKIO
                if (recv_msgsize_inflight == recv_chunksize_inflight) {
                    struct timeval now;
                    gettimeofday(&now, NULL);
                    printf("%lu.%06lu: RTE received %u bytes from %s\n", now.tv_sec,
                        now.tv_usec, recv_msgsize_inflight,
                        g_netio->config().nodes[node_num].name.c_str());
                }
#endif
                if (ecall_chunk(node_num, receive_frame,
                    this_chunk_size)) {
                    recv_commands(error_cb, epoch_cb);
                } else {
                    printf("ecall_chunk failed\n");
                }
            });
        } else {
            // Unknown command byte: surface it as invalid_argument
            error_cb(boost::system::errc::make_error_code(
                boost::system::errc::errc_t::invalid_argument));
        }
    });
}
  206. uint64_t NodeIO::reset_bytes_sent()
  207. {
  208. uint64_t b_sent = bytes_sent;
  209. bytes_sent = 0;
  210. return b_sent;
  211. }
  212. uint64_t NetIO::reset_bytes_sent()
  213. {
  214. uint64_t total=0;
  215. for(size_t i = 0; i<nodeios.size(); i++) {
  216. if(nodeios[i].has_value()) {
  217. total+=((nodeios[i].value()).reset_bytes_sent());
  218. }
  219. }
  220. return total;
  221. }
  222. /*
  223. Receive clients dropped off messages, i.e. a CLIENT_MESSAGE_BUNDLE
  224. */
  225. void NetIO::ing_receive_msgbundle(tcp::socket* csocket, clientid_t c_simid)
  226. {
  227. unsigned char *msgbundle = (unsigned char*) malloc(msgbundle_size);
  228. boost::asio::async_read(*csocket, boost::asio::buffer(msgbundle, msgbundle_size),
  229. [this, csocket, msgbundle, c_simid]
  230. (boost::system::error_code ec, std::size_t) {
  231. if (ec) {
  232. if(ec == boost::asio::error::eof) {
  233. // Client connection terminated so we delete this socket
  234. delete(csocket);
  235. }
  236. else {
  237. printf("Error ing_receive_msgbundle : %s\n", ec.message().c_str());
  238. }
  239. return;
  240. }
  241. #ifdef TRACE_SOCKIO
  242. struct timeval now;
  243. gettimeofday(&now, NULL);
  244. long elapsedus = (now.tv_sec - last_ing.tv_sec) * 1000000
  245. + (now.tv_usec - last_ing.tv_usec);
  246. if (num_ing > 0 && elapsedus > 500000) {
  247. printf("%lu.%06lu: End ingestion of %lu messages\n",
  248. last_ing.tv_sec, last_ing.tv_usec, num_ing);
  249. num_ing = 0;
  250. }
  251. if (num_ing == 0) {
  252. printf("%lu.%06lu: Begin ingestion\n", now.tv_sec,
  253. now.tv_usec);
  254. }
  255. #endif
  256. bool ret;
  257. //Ingest the message_bundle
  258. if(conf.private_routing) {
  259. ret = ecall_ingest_msgbundle(c_simid, msgbundle, conf.m_priv_out);
  260. } else {
  261. ret = ecall_ingest_msgbundle(c_simid, msgbundle, conf.m_pub_out);
  262. }
  263. free(msgbundle);
  264. #ifdef TRACE_SOCKIO
  265. gettimeofday(&last_ing, NULL);
  266. ++num_ing;
  267. #endif
  268. // Continue to async receive client message bundles
  269. if(ret) {
  270. ing_receive_msgbundle(csocket, c_simid);
  271. }
  272. });
  273. }
  274. /*
  275. Handle new client connections.
  276. New clients always send an authentication message.
  277. For ingestion this is then followed by their msg_bundles every epoch.
  278. */
  279. void NetIO::ing_authenticate_new_client(tcp::socket* csocket,
  280. const boost::system::error_code& error)
  281. {
  282. if(error) {
  283. printf("Accept handler failed\n");
  284. return;
  285. }
  286. #ifdef DEBUG_NET_CLIENTS
  287. printf("Accept handler success\n");
  288. #endif
  289. unsigned char* auth_message = (unsigned char*) malloc(auth_size);
  290. boost::asio::async_read(*csocket, boost::asio::buffer(auth_message, auth_size),
  291. [this, csocket, auth_message]
  292. (boost::system::error_code ec, std::size_t) {
  293. if (ec) {
  294. if(ec == boost::asio::error::eof) {
  295. // Client connection terminated so we delete this socket
  296. delete(csocket);
  297. } else {
  298. printf("Error ing_auth_new_client : %s\n", ec.message().c_str());
  299. }
  300. return;
  301. }
  302. else {
  303. clientid_t c_simid = *((clientid_t *)(auth_message));
  304. // Read the authentication token
  305. unsigned char *auth_ptr = auth_message + sizeof(clientid_t);
  306. bool ret = ecall_authenticate(c_simid, auth_ptr);
  307. free(auth_message);
  308. // Receive client message bundles on this socket
  309. // for client sim_id c_simid
  310. if(ret) {
  311. client_count++;
  312. ing_receive_msgbundle(csocket, c_simid);
  313. } else{
  314. printf("Client <-> Ingestion authentication failed\n");
  315. delete(csocket);
  316. }
  317. }
  318. });
  319. ing_start_accept();
  320. }
  321. #ifdef TRACE_SOCKIO
  322. static size_t stg_clients_connected = 0;
  323. static size_t stg_clients_authenticated = 0;
  324. #endif
  325. /*
  326. Handle new client connections.
  327. New clients always send an authentication message.
  328. For storage this is then followed by the storage servers sending them
  329. their mailbox every epoch.
  330. */
  331. void NetIO::stg_authenticate_new_client(tcp::socket* csocket,
  332. const boost::system::error_code& error)
  333. {
  334. if(error) {
  335. printf("Accept handler failed\n");
  336. return;
  337. }
  338. #ifdef DEBUG_NET_CLIENTS
  339. printf("Accept handler success\n");
  340. #endif
  341. unsigned char* auth_message = (unsigned char*) malloc(auth_size);
  342. boost::asio::async_read(*csocket, boost::asio::buffer(auth_message, auth_size),
  343. [this, csocket, auth_message]
  344. (boost::system::error_code ec, std::size_t) {
  345. if (ec) {
  346. if(ec == boost::asio::error::eof) {
  347. // Client connection terminated so we delete this socket
  348. delete(csocket);
  349. } else {
  350. printf("Error stg_auth_new_client: %s\n", ec.message().c_str());
  351. }
  352. return;
  353. }
  354. else {
  355. #ifdef TRACE_SOCKIO
  356. ++stg_clients_connected;
  357. if (stg_clients_connected % 1000 == 0) {
  358. struct timeval now;
  359. gettimeofday(&now, NULL);
  360. printf("%lu.%06lu: STG %lu clients connected\n",
  361. now.tv_sec, now.tv_usec, stg_clients_connected);
  362. }
  363. #endif
  364. clientid_t c_simid = *((clientid_t *)(auth_message));
  365. // Read the authentication token
  366. unsigned char *auth_ptr = auth_message + sizeof(clientid_t);
  367. bool ret = ecall_storage_authenticate(c_simid, auth_ptr);
  368. free(auth_message);
  369. // If the auth is successful, store this socket into
  370. // a client socket array at the local_c_simid index
  371. // for storage servers to send clients their mailbox periodically.
  372. if(ret) {
  373. uint32_t lcid = c_simid / num_stg_nodes;
  374. client_sockets[lcid] = csocket;
  375. #ifdef TRACE_SOCKIO
  376. ++stg_clients_authenticated;
  377. if (stg_clients_authenticated % 1000 == 0) {
  378. struct timeval now;
  379. gettimeofday(&now, NULL);
  380. printf("%lu.%06lu: STG %lu clients authenticated\n",
  381. now.tv_sec, now.tv_usec, stg_clients_authenticated);
  382. }
  383. #endif
  384. }
  385. else{
  386. printf("Client <-> Storage authentication failed\n");
  387. delete (csocket);
  388. }
  389. }
  390. });
  391. stg_start_accept();
  392. }
/*
Asynchronously accept new client connections
*/
void NetIO::ing_start_accept()
{
    // Ownership of the raw socket passes to the completion handler
    // (ing_authenticate_new_client), which is responsible for deleting
    // it when the connection ends.
    tcp::socket *csocket = new tcp::socket(io_context());
#ifdef DEBUG_NET_CLIENTS
    std::cout << "Accepting on " << myconf.clistenhost << ":" << myconf.clistenport << "\n";
#endif
    ingestion_acceptor->async_accept(*csocket,
        boost::bind(&NetIO::ing_authenticate_new_client, this, csocket,
            boost::asio::placeholders::error));
}
// Asynchronously accept the next storage-client connection. Ownership
// of the raw socket passes to the completion handler
// (stg_authenticate_new_client).
void NetIO::stg_start_accept()
{
    tcp::socket *csocket = new tcp::socket(io_context());
#ifdef DEBUG_NET_CLIENTS
    std::cout << "Accepting on " << myconf.slistenhost << ":" << myconf.slistenport << "\n";
#endif
    storage_acceptor->async_accept(*csocket,
        boost::bind(&NetIO::stg_authenticate_new_client, this, csocket,
            boost::asio::placeholders::error));
}
  416. void NetIO::send_client_mailbox()
  417. {
  418. #ifdef PROFILE_NET_CLIENTS
  419. struct timespec tp;
  420. clock_gettime(CLOCK_REALTIME_COARSE, &tp);
  421. unsigned long start = tp.tv_sec * 1000000 + tp.tv_nsec/1000;
  422. #endif
  423. #ifdef TRACE_SOCKIO
  424. size_t clients_without_sockets = 0;
  425. size_t mailboxes_queued = 0;
  426. #endif
  427. // Send each client their tokens for the next epoch
  428. for(uint32_t lcid = 0; lcid < num_clients_per_stg; lcid++)
  429. {
  430. unsigned char *tkn_ptr = epoch_tokens + lcid * token_bundle_size;
  431. unsigned char *buf_ptr = epoch_mailboxes + lcid * mailbox_size;
  432. if(client_sockets[lcid]!=nullptr) {
  433. boost::asio::async_write(*(client_sockets[lcid]),
  434. boost::asio::buffer(tkn_ptr, token_bundle_size),
  435. [this, lcid, buf_ptr](boost::system::error_code ec, std::size_t){
  436. if (ec) {
  437. if(ec == boost::asio::error::eof) {
  438. // Client connection terminated so we delete this socket
  439. delete(client_sockets[lcid]);
  440. printf("Client socket terminated!\n");
  441. } else {
  442. printf("Error send_client_mailbox tokens: %s\n", ec.message().c_str());
  443. }
  444. return;
  445. }
  446. boost::asio::async_write(*(client_sockets[lcid]),
  447. boost::asio::buffer(buf_ptr, mailbox_size),
  448. [this, lcid](boost::system::error_code ecc, std::size_t){
  449. //printf("NetIO::send_client_mailbox, Client %d messages was sent\n", lcid);
  450. if (ecc) {
  451. if(ecc == boost::asio::error::eof) {
  452. // Client connection terminated so we delete this socket
  453. delete(client_sockets[lcid]);
  454. printf("Client socket terminated!\n");
  455. } else {
  456. printf("Error send_client_mailbox mailbox (lcid = %d): %s\n",
  457. lcid, ecc.message().c_str());
  458. }
  459. return;
  460. }
  461. });
  462. });
  463. #ifdef TRACE_SOCKIO
  464. ++mailboxes_queued;
  465. } else {
  466. ++clients_without_sockets;
  467. #endif
  468. }
  469. }
  470. #ifdef TRACE_SOCKIO
  471. struct timeval now;
  472. gettimeofday(&now, NULL);
  473. printf("%lu.%06lu: STG queued %lu mailboxes; %lu clients without sockets\n",
  474. now.tv_sec, now.tv_usec, mailboxes_queued,
  475. clients_without_sockets);
  476. #endif
  477. #ifdef PROFILE_NET_CLIENTS
  478. clock_gettime(CLOCK_REALTIME_COARSE, &tp);
  479. unsigned long end = tp.tv_sec * 1000000 + tp.tv_nsec/1000;
  480. unsigned long diff = end - start;
  481. printf("send_client_mailbox time: %lu.%06lu s\n", diff/1000000, diff%1000000);
  482. #endif
  483. }
// Construct the inter-node mesh: accept connections from lower-numbered
// nodes, connect out to higher-numbered ones, compute the wire sizes of
// the client message formats, and — depending on this node's roles —
// start accepting storage and/or ingestion client connections.
NetIO::NetIO(boost::asio::io_context &io_context, const Config &config)
    : context(io_context), conf(config),
    myconf(config.nodes[config.my_node_num])
{
    num_nodes = nodenum_t(conf.nodes.size());
    nodeios.resize(num_nodes);
    me = conf.my_node_num;
#ifdef TRACE_SOCKIO
    last_ing = {0, 0};
    num_ing = 0;
#endif
    // Node number n will accept connections from nodes 0, ..., n-1 and
    // make connections to nodes n+1, ..., num_nodes-1. This is all
    // single threaded, but it doesn't deadlock because node 0 isn't
    // waiting for any incoming connections, so it immediately makes
    // outgoing connections. When it connects to node 1, that node
    // accepts its (only) incoming connection, and then starts making
    // its outgoing connections, etc.
    tcp::resolver resolver(io_context);
    tcp::acceptor acceptor(io_context,
        resolver.resolve(myconf.listenhost, myconf.listenport)->endpoint());
    for(size_t i=0; i<me; ++i) {
#ifdef VERBOSE_NET
        std::cerr << "Accepting number " << i << "\n";
#endif
        tcp::socket nodesock = acceptor.accept();
#ifdef VERBOSE_NET
        std::cerr << "Accepted number " << i << "\n";
#endif
        // Read 2 bytes from the socket, which will be the
        // connecting node's node number
        unsigned short node_num;
        boost::asio::read(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        if (node_num >= num_nodes) {
            std::cerr << "Received bad node number\n";
        } else {
            nodeios[node_num].emplace(std::move(nodesock), node_num);
#ifdef VERBOSE_NET
            std::cerr << "Received connection from " <<
                config.nodes[node_num].name << "\n";
#endif
        }
    }
    for(size_t i=me+1; i<num_nodes; ++i) {
        boost::system::error_code err;
        tcp::socket nodesock(io_context);
        // Retry until the higher-numbered peer is up and listening
        while(1) {
#ifdef VERBOSE_NET
            std::cerr << "Connecting to " << config.nodes[i].name << "...\n";
#endif
            boost::asio::connect(nodesock,
                resolver.resolve(config.nodes[i].listenhost,
                    config.nodes[i].listenport), err);
            if (!err) break;
            std::cerr << "Connection to " << config.nodes[i].name <<
                " refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket to tell the peer node our node
        // number
        nodenum_t node_num = (nodenum_t)me;
        boost::asio::write(nodesock,
            boost::asio::buffer(&node_num, sizeof(node_num)));
        nodeios[i].emplace(std::move(nodesock), i);
#ifdef VERBOSE_NET
        std::cerr << "Connected to " << config.nodes[i].name << "\n";
#endif
    }
    // Wire-format sizes for client messages, derived from the per-epoch
    // message counts in the config (m_priv_* / m_pub_*)
    auth_size = sizeof(clientid_t) + sizeof(unsigned long) + SGX_AESGCM_KEY_SIZE;
    uint16_t priv_out, priv_in, pub_in;
    if(config.private_routing) {
        priv_out = conf.m_priv_out;
        priv_in = conf.m_priv_in;
        msgbundle_size = SGX_AESGCM_IV_SIZE
            + (conf.m_priv_out * (conf.msg_size + TOKEN_SIZE))
            + SGX_AESGCM_MAC_SIZE;
        token_bundle_size = ((priv_out * TOKEN_SIZE)
            + SGX_AESGCM_IV_SIZE + SGX_AESGCM_MAC_SIZE);
        mailbox_size = (priv_in * conf.msg_size) + SGX_AESGCM_IV_SIZE
            + SGX_AESGCM_MAC_SIZE;
    } else {
        pub_in = conf.m_pub_in;
        msgbundle_size = SGX_AESGCM_IV_SIZE
            + (conf.m_pub_out * conf.msg_size)
            + SGX_AESGCM_MAC_SIZE;
        mailbox_size = (pub_in * conf.msg_size) + SGX_AESGCM_IV_SIZE
            + SGX_AESGCM_MAC_SIZE;
    }
    if(myconf.roles & ROLE_STORAGE) {
        // Setup the client sockets
        // Compute no_of_clients per storage_server
        uint32_t num_users = config.user_count;
        NodeConfig nc;
        num_stg_nodes = 0;
        for (nodenum_t i=0; i<num_nodes; ++i) {
            nc = conf.nodes[i];
            if(nc.roles & ROLE_STORAGE) {
                num_stg_nodes++;
            }
        }
        num_clients_per_stg = CEILDIV(num_users, num_stg_nodes);
        for(uint32_t i = 0; i<num_clients_per_stg; i++) {
            client_sockets.emplace_back(nullptr);
        }
        uint32_t epoch_mailboxes_size = num_clients_per_stg * mailbox_size;
        uint32_t epoch_tokens_size = num_clients_per_stg * token_bundle_size;
        epoch_mailboxes = (unsigned char *) malloc(epoch_mailboxes_size);
        epoch_tokens = (unsigned char *) malloc (epoch_tokens_size);
        // Hand the enclave the shared buffers it fills each epoch
        ecall_supply_storage_buffers(epoch_mailboxes, epoch_mailboxes_size,
            epoch_tokens, epoch_tokens_size);
        storage_acceptor = std::shared_ptr<tcp::acceptor>(
            new tcp::acceptor(io_context,
                resolver.resolve(this->myconf.slistenhost,
                    this->myconf.slistenport)->endpoint()));
        stg_start_accept();
    }
    if(myconf.roles & ROLE_INGESTION) {
        ingestion_acceptor = std::shared_ptr<tcp::acceptor>(
            new tcp::acceptor(io_context,
                resolver.resolve(this->myconf.clistenhost,
                    this->myconf.clistenport)->endpoint()));
        ing_start_accept();
    }
}
  609. void NetIO::recv_commands(
  610. std::function<void(boost::system::error_code)> error_cb,
  611. std::function<void(uint32_t)> epoch_cb)
  612. {
  613. for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
  614. if (node_num == me) continue;
  615. NodeIO &n = node(node_num);
  616. n.recv_commands(error_cb, epoch_cb);
  617. }
  618. }
  619. void NetIO::close()
  620. {
  621. for (nodenum_t node_num = 0; node_num < num_nodes; ++node_num) {
  622. if (node_num == me) continue;
  623. NodeIO &n = node(node_num);
  624. n.close();
  625. }
  626. }
  627. /* The enclave calls this to inform the untrusted app that there's a new
  628. * messaage to send. The return value is the frame the enclave should
  629. * use to store the first (encrypted) chunk of this message. */
  630. uint8_t *ocall_message(nodenum_t node_num, uint32_t message_len)
  631. {
  632. assert(g_netio != NULL);
  633. NodeIO &node = g_netio->node(node_num);
  634. node.send_message_header(message_len);
  635. return node.request_frame();
  636. }
  637. /* The enclave calls this to inform the untrusted app that there's a new
  638. * chunk to send. The return value is the frame the enclave should use
  639. * to store the next (encrypted) chunk of this message, or NULL if this
  640. * was the last chunk. */
  641. uint8_t *ocall_chunk(nodenum_t node_num, uint8_t *chunkdata,
  642. uint32_t chunklen)
  643. {
  644. assert(g_netio != NULL);
  645. NodeIO &node = g_netio->node(node_num);
  646. bool morechunks = node.send_chunk(chunkdata, chunklen);
  647. if (morechunks) {
  648. return node.request_frame();
  649. }
  650. return NULL;
  651. }