// mpcio.cpp
#include "mpcio.hpp"

#include <numeric>

#include "bitutils.hpp"
  3. template<typename T>
  4. PreCompStorage<T>::PreCompStorage(unsigned player, bool preprocessing,
  5. const char *filenameprefix, unsigned thread_num) {
  6. if (preprocessing) return;
  7. std::string filename(filenameprefix);
  8. char suffix[20];
  9. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  10. filename.append(suffix);
  11. storage.open(filename);
  12. if (storage.fail()) {
  13. std::cerr << "Failed to open " << filename << "\n";
  14. exit(1);
  15. }
  16. count = 0;
  17. }
  18. template<typename T>
  19. void PreCompStorage<T>::get(T& nextval) {
  20. storage.read((char *)&nextval, sizeof(T));
  21. if (storage.gcount() != sizeof(T)) {
  22. std::cerr << "Failed to read precomputed value from storage\n";
  23. exit(1);
  24. }
  25. ++count;
  26. }
// Launch an async_write of the message at the front of messagequeue.
// When the write completes, the handler pops the sent message and, if
// more messages remain queued, chains the next write.  The caller must
// ensure the front element is stable (e.g. by holding messagequeuelock).
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    // With Lamport clocks enabled, each queued message is a
    // (header, message) pair; gather-write both buffers in one call
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        // NOTE(review): [&] captures `this`; this assumes the
        // MPCSingleIO outlives the pending async write — confirm the
        // shutdown path guarantees that
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
  49. size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
  50. {
  51. // Is this a new message?
  52. size_t newmsg = 0;
  53. dataqueue.append((const char *)data, len);
  54. // If this is the first queue() since the last explicit send(),
  55. // which we'll know because message_lamport will be nullopt, set
  56. // message_lamport to the current Lamport clock. Note that the
  57. // boolean test tests whether message_lamport is nullopt, not
  58. // whether its value is zero.
  59. if (!message_lamport) {
  60. message_lamport = lamport;
  61. newmsg = 1;
  62. }
  63. // If we already have some full packets worth of data, may as
  64. // well send it.
  65. if (dataqueue.size() > 28800) {
  66. send(true);
  67. }
  68. return newmsg;
  69. }
// Move the currently buffered data (if any) onto the message queue
// and, if the queue was previously empty, start the async writer.
// implicit_send is true when invoked from queue() because the buffer
// filled up, false for an explicit caller-requested send().
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    // Sends are recorded as positive sizes (receives as negative)
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it.  (If not, the completion handler of
    // the in-flight write will chain to it.)
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
// Receive exactly len bytes into data, blocking until all have
// arrived.  With SEND_LAMPORT_CLOCKS, each incoming message carries a
// header with its length and the sender's Lamport clock, and the
// caller's clock is updated accordingly.  Returns the number of bytes
// received.
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        // If no bytes of a previously received message remain
        // unconsumed, read headers (and bodies) until we get a message
        // with a nonempty body.  A zero-length message only carries a
        // Lamport clock update.
        while (recvdataremain == 0) {
            // Read a new header: a 32-bit body length followed by the
            // sender's Lamport clock
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            // Standard Lamport update: our clock becomes
            // max(ours, sender's + 1)
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        // Copy as much of the buffered message body as the caller
        // still wants; the unconsumed tail of recvdata is tracked by
        // recvdataremain
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    // Without Lamport clocks the stream is raw bytes; just read
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef RECORD_IOTRACE
    // Receives are recorded as negative sizes (sends as positive)
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
  152. #ifdef RECORD_IOTRACE
  153. void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
  154. {
  155. if (label) {
  156. os << label << " ";
  157. }
  158. os << "IO trace:";
  159. for (auto& s: iotrace) {
  160. os << " " << s;
  161. }
  162. os << "\n";
  163. }
  164. #endif
  165. void MPCIO::reset_stats()
  166. {
  167. msgs_sent.clear();
  168. msg_bytes_sent.clear();
  169. aes_ops.clear();
  170. for (size_t i=0; i<num_threads; ++i) {
  171. msgs_sent.push_back(0);
  172. msg_bytes_sent.push_back(0);
  173. aes_ops.push_back(0);
  174. }
  175. steady_start = boost::chrono::steady_clock::now();
  176. cpu_start = boost::chrono::process_cpu_clock::now();
  177. }
  178. void MPCIO::dump_stats(std::ostream &os)
  179. {
  180. size_t tot_msgs_sent = 0;
  181. size_t tot_msg_bytes_sent = 0;
  182. size_t tot_aes_ops = 0;
  183. for (auto& n : msgs_sent) {
  184. tot_msgs_sent += n;
  185. }
  186. for (auto& n : msg_bytes_sent) {
  187. tot_msg_bytes_sent += n;
  188. }
  189. for (auto& n : aes_ops) {
  190. tot_aes_ops += n;
  191. }
  192. auto steady_elapsed =
  193. boost::chrono::steady_clock::now() - steady_start;
  194. auto cpu_elapsed =
  195. boost::chrono::process_cpu_clock::now() - cpu_start;
  196. os << tot_msgs_sent << " messages sent\n";
  197. os << tot_msg_bytes_sent << " message bytes sent\n";
  198. os << tot_aes_ops << " local AES operations\n";
  199. os << lamport << " Lamport clock (latencies)\n";
  200. os << boost::chrono::duration_cast
  201. <boost::chrono::milliseconds>(steady_elapsed) <<
  202. " wall clock time\n";
  203. os << cpu_elapsed << " {real;user;system}\n";
  204. }
  205. MPCPeerIO::MPCPeerIO(unsigned player, bool preprocessing,
  206. std::deque<tcp::socket> &peersocks,
  207. std::deque<tcp::socket> &serversocks) :
  208. MPCIO(player, preprocessing, peersocks.size())
  209. {
  210. unsigned num_threads = unsigned(peersocks.size());
  211. for (unsigned i=0; i<num_threads; ++i) {
  212. triples.emplace_back(player, preprocessing, "triples", i);
  213. }
  214. for (unsigned i=0; i<num_threads; ++i) {
  215. halftriples.emplace_back(player, preprocessing, "halves", i);
  216. }
  217. for (auto &&sock : peersocks) {
  218. peerios.emplace_back(std::move(sock));
  219. }
  220. for (auto &&sock : serversocks) {
  221. serverios.emplace_back(std::move(sock));
  222. }
  223. }
  224. void MPCPeerIO::dump_precomp_stats(std::ostream &os)
  225. {
  226. for (size_t i=0; i<triples.size(); ++i) {
  227. if (i > 0) {
  228. os << " ";
  229. }
  230. os << "T" << i << " t:" << triples[i].get_stats() <<
  231. " h:" << halftriples[i].get_stats();
  232. }
  233. os << "\n";
  234. }
  235. void MPCPeerIO::reset_precomp_stats()
  236. {
  237. for (size_t i=0; i<triples.size(); ++i) {
  238. triples[i].reset_stats();
  239. halftriples[i].reset_stats();
  240. }
  241. }
// Dump the common MPCIO statistics, followed by the precomputed-value
// usage counts, to os
void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
  248. MPCServerIO::MPCServerIO(bool preprocessing,
  249. std::deque<tcp::socket> &p0socks,
  250. std::deque<tcp::socket> &p1socks) :
  251. MPCIO(2, preprocessing, p0socks.size())
  252. {
  253. for (auto &&sock : p0socks) {
  254. p0ios.emplace_back(std::move(sock));
  255. }
  256. for (auto &&sock : p1socks) {
  257. p1ios.emplace_back(std::move(sock));
  258. }
  259. }
// Set up the per-thread I/O wrapper for thread thread_num.  The
// thread's Lamport clock starts from the current master clock.
// Computational players (0 and 1) get iostreams to the peer and to the
// server; the server (player 2) gets iostreams to P0 and P1.
MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num) :
    thread_num(thread_num), thread_lamport(mpcio.lamport),
    mpcio(mpcio)
{
    if (mpcio.player < 2) {
        // Downcast is safe: players 0 and 1 are always MPCPeerIO
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num], thread_lamport);
        server_iostream.emplace(mpcpio.serverios[thread_num], thread_lamport);
    } else {
        // Player 2 is always MPCServerIO
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num], thread_lamport);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num], thread_lamport);
    }
}
  274. // Sync our per-thread lamport clock with the master one in the
  275. // mpcio. You only need to call this explicitly if your MPCTIO
  276. // outlives your thread (in which case call it after the join), or
  277. // if your threads do interthread communication amongst themselves
  278. // (in which case call it in the sending thread before the send, and
  279. // call it in the receiving thread after the receive).
// Merge this thread's Lamport clock into the master clock in mpcio,
// and adopt the merged value as the thread's clock.
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before. We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
        // The next line atomically checks if lamport still has
        // the value old_lamport; if so, it changes its value to
        // new_lamport and returns true (ending the loop). If
        // not, it sets old_lamport to the current value of
        // lamport, and returns false (continuing the loop so
        // that new_lamport can be recomputed based on this new
        // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    // new_lamport now holds max(master clock, our thread clock);
    // adopt it as this thread's clock as well
    thread_lamport = new_lamport;
}
  303. // Queue up data to the peer or to the server
  304. void MPCTIO::queue_peer(const void *data, size_t len)
  305. {
  306. if (mpcio.player < 2) {
  307. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  308. size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
  309. mpcpio.msgs_sent[thread_num] += newmsg;
  310. mpcpio.msg_bytes_sent[thread_num] += len;
  311. }
  312. }
  313. void MPCTIO::queue_server(const void *data, size_t len)
  314. {
  315. if (mpcio.player < 2) {
  316. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  317. size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
  318. mpcpio.msgs_sent[thread_num] += newmsg;
  319. mpcpio.msg_bytes_sent[thread_num] += len;
  320. }
  321. }
// Receive data from the peer or from the server
  323. size_t MPCTIO::recv_peer(void *data, size_t len)
  324. {
  325. if (mpcio.player < 2) {
  326. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  327. return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
  328. }
  329. return 0;
  330. }
  331. size_t MPCTIO::recv_server(void *data, size_t len)
  332. {
  333. if (mpcio.player < 2) {
  334. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  335. return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
  336. }
  337. return 0;
  338. }
  339. // Queue up data to p0 or p1
  340. void MPCTIO::queue_p0(const void *data, size_t len)
  341. {
  342. if (mpcio.player == 2) {
  343. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  344. size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
  345. mpcsrvio.msgs_sent[thread_num] += newmsg;
  346. mpcsrvio.msg_bytes_sent[thread_num] += len;
  347. }
  348. }
  349. void MPCTIO::queue_p1(const void *data, size_t len)
  350. {
  351. if (mpcio.player == 2) {
  352. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  353. size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
  354. mpcsrvio.msgs_sent[thread_num] += newmsg;
  355. mpcsrvio.msg_bytes_sent[thread_num] += len;
  356. }
  357. }
  358. // Receive data from p0 or p1
  359. size_t MPCTIO::recv_p0(void *data, size_t len)
  360. {
  361. if (mpcio.player == 2) {
  362. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  363. return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
  364. }
  365. return 0;
  366. }
  367. size_t MPCTIO::recv_p1(void *data, size_t len)
  368. {
  369. if (mpcio.player == 2) {
  370. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  371. return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
  372. }
  373. return 0;
  374. }
  375. // Send all queued data for this thread
  376. void MPCTIO::send()
  377. {
  378. if (mpcio.player < 2) {
  379. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  380. mpcpio.peerios[thread_num].send();
  381. mpcpio.serverios[thread_num].send();
  382. } else {
  383. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  384. mpcsrvio.p0ios[thread_num].send();
  385. mpcsrvio.p1ios[thread_num].send();
  386. }
  387. }
  388. // Functions to get precomputed values. If we're in the online
  389. // phase, get them from PreCompStorage. If we're in the
  390. // preprocessing phase, read them from the server.
// Get a multiplication triple share.  In the online phase, the
// computational players read the next precomputed triple from storage.
// In the preprocessing phase, the players receive their shares from
// the server, and the server generates the correlated shares and
// queues them to P0 and P1 (the server's own return value is the
// default-constructed, unused MultTriple).
MultTriple MPCTIO::triple()
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.triples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        // Five of the six values are uniformly random; Z1 is computed
        // to satisfy the correlation above (arithmetic is mod 2^64,
        // wrapping in value_t)
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
    }
    return val;
}
// Get a half-triple share.  In the online phase, the computational
// players read the next precomputed halftriple from storage.  In the
// preprocessing phase, the players receive their shares from the
// server, and the server generates the correlated shares and queues
// them to P0 and P1 (the server's own return value is the
// default-constructed, unused HalfTriple).
HalfTriple MPCTIO::halftriple()
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        // Three of the four values are uniformly random; Z1 is
        // computed to satisfy the correlation above
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
    }
    return val;
}
// Get a select-triple share.  Unlike the other triple types, these are
// only available in the preprocessing phase: the server generates the
// correlated shares and queues them to P0 and P1, and the players
// receive their share field-by-field from the server.  Requesting one
// in the online phase just prints an error (and returns the
// default-constructed val).
SelectTriple MPCTIO::selecttriple()
{
    SelectTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            // X is a single bit, sent as a byte; keep only the low bit
            uint8_t Xbyte;
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple in online phase\n";
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        // Five of the six values are random; Z1 is computed to satisfy
        // the XOR correlation above
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1), turning the bit multiplications into ANDs
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
    }
    return val;
}
// The listening ports for the three connections among the parties.
// The party after the arrow listens; the party before it connects.
// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;
// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;
// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
// Set up the per-thread sockets for a computational player (P0 or P1).
// On return, peersocks[i] is the connection to the other computational
// player for thread i, and serversocks[i] is the connection to the
// server (P2) for thread i.  P0 only listens; P1 listens for P2 and
// dials out to P0 (so p0addr may be NULL when player==0).  Each dialed
// connection sends its 2-byte thread number so the listener can slot
// it into the right position regardless of arrival order.
void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr, // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));
        // Pre-fill both deques with placeholder (unconnected) sockets
        // so accepted connections can be stored by thread number in
        // whatever order they arrive
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                // NOTE(review): the bad connection is dropped, which
                // leaves some thread's placeholder socket unconnected
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));
        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        // Only serversocks needs placeholders; peersocks are dialed
        // out in thread order below
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            // Retry until P0 is up and listening
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}
// Set up the per-thread sockets for the server (P2): dial num_threads
// connections to P0 and num_threads connections to P1, retrying each
// connection until the target is listening.  Each connection announces
// its 2-byte thread number so the listening side can slot it into the
// right position.  On return, p0socks[i]/p1socks[i] are the
// connections for thread i.
void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        // Retry until P0 is up and listening
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        // Retry until P1 is up and listening
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}