// mpcio.cpp -- MPC I/O: sockets, message queues, Lamport clocks, and
// precomputed-value storage for the two computational players and the server.
  1. #include "mpcio.hpp"
  2. template<typename T>
  3. PreCompStorage<T>::PreCompStorage(unsigned player, bool preprocessing,
  4. const char *filenameprefix, unsigned thread_num) {
  5. if (preprocessing) return;
  6. std::string filename(filenameprefix);
  7. char suffix[20];
  8. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  9. filename.append(suffix);
  10. storage.open(filename);
  11. if (storage.fail()) {
  12. std::cerr << "Failed to open " << filename << "\n";
  13. exit(1);
  14. }
  15. count = 0;
  16. }
  17. template<typename T>
  18. void PreCompStorage<T>::get(T& nextval) {
  19. storage.read((char *)&nextval, sizeof(T));
  20. if (storage.gcount() != sizeof(T)) {
  21. std::cerr << "Failed to read precomputed value from storage\n";
  22. exit(1);
  23. }
  24. ++count;
  25. }
// Launch an async_write of the message at the front of messagequeue.
// The caller must guarantee the queue is nonempty.  The completion
// handler pops the sent message and, if more are queued, chains the
// next write — so at most one async_write is in flight on this socket
// at any time.
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    // With Lamport clocks, each queued message has a separate header
    // and payload; send both with a single scatter-gather write
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        // NOTE(review): the error code ec is ignored, so a failed write
        // still pops the message — confirm this is intentional
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
  48. size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
  49. {
  50. // Is this a new message?
  51. size_t newmsg = 0;
  52. dataqueue.append((const char *)data, len);
  53. // If this is the first queue() since the last explicit send(),
  54. // which we'll know because message_lamport will be nullopt, set
  55. // message_lamport to the current Lamport clock. Note that the
  56. // boolean test tests whether message_lamport is nullopt, not
  57. // whether its value is zero.
  58. if (!message_lamport) {
  59. message_lamport = lamport;
  60. newmsg = 1;
  61. }
  62. // If we already have some full packets worth of data, may as
  63. // well send it.
  64. if (dataqueue.size() > 28800) {
  65. send(true);
  66. }
  67. return newmsg;
  68. }
// Push the currently buffered data onto the message queue as one
// message and kick off an async write if none is in flight.
// implicit_send is true when called automatically from queue() (buffer
// grew large) and false for an application-level send(); only an
// explicit send() resets the per-message Lamport clock.
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    // Sends are traced as positive sizes (receives as negative)
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it.  (If the queue was already nonempty,
    // the in-flight write's completion handler will chain to it.)
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
// Receive exactly len bytes into data, updating our Lamport clock from
// any message headers consumed along the way.  Returns the number of
// bytes received (boost::asio::read throws on EOF/error, so on normal
// return this is len).
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        // If no payload bytes remain from the previous message, read
        // message headers until one arrives carrying a nonzero payload
        while (recvdataremain == 0) {
            // Read a new header: a 4-byte payload length followed by
            // the sender's Lamport timestamp
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            // Standard Lamport receive rule: our clock becomes
            // max(ours, sender's + 1)
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        // Copy out as much of the buffered payload as the caller asked
        // for; any unread tail stays in recvdata for the next recv()
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef RECORD_IOTRACE
    // Receives are traced as negative sizes (sends as positive)
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
  151. #ifdef RECORD_IOTRACE
  152. void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
  153. {
  154. if (label) {
  155. os << label << " ";
  156. }
  157. os << "IO trace:";
  158. for (auto& s: iotrace) {
  159. os << " " << s;
  160. }
  161. os << "\n";
  162. }
  163. #endif
  164. void MPCIO::reset_stats()
  165. {
  166. msgs_sent.clear();
  167. msg_bytes_sent.clear();
  168. aes_ops.clear();
  169. for (size_t i=0; i<num_threads; ++i) {
  170. msgs_sent.push_back(0);
  171. msg_bytes_sent.push_back(0);
  172. aes_ops.push_back(0);
  173. }
  174. steady_start = boost::chrono::steady_clock::now();
  175. cpu_start = boost::chrono::process_cpu_clock::now();
  176. }
  177. void MPCIO::dump_stats(std::ostream &os)
  178. {
  179. size_t tot_msgs_sent = 0;
  180. size_t tot_msg_bytes_sent = 0;
  181. size_t tot_aes_ops = 0;
  182. for (auto& n : msgs_sent) {
  183. tot_msgs_sent += n;
  184. }
  185. for (auto& n : msg_bytes_sent) {
  186. tot_msg_bytes_sent += n;
  187. }
  188. for (auto& n : aes_ops) {
  189. tot_aes_ops += n;
  190. }
  191. auto steady_elapsed =
  192. boost::chrono::steady_clock::now() - steady_start;
  193. auto cpu_elapsed =
  194. boost::chrono::process_cpu_clock::now() - cpu_start;
  195. os << tot_msgs_sent << " messages sent\n";
  196. os << tot_msg_bytes_sent << " message bytes sent\n";
  197. os << tot_aes_ops << " local AES operations\n";
  198. os << lamport << " Lamport clock (latencies)\n";
  199. os << boost::chrono::duration_cast
  200. <boost::chrono::milliseconds>(steady_elapsed) <<
  201. " wall clock time\n";
  202. os << cpu_elapsed << " {real;user;system}\n";
  203. }
  204. MPCPeerIO::MPCPeerIO(unsigned player, bool preprocessing,
  205. std::deque<tcp::socket> &peersocks,
  206. std::deque<tcp::socket> &serversocks) :
  207. MPCIO(player, preprocessing, peersocks.size())
  208. {
  209. unsigned num_threads = unsigned(peersocks.size());
  210. for (unsigned i=0; i<num_threads; ++i) {
  211. triples.emplace_back(player, preprocessing, "triples", i);
  212. }
  213. for (unsigned i=0; i<num_threads; ++i) {
  214. halftriples.emplace_back(player, preprocessing, "halves", i);
  215. }
  216. for (auto &&sock : peersocks) {
  217. peerios.emplace_back(std::move(sock));
  218. }
  219. for (auto &&sock : serversocks) {
  220. serverios.emplace_back(std::move(sock));
  221. }
  222. }
  223. void MPCPeerIO::dump_precomp_stats(std::ostream &os)
  224. {
  225. for (size_t i=0; i<triples.size(); ++i) {
  226. if (i > 0) {
  227. os << " ";
  228. }
  229. os << "T" << i << " t:" << triples[i].get_stats() <<
  230. " h:" << halftriples[i].get_stats();
  231. }
  232. os << "\n";
  233. }
  234. void MPCPeerIO::reset_precomp_stats()
  235. {
  236. for (size_t i=0; i<triples.size(); ++i) {
  237. triples[i].reset_stats();
  238. halftriples[i].reset_stats();
  239. }
  240. }
// Print the base MPCIO statistics followed by the precomputed-value
// usage counts for this peer.
void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
  247. MPCServerIO::MPCServerIO(bool preprocessing,
  248. std::deque<tcp::socket> &p0socks,
  249. std::deque<tcp::socket> &p1socks) :
  250. MPCIO(2, preprocessing, p0socks.size())
  251. {
  252. for (auto &&sock : p0socks) {
  253. p0ios.emplace_back(std::move(sock));
  254. }
  255. for (auto &&sock : p1socks) {
  256. p1ios.emplace_back(std::move(sock));
  257. }
  258. }
// Sync our per-thread lamport clock with the master one in the
// mpcio.  You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before.  We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
        // The next line atomically checks if lamport still has
        // the value old_lamport; if so, it changes its value to
        // new_lamport and returns true (ending the loop).  If
        // not, it sets old_lamport to the current value of
        // lamport, and returns false (continuing the loop so
        // that new_lamport can be recomputed based on this new
        // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    // Our thread clock also advances to the agreed maximum
    thread_lamport = new_lamport;
}
  288. // Queue up data to the peer or to the server
  289. void MPCTIO::queue_peer(const void *data, size_t len)
  290. {
  291. if (mpcio.player < 2) {
  292. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  293. size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
  294. mpcpio.msgs_sent[thread_num] += newmsg;
  295. mpcpio.msg_bytes_sent[thread_num] += len;
  296. }
  297. }
  298. void MPCTIO::queue_server(const void *data, size_t len)
  299. {
  300. if (mpcio.player < 2) {
  301. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  302. size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
  303. mpcpio.msgs_sent[thread_num] += newmsg;
  304. mpcpio.msg_bytes_sent[thread_num] += len;
  305. }
  306. }
  307. // Receive data from the peer or to the server
  308. size_t MPCTIO::recv_peer(void *data, size_t len)
  309. {
  310. if (mpcio.player < 2) {
  311. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  312. return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
  313. }
  314. return 0;
  315. }
  316. size_t MPCTIO::recv_server(void *data, size_t len)
  317. {
  318. if (mpcio.player < 2) {
  319. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  320. return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
  321. }
  322. return 0;
  323. }
  324. // Queue up data to p0 or p1
  325. void MPCTIO::queue_p0(const void *data, size_t len)
  326. {
  327. if (mpcio.player == 2) {
  328. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  329. size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
  330. mpcsrvio.msgs_sent[thread_num] += newmsg;
  331. mpcsrvio.msg_bytes_sent[thread_num] += len;
  332. }
  333. }
  334. void MPCTIO::queue_p1(const void *data, size_t len)
  335. {
  336. if (mpcio.player == 2) {
  337. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  338. size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
  339. mpcsrvio.msgs_sent[thread_num] += newmsg;
  340. mpcsrvio.msg_bytes_sent[thread_num] += len;
  341. }
  342. }
  343. // Receive data from p0 or p1
  344. size_t MPCTIO::recv_p0(void *data, size_t len)
  345. {
  346. if (mpcio.player == 2) {
  347. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  348. return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
  349. }
  350. return 0;
  351. }
  352. size_t MPCTIO::recv_p1(void *data, size_t len)
  353. {
  354. if (mpcio.player == 2) {
  355. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  356. return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
  357. }
  358. return 0;
  359. }
  360. // Send all queued data for this thread
  361. void MPCTIO::send()
  362. {
  363. if (mpcio.player < 2) {
  364. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  365. mpcpio.peerios[thread_num].send();
  366. mpcpio.serverios[thread_num].send();
  367. } else {
  368. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  369. mpcsrvio.p0ios[thread_num].send();
  370. mpcsrvio.p1ios[thread_num].send();
  371. }
  372. }
  373. // Functions to get precomputed values. If we're in the online
  374. // phase, get them from PreCompStorage. If we're in the
  375. // preprocessing phase, read them from the server.
  376. MultTriple MPCTIO::triple()
  377. {
  378. MultTriple val;
  379. if (mpcio.player < 2) {
  380. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  381. if (mpcpio.preprocessing) {
  382. recv_server(&val, sizeof(val));
  383. } else {
  384. mpcpio.triples[thread_num].get(val);
  385. }
  386. } else if (mpcio.preprocessing) {
  387. // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
  388. // (X0*Y1 + Y0*X1) = (Z0+Z1)
  389. value_t X0, Y0, Z0, X1, Y1, Z1;
  390. arc4random_buf(&X0, sizeof(X0));
  391. arc4random_buf(&Y0, sizeof(Y0));
  392. arc4random_buf(&Z0, sizeof(Z0));
  393. arc4random_buf(&X1, sizeof(X1));
  394. arc4random_buf(&Y1, sizeof(Y1));
  395. Z1 = X0 * Y1 + X1 * Y0 - Z0;
  396. MultTriple T0, T1;
  397. T0 = std::make_tuple(X0, Y0, Z0);
  398. T1 = std::make_tuple(X1, Y1, Z1);
  399. queue_p0(&T0, sizeof(T0));
  400. queue_p1(&T1, sizeof(T1));
  401. }
  402. return val;
  403. }
  404. HalfTriple MPCTIO::halftriple()
  405. {
  406. HalfTriple val;
  407. if (mpcio.player < 2) {
  408. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  409. if (mpcpio.preprocessing) {
  410. recv_server(&val, sizeof(val));
  411. } else {
  412. mpcpio.halftriples[thread_num].get(val);
  413. }
  414. } else if (mpcio.preprocessing) {
  415. // Create half-triples (X0,Z0),(Y1,Z1) such that
  416. // X0*Y1 = Z0 + Z1
  417. value_t X0, Z0, Y1, Z1;
  418. arc4random_buf(&X0, sizeof(X0));
  419. arc4random_buf(&Z0, sizeof(Z0));
  420. arc4random_buf(&Y1, sizeof(Y1));
  421. Z1 = X0 * Y1 - Z0;
  422. HalfTriple H0, H1;
  423. H0 = std::make_tuple(X0, Z0);
  424. H1 = std::make_tuple(Y1, Z1);
  425. queue_p0(&H0, sizeof(H0));
  426. queue_p1(&H1, sizeof(H1));
  427. }
  428. return val;
  429. }
  430. AndTriple MPCTIO::andtriple()
  431. {
  432. AndTriple val;
  433. if (mpcio.player < 2) {
  434. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  435. if (mpcpio.preprocessing) {
  436. recv_server(&val, sizeof(val));
  437. } else {
  438. std::cerr << "Attempted to read AndTriple in online phase\n";
  439. }
  440. } else if (mpcio.preprocessing) {
  441. // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
  442. // (X0&Y1 ^ Y0&X1) = (Z0^Z1)
  443. DPFnode X0, Y0, Z0, X1, Y1, Z1;
  444. arc4random_buf(&X0, sizeof(X0));
  445. arc4random_buf(&Y0, sizeof(Y0));
  446. arc4random_buf(&Z0, sizeof(Z0));
  447. arc4random_buf(&X1, sizeof(X1));
  448. arc4random_buf(&Y1, sizeof(Y1));
  449. Z1 = _mm_xor_si128(
  450. _mm_xor_si128(_mm_and_si128(X0, Y1), _mm_and_si128(X1, Y0)),
  451. Z0);
  452. AndTriple T0, T1;
  453. T0 = std::make_tuple(X0, Y0, Z0);
  454. T1 = std::make_tuple(X1, Y1, Z1);
  455. queue_p0(&T0, sizeof(T0));
  456. queue_p1(&T1, sizeof(T1));
  457. }
  458. return val;
  459. }
// TCP ports for the three pairwise connections.  In each pair the
// lower-numbered party listens and the higher-numbered party connects.
// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;
// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;
// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
  466. void mpcio_setup_computational(unsigned player,
  467. boost::asio::io_context &io_context,
  468. const char *p0addr, // can be NULL when player=0
  469. int num_threads,
  470. std::deque<tcp::socket> &peersocks,
  471. std::deque<tcp::socket> &serversocks)
  472. {
  473. if (player == 0) {
  474. // Listen for connections from P1 and from P2
  475. tcp::acceptor acceptor_p1(io_context,
  476. tcp::endpoint(tcp::v4(), port_p1_p0));
  477. tcp::acceptor acceptor_p2(io_context,
  478. tcp::endpoint(tcp::v4(), port_p2_p0));
  479. peersocks.clear();
  480. serversocks.clear();
  481. for (int i=0;i<num_threads;++i) {
  482. peersocks.emplace_back(io_context);
  483. serversocks.emplace_back(io_context);
  484. }
  485. for (int i=0;i<num_threads;++i) {
  486. tcp::socket peersock = acceptor_p1.accept();
  487. // Read 2 bytes from the socket, which will be the thread
  488. // number
  489. unsigned short thread_num;
  490. boost::asio::read(peersock,
  491. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  492. if (thread_num >= num_threads) {
  493. std::cerr << "Received bad thread number from peer\n";
  494. } else {
  495. peersocks[thread_num] = std::move(peersock);
  496. }
  497. }
  498. for (int i=0;i<num_threads;++i) {
  499. tcp::socket serversock = acceptor_p2.accept();
  500. // Read 2 bytes from the socket, which will be the thread
  501. // number
  502. unsigned short thread_num;
  503. boost::asio::read(serversock,
  504. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  505. if (thread_num >= num_threads) {
  506. std::cerr << "Received bad thread number from server\n";
  507. } else {
  508. serversocks[thread_num] = std::move(serversock);
  509. }
  510. }
  511. } else if (player == 1) {
  512. // Listen for connections from P2, make num_threads connections to P0
  513. tcp::acceptor acceptor_p2(io_context,
  514. tcp::endpoint(tcp::v4(), port_p2_p1));
  515. tcp::resolver resolver(io_context);
  516. boost::system::error_code err;
  517. peersocks.clear();
  518. serversocks.clear();
  519. for (int i=0;i<num_threads;++i) {
  520. serversocks.emplace_back(io_context);
  521. }
  522. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  523. tcp::socket peersock(io_context);
  524. while(1) {
  525. boost::asio::connect(peersock,
  526. resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
  527. if (!err) break;
  528. std::cerr << "Connection to p0 refused, will retry.\n";
  529. sleep(1);
  530. }
  531. // Write 2 bytes to the socket indicating which thread
  532. // number this socket is for
  533. boost::asio::write(peersock,
  534. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  535. peersocks.push_back(std::move(peersock));
  536. }
  537. for (int i=0;i<num_threads;++i) {
  538. tcp::socket serversock = acceptor_p2.accept();
  539. // Read 2 bytes from the socket, which will be the thread
  540. // number
  541. unsigned short thread_num;
  542. boost::asio::read(serversock,
  543. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  544. if (thread_num >= num_threads) {
  545. std::cerr << "Received bad thread number from server\n";
  546. } else {
  547. serversocks[thread_num] = std::move(serversock);
  548. }
  549. }
  550. } else {
  551. std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
  552. }
  553. }
  554. void mpcio_setup_server(boost::asio::io_context &io_context,
  555. const char *p0addr, const char *p1addr, int num_threads,
  556. std::deque<tcp::socket> &p0socks,
  557. std::deque<tcp::socket> &p1socks)
  558. {
  559. // Make connections to P0 and P1
  560. tcp::resolver resolver(io_context);
  561. boost::system::error_code err;
  562. p0socks.clear();
  563. p1socks.clear();
  564. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  565. tcp::socket p0sock(io_context);
  566. while(1) {
  567. boost::asio::connect(p0sock,
  568. resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
  569. if (!err) break;
  570. std::cerr << "Connection to p0 refused, will retry.\n";
  571. sleep(1);
  572. }
  573. // Write 2 bytes to the socket indicating which thread
  574. // number this socket is for
  575. boost::asio::write(p0sock,
  576. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  577. p0socks.push_back(std::move(p0sock));
  578. }
  579. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  580. tcp::socket p1sock(io_context);
  581. while(1) {
  582. boost::asio::connect(p1sock,
  583. resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
  584. if (!err) break;
  585. std::cerr << "Connection to p1 refused, will retry.\n";
  586. sleep(1);
  587. }
  588. // Write 2 bytes to the socket indicating which thread
  589. // number this socket is for
  590. boost::asio::write(p1sock,
  591. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  592. p1socks.push_back(std::move(p1sock));
  593. }
  594. }