mpcio.cpp

  1. #include "mpcio.hpp"
  2. #include "rdpf.hpp"
  3. #include "bitutils.hpp"
  4. template<typename T, typename N>
  5. PreCompStorage<T,N>::PreCompStorage(unsigned player, bool preprocessing,
  6. const char *filenameprefix, unsigned thread_num) :
  7. name(N::name), depth(0)
  8. {
  9. init(player, preprocessing, filenameprefix, thread_num);
  10. }
  11. template<typename T, typename N>
  12. void PreCompStorage<T,N>::init(unsigned player, bool preprocessing,
  13. const char *filenameprefix, unsigned thread_num, nbits_t depth)
  14. {
  15. if (preprocessing) return;
  16. std::string filename(filenameprefix);
  17. char suffix[20];
  18. if (depth) {
  19. this->depth = depth;
  20. sprintf(suffix, "%02d.p%d.t%u", depth, player%10, thread_num);
  21. } else {
  22. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  23. }
  24. filename.append(suffix);
  25. storage.open(filename);
  26. // It's OK if files for not every depth exist
  27. if (!depth && storage.fail()) {
  28. std::cerr << "Failed to open " << filename << "\n";
  29. exit(1);
  30. }
  31. count = 0;
  32. }
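
// For example (illustrative values only): with filenameprefix
// "rdpf", player 1, thread 3, and depth 5, init() above opens the
// file "rdpf05.p1.t3"; with no depth (say, prefix "triples"), it
// opens "triples.p1.t3".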

template<typename T, typename N>
void PreCompStorage<T,N>::get(T& nextval)
{
    storage >> nextval;
    if (!storage.good()) {
        std::cerr << "Failed to read precomputed value from " << name;
        if (depth) {
            std::cerr << (int)depth;
        }
        std::cerr << " storage\n";
        exit(1);
    }
    ++count;
}

void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}

size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock. Note that the
    // boolean test checks whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

    // If we already have a full packet's worth of data, we may as
    // well send it.
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}
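
// Usage sketch (hypothetical caller, not part of this file): with
// SEND_LAMPORT_CLOCKS defined, the first queue() after an explicit
// send() stamps a fresh message with the current Lamport clock and
// returns 1; further queue() calls append to that same message and
// return 0 until the next explicit send():
//
//   size_t n = 0;
//   n += io.queue(buf1, len1, lamport);  // returns 1: a new message
//   n += io.queue(buf2, len2, lamport);  // returns 0: same message
//   io.send();                           // flush; resets message_lamport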

void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();
    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif
    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}

size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef RECORD_IOTRACE
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
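
// Wire format consumed by recv() above when SEND_LAMPORT_CLOCKS is
// defined (a sketch; the field sizes come from the types used there):
//
//   +------------------+------------------------+------------------+
//   | uint32_t datalen | lamport_t recv_lamport | datalen bytes... |
//   +------------------+------------------------+------------------+
//
// The receiver takes the max of its own clock and recv_lamport + 1,
// so Lamport time advances across every message boundary.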

#ifdef RECORD_IOTRACE
void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
{
    if (label) {
        os << label << " ";
    }
    os << "IO trace:";
    for (auto& s: iotrace) {
        os << " " << s;
    }
    os << "\n";
}
#endif

void MPCIO::reset_stats()
{
    msgs_sent.clear();
    msg_bytes_sent.clear();
    aes_ops.clear();
    for (size_t i=0; i<num_threads; ++i) {
        msgs_sent.push_back(0);
        msg_bytes_sent.push_back(0);
        aes_ops.push_back(0);
    }
    steady_start = boost::chrono::steady_clock::now();
    cpu_start = boost::chrono::process_cpu_clock::now();
}

void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << tot_aes_ops << " local AES operations\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
}

MPCPeerIO::MPCPeerIO(unsigned player, bool preprocessing,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, preprocessing, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        triples.emplace_back(player, preprocessing, "triples", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, preprocessing, "halves", i);
    }
    rdpftriples.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    for (auto &&sock : peersocks) {
        peerios.emplace_back(std::move(sock));
    }
    for (auto &&sock : serversocks) {
        serverios.emplace_back(std::move(sock));
    }
}

void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<triples.size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i << " t:" << triples[i].get_stats() <<
            " h:" << halftriples[i].get_stats();
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpftriples[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
    }
    os << "\n";
}

void MPCPeerIO::reset_precomp_stats()
{
    for (size_t i=0; i<triples.size(); ++i) {
        triples[i].reset_stats();
        halftriples[i].reset_stats();
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].reset_stats();
        }
    }
}

void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCServerIO::MPCServerIO(bool preprocessing,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, preprocessing, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    for (auto &&sock : p0socks) {
        p0ios.emplace_back(std::move(sock));
    }
    for (auto &&sock : p1socks) {
        p1ios.emplace_back(std::move(sock));
    }
}

void MPCServerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpfpairs[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
    }
    os << "\n";
}

void MPCServerIO::reset_precomp_stats()
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].reset_stats();
        }
    }
}

void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num) :
    thread_num(thread_num), thread_lamport(mpcio.lamport),
    mpcio(mpcio)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}

// Sync our per-thread Lamport clock with the master one in the
// mpcio. You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be the max of the thread
    // Lamport time and what we thought it was before. We use this
    // compare_exchange construction in order to atomically do the
    // comparison, computation, and replacement.
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
    // The next line atomically checks whether lamport still has
    // the value old_lamport; if so, it changes its value to
    // new_lamport and returns true (ending the loop). If not, it
    // sets old_lamport to the current value of lamport, and
    // returns false (continuing the loop so that new_lamport can
    // be recomputed based on this new value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    thread_lamport = new_lamport;
}
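
// Usage sketch (hypothetical channel object, not part of this file)
// for the interthread-communication case described above:
//
//   // in the sending thread:
//   tio_a.sync_lamport();    // publish our clock before the send
//   channel.send(msg);
//
//   // in the receiving thread:
//   channel.recv(msg);
//   tio_b.sync_lamport();    // absorb the updated clock afterwards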

// Queue up data to the peer or to the server
void MPCTIO::queue_peer(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_server(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from the peer or from the server
size_t MPCTIO::recv_peer(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_server(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Queue up data to p0 or p1
void MPCTIO::queue_p0(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_p1(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from p0 or p1
size_t MPCTIO::recv_p0(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_p1(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Send all queued data for this thread
void MPCTIO::send()
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.peerios[thread_num].send();
        mpcpio.serverios[thread_num].send();
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.p0ios[thread_num].send();
        mpcsrvio.p1ios[thread_num].send();
    }
}

// Functions to get precomputed values. If we're in the online
// phase, get them from PreCompStorage. If we're in the
// preprocessing phase, read them from the server.
MultTriple MPCTIO::triple()
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.triples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
    }
    return val;
}
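
// Sanity check of the identity with small numbers (illustrative
// only; the real values are uniformly random 64-bit, with arithmetic
// implicitly mod 2^64): X0=3, Y0=7, Z0=10, X1=2, Y1=5 gives
// Z1 = 3*5 + 2*7 - 10 = 19, and indeed
// X0*Y1 + Y0*X1 = 15 + 14 = 29 = Z0 + Z1.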

HalfTriple MPCTIO::halftriple()
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
    }
    return val;
}
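
// Same kind of check (illustrative, mod 2^64): X0=6, Y1=4, Z0=5
// gives Z1 = 6*4 - 5 = 19, and X0*Y1 = 24 = Z0 + Z1.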

SelectTriple MPCTIO::selecttriple()
{
    SelectTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            uint8_t Xbyte;
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple in online phase\n";
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
    }
    return val;
}
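
// The sign-extension trick above turns a one-bit value into a
// 128-bit AND mask: if128_mask[0] is all zeros and if128_mask[1] is
// all ones, so (X0ext & Y1) is 0 or Y1. For example, with X0=1 and
// X1=0: Z1 = (Y1 ^ 0) ^ Z0, and X0*Y1 ^ Y0*X1 = Y1 = Z0 ^ Z1.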

RDPFTriple MPCTIO::rdpftriple(nbits_t depth)
{
    RDPFTriple val;
    if (!mpcio.preprocessing && mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.rdpftriples[thread_num][depth-1].get(val);
    }
    return val;
}

RDPFPair MPCTIO::rdpfpair(nbits_t depth)
{
    RDPFPair val;
    if (!mpcio.preprocessing && mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
    }
    return val;
}

// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;

// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;

// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;

void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr, // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, and make num_threads
        // connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}

void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}
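
// Usage sketch (hypothetical main() fragments, assuming the
// connection layout above: P1 dials P0, and P2 dials both):
//
//   boost::asio::io_context io_context;
//
//   // On a computational party (player 0 or 1; p0addr may be NULL
//   // when player == 0):
//   std::deque<tcp::socket> peersocks, serversocks;
//   mpcio_setup_computational(player, io_context, p0addr,
//       num_threads, peersocks, serversocks);
//   MPCPeerIO mpcio(player, preprocessing, peersocks, serversocks);
//
//   // On the server (player 2):
//   std::deque<tcp::socket> p0socks, p1socks;
//   mpcio_setup_server(io_context, p0addr, p1addr,
//       num_threads, p0socks, p1socks);
//   MPCServerIO mpcsrvio(preprocessing, p0socks, p1socks);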