mpcio.cpp 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768
  1. #include "mpcio.hpp"
  2. #include "rdpf.hpp"
  3. #include "bitutils.hpp"
// Storage for one file of precomputed values of type T, identified by
// (player, thread); N supplies the human-readable name used in get()'s
// error messages.  This constructor just delegates to init() with the
// default depth.
template<typename T, typename N>
PreCompStorage<T,N>::PreCompStorage(unsigned player, bool preprocessing,
    const char *filenameprefix, unsigned thread_num) :
    name(N::name), depth(0)
{
    init(player, preprocessing, filenameprefix, thread_num);
}
  11. template<typename T, typename N>
  12. void PreCompStorage<T,N>::init(unsigned player, bool preprocessing,
  13. const char *filenameprefix, unsigned thread_num, nbits_t depth)
  14. {
  15. if (preprocessing) return;
  16. std::string filename(filenameprefix);
  17. char suffix[20];
  18. if (depth) {
  19. this->depth = depth;
  20. sprintf(suffix, "%02d.p%d.t%u", depth, player%10, thread_num);
  21. } else {
  22. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  23. }
  24. filename.append(suffix);
  25. storage.open(filename);
  26. // It's OK if not every file exists; so don't worry about checking
  27. // for errors here. We'll report an error in get() if we actually
  28. // try to use a value for which we don't have a precomputed file.
  29. count = 0;
  30. }
  31. template<typename T, typename N>
  32. void PreCompStorage<T,N>::get(T& nextval)
  33. {
  34. storage >> nextval;
  35. if (!storage.good()) {
  36. std::cerr << "Failed to read precomputed value from " << name;
  37. if (depth) {
  38. std::cerr << (int)depth;
  39. }
  40. std::cerr << " storage\n";
  41. exit(1);
  42. }
  43. ++count;
  44. }
// Launch an async write of the message at the front of messagequeue.
// The caller must hold messagequeuelock and the queue must be
// nonempty.  The completion handler pops the message it just sent
// and, if more messages were queued in the meantime, chains the next
// write, so at most one async_write is in flight at a time.
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    // Send the fixed-size header and the message body as a single
    // gathered write
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            // The front message has been written; remove it and start
            // the next write if anything is waiting
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
// Append len bytes of data to the message currently being built.
// Returns 1 if this queue() started a new message (the first since
// the last explicit send()), 0 if it extended one already in
// progress.  Large accumulations are flushed automatically.
size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock.  Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

    // If we already have some full packets worth of data, may as
    // well send it.
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}
// Move the accumulated dataqueue into the message queue and kick off
// an async write if none is in flight.  implicit_send is true when
// called from queue() because the buffer grew large; an explicit
// (user-initiated) send() additionally ends the current message by
// resetting message_lamport.
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    // Sends are recorded as positive sizes in the I/O trace
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
// Receive exactly len bytes into data, blocking until they arrive.
// When Lamport headers are in use, advance the caller's lamport clock
// to at least one more than any clock value received.  Returns the
// number of bytes read (always len on success).
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        // If no unconsumed message data remains, keep reading message
        // headers (and bodies) until we get one with a nonempty body
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            // Lamport receive rule: our clock becomes at least one
            // more than the clock carried in the message
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        // Hand the caller as much as it wants from the unconsumed
        // tail of recvdata (its last recvdataremain bytes)
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef RECORD_IOTRACE
    // Receives are recorded as negative sizes in the I/O trace
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
  170. #ifdef RECORD_IOTRACE
  171. void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
  172. {
  173. if (label) {
  174. os << label << " ";
  175. }
  176. os << "IO trace:";
  177. for (auto& s: iotrace) {
  178. os << " " << s;
  179. }
  180. os << "\n";
  181. }
  182. #endif
  183. void MPCIO::reset_stats()
  184. {
  185. msgs_sent.clear();
  186. msg_bytes_sent.clear();
  187. aes_ops.clear();
  188. for (size_t i=0; i<num_threads; ++i) {
  189. msgs_sent.push_back(0);
  190. msg_bytes_sent.push_back(0);
  191. aes_ops.push_back(0);
  192. }
  193. steady_start = boost::chrono::steady_clock::now();
  194. cpu_start = boost::chrono::process_cpu_clock::now();
  195. }
// Print the totals (summed over all threads) of the statistics
// gathered since the last reset_stats(): messages and bytes sent,
// local AES operations, the Lamport clock, and elapsed wall-clock
// and process CPU time
void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << tot_aes_ops << " local AES operations\n";
    os << lamport << " Lamport clock (latencies)\n";
    // boost::chrono durations stream with their units attached
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
}
// IO state for a computational player (0 or 1): take ownership of the
// already-connected per-thread sockets to the peer and to the server,
// and open the per-thread precomputed-value stores (multiplication
// triples, half-triples, and RDPF triples at every depth).
MPCPeerIO::MPCPeerIO(unsigned player, bool preprocessing,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks) :
    MPCIO(player, preprocessing, peersocks.size())
{
    // NOTE(review): this local appears to shadow the num_threads set
    // by the MPCIO base just above; both hold peersocks.size() here
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        triples.emplace_back(player, preprocessing, "triples", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, preprocessing, "halves", i);
    }
    // One RDPF triple store per (thread, depth) pair
    rdpftriples.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    // Wrap the sockets; the deques passed in are left with
    // moved-from sockets
    for (auto &&sock : peersocks) {
        peerios.emplace_back(std::move(sock));
    }
    for (auto &&sock : serversocks) {
        serverios.emplace_back(std::move(sock));
    }
}
  249. void MPCPeerIO::dump_precomp_stats(std::ostream &os)
  250. {
  251. for (size_t i=0; i<triples.size(); ++i) {
  252. if (i > 0) {
  253. os << " ";
  254. }
  255. os << "T" << i << " t:" << triples[i].get_stats() <<
  256. " h:" << halftriples[i].get_stats();
  257. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  258. size_t cnt = rdpftriples[i][depth-1].get_stats();
  259. if (cnt > 0) {
  260. os << " r" << int(depth) << ":" << cnt;
  261. }
  262. }
  263. }
  264. os << "\n";
  265. }
  266. void MPCPeerIO::reset_precomp_stats()
  267. {
  268. for (size_t i=0; i<triples.size(); ++i) {
  269. triples[i].reset_stats();
  270. halftriples[i].reset_stats();
  271. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  272. rdpftriples[i][depth-1].reset_stats();
  273. }
  274. }
  275. }
// Print the base-class I/O statistics, then the precomputed-value
// usage on the following line
void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// IO state for the server (player 2): take ownership of the connected
// per-thread sockets to P0 and P1, and open the per-(thread, depth)
// RDPF pair stores
MPCServerIO::MPCServerIO(bool preprocessing,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks) :
    MPCIO(2, preprocessing, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    // Wrap the sockets; the deques passed in are left with
    // moved-from sockets
    for (auto &&sock : p0socks) {
        p0ios.emplace_back(std::move(sock));
    }
    for (auto &&sock : p1socks) {
        p1ios.emplace_back(std::move(sock));
    }
}
  301. void MPCServerIO::dump_precomp_stats(std::ostream &os)
  302. {
  303. for (size_t i=0; i<rdpfpairs.size(); ++i) {
  304. if (i > 0) {
  305. os << " ";
  306. }
  307. os << "T" << i;
  308. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  309. size_t cnt = rdpfpairs[i][depth-1].get_stats();
  310. if (cnt > 0) {
  311. os << " r" << int(depth) << ":" << cnt;
  312. }
  313. }
  314. }
  315. os << "\n";
  316. }
  317. void MPCServerIO::reset_precomp_stats()
  318. {
  319. for (size_t i=0; i<rdpfpairs.size(); ++i) {
  320. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  321. rdpfpairs[i][depth-1].reset_stats();
  322. }
  323. }
  324. }
// Print the base-class I/O statistics, then the precomputed-value
// usage on the following line
void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Per-thread handle onto an MPCIO.  Snapshots the global Lamport
// clock into a thread-local copy, and wraps this thread's sockets in
// iostream objects that count messages and bytes into the per-thread
// statistics slots.  Which pair of streams exists depends on the
// player: 0/1 get peer+server streams, 2 gets p0+p1 streams.
MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num) :
    thread_num(thread_num), thread_lamport(mpcio.lamport),
    mpcio(mpcio)
{
    if (mpcio.player < 2) {
        // Computational player: downcast is safe because players 0
        // and 1 are always constructed as MPCPeerIO
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        // Server: player 2 is always constructed as MPCServerIO
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}
// Sync our per-thread lamport clock with the master one in the
// mpcio.  You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before.  We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
    // The next line atomically checks if lamport still has
    // the value old_lamport; if so, it changes its value to
    // new_lamport and returns true (ending the loop).  If
    // not, it sets old_lamport to the current value of
    // lamport, and returns false (continuing the loop so
    // that new_lamport can be recomputed based on this new
    // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    // After the loop, both the global clock and our thread-local
    // copy hold the agreed-upon maximum
    thread_lamport = new_lamport;
}
  382. // Queue up data to the peer or to the server
  383. void MPCTIO::queue_peer(const void *data, size_t len)
  384. {
  385. if (mpcio.player < 2) {
  386. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  387. size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
  388. mpcpio.msgs_sent[thread_num] += newmsg;
  389. mpcpio.msg_bytes_sent[thread_num] += len;
  390. }
  391. }
  392. void MPCTIO::queue_server(const void *data, size_t len)
  393. {
  394. if (mpcio.player < 2) {
  395. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  396. size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
  397. mpcpio.msgs_sent[thread_num] += newmsg;
  398. mpcpio.msg_bytes_sent[thread_num] += len;
  399. }
  400. }
  401. // Receive data from the peer or to the server
  402. size_t MPCTIO::recv_peer(void *data, size_t len)
  403. {
  404. if (mpcio.player < 2) {
  405. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  406. return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
  407. }
  408. return 0;
  409. }
  410. size_t MPCTIO::recv_server(void *data, size_t len)
  411. {
  412. if (mpcio.player < 2) {
  413. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  414. return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
  415. }
  416. return 0;
  417. }
  418. // Queue up data to p0 or p1
  419. void MPCTIO::queue_p0(const void *data, size_t len)
  420. {
  421. if (mpcio.player == 2) {
  422. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  423. size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
  424. mpcsrvio.msgs_sent[thread_num] += newmsg;
  425. mpcsrvio.msg_bytes_sent[thread_num] += len;
  426. }
  427. }
  428. void MPCTIO::queue_p1(const void *data, size_t len)
  429. {
  430. if (mpcio.player == 2) {
  431. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  432. size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
  433. mpcsrvio.msgs_sent[thread_num] += newmsg;
  434. mpcsrvio.msg_bytes_sent[thread_num] += len;
  435. }
  436. }
  437. // Receive data from p0 or p1
  438. size_t MPCTIO::recv_p0(void *data, size_t len)
  439. {
  440. if (mpcio.player == 2) {
  441. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  442. return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
  443. }
  444. return 0;
  445. }
  446. size_t MPCTIO::recv_p1(void *data, size_t len)
  447. {
  448. if (mpcio.player == 2) {
  449. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  450. return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
  451. }
  452. return 0;
  453. }
  454. // Send all queued data for this thread
  455. void MPCTIO::send()
  456. {
  457. if (mpcio.player < 2) {
  458. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  459. mpcpio.peerios[thread_num].send();
  460. mpcpio.serverios[thread_num].send();
  461. } else {
  462. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  463. mpcsrvio.p0ios[thread_num].send();
  464. mpcsrvio.p1ios[thread_num].send();
  465. }
  466. }
// Functions to get precomputed values.  If we're in the online
// phase, get them from PreCompStorage.  If we're in the
// preprocessing phase, read them from the server.

// Get this party's share of a multiplication triple.  During
// preprocessing the server generates both parties' shares and queues
// them; the server's own return value is just default-constructed.
MultTriple MPCTIO::triple()
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.triples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        // Solve for the one non-random component so that the triple
        // relation above holds
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
    }
    return val;
}
// Get this party's share of a half-triple.  P0 holds (X0,Z0) and P1
// holds (Y1,Z1); during preprocessing the server generates and queues
// both, and its own return value is just default-constructed.
HalfTriple MPCTIO::halftriple()
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        // Solve for the one non-random component so that the
        // relation above holds
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
    }
    return val;
}
// Get this party's share of a select triple (a bit X and DPFnode
// values Y and Z).  Select triples are only delivered over the
// network during preprocessing; reading one in the online phase is
// unsupported and just prints an error (val is returned as-is).
SelectTriple MPCTIO::selecttriple()
{
    SelectTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            // X is sent as a whole byte; only its low bit is the share
            uint8_t Xbyte;
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple in online phase\n";
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        // Solve for the one non-random component so that the
        // relation above holds
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
    }
    return val;
}
  563. RDPFTriple MPCTIO::rdpftriple(nbits_t depth)
  564. {
  565. RDPFTriple val;
  566. if (!mpcio.preprocessing && mpcio.player <= 2) {
  567. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  568. mpcpio.rdpftriples[thread_num][depth-1].get(val);
  569. }
  570. return val;
  571. }
  572. RDPFPair MPCTIO::rdpfpair(nbits_t depth)
  573. {
  574. RDPFPair val;
  575. if (!mpcio.preprocessing && mpcio.player == 2) {
  576. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  577. mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
  578. }
  579. return val;
  580. }
// TCP port numbers for the three pairwise connections; the naming
// convention is "connector -> listener"
// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;

// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;

// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
// Establish the network connections for a computational player (P0 or
// P1): on return, peersocks and serversocks each hold num_threads
// connected sockets, indexed by thread number.  P0 listens for both
// P1 and P2; P1 connects to P0 and listens for P2.  Each connecting
// side announces its thread number as the first 2 bytes so the
// accepting side can put the socket in the right slot.
void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr,  // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        // Preallocate one (unconnected) slot per thread; accepted
        // sockets are move-assigned into the slot they announce
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                // NOTE(review): the bad socket is dropped here and its
                // intended slot stays unconnected
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        // Server slots are preallocated as above; peer sockets are
        // created in thread order, so push_back keeps them indexed
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            // Retry until P0 is listening
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}
// Establish the server's (P2's) network connections: num_threads
// sockets to P0 and num_threads sockets to P1, each announcing its
// thread number as the first 2 bytes written.  Connections are
// retried once per second until the target player is listening.
void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        // Retry until P0 is listening
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        // Retry until P1 is listening
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}