// mpcio.cpp
  1. #include "mpcio.hpp"
  2. #include "rdpf.hpp"
  3. #include "bitutils.hpp"
// Construct a PreCompStorage and open its backing file for this
// (player, thread) pair; forwards to init() with the default
// (zero) depth, i.e., non-RDPF storage.
template<typename T>
PreCompStorage<T>::PreCompStorage(unsigned player, bool preprocessing,
    const char *filenameprefix, unsigned thread_num) {
    init(player, preprocessing, filenameprefix, thread_num);
}
  9. template<typename T>
  10. void PreCompStorage<T>::init(unsigned player, bool preprocessing,
  11. const char *filenameprefix, unsigned thread_num, nbits_t depth) {
  12. if (preprocessing) return;
  13. std::string filename(filenameprefix);
  14. char suffix[20];
  15. if (depth) {
  16. sprintf(suffix, "%02d.p%d.t%u", depth, player%10, thread_num);
  17. } else {
  18. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  19. }
  20. filename.append(suffix);
  21. storage.open(filename);
  22. // It's OK if files for not every depth exist
  23. if (!depth && storage.fail()) {
  24. std::cerr << "Failed to open " << filename << "\n";
  25. exit(1);
  26. }
  27. count = 0;
  28. }
// Read the next precomputed value from the storage file into
// nextval, and count it for the stats report.  Running out of (or
// failing to read) precomputed values is fatal.
template<typename T>
void PreCompStorage<T>::get(T& nextval) {
    storage >> nextval;
    if (!storage.good()) {
        std::cerr << "Failed to read precomputed value from storage\n";
        exit(1);
    }
    ++count;
}
// Launch an async_write of the message at the head of the message
// queue.  The caller must hold messagequeuelock and the queue must
// be nonempty.  The completion handler pops the sent message and,
// if more messages remain, chains the next async_write, so at most
// one write is in flight at a time and messages go out in queue
// order.  NOTE(review): the handler ignores ec, so a write error is
// silently treated as completion — confirm this is intended.
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    // Send the (length, lamport) header and the message body as a
    // single gathered write
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            // Retake the lock before touching the queue; the
            // handler runs later, from the io_context
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
  60. size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
  61. {
  62. // Is this a new message?
  63. size_t newmsg = 0;
  64. dataqueue.append((const char *)data, len);
  65. // If this is the first queue() since the last explicit send(),
  66. // which we'll know because message_lamport will be nullopt, set
  67. // message_lamport to the current Lamport clock. Note that the
  68. // boolean test tests whether message_lamport is nullopt, not
  69. // whether its value is zero.
  70. if (!message_lamport) {
  71. message_lamport = lamport;
  72. newmsg = 1;
  73. }
  74. // If we already have some full packets worth of data, may as
  75. // well send it.
  76. if (dataqueue.size() > 28800) {
  77. send(true);
  78. }
  79. return newmsg;
  80. }
// Flush the queued data, if any, as a single message on the wire.
// implicit_send is true when called internally from queue()
// (because enough data accumulated) and false for an explicit
// send(); the distinction only affects when message_lamport is
// reset so that the next queue() starts a fresh message.
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    // Sends are recorded as positive sizes in the I/O trace
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it.  (If the queue was already
    // nonempty, the in-flight write's handler will chain to it.)
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
// Receive exactly len bytes into data, blocking until they all
// arrive.  When SEND_LAMPORT_CLOCKS is enabled, messages are framed
// with a (length, lamport) header, and the caller's lamport clock
// is advanced per the Lamport rule; a single recv() may span
// message boundaries.  Returns the number of bytes received.
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        // If the current message is exhausted, read headers until
        // we get a message with a nonempty body
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            // Lamport rule on receive: local clock becomes
            // max(local, received + 1)
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        // Copy out as much of the current message as the caller
        // asked for; the unread portion is the tail of recvdata
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef RECORD_IOTRACE
    // Receives are recorded as negative sizes in the I/O trace
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
  163. #ifdef RECORD_IOTRACE
  164. void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
  165. {
  166. if (label) {
  167. os << label << " ";
  168. }
  169. os << "IO trace:";
  170. for (auto& s: iotrace) {
  171. os << " " << s;
  172. }
  173. os << "\n";
  174. }
  175. #endif
  176. void MPCIO::reset_stats()
  177. {
  178. msgs_sent.clear();
  179. msg_bytes_sent.clear();
  180. aes_ops.clear();
  181. for (size_t i=0; i<num_threads; ++i) {
  182. msgs_sent.push_back(0);
  183. msg_bytes_sent.push_back(0);
  184. aes_ops.push_back(0);
  185. }
  186. steady_start = boost::chrono::steady_clock::now();
  187. cpu_start = boost::chrono::process_cpu_clock::now();
  188. }
  189. void MPCIO::dump_stats(std::ostream &os)
  190. {
  191. size_t tot_msgs_sent = 0;
  192. size_t tot_msg_bytes_sent = 0;
  193. size_t tot_aes_ops = 0;
  194. for (auto& n : msgs_sent) {
  195. tot_msgs_sent += n;
  196. }
  197. for (auto& n : msg_bytes_sent) {
  198. tot_msg_bytes_sent += n;
  199. }
  200. for (auto& n : aes_ops) {
  201. tot_aes_ops += n;
  202. }
  203. auto steady_elapsed =
  204. boost::chrono::steady_clock::now() - steady_start;
  205. auto cpu_elapsed =
  206. boost::chrono::process_cpu_clock::now() - cpu_start;
  207. os << tot_msgs_sent << " messages sent\n";
  208. os << tot_msg_bytes_sent << " message bytes sent\n";
  209. os << tot_aes_ops << " local AES operations\n";
  210. os << lamport << " Lamport clock (latencies)\n";
  211. os << boost::chrono::duration_cast
  212. <boost::chrono::milliseconds>(steady_elapsed) <<
  213. " wall clock time\n";
  214. os << cpu_elapsed << " {real;user;system}\n";
  215. }
  216. MPCPeerIO::MPCPeerIO(unsigned player, bool preprocessing,
  217. std::deque<tcp::socket> &peersocks,
  218. std::deque<tcp::socket> &serversocks) :
  219. MPCIO(player, preprocessing, peersocks.size())
  220. {
  221. unsigned num_threads = unsigned(peersocks.size());
  222. for (unsigned i=0; i<num_threads; ++i) {
  223. triples.emplace_back(player, preprocessing, "triples", i);
  224. }
  225. for (unsigned i=0; i<num_threads; ++i) {
  226. halftriples.emplace_back(player, preprocessing, "halves", i);
  227. }
  228. rdpftriples.resize(num_threads);
  229. for (unsigned i=0; i<num_threads; ++i) {
  230. for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  231. rdpftriples[i][depth-1].init(player, preprocessing,
  232. "rdpf", i, depth);
  233. }
  234. }
  235. for (auto &&sock : peersocks) {
  236. peerios.emplace_back(std::move(sock));
  237. }
  238. for (auto &&sock : serversocks) {
  239. serverios.emplace_back(std::move(sock));
  240. }
  241. }
  242. void MPCPeerIO::dump_precomp_stats(std::ostream &os)
  243. {
  244. for (size_t i=0; i<triples.size(); ++i) {
  245. if (i > 0) {
  246. os << " ";
  247. }
  248. os << "T" << i << " t:" << triples[i].get_stats() <<
  249. " h:" << halftriples[i].get_stats();
  250. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  251. size_t cnt = rdpftriples[i][depth-1].get_stats();
  252. if (cnt > 0) {
  253. os << " r" << int(depth) << ":" << cnt;
  254. }
  255. }
  256. }
  257. os << "\n";
  258. }
  259. void MPCPeerIO::reset_precomp_stats()
  260. {
  261. for (size_t i=0; i<triples.size(); ++i) {
  262. triples[i].reset_stats();
  263. halftriples[i].reset_stats();
  264. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  265. rdpftriples[i][depth-1].reset_stats();
  266. }
  267. }
  268. }
  269. void MPCPeerIO::dump_stats(std::ostream &os)
  270. {
  271. MPCIO::dump_stats(os);
  272. os << "Precomputed values used: ";
  273. dump_precomp_stats(os);
  274. }
// Set up the server (player 2): open the precomputed per-depth
// RDPF-pair storage for each communication thread, then take
// ownership of the sockets to P0 and to P1.
MPCServerIO::MPCServerIO(bool preprocessing,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, preprocessing, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        // One storage per (thread, depth) combination
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    for (auto &&sock : p0socks) {
        p0ios.emplace_back(std::move(sock));
    }
    for (auto &&sock : p1socks) {
        p1ios.emplace_back(std::move(sock));
    }
}
  294. void MPCServerIO::dump_precomp_stats(std::ostream &os)
  295. {
  296. for (size_t i=0; i<rdpfpairs.size(); ++i) {
  297. if (i > 0) {
  298. os << " ";
  299. }
  300. os << "T" << i;
  301. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  302. size_t cnt = rdpfpairs[i][depth-1].get_stats();
  303. if (cnt > 0) {
  304. os << " r" << int(depth) << ":" << cnt;
  305. }
  306. }
  307. }
  308. os << "\n";
  309. }
  310. void MPCServerIO::reset_precomp_stats()
  311. {
  312. for (size_t i=0; i<rdpfpairs.size(); ++i) {
  313. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  314. rdpfpairs[i][depth-1].reset_stats();
  315. }
  316. }
  317. }
  318. void MPCServerIO::dump_stats(std::ostream &os)
  319. {
  320. MPCIO::dump_stats(os);
  321. os << "Precomputed values used: ";
  322. dump_precomp_stats(os);
  323. }
// Per-thread I/O handle.  A computational player (player < 2) gets
// iostreams to its peer and to the server; the server (player 2)
// gets iostreams to P0 and to P1.  The thread's Lamport clock is
// seeded from the shared mpcio clock.  The static_casts rely on the
// invariant that player < 2 implies mpcio is an MPCPeerIO and
// otherwise it is an MPCServerIO.
MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num) :
    thread_num(thread_num), thread_lamport(mpcio.lamport),
    mpcio(mpcio)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}
// Sync our per-thread lamport clock with the master one in the
// mpcio. You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before. We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
        // The next line atomically checks if lamport still has
        // the value old_lamport; if so, it changes its value to
        // new_lamport and returns true (ending the loop). If
        // not, it sets old_lamport to the current value of
        // lamport, and returns false (continuing the loop so
        // that new_lamport can be recomputed based on this new
        // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    // Our thread clock also adopts the agreed maximum
    thread_lamport = new_lamport;
}
  375. // Queue up data to the peer or to the server
  376. void MPCTIO::queue_peer(const void *data, size_t len)
  377. {
  378. if (mpcio.player < 2) {
  379. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  380. size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
  381. mpcpio.msgs_sent[thread_num] += newmsg;
  382. mpcpio.msg_bytes_sent[thread_num] += len;
  383. }
  384. }
  385. void MPCTIO::queue_server(const void *data, size_t len)
  386. {
  387. if (mpcio.player < 2) {
  388. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  389. size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
  390. mpcpio.msgs_sent[thread_num] += newmsg;
  391. mpcpio.msg_bytes_sent[thread_num] += len;
  392. }
  393. }
  394. // Receive data from the peer or to the server
  395. size_t MPCTIO::recv_peer(void *data, size_t len)
  396. {
  397. if (mpcio.player < 2) {
  398. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  399. return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
  400. }
  401. return 0;
  402. }
  403. size_t MPCTIO::recv_server(void *data, size_t len)
  404. {
  405. if (mpcio.player < 2) {
  406. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  407. return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
  408. }
  409. return 0;
  410. }
  411. // Queue up data to p0 or p1
  412. void MPCTIO::queue_p0(const void *data, size_t len)
  413. {
  414. if (mpcio.player == 2) {
  415. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  416. size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
  417. mpcsrvio.msgs_sent[thread_num] += newmsg;
  418. mpcsrvio.msg_bytes_sent[thread_num] += len;
  419. }
  420. }
  421. void MPCTIO::queue_p1(const void *data, size_t len)
  422. {
  423. if (mpcio.player == 2) {
  424. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  425. size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
  426. mpcsrvio.msgs_sent[thread_num] += newmsg;
  427. mpcsrvio.msg_bytes_sent[thread_num] += len;
  428. }
  429. }
  430. // Receive data from p0 or p1
  431. size_t MPCTIO::recv_p0(void *data, size_t len)
  432. {
  433. if (mpcio.player == 2) {
  434. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  435. return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
  436. }
  437. return 0;
  438. }
  439. size_t MPCTIO::recv_p1(void *data, size_t len)
  440. {
  441. if (mpcio.player == 2) {
  442. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  443. return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
  444. }
  445. return 0;
  446. }
  447. // Send all queued data for this thread
  448. void MPCTIO::send()
  449. {
  450. if (mpcio.player < 2) {
  451. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  452. mpcpio.peerios[thread_num].send();
  453. mpcpio.serverios[thread_num].send();
  454. } else {
  455. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  456. mpcsrvio.p0ios[thread_num].send();
  457. mpcsrvio.p1ios[thread_num].send();
  458. }
  459. }
// Functions to get precomputed values. If we're in the online
// phase, get them from PreCompStorage. If we're in the
// preprocessing phase, read them from the server.
MultTriple MPCTIO::triple()
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            // In preprocessing, our share of the triple comes from
            // the server
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.triples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        // Choose everything but Z1 uniformly at random, then solve
        // for Z1 so the relation above holds
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        // Send each player its share of the triple
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
    }
    // Note: the server returns a default-constructed val
    return val;
}
// Get a half triple: P0 holds (X0,Z0) and P1 holds (Y1,Z1) with
// X0*Y1 = Z0+Z1.  Online phase: read from PreCompStorage;
// preprocessing phase: the computational players receive their
// shares from the server, and the server generates and sends them.
HalfTriple MPCTIO::halftriple()
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        // Choose everything but Z1 at random, then solve for Z1
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
    }
    // Note: the server returns a default-constructed val
    return val;
}
// Get a select triple: shares (X,Y,Z) of bit X and DPFnodes Y, Z
// with (X0*Y1 ^ Y0*X1) = (Z0^Z1).  These are only available in the
// preprocessing phase: the computational players receive their
// shares from the server, and the server generates and sends them.
SelectTriple MPCTIO::selecttriple()
{
    SelectTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            // X is sent as a whole byte; only its low bit is the share
            uint8_t Xbyte;
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple in online phase\n";
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        // Choose everything but Z1 at random, then solve for Z1
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
    }
    // Note: the server returns a default-constructed val
    return val;
}
  556. RDPFTriple MPCTIO::rdpftriple(nbits_t depth)
  557. {
  558. RDPFTriple val;
  559. if (!mpcio.preprocessing && mpcio.player <= 2) {
  560. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  561. mpcpio.rdpftriples[thread_num][depth-1].get(val);
  562. }
  563. return val;
  564. }
  565. RDPFPair MPCTIO::rdpfpair(nbits_t depth)
  566. {
  567. RDPFPair val;
  568. if (!mpcio.preprocessing && mpcio.player == 2) {
  569. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  570. mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
  571. }
  572. return val;
  573. }
// TCP ports for the three pairwise connections; in each case the
// lower-numbered player listens and the higher-numbered one connects.
// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;
// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;
// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
// Establish the sockets for a computational player.  P0 listens for
// P1 and P2; P1 connects to P0 and listens for P2.  One socket per
// communication thread in each direction; since incoming
// connections can arrive in any order, each connector writes its
// 2-byte thread number so the acceptor can place the socket in the
// right slot.  On return, peersocks/serversocks are indexed by
// thread number.
void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr, // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        // Pre-fill both deques with placeholder (unconnected)
        // sockets so accepted sockets can be move-assigned by index
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                // Best-effort: the bad socket is dropped and that
                // slot keeps its unconnected placeholder
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        // Placeholders only for the accepted (P2) side; the P0 side
        // is filled in connection order, which is thread order
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            // Retry until P0 is up and listening
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}
  668. void mpcio_setup_server(boost::asio::io_context &io_context,
  669. const char *p0addr, const char *p1addr, int num_threads,
  670. std::deque<tcp::socket> &p0socks,
  671. std::deque<tcp::socket> &p1socks)
  672. {
  673. // Make connections to P0 and P1
  674. tcp::resolver resolver(io_context);
  675. boost::system::error_code err;
  676. p0socks.clear();
  677. p1socks.clear();
  678. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  679. tcp::socket p0sock(io_context);
  680. while(1) {
  681. boost::asio::connect(p0sock,
  682. resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
  683. if (!err) break;
  684. std::cerr << "Connection to p0 refused, will retry.\n";
  685. sleep(1);
  686. }
  687. // Write 2 bytes to the socket indicating which thread
  688. // number this socket is for
  689. boost::asio::write(p0sock,
  690. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  691. p0socks.push_back(std::move(p0sock));
  692. }
  693. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  694. tcp::socket p1sock(io_context);
  695. while(1) {
  696. boost::asio::connect(p1sock,
  697. resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
  698. if (!err) break;
  699. std::cerr << "Connection to p1 refused, will retry.\n";
  700. sleep(1);
  701. }
  702. // Write 2 bytes to the socket indicating which thread
  703. // number this socket is for
  704. boost::asio::write(p1sock,
  705. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  706. p1socks.push_back(std::move(p1sock));
  707. }
  708. }