// mpcio.cpp

#include <sys/time.h> // getrusage
#include <sys/resource.h> // getrusage

#include "mpcio.hpp"
#include "rdpf.hpp"
#include "bitutils.hpp"

// T is the type being stored
// N is a type whose "name" static member is a string naming the type
// so that we can report something useful to the user if they try
// to read a type that we don't have any more values for
template<typename T, typename N>
PreCompStorage<T,N>::PreCompStorage(unsigned player, bool preprocessing,
        const char *filenameprefix, unsigned thread_num) :
    name(N::name), depth(0)
{
    init(player, preprocessing, filenameprefix, thread_num);
}

template<typename T, typename N>
void PreCompStorage<T,N>::init(unsigned player, bool preprocessing,
        const char *filenameprefix, unsigned thread_num, nbits_t depth)
{
    if (preprocessing) return;
    std::string filename(filenameprefix);
    char suffix[20];
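    // The full filename is the given prefix followed by a suffix
    // encoding the optional depth, the player number, and the thread
    // number; for example, "rdpf05.p0.t1" or "triples.p1.t3"
    // (illustrative values, given the prefixes used below).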
    if (depth) {
        this->depth = depth;
        sprintf(suffix, "%02d.p%d.t%u", depth, player%10, thread_num);
    } else {
        sprintf(suffix, ".p%d.t%u", player%10, thread_num);
    }
    filename.append(suffix);
    storage.open(filename);
    // It's OK if not every file exists; so don't worry about checking
    // for errors here.  We'll report an error in get() if we actually
    // try to use a value for which we don't have a precomputed file.
    count = 0;
}

template<typename T, typename N>
void PreCompStorage<T,N>::get(T& nextval)
{
    storage >> nextval;
    if (!storage.good()) {
        std::cerr << "Failed to read precomputed value from " << name;
        if (depth) {
            std::cerr << (int)depth;
        }
        std::cerr << " storage\n";
        exit(1);
    }
    ++count;
}

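// Launch the async_write for the message at the head of the message
// queue.  This is only ever called with the queue non-empty: from
// send() when the first message is enqueued, and from the completion
// handler below when further messages remain after the finished one
// is popped.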
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}

size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock.  Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

    // If we already have some full packets worth of data, may as
    // well send it.
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}

void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();
    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif
    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}

size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef RECORD_IOTRACE
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}

#ifdef RECORD_IOTRACE
void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
{
    if (label) {
        os << label << " ";
    }
    os << "IO trace:";
    for (auto& s: iotrace) {
        os << " " << s;
    }
    os << "\n";
}
#endif

void MPCIO::reset_stats()
{
    msgs_sent.clear();
    msg_bytes_sent.clear();
    aes_ops.clear();
    for (size_t i=0; i<num_threads; ++i) {
        msgs_sent.push_back(0);
        msg_bytes_sent.push_back(0);
        aes_ops.push_back(0);
    }
    steady_start = boost::chrono::steady_clock::now();
    cpu_start = boost::chrono::process_cpu_clock::now();
}

// Report the memory usage
void MPCIO::dump_memusage(std::ostream &os)
{
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    os << "Mem: " << ru.ru_maxrss << " KiB\n";
}

void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << tot_aes_ops << " local AES operations\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
    dump_memusage(os);
}

MPCPeerIO::MPCPeerIO(unsigned player, bool preprocessing,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, preprocessing, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        triples.emplace_back(player, preprocessing, "triples", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, preprocessing, "halves", i);
    }
    rdpftriples.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    for (auto &&sock : peersocks) {
        peerios.emplace_back(std::move(sock));
    }
    for (auto &&sock : serversocks) {
        serverios.emplace_back(std::move(sock));
    }
}

void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<triples.size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i << " t:" << triples[i].get_stats() <<
            " h:" << halftriples[i].get_stats();
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpftriples[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
    }
    os << "\n";
}

void MPCPeerIO::reset_precomp_stats()
{
    for (size_t i=0; i<triples.size(); ++i) {
        triples[i].reset_stats();
        halftriples[i].reset_stats();
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].reset_stats();
        }
    }
}

void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCServerIO::MPCServerIO(bool preprocessing,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, preprocessing, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    for (auto &&sock : p0socks) {
        p0ios.emplace_back(std::move(sock));
    }
    for (auto &&sock : p1socks) {
        p1ios.emplace_back(std::move(sock));
    }
}

void MPCServerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpfpairs[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
    }
    os << "\n";
}

void MPCServerIO::reset_precomp_stats()
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].reset_stats();
        }
    }
}

void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num) :
    thread_num(thread_num), thread_lamport(mpcio.lamport),
    mpcio(mpcio)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}

// Sync our per-thread lamport clock with the master one in the
// mpcio.  You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before.  We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
    // The next line atomically checks if lamport still has
    // the value old_lamport; if so, it changes its value to
    // new_lamport and returns true (ending the loop).  If
    // not, it sets old_lamport to the current value of
    // lamport, and returns false (continuing the loop so
    // that new_lamport can be recomputed based on this new
    // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    thread_lamport = new_lamport;
}

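// A minimal usage sketch for the interthread-communication case
// described above (illustrative only; "tio_a", "tio_b", and
// "shared_queue" are hypothetical names, not part of this file):
//
//   // In the sending thread:
//   tio_a.sync_lamport();            // before handing data to the other thread
//   shared_queue.push(item);
//
//   // In the receiving thread:
//   auto item = shared_queue.pop();
//   tio_b.sync_lamport();            // after receiving from the other thread
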
// Queue up data to the peer or to the server
void MPCTIO::queue_peer(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_server(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from the peer or from the server
size_t MPCTIO::recv_peer(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_server(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Queue up data to p0 or p1
void MPCTIO::queue_p0(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_p1(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from p0 or p1
size_t MPCTIO::recv_p0(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_p1(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Send all queued data for this thread
void MPCTIO::send()
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.peerios[thread_num].send();
        mpcpio.serverios[thread_num].send();
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.p0ios[thread_num].send();
        mpcsrvio.p1ios[thread_num].send();
    }
}

// Functions to get precomputed values.  If we're in the online
// phase, get them from PreCompStorage.  If we're in the
// preprocessing phase, read them from the server.
MultTriple MPCTIO::triple()
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.triples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
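        // By construction, Z0 + Z1 = X0*Y1 + X1*Y0, with all
        // arithmetic in the (wrapping) unsigned value_t type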
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
    }
    return val;
}

HalfTriple MPCTIO::halftriple()
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            recv_server(&val, sizeof(val));
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
    }
    return val;
}

SelectTriple MPCTIO::selecttriple()
{
    SelectTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            uint8_t Xbyte;
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple in online phase\n";
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
    }
    return val;
}

RDPFTriple MPCTIO::rdpftriple(nbits_t depth)
{
    RDPFTriple val;
    if (!mpcio.preprocessing && mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.rdpftriples[thread_num][depth-1].get(val);
    }
    return val;
}

RDPFPair MPCTIO::rdpfpair(nbits_t depth)
{
    RDPFPair val;
    if (!mpcio.preprocessing && mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
    }
    return val;
}

// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;

// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;

// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;

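// Connection setup: P1 connects to P0 on port_p1_p0, and P2 (the
// server) connects to P0 on port_p2_p0 and to P1 on port_p2_p1.  One
// connection is made per thread, and the connecting side writes a
// 2-byte thread number on each socket so the accepting side can slot
// it into the right place.
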
void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr,  // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}

void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}