mpcio.cpp 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810
#include <sys/time.h> // getrusage
#include <sys/resource.h> // getrusage
#include <cstdio> // snprintf
#include "mpcio.hpp"
#include "rdpf.hpp"
#include "cdpf.hpp"
#include "bitutils.hpp"
  7. // T is the type being stored
  8. // N is a type whose "name" static member is a string naming the type
  9. // so that we can report something useful to the user if they try
  10. // to read a type that we don't have any more values for
// Constructor: delegate to init() with the default depth (0), which
// selects the non-depth-qualified filename format.
template<typename T, typename N>
PreCompStorage<T,N>::PreCompStorage(unsigned player, bool preprocessing,
        const char *filenameprefix, unsigned thread_num) :
    name(N::name), depth(0)
{
    init(player, preprocessing, filenameprefix, thread_num);
}
  18. template<typename T, typename N>
  19. void PreCompStorage<T,N>::init(unsigned player, bool preprocessing,
  20. const char *filenameprefix, unsigned thread_num, nbits_t depth)
  21. {
  22. if (preprocessing) return;
  23. std::string filename(filenameprefix);
  24. char suffix[20];
  25. if (depth) {
  26. this->depth = depth;
  27. sprintf(suffix, "%02d.p%d.t%u", depth, player%10, thread_num);
  28. } else {
  29. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  30. }
  31. filename.append(suffix);
  32. storage.open(filename);
  33. // It's OK if not every file exists; so don't worry about checking
  34. // for errors here. We'll report an error in get() if we actually
  35. // try to use a value for which we don't have a precomputed file.
  36. count = 0;
  37. }
// Read the next precomputed value of type T from the storage file,
// terminating the whole program if the file is missing or exhausted.
template<typename T, typename N>
void PreCompStorage<T,N>::get(T& nextval)
{
    storage >> nextval;
    if (!storage.good()) {
        std::cerr << "Failed to read precomputed value from " << name;
        if (depth) {
            // Depth-qualified storages include the depth in the report
            std::cerr << (int)depth;
        }
        std::cerr << " storage\n";
        exit(1);
    }
    // Tally of values consumed, reported via get_stats()
    ++count;
}
// Launch an async_write of the message at the front of the message
// queue.  Callers hold messagequeuelock and the queue is nonempty
// when this is invoked.  The completion handler pops the sent
// message and, if more messages were queued in the meantime, chains
// the next write, so at most one write is in flight at a time.
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    // With Lamport clocks enabled, each message is a (header, payload)
    // pair sent in a single gathered write
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            // Pop the message we just finished sending, and chain the
            // next write if anything else is waiting
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
// Append len bytes of data to the outgoing data queue.  lamport is
// the current Lamport clock, recorded as the timestamp of the message
// being assembled.  Returns 1 if this call starts a new message (the
// first queue() since the last explicit send()), else 0.
size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock. Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

    // If we already have some full packets worth of data, may as
    // well send it.
    // NOTE(review): 28800 is presumably ~20 full TCP segments of
    // payload -- confirm the intended derivation of this threshold
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}
// Move the accumulated contents of dataqueue onto the message queue
// and, if no write is already in flight, start an async_write.
// implicit_send is true when called from queue() flushing a full
// buffer, false for an explicit flush; an explicit flush also resets
// message_lamport so the next queue() starts a fresh message.
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    // Positive trace entries record send sizes
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
// Receive exactly len bytes into data, blocking until they arrive.
// lamport is raised to one more than the received message's Lamport
// timestamp if that is larger than its current value (the standard
// Lamport receive rule).  Returns the number of bytes read.
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        // If no buffered payload remains, read the next framed
        // message: a 4-byte length followed by the sender's Lamport
        // timestamp, then the payload itself
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                // Buffer the whole payload; it is consumed
                // incrementally below, tracked by recvdataremain
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        // Copy out as much of the buffered payload as the caller
        // still wants
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef RECORD_IOTRACE
    // Negative trace entries record receive sizes
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
  177. #ifdef RECORD_IOTRACE
  178. void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
  179. {
  180. if (label) {
  181. os << label << " ";
  182. }
  183. os << "IO trace:";
  184. for (auto& s: iotrace) {
  185. os << " " << s;
  186. }
  187. os << "\n";
  188. }
  189. #endif
  190. void MPCIO::reset_stats()
  191. {
  192. msgs_sent.clear();
  193. msg_bytes_sent.clear();
  194. aes_ops.clear();
  195. for (size_t i=0; i<num_threads; ++i) {
  196. msgs_sent.push_back(0);
  197. msg_bytes_sent.push_back(0);
  198. aes_ops.push_back(0);
  199. }
  200. steady_start = boost::chrono::steady_clock::now();
  201. cpu_start = boost::chrono::process_cpu_clock::now();
  202. }
// Report the memory usage (peak resident set size) of this process
void MPCIO::dump_memusage(std::ostream &os)
{
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    // NOTE(review): ru_maxrss is in KiB on Linux but in bytes on
    // macOS -- the "KiB" label assumes a Linux target; confirm
    os << "Mem: " << ru.ru_maxrss << " KiB\n";
}
  210. void MPCIO::dump_stats(std::ostream &os)
  211. {
  212. size_t tot_msgs_sent = 0;
  213. size_t tot_msg_bytes_sent = 0;
  214. size_t tot_aes_ops = 0;
  215. for (auto& n : msgs_sent) {
  216. tot_msgs_sent += n;
  217. }
  218. for (auto& n : msg_bytes_sent) {
  219. tot_msg_bytes_sent += n;
  220. }
  221. for (auto& n : aes_ops) {
  222. tot_aes_ops += n;
  223. }
  224. auto steady_elapsed =
  225. boost::chrono::steady_clock::now() - steady_start;
  226. auto cpu_elapsed =
  227. boost::chrono::process_cpu_clock::now() - cpu_start;
  228. os << tot_msgs_sent << " messages sent\n";
  229. os << tot_msg_bytes_sent << " message bytes sent\n";
  230. os << lamport << " Lamport clock (latencies)\n";
  231. os << tot_aes_ops << " local AES operations\n";
  232. os << boost::chrono::duration_cast
  233. <boost::chrono::milliseconds>(steady_elapsed) <<
  234. " wall clock time\n";
  235. os << cpu_elapsed << " {real;user;system}\n";
  236. dump_memusage(os);
  237. }
// Construct the I/O state for a computational player (0 or 1):
// per-thread precomputed-value storages (multiplication triples, half
// triples, RDPF triples at each depth, and CDPFs), and per-thread
// socket wrappers for the connections to the peer and to the server.
MPCPeerIO::MPCPeerIO(unsigned player, bool preprocessing,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, preprocessing, peersocks.size())
{
    // Local copy of the thread count (the same value was passed to
    // the MPCIO base above)
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        triples.emplace_back(player, preprocessing, "triples", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, preprocessing, "halves", i);
    }
    rdpftriples.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        // One RDPF-triple storage per depth from 1 to ADDRESS_MAX_BITS
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, preprocessing, "cdpf", i);
    }
    // Take ownership of the supplied sockets
    for (auto &&sock : peersocks) {
        peerios.emplace_back(std::move(sock));
    }
    for (auto &&sock : serversocks) {
        serverios.emplace_back(std::move(sock));
    }
}
// Print, for each thread, how many precomputed values of each kind
// have been consumed since the last reset (t: triples, h: half
// triples, r<d>: RDPF triples of depth d, c: CDPFs).  Zero RDPF and
// CDPF counts are omitted to keep the line short.
void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<triples.size(); ++i) {
        // Separate per-thread reports with a space
        if (i > 0) {
            os << " ";
        }
        os << "T" << i << " t:" << triples[i].get_stats() <<
            " h:" << halftriples[i].get_stats();
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpftriples[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
        size_t ccnt = cdpfs[i].get_stats();
        if (ccnt > 0) {
            os << " c:" << ccnt;
        }
    }
    os << "\n";
}
  288. void MPCPeerIO::reset_precomp_stats()
  289. {
  290. for (size_t i=0; i<triples.size(); ++i) {
  291. triples[i].reset_stats();
  292. halftriples[i].reset_stats();
  293. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  294. rdpftriples[i][depth-1].reset_stats();
  295. }
  296. }
  297. }
// Dump the generic MPCIO statistics, followed by the counts of
// precomputed values consumed.
void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Construct the I/O state for the server (player 2): per-thread RDPF
// pair storages at each depth, and per-thread socket wrappers for the
// connections to P0 and P1.
MPCServerIO::MPCServerIO(bool preprocessing,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, preprocessing, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        // One RDPF-pair storage per depth from 1 to ADDRESS_MAX_BITS
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, preprocessing,
                "rdpf", i, depth);
        }
    }
    // Take ownership of the supplied sockets
    for (auto &&sock : p0socks) {
        p0ios.emplace_back(std::move(sock));
    }
    for (auto &&sock : p1socks) {
        p1ios.emplace_back(std::move(sock));
    }
}
// Print, for each thread, how many RDPF pairs of each depth have been
// consumed since the last reset (r<d>: pairs of depth d); zero counts
// are omitted.
void MPCServerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        // Separate per-thread reports with a space
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpfpairs[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
    }
    os << "\n";
}
  339. void MPCServerIO::reset_precomp_stats()
  340. {
  341. for (size_t i=0; i<rdpfpairs.size(); ++i) {
  342. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  343. rdpfpairs[i][depth-1].reset_stats();
  344. }
  345. }
  346. }
// Dump the generic MPCIO statistics, followed by the counts of
// precomputed values consumed.
void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Set up the per-thread I/O state for thread thread_num.  A
// computational player (0 or 1) gets iostreams to its peer and to the
// server; the server (player 2) gets iostreams to each of P0 and P1.
// The thread-local Lamport clock starts as a copy of the global one.
MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num) :
    thread_num(thread_num), thread_lamport(mpcio.lamport),
    mpcio(mpcio)
{
    if (mpcio.player < 2) {
        // Players 0 and 1 were constructed as MPCPeerIO
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        // Player 2 was constructed as MPCServerIO
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}
// Sync our per-thread lamport clock with the master one in the
// mpcio. You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).  On return,
// both mpcio.lamport and thread_lamport hold the maximum of their
// two prior values.
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before. We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
    // The next line atomically checks if lamport still has
    // the value old_lamport; if so, it changes its value to
    // new_lamport and returns true (ending the loop). If
    // not, it sets old_lamport to the current value of
    // lamport, and returns false (continuing the loop so
    // that new_lamport can be recomputed based on this new
    // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    thread_lamport = new_lamport;
}
  404. // Queue up data to the peer or to the server
  405. void MPCTIO::queue_peer(const void *data, size_t len)
  406. {
  407. if (mpcio.player < 2) {
  408. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  409. size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
  410. mpcpio.msgs_sent[thread_num] += newmsg;
  411. mpcpio.msg_bytes_sent[thread_num] += len;
  412. }
  413. }
  414. void MPCTIO::queue_server(const void *data, size_t len)
  415. {
  416. if (mpcio.player < 2) {
  417. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  418. size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
  419. mpcpio.msgs_sent[thread_num] += newmsg;
  420. mpcpio.msg_bytes_sent[thread_num] += len;
  421. }
  422. }
  423. // Receive data from the peer or to the server
  424. size_t MPCTIO::recv_peer(void *data, size_t len)
  425. {
  426. if (mpcio.player < 2) {
  427. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  428. return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
  429. }
  430. return 0;
  431. }
  432. size_t MPCTIO::recv_server(void *data, size_t len)
  433. {
  434. if (mpcio.player < 2) {
  435. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  436. return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
  437. }
  438. return 0;
  439. }
  440. // Queue up data to p0 or p1
  441. void MPCTIO::queue_p0(const void *data, size_t len)
  442. {
  443. if (mpcio.player == 2) {
  444. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  445. size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
  446. mpcsrvio.msgs_sent[thread_num] += newmsg;
  447. mpcsrvio.msg_bytes_sent[thread_num] += len;
  448. }
  449. }
  450. void MPCTIO::queue_p1(const void *data, size_t len)
  451. {
  452. if (mpcio.player == 2) {
  453. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  454. size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
  455. mpcsrvio.msgs_sent[thread_num] += newmsg;
  456. mpcsrvio.msg_bytes_sent[thread_num] += len;
  457. }
  458. }
  459. // Receive data from p0 or p1
  460. size_t MPCTIO::recv_p0(void *data, size_t len)
  461. {
  462. if (mpcio.player == 2) {
  463. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  464. return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
  465. }
  466. return 0;
  467. }
  468. size_t MPCTIO::recv_p1(void *data, size_t len)
  469. {
  470. if (mpcio.player == 2) {
  471. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  472. return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
  473. }
  474. return 0;
  475. }
// Send all queued data for this thread
void MPCTIO::send()
{
    if (mpcio.player < 2) {
        // Computational players flush both the peer and the server
        // connections
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.peerios[thread_num].send();
        mpcpio.serverios[thread_num].send();
    } else {
        // The server flushes its connections to P0 and P1
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.p0ios[thread_num].send();
        mpcsrvio.p1ios[thread_num].send();
    }
}
// Functions to get precomputed values. If we're in the online
// phase, get them from PreCompStorage. If we're in the
// preprocessing phase, read them from the server.

// Get one multiplication triple.  A computational player reads its
// share (from the server in preprocessing, from local storage
// online); the server, in preprocessing mode, generates a fresh pair
// of triple shares and queues one to each player.
MultTriple MPCTIO::triple()
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            // Our share arrives from the server
            recv_server(&val, sizeof(val));
        } else {
            // Online phase: read from the local precomputed file
            mpcpio.triples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
    }
    // On the server, val is returned default-constructed
    return val;
}
// Get one half-triple.  A computational player reads its share (from
// the server in preprocessing, from local storage online); the
// server, in preprocessing mode, generates a fresh pair of
// half-triple shares and queues one to each player.
HalfTriple MPCTIO::halftriple()
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            // Our share arrives from the server
            recv_server(&val, sizeof(val));
        } else {
            // Online phase: read from the local precomputed file
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
    }
    // On the server, val is returned default-constructed
    return val;
}
// Get one select triple (a bit X and DPFnodes Y, Z).  These are only
// available in the preprocessing phase: a computational player reads
// its share from the server, and the server generates and
// distributes the shares.  There is no PreCompStorage for select
// triples, so requesting one in the online phase just reports an
// error.
SelectTriple MPCTIO::selecttriple()
{
    SelectTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            // X is transmitted as a full byte; only its low bit is
            // the share
            uint8_t Xbyte;
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple in online phase\n";
        }
    } else if (mpcio.preprocessing) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
    }
    return val;
}
  585. RDPFTriple MPCTIO::rdpftriple(nbits_t depth)
  586. {
  587. RDPFTriple val;
  588. if (!mpcio.preprocessing && mpcio.player <= 2) {
  589. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  590. mpcpio.rdpftriples[thread_num][depth-1].get(val);
  591. }
  592. return val;
  593. }
// Get one RDPF pair of the given depth from the per-thread storage.
// Only the server (player 2) has RDPF pairs, and only in the online
// phase; otherwise a default-constructed value is returned.
RDPFPair MPCTIO::rdpfpair(nbits_t depth)
{
    RDPFPair val;
    if (!mpcio.preprocessing && mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
    }
    return val;
}
// Get one CDPF.  In the online phase, a computational player reads it
// from its per-thread storage.  In the preprocessing phase, the
// server generates a pair of CDPFs and sends one to each player over
// the iostreams, and the players read theirs from the server
// iostream.
CDPF MPCTIO::cdpf()
{
    CDPF val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.preprocessing) {
            // Our CDPF arrives from the server
            iostream_server() >> val;
        } else {
            // Online phase: read from the local precomputed file
            mpcpio.cdpfs[thread_num].get(val);
        }
    } else if (mpcio.preprocessing) {
        auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
        iostream_p0() << cdpf0;
        iostream_p1() << cdpf1;
    }
    // On the server, val is returned default-constructed
    return val;
}
  620. // The port number for the P1 -> P0 connection
  621. static const unsigned short port_p1_p0 = 2115;
  622. // The port number for the P2 -> P0 connection
  623. static const unsigned short port_p2_p0 = 2116;
  624. // The port number for the P2 -> P1 connection
  625. static const unsigned short port_p2_p1 = 2117;
// Set up the sockets for a computational player (0 or 1):
// num_threads connections to the peer and num_threads connections to
// the server.  P0 listens for both P1 and P2; P1 connects out to P0
// and listens for P2.  The connecting side writes a 2-byte thread
// number on each new socket so both ends agree which thread the
// connection serves; on return, peersocks and serversocks are
// indexed by thread number.
void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr, // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        // Preallocate one (unconnected) socket per thread so accepted
        // connections can be slotted in by thread number, in whatever
        // order they arrive
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                // NOTE(review): the bad connection is dropped and its
                // slot remains an unconnected socket
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            // Retry until P0 is up and listening
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}
// Set up the sockets for the server (P2): num_threads connections to
// each of P0 and P1, retrying each connect until the player is
// listening.  A 2-byte thread number is written on each new
// connection so the accepting side can slot it by thread; on return,
// p0socks and p1socks are indexed by thread number.
void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        // Retry until P0 is up and listening
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        // Retry until P1 is up and listening
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}