// mpcio.cpp

#include <sys/time.h>      // getrusage
#include <sys/resource.h>  // getrusage
#include "mpcio.hpp"
#include "rdpf.hpp"
#include "cdpf.hpp"
#include "bitutils.hpp"
#include "coroutine.hpp"

// T is the type being stored
// N is a type whose "name" static member is a string naming the type
// so that we can report something useful to the user if they try
// to read a type that we don't have any more values for
template<typename T, typename N>
PreCompStorage<T,N>::PreCompStorage(unsigned player, ProcessingMode mode,
        const char *filenameprefix, unsigned thread_num) :
    name(N::name), depth(0)
{
    init(player, mode, filenameprefix, thread_num);
}

template<typename T, typename N>
void PreCompStorage<T,N>::init(unsigned player, ProcessingMode mode,
    const char *filenameprefix, unsigned thread_num, nbits_t depth)
{
    if (mode != MODE_ONLINE) return;
    std::string filename(filenameprefix);
    char suffix[20];
    if (depth) {
        this->depth = depth;
        sprintf(suffix, "%02d.p%d.t%u", depth, player%10, thread_num);
    } else {
        sprintf(suffix, ".p%d.t%u", player%10, thread_num);
    }
    filename.append(suffix);
    storage.open(filename);
    // It's OK if not every file exists; so don't worry about checking
    // for errors here.  We'll report an error in get() if we actually
    // try to use a value for which we don't have a precomputed file.
    count = 0;
}

template<typename T, typename N>
void PreCompStorage<T,N>::get(T& nextval)
{
    storage >> nextval;
    if (!storage.good()) {
        std::cerr << "Failed to read precomputed value from " << name;
        if (depth) {
            std::cerr << (int)depth;
        }
        std::cerr << " storage\n";
        exit(1);
    }
    ++count;
}
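
// Example (a hypothetical usage sketch, not code from this file): in
// online mode, a storage object declared as
//
//     PreCompStorage<MultTriple, TripleName> ts(0, MODE_ONLINE,
//         "triples", 0);
//
// opens the file "triples.p0.t0" (per the suffix logic in init()), and
// each ts.get(val) consumes the next precomputed value from that file.
// "TripleName" here stands in for whatever N-type the headers define
// with a static "name" member.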

void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}

size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock.  Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

    // If we already have some full packets worth of data, may as
    // well send it.
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}

void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
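
// Design note on the batching pipeline above: queue() only appends to
// dataqueue, an in-memory buffer; nothing touches the network until an
// explicit send() or the 28800-byte threshold triggers an implicit
// one.  send() then moves the buffered bytes onto messagequeue (a
// pointer move, not a copy), and the chain of async_write completion
// handlers in async_send_from_msgqueue() drains that queue in order,
// so at most one async_write per socket is in flight at a time.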

size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef RECORD_IOTRACE
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
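
// Wire format when SEND_LAMPORT_CLOCKS is defined, as parsed by recv()
// above (each message is a header followed by its payload):
//
//     +----------------+-------------------+------------------+
//     | uint32_t       | lamport_t         | payload          |
//     | payload length | sender's clock    | (length bytes)   |
//     +----------------+-------------------+------------------+
//
// The receiver applies the usual Lamport update rule, setting its
// clock to max(own clock, sender's clock + 1).  Without
// SEND_LAMPORT_CLOCKS, only the raw payload bytes are sent.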

#ifdef RECORD_IOTRACE
void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
{
    if (label) {
        os << label << " ";
    }
    os << "IO trace:";
    for (auto& s: iotrace) {
        os << " " << s;
    }
    os << "\n";
}
#endif

void MPCIO::reset_stats()
{
    msgs_sent.clear();
    msg_bytes_sent.clear();
    aes_ops.clear();
    for (size_t i=0; i<num_threads; ++i) {
        msgs_sent.push_back(0);
        msg_bytes_sent.push_back(0);
        aes_ops.push_back(0);
    }
    steady_start = boost::chrono::steady_clock::now();
    cpu_start = boost::chrono::process_cpu_clock::now();
}

// Report the memory usage
void MPCIO::dump_memusage(std::ostream &os)
{
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    os << "Mem: " << ru.ru_maxrss << " KiB\n";
}

void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << tot_aes_ops << " local AES operations\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
    dump_memusage(os);
}

MPCPeerIO::MPCPeerIO(unsigned player, ProcessingMode mode,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, mode, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        triples.emplace_back(player, mode, "triples", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, mode, "halves", i);
    }
    rdpftriples.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].init(player, mode,
                "rdpf", i, depth);
        }
    }
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, mode, "cdpf", i);
    }
    for (auto &&sock : peersocks) {
        peerios.emplace_back(std::move(sock));
    }
    for (auto &&sock : serversocks) {
        serverios.emplace_back(std::move(sock));
    }
}

void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<triples.size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i << " t:" << triples[i].get_stats() <<
            " h:" << halftriples[i].get_stats();
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpftriples[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
        size_t ccnt = cdpfs[i].get_stats();
        if (ccnt > 0) {
            os << " c:" << ccnt;
        }
    }
    os << "\n";
}

void MPCPeerIO::reset_precomp_stats()
{
    for (size_t i=0; i<triples.size(); ++i) {
        triples[i].reset_stats();
        halftriples[i].reset_stats();
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].reset_stats();
        }
    }
}

void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCServerIO::MPCServerIO(ProcessingMode mode,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, mode, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, mode,
                "rdpf", i, depth);
        }
    }
    for (auto &&sock : p0socks) {
        p0ios.emplace_back(std::move(sock));
    }
    for (auto &&sock : p1socks) {
        p1ios.emplace_back(std::move(sock));
    }
}

void MPCServerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpfpairs[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
    }
    os << "\n";
}

void MPCServerIO::reset_precomp_stats()
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].reset_stats();
        }
    }
}

void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num) :
    thread_num(thread_num), thread_lamport(mpcio.lamport),
    mpcio(mpcio)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}

// Sync our per-thread lamport clock with the master one in the
// mpcio.  You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before.  We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
    // The next line atomically checks if lamport still has
    // the value old_lamport; if so, it changes its value to
    // new_lamport and returns true (ending the loop).  If
    // not, it sets old_lamport to the current value of
    // lamport, and returns false (continuing the loop so
    // that new_lamport can be recomputed based on this new
    // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    thread_lamport = new_lamport;
}
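
// A minimal sketch of when to call sync_lamport() explicitly,
// assuming a std::thread worker (hypothetical code, per the comment
// above the function):
//
//     MPCTIO tio(mpcio, 0);
//     std::thread t([&]{ /* ... use tio ... */ });
//     t.join();
//     tio.sync_lamport();   // the MPCTIO outlived its thread
//
// The compare_exchange_weak loop is the standard lock-free "atomic
// max" idiom: it retries until it successfully installs
// max(mpcio.lamport, thread_lamport) into mpcio.lamport.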

// Queue up data to the peer or to the server
void MPCTIO::queue_peer(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_server(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from the peer or from the server
size_t MPCTIO::recv_peer(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_server(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Queue up data to p0 or p1
void MPCTIO::queue_p0(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_p1(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from p0 or p1
size_t MPCTIO::recv_p0(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_p1(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Send all queued data for this thread
void MPCTIO::send()
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.peerios[thread_num].send();
        mpcpio.serverios[thread_num].send();
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.p0ios[thread_num].send();
        mpcsrvio.p1ios[thread_num].send();
    }
}

// Functions to get precomputed values.  If we're in the online
// phase, get them from PreCompStorage.  If we're in the
// preprocessing or online-only phase, read them from the server.
MultTriple MPCTIO::triple()
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            recv_server(&val, sizeof(val));
            mpcpio.triples[thread_num].inc();
        } else {
            mpcpio.triples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
    }
    return val;
}
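
// Checking the triple identity (assuming value_t is an unsigned
// 64-bit type, so all arithmetic wraps mod 2^64): the server sets
// Z1 = X0*Y1 + X1*Y0 - Z0, hence
//
//     Z0 + Z1 = X0*Y1 + X1*Y0
//
// by construction, which is exactly the relation stated in the
// comment inside triple().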

HalfTriple MPCTIO::halftriple()
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            recv_server(&val, sizeof(val));
            mpcpio.halftriples[thread_num].inc();
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
    }
    return val;
}
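
// The half-triple relation checks out the same way: with
// Z1 = X0*Y1 - Z0, we get Z0 + Z1 = X0*Y1, so the two computational
// players hold additive shares of the product of P0's X0 and P1's Y1.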

SelectTriple MPCTIO::selecttriple()
{
    SelectTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            uint8_t Xbyte;
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
    }
    return val;
}
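
// Worked example of the sign-extension trick above (assuming
// if128_mask is the all-zeros/all-ones 128-bit mask table the
// comment implies):
//
//     X0 = 1  =>  X0ext = 111...1, so (X0ext & Y1) == Y1
//     X0 = 0  =>  X0ext = 000...0, so (X0ext & Y1) == 0
//
// Thus (X0ext & Y1) ^ (X1ext & Y0) equals X0*Y1 ^ X1*Y0 bitwise, and
// Z1 = that ^ Z0 yields Z0 ^ Z1 = X0*Y1 ^ X1*Y0: the XOR analogue of
// the multiplicative triple relation.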

RDPFTriple MPCTIO::rdpftriple(yield_t &yield, nbits_t depth,
    bool keep_expansion)
{
    RDPFTriple val;
    // Only the computational players (P0 and P1) hold RDPF triples;
    // the cast below is only valid for them
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcio.mode == MODE_ONLINE) {
            mpcpio.rdpftriples[thread_num][depth-1].get(val);
        } else {
            val = RDPFTriple(*this, yield, depth,
                keep_expansion);
            iostream_server() <<
                val.dpf[(mpcio.player == 0) ? 1 : 2];
            mpcpio.rdpftriples[thread_num][depth-1].inc();
        }
    }
    return val;
}

RDPFPair MPCTIO::rdpfpair(yield_t &yield, nbits_t depth)
{
    RDPFPair val;
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        if (mpcio.mode == MODE_ONLINE) {
            mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
        } else {
            // Constructing the RDPFTriple runs this (server) side of
            // the interactive generation; the object itself is not
            // used further.  The players then each send the server
            // one of their DPFs, which form the pair.
            RDPFTriple trip(*this, yield, depth, true);
            iostream_p0() >> val.dpf[0];
            iostream_p1() >> val.dpf[1];
            mpcsrvio.rdpfpairs[thread_num][depth-1].inc();
        }
    }
    return val;
}

CDPF MPCTIO::cdpf()
{
    CDPF val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            iostream_server() >> val;
            mpcpio.cdpfs[thread_num].inc();
        } else {
            mpcpio.cdpfs[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
        iostream_p0() << cdpf0;
        iostream_p1() << cdpf1;
    }
    return val;
}

// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;

// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;

// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
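
// Connection topology established by the two setup functions below:
// the higher-numbered party always initiates, each side handles
// num_threads sockets per peer, and the connecting side labels each
// socket with its 2-byte thread number so the acceptor can slot it
// into the right position:
//
//     P1 --(port 2115)--> P0
//     P2 --(port 2116)--> P0
//     P2 --(port 2117)--> P1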

void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr,  // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}
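
// Example (a hypothetical wiring sketch for computational player 0;
// the mode and thread count are placeholders):
//
//     boost::asio::io_context io_context;
//     std::deque<tcp::socket> peersocks, serversocks;
//     mpcio_setup_computational(0, io_context, nullptr, 1,
//         peersocks, serversocks);
//     MPCPeerIO mpcio(0, MODE_ONLINE, peersocks, serversocks);
//     MPCTIO tio(mpcio, 0);
//     // ... run the protocol, then tio.send() to flush ...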

void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}