mpcio.cpp

#include <sys/time.h>      // getrusage
#include <sys/resource.h>  // getrusage
#include "mpcio.hpp"
#include "rdpf.hpp"
#include "cdpf.hpp"
#include "bitutils.hpp"
#include "coroutine.hpp"

// T is the type being stored
// N is a type whose "name" static member is a string naming the type,
// so that we can report something useful to the user if they try
// to read a type that we don't have any more values for
template<typename T, typename N>
PreCompStorage<T,N>::PreCompStorage(unsigned player, ProcessingMode mode,
        const char *filenameprefix, unsigned thread_num) :
    name(N::name), depth(0)
{
    init(player, mode, filenameprefix, thread_num);
}

template<typename T, typename N>
void PreCompStorage<T,N>::init(unsigned player, ProcessingMode mode,
        const char *filenameprefix, unsigned thread_num, nbits_t depth)
{
    if (mode != MODE_ONLINE) return;
    std::string filename(filenameprefix);
    char suffix[20];
    if (depth) {
        this->depth = depth;
        snprintf(suffix, sizeof(suffix), "%02d.p%d.t%u",
            depth, player%10, thread_num);
    } else {
        snprintf(suffix, sizeof(suffix), ".p%d.t%u",
            player%10, thread_num);
    }
    filename.append(suffix);
    storage.open(filename);
    // It's OK if not every file exists, so don't worry about checking
    // for errors here.  We'll report an error in get() if we actually
    // try to use a value for which we don't have a precomputed file.
    count = 0;
}

template<typename T, typename N>
void PreCompStorage<T,N>::get(T& nextval)
{
    storage >> nextval;
    if (!storage.good()) {
        std::cerr << "Failed to read precomputed value from " << name;
        if (depth) {
            std::cerr << (int)depth;
        }
        std::cerr << " storage\n";
        exit(1);
    }
    ++count;
}

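// Illustrative usage sketch (not part of the build; TripleName here is
// a hypothetical stand-in for the name types declared in mpcio.hpp):
//
//     struct TripleName { static constexpr const char *name = "Triples"; };
//     PreCompStorage<MultTriple, TripleName> store(0, MODE_ONLINE,
//         "triples", 0);   // reads from the file "triples.p0.t0"
//     MultTriple t;
//     store.get(t);        // calls exit(1) with an error message if
//                          // the file has run out of values
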
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}

size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock.  Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

#ifdef VERBOSE_COMMS
    printf("Queue %s.%d len=%lu lamp=%u: ", dest.c_str(), thread_num,
        len, message_lamport.value());
    for (size_t i=0;i<len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

    // If we already have a few full packets' worth of data, we may as
    // well send it.
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}

void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}

size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef VERBOSE_COMMS
    size_t orig_len = len;
    printf("Recv %s.%d len=%lu lamp=%u ", dest.c_str(), thread_num,
        len, lamport);
#endif

#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif

#ifdef VERBOSE_COMMS
    printf("nlamp=%u: ", lamport);
    for (size_t i=0;i<orig_len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

#ifdef RECORD_IOTRACE
    iotrace.push_back(-(ssize_t(res)));
#endif

    return res;
}

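// Wire format when SEND_LAMPORT_CLOCKS is defined, as read by recv()
// above: each message on the socket is framed as
//
//     uint32_t  datalen        // payload length in bytes
//     lamport_t recv_lamport   // sender's Lamport clock for the message
//     char      payload[datalen]
//
// and the receiver advances its own clock to
// max(its clock, recv_lamport + 1).  Without SEND_LAMPORT_CLOCKS, the
// payload bytes are written to the socket with no framing at all.
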
#ifdef RECORD_IOTRACE
void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
{
    if (label) {
        os << label << " ";
    }
    os << "IO trace:";
    for (auto& s: iotrace) {
        os << " " << s;
    }
    os << "\n";
}
#endif

void MPCIO::reset_stats()
{
    msgs_sent.clear();
    msg_bytes_sent.clear();
    aes_ops.clear();
    for (size_t i=0; i<num_threads; ++i) {
        msgs_sent.push_back(0);
        msg_bytes_sent.push_back(0);
        aes_ops.push_back(0);
    }
    steady_start = boost::chrono::steady_clock::now();
    cpu_start = boost::chrono::process_cpu_clock::now();
}

// Report the memory usage
void MPCIO::dump_memusage(std::ostream &os)
{
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    os << "Mem: " << ru.ru_maxrss << " KiB\n";
}

void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << tot_aes_ops << " local AES operations\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
    dump_memusage(os);
}

MPCPeerIO::MPCPeerIO(unsigned player, ProcessingMode mode,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, mode, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        triples.emplace_back(player, mode, "triples", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, mode, "halves", i);
    }
    rdpftriples.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].init(player, mode,
                "rdpf", i, depth);
        }
    }
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, mode, "cdpf", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        peerios.emplace_back(std::move(peersocks[i]), "peer", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        serverios.emplace_back(std::move(serversocks[i]), "srv", i);
    }
}

void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<triples.size(); ++i) {
        size_t cnt;
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        cnt = triples[i].get_stats();
        if (cnt > 0) {
            os << " t:" << cnt;
        }
        cnt = halftriples[i].get_stats();
        if (cnt > 0) {
            os << " h:" << cnt;
        }
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            cnt = rdpftriples[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
        cnt = cdpfs[i].get_stats();
        if (cnt > 0) {
            os << " c:" << cnt;
        }
    }
    os << "\n";
}

void MPCPeerIO::reset_precomp_stats()
{
    for (size_t i=0; i<triples.size(); ++i) {
        triples[i].reset_stats();
        halftriples[i].reset_stats();
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].reset_stats();
        }
    }
}

void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCServerIO::MPCServerIO(ProcessingMode mode,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, mode, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, mode,
                "rdpf", i, depth);
        }
    }
    for (unsigned i=0; i<num_threads; ++i) {
        p0ios.emplace_back(std::move(p0socks[i]), "p0", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        p1ios.emplace_back(std::move(p1socks[i]), "p1", i);
    }
}

void MPCServerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            size_t cnt = rdpfpairs[i][depth-1].get_stats();
            if (cnt > 0) {
                os << " r" << int(depth) << ":" << cnt;
            }
        }
    }
    os << "\n";
}

void MPCServerIO::reset_precomp_stats()
{
    for (size_t i=0; i<rdpfpairs.size(); ++i) {
        for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].reset_stats();
        }
    }
}

void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num, int num_threads) :
    thread_num(thread_num), local_cpu_nthreads(num_threads),
    communication_nthreads(num_threads),
    thread_lamport(mpcio.lamport), mpcio(mpcio)
#ifdef VERBOSE_COMMS
    , round_num(0)
#endif
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}

// Sync our per-thread lamport clock with the master one in the
// mpcio.  You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be the max of the thread
    // Lamport time and what we thought it was before.  We use this
    // compare_exchange construction in order to atomically do the
    // comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
        // The next line atomically checks if lamport still has
        // the value old_lamport; if so, it changes its value to
        // new_lamport and returns true (ending the loop).  If
        // not, it sets old_lamport to the current value of
        // lamport, and returns false (continuing the loop so
        // that new_lamport can be recomputed based on this new
        // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    thread_lamport = new_lamport;
}

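// The loop above is the standard lock-free "atomic max" idiom.  A
// minimal self-contained sketch of the same pattern (illustrative,
// not part of the build):
//
//     #include <atomic>
//     #include <cstdint>
//
//     // Atomically set a to max(a, b), without locks
//     void atomic_max(std::atomic<uint32_t> &a, uint32_t b)
//     {
//         uint32_t old = a.load();
//         while (old < b && !a.compare_exchange_weak(old, b)) {
//             // on failure, old was reloaded with a's current value,
//             // so the loop condition re-checks against the new value
//         }
//     }
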
// Queue up data to the peer or to the server

void MPCTIO::queue_peer(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_server(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from the peer or from the server

size_t MPCTIO::recv_peer(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_server(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Queue up data to p0 or p1

void MPCTIO::queue_p0(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_p1(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from p0 or p1

size_t MPCTIO::recv_p0(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_p1(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Send all queued data for this thread
void MPCTIO::send()
{
#ifdef VERBOSE_COMMS
    printf("Thread %d sending round %lu\n", thread_num, ++round_num);
#endif
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.peerios[thread_num].send();
        mpcpio.serverios[thread_num].send();
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.p0ios[thread_num].send();
        mpcsrvio.p1ios[thread_num].send();
    }
}

// Functions to get precomputed values.  If we're in the online
// phase, get them from PreCompStorage.  If we're in the
// preprocessing or online-only phase, read them from the server.

MultTriple MPCTIO::triple(yield_t &yield)
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.triples[thread_num].inc();
        } else {
            mpcpio.triples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}

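// Sanity check of the relation above (illustrative only): value_t
// arithmetic wraps modulo its word size (2^64 for a 64-bit value_t),
// so with Z1 chosen as above the identity holds exactly for any choice
// of the five random values, e.g.:
//
//     value_t X0 = 3, Y0 = 5, Z0 = 7, X1 = 11, Y1 = 13;
//     value_t Z1 = X0*Y1 + X1*Y0 - Z0;     // 39 + 55 - 7 = 87
//     assert(X0*Y1 + Y0*X1 == Z0 + Z1);    // 94 == 94
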
// When halftriple() is used internally to another preprocessing
// operation, don't tally it, so that it doesn't appear separately in
// the stats from the preprocessing operation that invoked it
HalfTriple MPCTIO::halftriple(yield_t &yield, bool tally)
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            if (tally) {
                mpcpio.halftriples[thread_num].inc();
            }
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
        yield();
    }
    return val;
}

SelectTriple MPCTIO::selecttriple(yield_t &yield)
{
    SelectTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            uint8_t Xbyte;
            yield();
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}

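// The XOR analogue of the multiplicative check above (illustrative
// only): with Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0, and Xiext the
// all-zeros or all-ones 128-bit mask for bit Xi, XORing both sides
// with Z0 gives
//
//     (X0ext & Y1) ^ (X1ext & Y0) == Z0 ^ Z1
//
// bitwise over the DPFnode values (since x ^ x == 0), which is exactly
// the stated (X0*Y1 ^ Y0*X1) = (Z0^Z1) relation with * read as AND.
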
// Only computational peers call this; the server should be calling
// rdpfpair() at the same time
RDPFTriple MPCTIO::rdpftriple(yield_t &yield, nbits_t depth,
    bool keep_expansion)
{
    assert(mpcio.player < 2);
    RDPFTriple val;

    MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
    if (mpcio.mode == MODE_ONLINE) {
        mpcpio.rdpftriples[thread_num][depth-1].get(val);
    } else {
        val = RDPFTriple(*this, yield, depth,
            keep_expansion);
        iostream_server() <<
            val.dpf[(mpcio.player == 0) ? 1 : 2];
        mpcpio.rdpftriples[thread_num][depth-1].inc();
        yield();
    }
    return val;
}

// Only the server calls this; the computational peers should be calling
// rdpftriple() at the same time
RDPFPair MPCTIO::rdpfpair(yield_t &yield, nbits_t depth)
{
    assert(mpcio.player == 2);
    RDPFPair val;

    MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
    if (mpcio.mode == MODE_ONLINE) {
        mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
    } else {
        RDPFTriple trip(*this, yield, depth, true);
        yield();
        iostream_p0() >> val.dpf[0];
        iostream_p1() >> val.dpf[1];
        mpcsrvio.rdpfpairs[thread_num][depth-1].inc();
    }
    return val;
}

CDPF MPCTIO::cdpf(yield_t &yield)
{
    CDPF val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            iostream_server() >> val;
            mpcpio.cdpfs[thread_num].inc();
        } else {
            mpcpio.cdpfs[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
        iostream_p0() << cdpf0;
        iostream_p1() << cdpf1;
        yield();
    }
    return val;
}

// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;

// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;

// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;

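// Connection topology (the party at the tail of each arrow initiates
// num_threads TCP connections to the party at the head, as set up by
// the two functions below):
//
//     P1 --port 2115--> P0
//     P2 --port 2116--> P0
//     P2 --port 2117--> P1
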
void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr,  // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2; make num_threads connections
        // to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}

void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}
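
// Illustrative end-to-end sketch (not part of the build): how the
// setup functions and the MPCIO classes above fit together for
// computational player 0; num_threads is assumed to be agreed upon
// by all three parties, and p0addr may be NULL for player 0.
//
//     boost::asio::io_context io_context;
//     std::deque<tcp::socket> peersocks, serversocks;
//     mpcio_setup_computational(0, io_context, nullptr /* p0addr */,
//         num_threads, peersocks, serversocks);
//     MPCPeerIO mpcpio(0, MODE_ONLINE, peersocks, serversocks);
//     MPCTIO tio(mpcpio, 0, num_threads);   // I/O handle for thread 0
//     // ... run the protocol, then:
//     mpcpio.dump_stats(std::cout);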