mpcio.cpp 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006
  1. #include <sys/time.h> // getrusage
  2. #include <sys/resource.h> // getrusage
  3. #include "mpcio.hpp"
  4. #include "rdpf.hpp"
  5. #include "cdpf.hpp"
  6. #include "bitutils.hpp"
  7. #include "coroutine.hpp"
// T is the type being stored
// N is a type whose "name" static member is a string naming the type
// so that we can report something useful to the user if they try
// to read a type that we don't have any more values for
template<typename T, typename N>
PreCompStorage<T,N>::PreCompStorage(unsigned player, ProcessingMode mode,
    const char *filenameprefix, unsigned thread_num) :
    name(N::name), depth(0)
{
    // Delegate to init(); the depth argument is omitted, so it takes
    // its default from the declaration (depth 0 = non-depth-indexed
    // storage)
    init(player, mode, filenameprefix, thread_num);
}
  19. template<typename T, typename N>
  20. void PreCompStorage<T,N>::init(unsigned player, ProcessingMode mode,
  21. const char *filenameprefix, unsigned thread_num, nbits_t depth)
  22. {
  23. if (mode != MODE_ONLINE) return;
  24. std::string filename(filenameprefix);
  25. char suffix[20];
  26. if (depth) {
  27. this->depth = depth;
  28. sprintf(suffix, "%02d.p%d.t%u", depth, player%10, thread_num);
  29. } else {
  30. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  31. }
  32. filename.append(suffix);
  33. storage.open(filename);
  34. // It's OK if not every file exists; so don't worry about checking
  35. // for errors here. We'll report an error in get() if we actually
  36. // try to use a value for which we don't have a precomputed file.
  37. count = 0;
  38. }
  39. template<typename T, typename N>
  40. void PreCompStorage<T,N>::get(T& nextval)
  41. {
  42. storage >> nextval;
  43. if (!storage.good()) {
  44. std::cerr << "Failed to read precomputed value from " << name;
  45. if (depth) {
  46. std::cerr << (int)depth;
  47. }
  48. std::cerr << " storage\n";
  49. exit(1);
  50. }
  51. ++count;
  52. }
// Start an async_write of the message at the front of messagequeue.
// NOTE(review): callers appear to invoke this while holding
// messagequeuelock (see send()); the completion handler re-acquires
// the lock itself — confirm against the header's locking contract.
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    // With Lamport clocks, each queued message is a (header, payload)
    // pair; gather-write both buffers in a single async_write
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            // The message just written is the front one; drop it
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                // More messages queued behind us: chain the next write
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
  75. size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
  76. {
  77. // Is this a new message?
  78. size_t newmsg = 0;
  79. dataqueue.append((const char *)data, len);
  80. // If this is the first queue() since the last explicit send(),
  81. // which we'll know because message_lamport will be nullopt, set
  82. // message_lamport to the current Lamport clock. Note that the
  83. // boolean test tests whether message_lamport is nullopt, not
  84. // whether its value is zero.
  85. if (!message_lamport) {
  86. message_lamport = lamport;
  87. newmsg = 1;
  88. }
  89. #ifdef VERBOSE_COMMS
  90. printf("Queue %s.%d len=%lu lamp=%u: ", dest.c_str(), thread_num,
  91. len, message_lamport.value());
  92. for (size_t i=0;i<len;++i) {
  93. printf("%02x", ((const unsigned char*)data)[i]);
  94. }
  95. printf("\n");
  96. #endif
  97. // If we already have some full packets worth of data, may as
  98. // well send it.
  99. if (dataqueue.size() > 28800) {
  100. send(true);
  101. }
  102. return newmsg;
  103. }
// Move the accumulated dataqueue into the message queue and, if no
// write is already in flight, start one.  implicit_send is true when
// called from queue() because a full packet's worth of data had
// accumulated; an explicit send() (implicit_send == false) also ends
// the current logical message by resetting message_lamport.
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }
#ifdef RECORD_IOTRACE
    // Sends are recorded as positive sizes (receives are negative)
    iotrace.push_back(thissize);
#endif
    messagequeuelock.lock();
    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif
    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
// Receive exactly len bytes into data, blocking until they all
// arrive.  In SEND_LAMPORT_CLOCKS builds, also advance lamport to
// one past the largest sender clock seen in any message header
// consumed here.  Returns the number of bytes received.
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef VERBOSE_COMMS
    size_t orig_len = len;
    printf("Recv %s.%d len=%lu lamp=%u ", dest.c_str(), thread_num,
        len, lamport);
#endif
#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        // If no payload bytes are buffered, read message headers
        // until one arrives with a nonzero payload.  A zero-length
        // message carries only a Lamport clock update, and is fully
        // consumed by this inner loop.
        while (recvdataremain == 0) {
            // Read a new header: a 4-byte payload length followed by
            // the sender's Lamport clock
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            // Receiving a message sets our clock to one past the
            // sender's, if that is larger than what we already have
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        // Copy out as much buffered payload as the caller still
        // wants; any unread tail stays in recvdata for the next
        // recv().  The unread bytes live at the END of recvdata,
        // hence the size()-recvdataremain offset.
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    // No framing: just read the raw bytes
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif
#ifdef VERBOSE_COMMS
    printf("nlamp=%u: ", lamport);
    for (size_t i=0;i<orig_len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif
#ifdef RECORD_IOTRACE
    // Receives are recorded as negative sizes in the I/O trace
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
  198. #ifdef RECORD_IOTRACE
  199. void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
  200. {
  201. if (label) {
  202. os << label << " ";
  203. }
  204. os << "IO trace:";
  205. for (auto& s: iotrace) {
  206. os << " " << s;
  207. }
  208. os << "\n";
  209. }
  210. #endif
  211. void MPCIO::reset_stats()
  212. {
  213. msgs_sent.clear();
  214. msg_bytes_sent.clear();
  215. aes_ops.clear();
  216. for (size_t i=0; i<num_threads; ++i) {
  217. msgs_sent.push_back(0);
  218. msg_bytes_sent.push_back(0);
  219. aes_ops.push_back(0);
  220. }
  221. steady_start = boost::chrono::steady_clock::now();
  222. cpu_start = boost::chrono::process_cpu_clock::now();
  223. }
  224. // Report the memory usage
  225. void MPCIO::dump_memusage(std::ostream &os)
  226. {
  227. struct rusage ru;
  228. getrusage(RUSAGE_SELF, &ru);
  229. os << "Mem: " << ru.ru_maxrss << " KiB\n";
  230. }
  231. void MPCIO::dump_stats(std::ostream &os)
  232. {
  233. size_t tot_msgs_sent = 0;
  234. size_t tot_msg_bytes_sent = 0;
  235. size_t tot_aes_ops = 0;
  236. for (auto& n : msgs_sent) {
  237. tot_msgs_sent += n;
  238. }
  239. for (auto& n : msg_bytes_sent) {
  240. tot_msg_bytes_sent += n;
  241. }
  242. for (auto& n : aes_ops) {
  243. tot_aes_ops += n;
  244. }
  245. auto steady_elapsed =
  246. boost::chrono::steady_clock::now() - steady_start;
  247. auto cpu_elapsed =
  248. boost::chrono::process_cpu_clock::now() - cpu_start;
  249. os << tot_msgs_sent << " messages sent\n";
  250. os << tot_msg_bytes_sent << " message bytes sent\n";
  251. os << lamport << " Lamport clock (latencies)\n";
  252. os << tot_aes_ops << " local AES operations\n";
  253. os << boost::chrono::duration_cast
  254. <boost::chrono::milliseconds>(steady_elapsed) <<
  255. " wall clock time\n";
  256. os << cpu_elapsed << " {real;user;system}\n";
  257. dump_memusage(os);
  258. }
// Construct the I/O state for a computational peer (player 0 or 1):
// one PreCompStorage per thread for each precomputed-value type, and
// one MPCSingleIO per thread for the peer and server connections.
// The number of threads is taken from the number of sockets supplied.
MPCPeerIO::MPCPeerIO(unsigned player, ProcessingMode mode,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks) :
    MPCIO(player, mode, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        multtriples.emplace_back(player, mode, "mults", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, mode, "halves", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        andtriples.emplace_back(player, mode, "ands", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        valselecttriples.emplace_back(player, mode, "selects", i);
    }
    // RDPF storage is additionally indexed by tree depth, so each
    // thread gets one storage per depth, initialized in place
    rdpftriples.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].init(player, mode,
                "rdpf", i, depth);
        }
    }
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, mode, "cdpf", i);
    }
    // Take ownership of the per-thread sockets
    for (unsigned i=0; i<num_threads; ++i) {
        peerios.emplace_back(std::move(peersocks[i]), "peer", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        serverios.emplace_back(std::move(serversocks[i]), "srv", i);
    }
}
  294. void MPCPeerIO::dump_precomp_stats(std::ostream &os)
  295. {
  296. for (size_t i=0; i<multtriples.size(); ++i) {
  297. size_t cnt;
  298. if (i > 0) {
  299. os << " ";
  300. }
  301. os << "T" << i;
  302. cnt = multtriples[i].get_stats();
  303. if (cnt > 0) {
  304. os << " m:" << cnt;
  305. }
  306. cnt = halftriples[i].get_stats();
  307. if (cnt > 0) {
  308. os << " h:" << cnt;
  309. }
  310. cnt = andtriples[i].get_stats();
  311. if (cnt > 0) {
  312. os << " a:" << cnt;
  313. }
  314. cnt = valselecttriples[i].get_stats();
  315. if (cnt > 0) {
  316. os << " s:" << cnt;
  317. }
  318. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  319. cnt = rdpftriples[i][depth-1].get_stats();
  320. if (cnt > 0) {
  321. os << " r" << int(depth) << ":" << cnt;
  322. }
  323. }
  324. cnt = cdpfs[i].get_stats();
  325. if (cnt > 0) {
  326. os << " c:" << cnt;
  327. }
  328. }
  329. os << "\n";
  330. }
  331. void MPCPeerIO::reset_precomp_stats()
  332. {
  333. for (size_t i=0; i<multtriples.size(); ++i) {
  334. multtriples[i].reset_stats();
  335. halftriples[i].reset_stats();
  336. andtriples[i].reset_stats();
  337. valselecttriples[i].reset_stats();
  338. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  339. rdpftriples[i][depth-1].reset_stats();
  340. }
  341. }
  342. }
// Dump the base MPCIO statistics, then this peer's precomputed-value
// usage counts on the following line.
void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Construct the I/O state for the server (player 2): per-thread,
// per-depth RDPF pair storage, plus one MPCSingleIO per thread for
// each of the connections to P0 and P1.
MPCServerIO::MPCServerIO(ProcessingMode mode,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks) :
    MPCIO(2, mode, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, mode,
                "rdpf", i, depth);
        }
    }
    // Take ownership of the per-thread sockets
    for (unsigned i=0; i<num_threads; ++i) {
        p0ios.emplace_back(std::move(p0socks[i]), "p0", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        p1ios.emplace_back(std::move(p1socks[i]), "p1", i);
    }
}
  368. void MPCServerIO::dump_precomp_stats(std::ostream &os)
  369. {
  370. for (size_t i=0; i<rdpfpairs.size(); ++i) {
  371. if (i > 0) {
  372. os << " ";
  373. }
  374. os << "T" << i;
  375. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  376. size_t cnt = rdpfpairs[i][depth-1].get_stats();
  377. if (cnt > 0) {
  378. os << " r" << int(depth) << ":" << cnt;
  379. }
  380. }
  381. }
  382. os << "\n";
  383. }
  384. void MPCServerIO::reset_precomp_stats()
  385. {
  386. for (size_t i=0; i<rdpfpairs.size(); ++i) {
  387. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  388. rdpfpairs[i][depth-1].reset_stats();
  389. }
  390. }
  391. }
// Dump the base MPCIO statistics, then the server's precomputed-value
// usage counts on the following line.
void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Per-thread I/O handle.  Players 0 and 1 (computational peers) get
// iostreams to the other peer and to the server; player 2 (the
// server) gets iostreams to p0 and p1.  The iostreams share this
// thread's Lamport clock and the parent MPCIO's per-thread
// message/byte counters.
MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num, int num_threads) :
    thread_num(thread_num), local_cpu_nthreads(num_threads),
    communication_nthreads(num_threads),
    thread_lamport(mpcio.lamport), mpcio(mpcio),
#ifdef VERBOSE_COMMS
    round_num(0),
#endif
    last_andtriple_bits_remaining(0)
{
    if (mpcio.player < 2) {
        // Computational peer: the MPCIO is really an MPCPeerIO
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        // Server: the MPCIO is really an MPCServerIO
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}
// Sync our per-thread lamport clock with the master one in the
// mpcio. You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before. We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
        // The next line atomically checks if lamport still has
        // the value old_lamport; if so, it changes its value to
        // new_lamport and returns true (ending the loop). If
        // not, it sets old_lamport to the current value of
        // lamport, and returns false (continuing the loop so
        // that new_lamport can be recomputed based on this new
        // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    // Adopt the agreed-upon maximum as our own clock as well
    thread_lamport = new_lamport;
}
// Only call this if you can be sure that there are no outstanding
// messages in flight, you can call it on all existing MPCTIOs, and
// you really want to reset the Lamport clock in the middle of a
// run.
void MPCTIO::reset_lamport()
{
    // Reset both our own Lamport clock and the parent MPCIO's
    thread_lamport = 0;
    mpcio.lamport = 0;
}
  464. // Queue up data to the peer or to the server
  465. void MPCTIO::queue_peer(const void *data, size_t len)
  466. {
  467. if (mpcio.player < 2) {
  468. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  469. size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
  470. mpcpio.msgs_sent[thread_num] += newmsg;
  471. mpcpio.msg_bytes_sent[thread_num] += len;
  472. }
  473. }
  474. void MPCTIO::queue_server(const void *data, size_t len)
  475. {
  476. if (mpcio.player < 2) {
  477. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  478. size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
  479. mpcpio.msgs_sent[thread_num] += newmsg;
  480. mpcpio.msg_bytes_sent[thread_num] += len;
  481. }
  482. }
  483. // Receive data from the peer or to the server
  484. size_t MPCTIO::recv_peer(void *data, size_t len)
  485. {
  486. if (mpcio.player < 2) {
  487. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  488. return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
  489. }
  490. return 0;
  491. }
  492. size_t MPCTIO::recv_server(void *data, size_t len)
  493. {
  494. if (mpcio.player < 2) {
  495. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  496. return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
  497. }
  498. return 0;
  499. }
  500. // Queue up data to p0 or p1
  501. void MPCTIO::queue_p0(const void *data, size_t len)
  502. {
  503. if (mpcio.player == 2) {
  504. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  505. size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
  506. mpcsrvio.msgs_sent[thread_num] += newmsg;
  507. mpcsrvio.msg_bytes_sent[thread_num] += len;
  508. }
  509. }
  510. void MPCTIO::queue_p1(const void *data, size_t len)
  511. {
  512. if (mpcio.player == 2) {
  513. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  514. size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
  515. mpcsrvio.msgs_sent[thread_num] += newmsg;
  516. mpcsrvio.msg_bytes_sent[thread_num] += len;
  517. }
  518. }
  519. // Receive data from p0 or p1
  520. size_t MPCTIO::recv_p0(void *data, size_t len)
  521. {
  522. if (mpcio.player == 2) {
  523. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  524. return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
  525. }
  526. return 0;
  527. }
  528. size_t MPCTIO::recv_p1(void *data, size_t len)
  529. {
  530. if (mpcio.player == 2) {
  531. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  532. return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
  533. }
  534. return 0;
  535. }
  536. // Send all queued data for this thread
  537. void MPCTIO::send()
  538. {
  539. #ifdef VERBOSE_COMMS
  540. printf("Thread %u sending round %lu\n", thread_num, ++round_num);
  541. #endif
  542. if (mpcio.player < 2) {
  543. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  544. mpcpio.peerios[thread_num].send();
  545. mpcpio.serverios[thread_num].send();
  546. } else {
  547. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  548. mpcsrvio.p0ios[thread_num].send();
  549. mpcsrvio.p1ios[thread_num].send();
  550. }
  551. }
// Functions to get precomputed values. If we're in the online
// phase, get them from PreCompStorage. If we're in the
// preprocessing or online-only phase, read them from the server.
MultTriple MPCTIO::multtriple(yield_t &yield)
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Not online: the server generates our triple share and
            // sends it; yield() first so the round can proceed
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.multtriples[thread_num].inc();
        } else {
            // Online: consume a precomputed triple from storage
            mpcpio.multtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // We are the server: create multiplication triples
        // (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    // On the server, val is returned default-constructed
    return val;
}
// When halftriple() is used internally to another preprocessing
// operation, don't tally it, so that it doesn't appear separately in
// the stats from the preprocessing operation that invoked it
HalfTriple MPCTIO::halftriple(yield_t &yield, bool tally)
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Not online: receive our half-triple share from the server
            yield();
            recv_server(&val, sizeof(val));
            if (tally) {
                mpcpio.halftriples[thread_num].inc();
            }
        } else {
            // Online: consume a precomputed half-triple from storage
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // We are the server: create half-triples (X0,Z0),(Y1,Z1)
        // such that X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
        yield();
    }
    return val;
}
// Get an AND triple: (X0,Y0,Z0),(X1,Y1,Z1) with
// (X0&Y1 ^ Y0&X1) = (Z0^Z1).
// NOTE(review): the return type is MultTriple while the local is
// AndTriple — presumably these are the same tuple type (aliases);
// confirm in mpcio.hpp.
MultTriple MPCTIO::andtriple(yield_t &yield)
{
    AndTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Not online: receive our triple share from the server
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.andtriples[thread_num].inc();
        } else {
            // Online: consume a precomputed AND triple from storage
            mpcpio.andtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // We are the server: create AND triples
        // (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0&Y1 ^ Y0&X1) = (Z0^Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = (X0 & Y1) ^ (X1 & Y0) ^ Z0;
        AndTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}
// Get a select triple over DPFnode values: bit shares X0,X1 and node
// shares (Y0,Z0),(Y1,Z1) with (X0*Y1 ^ Y0*X1) = (Z0^Z1).  These have
// no PreCompStorage, so they are only available outside MODE_ONLINE.
SelectTriple<DPFnode> MPCTIO::nodeselecttriple(yield_t &yield)
{
    SelectTriple<DPFnode> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // The bit share X arrives as a whole byte; keep its low bit
            uint8_t Xbyte;
            yield();
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            // NOTE(review): this only warns and returns a
            // default-constructed val, unlike PreCompStorage::get
            // which exits — confirm this is intended
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // We are the server: create triples (X0,Y0,Z0),(X1,Y1,Z1)
        // such that (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}
// Get a select triple over value_t: bit shares X0,X1 and value
// shares (Y0,Z0),(Y1,Z1) with (X0*Y1 ^ Y0*X1) = (Z0^Z1).
SelectTriple<value_t> MPCTIO::valselecttriple(yield_t &yield)
{
    SelectTriple<value_t> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // The bit share X arrives as a whole byte; keep its low bit
            uint8_t Xbyte;
            yield();
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
            mpcpio.valselecttriples[thread_num].inc();
        } else {
            // Online: consume a precomputed select triple from storage
            mpcpio.valselecttriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // We are the server: create triples (X0,Y0,Z0),(X1,Y1,Z1)
        // such that (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        value_t Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        value_t X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = -value_t(X0);
        X1ext = -value_t(X1);
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}
  734. SelectTriple<bit_t> MPCTIO::bitselecttriple(yield_t &yield)
  735. {
  736. // Do we need to fetch a new AND triple?
  737. if (last_andtriple_bits_remaining == 0) {
  738. last_andtriple = andtriple(yield);
  739. last_andtriple_bits_remaining = 8*sizeof(value_t);
  740. }
  741. --last_andtriple_bits_remaining;
  742. value_t mask = value_t(1) << last_andtriple_bits_remaining;
  743. SelectTriple<bit_t> val;
  744. val.X = !!(std::get<0>(last_andtriple) & mask);
  745. val.Y = !!(std::get<1>(last_andtriple) & mask);
  746. val.Z = !!(std::get<2>(last_andtriple) & mask);
  747. return val;
  748. }
// Only computational peers call this; the server should be calling
// rdpfpair() at the same time
RDPFTriple<1> MPCTIO::rdpftriple(yield_t &yield, nbits_t depth,
    bool keep_expansion)
{
    assert(mpcio.player < 2);
    RDPFTriple<1> val;
    MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
    if (mpcio.mode == MODE_ONLINE) {
        // Online: consume a precomputed RDPF triple of this depth
        mpcpio.rdpftriples[thread_num][depth-1].get(val);
    } else {
        // Preprocessing: generate the triple cooperatively, then send
        // one of our three DPFs to the server (which one depends on
        // which player we are)
        val = RDPFTriple<1>(*this, yield, depth,
            keep_expansion);
        iostream_server() <<
            val.dpf[(mpcio.player == 0) ? 1 : 2];
        mpcpio.rdpftriples[thread_num][depth-1].inc();
        yield();
    }
    return val;
}
// Only the server calls this; the computational peers should be calling
// rdpftriple() at the same time
RDPFPair<1> MPCTIO::rdpfpair(yield_t &yield, nbits_t depth)
{
    assert(mpcio.player == 2);
    RDPFPair<1> val;
    MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
    if (mpcio.mode == MODE_ONLINE) {
        // Online: consume a precomputed RDPF pair of this depth
        mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
    } else {
        // Preprocessing: participate in the triple generation (the
        // server's copy, trip, is discarded), then receive one DPF
        // from each peer to form the pair
        RDPFTriple<1> trip(*this, yield, depth, true);
        yield();
        iostream_p0() >> val.dpf[0];
        iostream_p1() >> val.dpf[1];
        mpcsrvio.rdpfpairs[thread_num][depth-1].inc();
    }
    return val;
}
// Get a CDPF.  Online: from precomputed storage.  Otherwise the
// server generates a CDPF pair and sends one half to each peer.
CDPF MPCTIO::cdpf(yield_t &yield)
{
    CDPF val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Not online: receive our half of the pair from the server
            yield();
            iostream_server() >> val;
            mpcpio.cdpfs[thread_num].inc();
        } else {
            // Online: consume a precomputed CDPF from storage
            mpcpio.cdpfs[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // We are the server: generate the pair and distribute it
        auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
        iostream_p0() << cdpf0;
        iostream_p1() << cdpf1;
        yield();
    }
    return val;
}
// Listening ports used to wire up the three parties: P0 listens on
// the first two, P1 on the third (see mpcio_setup_computational).
// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;
// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;
// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
  813. void mpcio_setup_computational(unsigned player,
  814. boost::asio::io_context &io_context,
  815. const char *p0addr, // can be NULL when player=0
  816. int num_threads,
  817. std::deque<tcp::socket> &peersocks,
  818. std::deque<tcp::socket> &serversocks)
  819. {
  820. if (player == 0) {
  821. // Listen for connections from P1 and from P2
  822. tcp::acceptor acceptor_p1(io_context,
  823. tcp::endpoint(tcp::v4(), port_p1_p0));
  824. tcp::acceptor acceptor_p2(io_context,
  825. tcp::endpoint(tcp::v4(), port_p2_p0));
  826. peersocks.clear();
  827. serversocks.clear();
  828. for (int i=0;i<num_threads;++i) {
  829. peersocks.emplace_back(io_context);
  830. serversocks.emplace_back(io_context);
  831. }
  832. for (int i=0;i<num_threads;++i) {
  833. tcp::socket peersock = acceptor_p1.accept();
  834. // Read 2 bytes from the socket, which will be the thread
  835. // number
  836. unsigned short thread_num;
  837. boost::asio::read(peersock,
  838. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  839. if (thread_num >= num_threads) {
  840. std::cerr << "Received bad thread number from peer\n";
  841. } else {
  842. peersocks[thread_num] = std::move(peersock);
  843. }
  844. }
  845. for (int i=0;i<num_threads;++i) {
  846. tcp::socket serversock = acceptor_p2.accept();
  847. // Read 2 bytes from the socket, which will be the thread
  848. // number
  849. unsigned short thread_num;
  850. boost::asio::read(serversock,
  851. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  852. if (thread_num >= num_threads) {
  853. std::cerr << "Received bad thread number from server\n";
  854. } else {
  855. serversocks[thread_num] = std::move(serversock);
  856. }
  857. }
  858. } else if (player == 1) {
  859. // Listen for connections from P2, make num_threads connections to P0
  860. tcp::acceptor acceptor_p2(io_context,
  861. tcp::endpoint(tcp::v4(), port_p2_p1));
  862. tcp::resolver resolver(io_context);
  863. boost::system::error_code err;
  864. peersocks.clear();
  865. serversocks.clear();
  866. for (int i=0;i<num_threads;++i) {
  867. serversocks.emplace_back(io_context);
  868. }
  869. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  870. tcp::socket peersock(io_context);
  871. while(1) {
  872. boost::asio::connect(peersock,
  873. resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
  874. if (!err) break;
  875. std::cerr << "Connection to p0 refused, will retry.\n";
  876. sleep(1);
  877. }
  878. // Write 2 bytes to the socket indicating which thread
  879. // number this socket is for
  880. boost::asio::write(peersock,
  881. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  882. peersocks.push_back(std::move(peersock));
  883. }
  884. for (int i=0;i<num_threads;++i) {
  885. tcp::socket serversock = acceptor_p2.accept();
  886. // Read 2 bytes from the socket, which will be the thread
  887. // number
  888. unsigned short thread_num;
  889. boost::asio::read(serversock,
  890. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  891. if (thread_num >= num_threads) {
  892. std::cerr << "Received bad thread number from server\n";
  893. } else {
  894. serversocks[thread_num] = std::move(serversock);
  895. }
  896. }
  897. } else {
  898. std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
  899. }
  900. }
  901. void mpcio_setup_server(boost::asio::io_context &io_context,
  902. const char *p0addr, const char *p1addr, int num_threads,
  903. std::deque<tcp::socket> &p0socks,
  904. std::deque<tcp::socket> &p1socks)
  905. {
  906. // Make connections to P0 and P1
  907. tcp::resolver resolver(io_context);
  908. boost::system::error_code err;
  909. p0socks.clear();
  910. p1socks.clear();
  911. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  912. tcp::socket p0sock(io_context);
  913. while(1) {
  914. boost::asio::connect(p0sock,
  915. resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
  916. if (!err) break;
  917. std::cerr << "Connection to p0 refused, will retry.\n";
  918. sleep(1);
  919. }
  920. // Write 2 bytes to the socket indicating which thread
  921. // number this socket is for
  922. boost::asio::write(p0sock,
  923. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  924. p0socks.push_back(std::move(p0sock));
  925. }
  926. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  927. tcp::socket p1sock(io_context);
  928. while(1) {
  929. boost::asio::connect(p1sock,
  930. resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
  931. if (!err) break;
  932. std::cerr << "Connection to p1 refused, will retry.\n";
  933. sleep(1);
  934. }
  935. // Write 2 bytes to the socket indicating which thread
  936. // number this socket is for
  937. boost::asio::write(p1sock,
  938. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  939. p1socks.push_back(std::move(p1sock));
  940. }
  941. }