// mpcio.cpp
#include <sys/time.h> // getrusage
#include <sys/resource.h> // getrusage

#include "mpcio.hpp"
#include "rdpf.hpp"
#include "cdpf.hpp"
#include "bitutils.hpp"
#include "coroutine.hpp"
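
// Launch an async_write of the message at the front of the message
// queue; the completion handler pops the sent message and re-arms
// itself if more messages have been queued in the meantime. This is
// always called with messagequeuelock held: from send() when the
// queue goes from empty to non-empty, and from the completion
// handler itself.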
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
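
// Append len bytes of data to the outgoing message currently being
// built for this socket. Returns 1 if this call started a new
// message (the callers use this to count messages sent), and 0 if it
// extended an existing one. If enough data has accumulated, an
// implicit send is triggered.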
size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock. Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

#ifdef VERBOSE_COMMS
    printf("Queue %s.%d len=%lu lamp=%u: ", dest.c_str(), thread_num,
        len, message_lamport.value());
    for (size_t i=0;i<len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

    // If we already have some full packets worth of data, may as
    // well send it.
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}
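
// Send the message accumulated by queue(). implicit_send is true
// when we are called from queue() because the data queue grew past
// the threshold, and false for an explicit send(); only an explicit
// send() resets message_lamport, so that the next queue() starts a
// fresh message with a fresh Lamport timestamp.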
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
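
// Receive len bytes into data. When SEND_LAMPORT_CLOCKS is defined,
// each message on the wire is preceded by a header consisting of a
// uint32_t payload length followed by the sender's lamport_t clock,
// and on receipt we advance our clock to max(ours, sender's + 1);
// otherwise the bytes are read directly from the socket.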
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef VERBOSE_COMMS
    size_t orig_len = len;
    printf("Recv %s.%d len=%lu lamp=%u ", dest.c_str(), thread_num,
        len, lamport);
#endif

#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif

#ifdef VERBOSE_COMMS
    printf("nlamp=%u: ", lamport);
    for (size_t i=0;i<orig_len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

#ifdef RECORD_IOTRACE
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}

#ifdef RECORD_IOTRACE
void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
{
    if (label) {
        os << label << " ";
    }
    os << "IO trace:";
    for (auto& s: iotrace) {
        os << " " << s;
    }
    os << "\n";
}
#endif

void MPCIO::reset_stats()
{
    msgs_sent.clear();
    msg_bytes_sent.clear();
    aes_ops.clear();
    for (size_t i=0; i<num_threads; ++i) {
        msgs_sent.push_back(0);
        msg_bytes_sent.push_back(0);
        aes_ops.push_back(0);
    }
    steady_start = boost::chrono::steady_clock::now();
    cpu_start = boost::chrono::process_cpu_clock::now();
}

// Report the memory usage
void MPCIO::dump_memusage(std::ostream &os)
{
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    os << "Mem: " << ru.ru_maxrss << " KiB\n";
}

void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << tot_aes_ops << " local AES operations\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
    dump_memusage(os);
}
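
// The rdpfstorage_* helpers below manage precomputed (incremental)
// RDPFs. The storage is indexed first by RDPF width (a compile-time
// constant, selected with std::get<WIDTH-1>), then by thread number,
// then by DPF depth (depths 1 through ADDRESS_MAX_BITS live at array
// index depth-1).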

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_init(TVA &storage, unsigned player,
    ProcessingMode mode, unsigned num_threads, bool incremental)
{
    auto &VA = std::get<WIDTH-1>(storage);
    VA.resize(num_threads);
    char prefix[12];
    strcpy(prefix, incremental ? "irdpf" : "rdpf");
    if (WIDTH > 1) {
        sprintf(prefix+strlen(prefix), "%d_", WIDTH);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            VA[i][depth-1].init(player, mode, prefix, i, depth, WIDTH);
        }
    }
}

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_dumpstats(std::ostream &os, TVA &storage,
    size_t thread_num, bool incremental)
{
    auto &VA = std::get<WIDTH-1>(storage);
    for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
        size_t cnt = VA[thread_num][depth-1].get_stats();
        if (cnt > 0) {
            os << (incremental ? " i" : " r") << int(depth);
            if (WIDTH > 1) {
                os << "." << int(WIDTH);
            }
            os << ":" << cnt;
        }
    }
}

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_resetstats(TVA &storage, size_t thread_num)
{
    auto &VA = std::get<WIDTH-1>(storage);
    for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
        VA[thread_num][depth-1].reset_stats();
    }
}

MPCPeerIO::MPCPeerIO(unsigned player, ProcessingMode mode,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, mode, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        multtriples.emplace_back(player, mode, "mults", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, mode, "halves", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        andtriples.emplace_back(player, mode, "ands", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        valselecttriples.emplace_back(player, mode, "selects", i);
    }
    rdpfstorage_init<1>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<2>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<3>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<4>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<5>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<1>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<2>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<3>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<4>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<5>(irdpftriples, player, mode, num_threads, true);
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, mode, "cdpf", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        peerios.emplace_back(std::move(peersocks[i]), "peer", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        serverios.emplace_back(std::move(serversocks[i]), "srv", i);
    }
}

void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<multtriples.size(); ++i) {
        size_t cnt;
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        cnt = multtriples[i].get_stats();
        if (cnt > 0) {
            os << " m:" << cnt;
        }
        cnt = halftriples[i].get_stats();
        if (cnt > 0) {
            os << " h:" << cnt;
        }
        cnt = andtriples[i].get_stats();
        if (cnt > 0) {
            os << " a:" << cnt;
        }
        cnt = valselecttriples[i].get_stats();
        if (cnt > 0) {
            os << " s:" << cnt;
        }
        rdpfstorage_dumpstats<1>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<2>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<3>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<4>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<5>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<1>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<2>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<3>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<4>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<5>(os, irdpftriples, i, true);
        cnt = cdpfs[i].get_stats();
        if (cnt > 0) {
            os << " c:" << cnt;
        }
    }
    os << "\n";
}

void MPCPeerIO::reset_precomp_stats()
{
    for (size_t i=0; i<multtriples.size(); ++i) {
        multtriples[i].reset_stats();
        halftriples[i].reset_stats();
        andtriples[i].reset_stats();
        valselecttriples[i].reset_stats();
        rdpfstorage_resetstats<1>(rdpftriples, i);
        rdpfstorage_resetstats<2>(rdpftriples, i);
        rdpfstorage_resetstats<3>(rdpftriples, i);
        rdpfstorage_resetstats<4>(rdpftriples, i);
        rdpfstorage_resetstats<5>(rdpftriples, i);
        rdpfstorage_resetstats<1>(irdpftriples, i);
        rdpfstorage_resetstats<2>(irdpftriples, i);
        rdpfstorage_resetstats<3>(irdpftriples, i);
        rdpfstorage_resetstats<4>(irdpftriples, i);
        rdpfstorage_resetstats<5>(irdpftriples, i);
    }
}

void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCServerIO::MPCServerIO(ProcessingMode mode,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, mode, p0socks.size())
{
    rdpfstorage_init<1>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<2>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<3>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<4>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<5>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<1>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<2>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<3>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<4>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<5>(irdpfpairs, player, mode, num_threads, true);
    for (unsigned i=0; i<num_threads; ++i) {
        p0ios.emplace_back(std::move(p0socks[i]), "p0", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        p1ios.emplace_back(std::move(p1socks[i]), "p1", i);
    }
}

void MPCServerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<std::get<0>(rdpfpairs).size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        rdpfstorage_dumpstats<1>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<2>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<3>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<4>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<5>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<1>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<2>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<3>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<4>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<5>(os, irdpfpairs, i, true);
    }
    os << "\n";
}

void MPCServerIO::reset_precomp_stats()
{
    for (size_t i=0; i<std::get<0>(rdpfpairs).size(); ++i) {
        rdpfstorage_resetstats<1>(rdpfpairs, i);
        rdpfstorage_resetstats<2>(rdpfpairs, i);
        rdpfstorage_resetstats<3>(rdpfpairs, i);
        rdpfstorage_resetstats<4>(rdpfpairs, i);
        rdpfstorage_resetstats<5>(rdpfpairs, i);
        rdpfstorage_resetstats<1>(irdpfpairs, i);
        rdpfstorage_resetstats<2>(irdpfpairs, i);
        rdpfstorage_resetstats<3>(irdpfpairs, i);
        rdpfstorage_resetstats<4>(irdpfpairs, i);
        rdpfstorage_resetstats<5>(irdpfpairs, i);
    }
}

void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num, int num_threads) :
    thread_num(thread_num), local_cpu_nthreads(num_threads),
    communication_nthreads(num_threads),
    thread_lamport(mpcio.lamport), mpcio(mpcio),
#ifdef VERBOSE_COMMS
    round_num(0),
#endif
    last_andtriple_bits_remaining(0)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}

// Sync our per-thread lamport clock with the master one in the
// mpcio. You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before. We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
    // The next line atomically checks if lamport still has
    // the value old_lamport; if so, it changes its value to
    // new_lamport and returns true (ending the loop). If
    // not, it sets old_lamport to the current value of
    // lamport, and returns false (continuing the loop so
    // that new_lamport can be recomputed based on this new
    // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    thread_lamport = new_lamport;
}
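
// For example (a sketch of the interthread case described above,
// using a hypothetical side channel between two worker threads):
//
//   sender:   tio.sync_lamport();  // then hand the data to the
//                                  // receiving thread
//   receiver: // take the data from the sending thread, then:
//             tio.sync_lamport();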

// Only call this if you can be sure that there are no outstanding
// messages in flight, you can call it on all existing MPCTIOs, and
// you really want to reset the Lamport clock in the middle of a
// run.
void MPCTIO::reset_lamport()
{
    // Reset both our own Lamport clock and the parent MPCIO's
    thread_lamport = 0;
    mpcio.lamport = 0;
}

// Queue up data to the peer or to the server
void MPCTIO::queue_peer(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_server(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from the peer or from the server
size_t MPCTIO::recv_peer(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_server(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Queue up data to p0 or p1
void MPCTIO::queue_p0(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_p1(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from p0 or p1
size_t MPCTIO::recv_p0(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_p1(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Send all queued data for this thread
void MPCTIO::send()
{
#ifdef VERBOSE_COMMS
    printf("Thread %u sending round %lu\n", thread_num, ++round_num);
#endif
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.peerios[thread_num].send();
        mpcpio.serverios[thread_num].send();
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.p0ios[thread_num].send();
        mpcsrvio.p1ios[thread_num].send();
    }
}

// Functions to get precomputed values. If we're in the online
// phase, get them from PreCompStorage. If we're in the
// preprocessing or online-only phase, read them from the server.
MultTriple MPCTIO::multtriple(yield_t &yield)
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.multtriples[thread_num].inc();
        } else {
            mpcpio.multtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create multiplication triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
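        // Check: X0*Y1 + X1*Y0 = Z0 + Z1 holds by construction of
        // Z1, with all arithmetic wrapping in value_t.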
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}

// When halftriple() is used internally to another preprocessing
// operation, don't tally it, so that it doesn't appear separately in
// the stats from the preprocessing operation that invoked it
HalfTriple MPCTIO::halftriple(yield_t &yield, bool tally)
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            if (tally) {
                mpcpio.halftriples[thread_num].inc();
            }
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
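        // Check: X0*Y1 = Z0 + Z1 holds by construction of Z1.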
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
        yield();
    }
    return val;
}

AndTriple MPCTIO::andtriple(yield_t &yield)
{
    AndTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.andtriples[thread_num].inc();
        } else {
            mpcpio.andtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create AND triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0&Y1 ^ Y0&X1) = (Z0^Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = (X0 & Y1) ^ (X1 & Y0) ^ Z0;
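        // Check: (X0&Y1) ^ (X1&Y0) = Z0 ^ Z1 holds bitwise by
        // construction of Z1.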
        AndTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}

SelectTriple<DPFnode> MPCTIO::nodeselecttriple(yield_t &yield)
{
    SelectTriple<DPFnode> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            uint8_t Xbyte;
            yield();
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
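        // Check: with the extended masks standing in for the
        // bit-times-node products, (X0*Y1) ^ (X1*Y0) = Z0 ^ Z1 by
        // construction of Z1.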
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}

SelectTriple<value_t> MPCTIO::valselecttriple(yield_t &yield)
{
    SelectTriple<value_t> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            uint8_t Xbyte;
            yield();
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
            mpcpio.valselecttriples[thread_num].inc();
        } else {
            mpcpio.valselecttriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        value_t Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        value_t X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = -value_t(X0);
        X1ext = -value_t(X1);
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
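        // Check: (X0*Y1) ^ (X1*Y0) = Z0 ^ Z1 by construction of Z1,
        // with -value_t(X) giving the all-zeros or all-ones mask.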
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}
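
// An AND triple over value_t is really 8*sizeof(value_t) independent
// AND triples over single bits, one per bit position. Rather than
// consume a whole AND triple per bit-select triple, we cache the most
// recently fetched AND triple and hand out one bit of it per call.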
SelectTriple<bit_t> MPCTIO::bitselecttriple(yield_t &yield)
{
    // Do we need to fetch a new AND triple?
    if (last_andtriple_bits_remaining == 0) {
        last_andtriple = andtriple(yield);
        last_andtriple_bits_remaining = 8*sizeof(value_t);
    }
    --last_andtriple_bits_remaining;
    value_t mask = value_t(1) << last_andtriple_bits_remaining;
    SelectTriple<bit_t> val;
    val.X = !!(std::get<0>(last_andtriple) & mask);
    val.Y = !!(std::get<1>(last_andtriple) & mask);
    val.Z = !!(std::get<2>(last_andtriple) & mask);
    return val;
}

CDPF MPCTIO::cdpf(yield_t &yield)
{
    CDPF val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            iostream_server() >> val;
            mpcpio.cdpfs[thread_num].inc();
        } else {
            mpcpio.cdpfs[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
        iostream_p0() << cdpf0;
        iostream_p1() << cdpf1;
        yield();
    }
    return val;
}

// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;

// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;

// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
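
// Connection topology: P0 listens for both P1 and P2; P1 listens for
// P2 and connects to P0; P2 connects to both P0 and P1. Each party
// opens one socket per thread, and the connecting side writes a
// 2-byte thread number first, so the per-thread sockets can be
// matched up no matter what order the connections arrive in.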

void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr, // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}

void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}