mpcio.cpp
#include <sys/time.h> // getrusage
#include <sys/resource.h> // getrusage
#include "mpcio.hpp"
#include "rdpf.hpp"
#include "cdpf.hpp"
#include "bitutils.hpp"
#include "coroutine.hpp"

void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
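
// Invariant maintained by the function above: at most one async_write
// is outstanding on the socket at any time.  The completion handler
// pops the finished message and, if more messages were queued in the
// meantime, immediately chains the next write; send() below only
// launches a write when the queue transitions from empty to
// non-empty, so writes are never issued concurrently.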
size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock.  Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

#ifdef VERBOSE_COMMS
    struct timeval tv;
    gettimeofday(&tv, NULL);
    printf("%lu.%06lu: Queue %s.%d len=%lu lamp=%u: ", tv.tv_sec,
        tv.tv_usec, dest.c_str(), thread_num, len,
        message_lamport.value());
    for (size_t i=0;i<len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

    // If we already have some full packets' worth of data, we may as
    // well send it.
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}
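
// A note on the 28800-byte flush threshold above: 28800 = 20 * 1440,
// which is plausibly twenty TCP payloads on a 1500-byte-MTU link
// (1500 minus 40 bytes of IP and TCP headers).  That reading is an
// assumption based on the constant's value, not something the code
// itself states.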

void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();
    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif
    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}

size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef VERBOSE_COMMS
    struct timeval tv;
    gettimeofday(&tv, NULL);
    size_t orig_len = len;
    printf("%lu.%06lu: Recv %s.%d len=%lu lamp=%u ", tv.tv_sec,
        tv.tv_usec, dest.c_str(), thread_num, len, lamport);
#endif

#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif

#ifdef VERBOSE_COMMS
    gettimeofday(&tv, NULL);
    printf("nlamp=%u %lu.%06lu: ", lamport, tv.tv_sec, tv.tv_usec);
    for (size_t i=0;i<orig_len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

#ifdef RECORD_IOTRACE
    iotrace.push_back(-(ssize_t(res)));
#endif

    return res;
}
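
// Wire format when SEND_LAMPORT_CLOCKS is defined: each message on
// the socket is
//
//   [ uint32_t datalen ][ lamport_t lamport ][ datalen bytes payload ]
//
// and, per the loop above, the receiver advances its clock to
// max(lamport, received_lamport + 1), the standard Lamport-clock
// update rule.  Without SEND_LAMPORT_CLOCKS the stream is just raw
// payload bytes.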

#ifdef RECORD_IOTRACE
void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
{
    if (label) {
        os << label << " ";
    }
    os << "IO trace:";
    for (auto& s: iotrace) {
        os << " " << s;
    }
    os << "\n";
}
#endif

void MPCIO::reset_stats()
{
    msgs_sent.clear();
    msg_bytes_sent.clear();
    aes_ops.clear();
    for (size_t i=0; i<num_threads; ++i) {
        msgs_sent.push_back(0);
        msg_bytes_sent.push_back(0);
        aes_ops.push_back(0);
    }
    steady_start = boost::chrono::steady_clock::now();
    cpu_start = boost::chrono::process_cpu_clock::now();
}

// Report the memory usage
void MPCIO::dump_memusage(std::ostream &os)
{
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    os << "Mem: " << ru.ru_maxrss << " KiB\n";
}

void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << tot_aes_ops << " local AES operations\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
    dump_memusage(os);
}

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_init(TVA &storage, unsigned player,
    ProcessingMode mode, unsigned num_threads, bool incremental)
{
    auto &VA = std::get<WIDTH-1>(storage);
    VA.resize(num_threads);
    char prefix[12];
    strcpy(prefix, incremental ? "irdpf" : "rdpf");
    if (WIDTH > 1) {
        sprintf(prefix+strlen(prefix), "%d_", WIDTH);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            VA[i][depth-1].init(player, mode, prefix, i, depth, WIDTH);
        }
    }
}
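
// Indexing convention for the storage tuple: std::get<WIDTH-1>
// selects the compile-time DPF width, the vector index selects the
// thread, and the array index selects the depth, so an entry lives at
// std::get<WIDTH-1>(storage)[thread][depth-1].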

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_dumpstats(std::ostream &os, TVA &storage,
    size_t thread_num, bool incremental)
{
    auto &VA = std::get<WIDTH-1>(storage);
    for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
        size_t cnt = VA[thread_num][depth-1].get_stats();
        if (cnt > 0) {
            os << (incremental ? " i" : " r") << int(depth);
            if (WIDTH > 1) {
                os << "." << int(WIDTH);
            }
            os << ":" << cnt;
        }
    }
}

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_resetstats(TVA &storage, size_t thread_num)
{
    auto &VA = std::get<WIDTH-1>(storage);
    for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
        VA[thread_num][depth-1].reset_stats();
    }
}

MPCPeerIO::MPCPeerIO(unsigned player, ProcessingMode mode,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, mode, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        multtriples.emplace_back(player, mode, "mults", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, mode, "halves", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        andtriples.emplace_back(player, mode, "ands", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        valselecttriples.emplace_back(player, mode, "selects", i);
    }
    rdpfstorage_init<1>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<2>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<3>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<4>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<5>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<1>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<2>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<3>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<4>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<5>(irdpftriples, player, mode, num_threads, true);
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, mode, "cdpf", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        peerios.emplace_back(std::move(peersocks[i]), "peer", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        serverios.emplace_back(std::move(serversocks[i]), "srv", i);
    }
}

void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<multtriples.size(); ++i) {
        size_t cnt;
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        cnt = multtriples[i].get_stats();
        if (cnt > 0) {
            os << " m:" << cnt;
        }
        cnt = halftriples[i].get_stats();
        if (cnt > 0) {
            os << " h:" << cnt;
        }
        cnt = andtriples[i].get_stats();
        if (cnt > 0) {
            os << " a:" << cnt;
        }
        cnt = valselecttriples[i].get_stats();
        if (cnt > 0) {
            os << " s:" << cnt;
        }
        rdpfstorage_dumpstats<1>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<2>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<3>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<4>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<5>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<1>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<2>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<3>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<4>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<5>(os, irdpftriples, i, true);
        cnt = cdpfs[i].get_stats();
        if (cnt > 0) {
            os << " c:" << cnt;
        }
    }
    os << "\n";
}

void MPCPeerIO::reset_precomp_stats()
{
    for (size_t i=0; i<multtriples.size(); ++i) {
        multtriples[i].reset_stats();
        halftriples[i].reset_stats();
        andtriples[i].reset_stats();
        valselecttriples[i].reset_stats();
        rdpfstorage_resetstats<1>(rdpftriples, i);
        rdpfstorage_resetstats<2>(rdpftriples, i);
        rdpfstorage_resetstats<3>(rdpftriples, i);
        rdpfstorage_resetstats<4>(rdpftriples, i);
        rdpfstorage_resetstats<5>(rdpftriples, i);
        rdpfstorage_resetstats<1>(irdpftriples, i);
        rdpfstorage_resetstats<2>(irdpftriples, i);
        rdpfstorage_resetstats<3>(irdpftriples, i);
        rdpfstorage_resetstats<4>(irdpftriples, i);
        rdpfstorage_resetstats<5>(irdpftriples, i);
    }
}

void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCServerIO::MPCServerIO(ProcessingMode mode,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, mode, p0socks.size())
{
    rdpfstorage_init<1>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<2>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<3>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<4>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<5>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<1>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<2>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<3>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<4>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<5>(irdpfpairs, player, mode, num_threads, true);
    for (unsigned i=0; i<num_threads; ++i) {
        p0ios.emplace_back(std::move(p0socks[i]), "p0", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        p1ios.emplace_back(std::move(p1socks[i]), "p1", i);
    }
}

void MPCServerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<std::get<0>(rdpfpairs).size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        rdpfstorage_dumpstats<1>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<2>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<3>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<4>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<5>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<1>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<2>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<3>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<4>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<5>(os, irdpfpairs, i, true);
    }
    os << "\n";
}

void MPCServerIO::reset_precomp_stats()
{
    for (size_t i=0; i<std::get<0>(rdpfpairs).size(); ++i) {
        rdpfstorage_resetstats<1>(rdpfpairs, i);
        rdpfstorage_resetstats<2>(rdpfpairs, i);
        rdpfstorage_resetstats<3>(rdpfpairs, i);
        rdpfstorage_resetstats<4>(rdpfpairs, i);
        rdpfstorage_resetstats<5>(rdpfpairs, i);
        rdpfstorage_resetstats<1>(irdpfpairs, i);
        rdpfstorage_resetstats<2>(irdpfpairs, i);
        rdpfstorage_resetstats<3>(irdpfpairs, i);
        rdpfstorage_resetstats<4>(irdpfpairs, i);
        rdpfstorage_resetstats<5>(irdpfpairs, i);
    }
}

void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num, int num_threads) :
    thread_num(thread_num), local_cpu_nthreads(num_threads),
    communication_nthreads(num_threads),
    thread_lamport(mpcio.lamport), mpcio(mpcio),
#ifdef VERBOSE_COMMS
    round_num(0),
#endif
    last_andtriple_bits_remaining(0),
    remaining_nodesselecttriples(0)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}

// Sync our per-thread lamport clock with the master one in the
// mpcio.  You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before.  We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
    // The next line atomically checks if lamport still has
    // the value old_lamport; if so, it changes its value to
    // new_lamport and returns true (ending the loop).  If
    // not, it sets old_lamport to the current value of
    // lamport, and returns false (continuing the loop so
    // that new_lamport can be recomputed based on this new
    // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    thread_lamport = new_lamport;
}
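
// Worked example of the loop above: suppose thread_lamport is 7 and
// mpcio.lamport is 5.  We compute new_lamport = 7; if no other thread
// races us, compare_exchange_weak installs 7 and the loop ends.  If
// another thread meanwhile raised mpcio.lamport to 9, the exchange
// fails, old_lamport becomes 9, the next iteration recomputes
// new_lamport = 9, and both clocks end up at 9.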

// Only call this if you can be sure that there are no outstanding
// messages in flight, you can call it on all existing MPCTIOs, and
// you really want to reset the Lamport clock in the middle of a
// run.
void MPCTIO::reset_lamport()
{
    // Reset both our own Lamport clock and the parent MPCIO's
    thread_lamport = 0;
    mpcio.lamport = 0;
}

// Queue up data to the peer or to the server
void MPCTIO::queue_peer(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_server(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from the peer or from the server
size_t MPCTIO::recv_peer(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_server(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Queue up data to p0 or p1
void MPCTIO::queue_p0(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_p1(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from p0 or p1
size_t MPCTIO::recv_p0(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_p1(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Send all queued data for this thread
void MPCTIO::send()
{
#ifdef VERBOSE_COMMS
    struct timeval tv;
    gettimeofday(&tv, NULL);
    printf("%lu.%06lu: Thread %u sending round %lu\n", tv.tv_sec,
        tv.tv_usec, thread_num, ++round_num);
#endif
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.peerios[thread_num].send();
        mpcpio.serverios[thread_num].send();
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.p0ios[thread_num].send();
        mpcsrvio.p1ios[thread_num].send();
    }
}
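
// Typical round structure built on the calls above: a computational
// party queues outbound data with queue_peer()/queue_server(),
// flushes the round with send(), and then blocks in
// recv_peer()/recv_server() for the other parties' data (the server
// uses the p0/p1 variants).  The queue_*() calls only buffer;
// nothing hits the network until send(), or until a buffer passes
// the flush threshold in MPCSingleIO::queue().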

// Functions to get precomputed values.  If we're in the online
// phase, get them from PreCompStorage.  If we're in the
// preprocessing or online-only phase, read them from the server.
MultTriple MPCTIO::multtriple(yield_t &yield)
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.multtriples[thread_num].inc();
        } else {
            mpcpio.multtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create multiplication triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}
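
#if 0
// A minimal sanity-check sketch (not part of the build; needs
// <cassert>) for the triple identity above: all arithmetic is the
// native wraparound arithmetic of value_t, so (X0*Y1 + X1*Y0) must
// equal (Z0 + Z1) mod 2^w by construction of Z1.
static void check_multtriple_identity()
{
    value_t X0, Y0, Z0, X1, Y1;
    arc4random_buf(&X0, sizeof(X0));
    arc4random_buf(&Y0, sizeof(Y0));
    arc4random_buf(&Z0, sizeof(Z0));
    arc4random_buf(&X1, sizeof(X1));
    arc4random_buf(&Y1, sizeof(Y1));
    value_t Z1 = X0 * Y1 + X1 * Y0 - Z0;
    assert(X0 * Y1 + X1 * Y0 == Z0 + Z1);
}
#endif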

// When halftriple() is used internally to another preprocessing
// operation, don't tally it, so that it doesn't appear separately in
// the stats from the preprocessing operation that invoked it
HalfTriple MPCTIO::halftriple(yield_t &yield, bool tally)
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            if (tally) {
                mpcpio.halftriples[thread_num].inc();
            }
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
        yield();
    }
    return val;
}

MultTriple MPCTIO::andtriple(yield_t &yield)
{
    AndTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.andtriples[thread_num].inc();
        } else {
            mpcpio.andtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create AND triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0&Y1 ^ Y0&X1) = (Z0^Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = (X0 & Y1) ^ (X1 & Y0) ^ Z0;
        AndTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}

void MPCTIO::request_nodeselecttriples(yield_t &yield, size_t num)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            for (size_t i=0; i<num; ++i) {
                SelectTriple<DPFnode> v;
                uint8_t Xbyte;
                recv_server(&Xbyte, sizeof(Xbyte));
                v.X = Xbyte & 1;
                recv_server(&v.Y, sizeof(v.Y));
                recv_server(&v.Z, sizeof(v.Z));
                queued_nodeselecttriples.push_back(v);
            }
            remaining_nodesselecttriples += num;
        } else {
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        for (size_t i=0; i<num; ++i) {
            // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
            // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
            bit_t X0, X1;
            DPFnode Y0, Z0, Y1, Z1;
            X0 = arc4random() & 1;
            arc4random_buf(&Y0, sizeof(Y0));
            arc4random_buf(&Z0, sizeof(Z0));
            X1 = arc4random() & 1;
            arc4random_buf(&Y1, sizeof(Y1));
            DPFnode X0ext, X1ext;
            // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
            // 1 -> 1111...1)
            X0ext = if128_mask[X0];
            X1ext = if128_mask[X1];
            Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
            queue_p0(&X0, sizeof(X0));
            queue_p0(&Y0, sizeof(Y0));
            queue_p0(&Z0, sizeof(Z0));
            queue_p1(&X1, sizeof(X1));
            queue_p1(&Y1, sizeof(Y1));
            queue_p1(&Z1, sizeof(Z1));
        }
        yield();
        remaining_nodesselecttriples += num;
    }
}
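
// Note on the sign-extension above: if128_mask (from bitutils.hpp)
// is indexed by a bit and evidently maps 0 to the all-zeros 128-bit
// value and 1 to the all-ones 128-bit value, so ANDing Y1 and Y0 with
// X0ext and X1ext selects them exactly when the corresponding X bit
// is set; that reading follows from the comment and the use here.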

SelectTriple<DPFnode> MPCTIO::nodeselecttriple(yield_t &yield)
{
    SelectTriple<DPFnode> val;
    if (remaining_nodesselecttriples == 0) {
        request_nodeselecttriples(yield, 1);
    }
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            val = queued_nodeselecttriples.front();
            queued_nodeselecttriples.pop_front();
            --remaining_nodesselecttriples;
        } else {
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        --remaining_nodesselecttriples;
    }
    return val;
}

SelectTriple<value_t> MPCTIO::valselecttriple(yield_t &yield)
{
    SelectTriple<value_t> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            uint8_t Xbyte;
            yield();
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
            mpcpio.valselecttriples[thread_num].inc();
        } else {
            mpcpio.valselecttriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        value_t Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        value_t X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = -value_t(X0);
        X1ext = -value_t(X1);
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}

SelectTriple<bit_t> MPCTIO::bitselecttriple(yield_t &yield)
{
    // Do we need to fetch a new AND triple?
    if (last_andtriple_bits_remaining == 0) {
        last_andtriple = andtriple(yield);
        last_andtriple_bits_remaining = 8*sizeof(value_t);
    }
    --last_andtriple_bits_remaining;
    value_t mask = value_t(1) << last_andtriple_bits_remaining;
    SelectTriple<bit_t> val;
    val.X = !!(std::get<0>(last_andtriple) & mask);
    val.Y = !!(std::get<1>(last_andtriple) & mask);
    val.Z = !!(std::get<2>(last_andtriple) & mask);
    return val;
}
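
// Why slicing an AND triple bitwise is sound: the AND-triple relation
// (X0&Y1) ^ (X1&Y0) = Z0^Z1 holds independently in every bit
// position, so each of the 8*sizeof(value_t) bit positions of one
// AND triple yields a valid one-bit select triple, and a single
// fetched triple serves that many bitselecttriple() calls.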

CDPF MPCTIO::cdpf(yield_t &yield)
{
    CDPF val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            iostream_server() >> val;
            mpcpio.cdpfs[thread_num].inc();
        } else {
            mpcpio.cdpfs[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
        iostream_p0() << cdpf0;
        iostream_p1() << cdpf1;
        yield();
    }
    return val;
}

// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;

// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;

// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
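
// Connection topology: for each port above, the lower-numbered party
// listens and the higher-numbered party connects, so P0 accepts from
// P1 and P2, and P1 accepts from P2.  Each pair of parties opens
// num_threads sockets, one per communication thread, and the first
// two bytes sent on each socket identify which thread it belongs to.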

void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr,  // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }

    // Read the start signal from P2
    char ack[1];
    boost::asio::read(serversocks[0], boost::asio::buffer(ack, 1));
    // Send the start ack to P2
    boost::asio::write(serversocks[0], boost::asio::buffer("", 1));
}

void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }

    // Send the start signal to P0 and P1
    boost::asio::write(p0socks[0], boost::asio::buffer("", 1));
    boost::asio::write(p1socks[0], boost::asio::buffer("", 1));
    // Read the start ack from P0 and P1
    char ack[1];
    boost::asio::read(p0socks[0], boost::asio::buffer(ack, 1));
    boost::asio::read(p1socks[0], boost::asio::buffer(ack, 1));
}
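
#if 0
// A minimal usage sketch (hypothetical, not part of the build):
// player 0 or 1 sets up its sockets and wraps them in an MPCPeerIO;
// the server (player 2) would use mpcio_setup_server and MPCServerIO
// instead.  The parameter name "p0addr" passed through here and the
// choice of MODE_ONLINE are illustrative assumptions.
static void example_computational_setup(unsigned player, const char *p0addr)
{
    boost::asio::io_context io_context;
    std::deque<tcp::socket> peersocks, serversocks;
    int num_threads = 1;
    mpcio_setup_computational(player, io_context, p0addr,
        num_threads, peersocks, serversocks);
    MPCPeerIO mpcpio(player, MODE_ONLINE, peersocks, serversocks);
    MPCTIO tio(mpcpio, 0, num_threads);
    // ... run the protocol using tio.queue_peer()/tio.send()/etc. ...
}
#endif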