// mpcio.cpp
#include <sys/time.h> // getrusage
#include <sys/resource.h> // getrusage
#include <cstdio> // snprintf
#include "mpcio.hpp"
#include "rdpf.hpp"
#include "cdpf.hpp"
#include "bitutils.hpp"
#include "coroutine.hpp"
// Launch an async_write for the message at the front of the message
// queue.  The completion handler pops the sent message and, if more
// messages were queued in the meantime, chains the next async_write,
// so at most one write is ever in flight per socket.
// Precondition: messagequeue is non-empty (callers check this while
// holding messagequeuelock).
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    // With Lamport clocks enabled each queued message carries a
    // separate header; gather header + payload into one scatter write
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            // NOTE(review): ec is ignored here; a write error would
            // silently stop draining the queue -- confirm intended
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
// Append len bytes at data to the outgoing data queue, tagging the
// (eventual) message with the given Lamport clock value.  Returns 1
// if this append began a new message (the first queue() since the
// last explicit send()), else 0, so callers can count messages.
size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock. Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

#ifdef VERBOSE_COMMS
    printf("Queue %s.%d len=%lu lamp=%u: ", dest.c_str(), thread_num,
        len, message_lamport.value());
    for (size_t i=0;i<len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

    // If we already have some full packets worth of data, may as
    // well send it.  (28800 bytes; presumably an empirically chosen
    // flush threshold -- confirm before changing.)
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}
// Flush the accumulated data queue into the message queue and start
// an async write if none is already in flight.  implicit_send is
// true when invoked from queue() because the buffer grew large, and
// false for an explicit end-of-round send(); only an explicit send
// resets message_lamport so the next queue() stamps a fresh clock.
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    // Positive trace entries record sends (receives are negative)
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
// Receive exactly len bytes into data, blocking as needed.  When
// SEND_LAMPORT_CLOCKS is enabled, advances the caller's Lamport
// clock from the clocks carried in received message headers.
// Returns the number of bytes received (always len on success).
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef VERBOSE_COMMS
    size_t orig_len = len;
    printf("Recv %s.%d len=%lu lamp=%u ", dest.c_str(), thread_num,
        len, lamport);
#endif

#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        // If no unconsumed payload remains from the previous message,
        // read message headers until one arrives with a payload
        // (zero-length messages only update the clock)
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            // Receiving a message moves our clock to one past the
            // sender's clock, if that is larger than our current value
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        // Hand the caller as much buffered payload as they asked for
        // (or as much as remains, whichever is smaller)
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif

#ifdef VERBOSE_COMMS
    printf("nlamp=%u: ", lamport);
    for (size_t i=0;i<orig_len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

#ifdef RECORD_IOTRACE
    // Negative trace entries record receives
    iotrace.push_back(-(ssize_t(res)));
#endif

    return res;
}
  153. #ifdef RECORD_IOTRACE
  154. void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
  155. {
  156. if (label) {
  157. os << label << " ";
  158. }
  159. os << "IO trace:";
  160. for (auto& s: iotrace) {
  161. os << " " << s;
  162. }
  163. os << "\n";
  164. }
  165. #endif
  166. void MPCIO::reset_stats()
  167. {
  168. msgs_sent.clear();
  169. msg_bytes_sent.clear();
  170. aes_ops.clear();
  171. for (size_t i=0; i<num_threads; ++i) {
  172. msgs_sent.push_back(0);
  173. msg_bytes_sent.push_back(0);
  174. aes_ops.push_back(0);
  175. }
  176. steady_start = boost::chrono::steady_clock::now();
  177. cpu_start = boost::chrono::process_cpu_clock::now();
  178. }
// Report the memory usage
// Writes the process's peak resident set size to os.
// NOTE(review): ru_maxrss is reported in KiB on Linux but in bytes
// on macOS; the "KiB" label assumes Linux -- confirm target platform.
void MPCIO::dump_memusage(std::ostream &os)
{
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    os << "Mem: " << ru.ru_maxrss << " KiB\n";
}
// Write cumulative statistics to os: message/byte totals summed over
// all threads, the Lamport clock (a proxy for communication
// latencies), the total AES operation count, and the wall-clock and
// {real;user;system} CPU time elapsed since the last reset_stats().
void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << tot_aes_ops << " local AES operations\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
    dump_memusage(os);
}
  214. // TVA is a tuple of vectors of arrays of PreCompStorage
  215. template <nbits_t WIDTH, typename TVA>
  216. static void rdpfstorage_init(TVA &storage, unsigned player,
  217. ProcessingMode mode, unsigned num_threads, bool incremental)
  218. {
  219. auto &VA = std::get<WIDTH-1>(storage);
  220. VA.resize(num_threads);
  221. char prefix[12];
  222. strcpy(prefix, incremental ? "irdpf" : "rdpf");
  223. if (WIDTH > 1) {
  224. sprintf(prefix+strlen(prefix), "%d_", WIDTH);
  225. }
  226. for (unsigned i=0; i<num_threads; ++i) {
  227. for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  228. VA[i][depth-1].init(player, mode, prefix, i, depth, WIDTH);
  229. }
  230. }
  231. }
  232. // TVA is a tuple of vectors of arrays of PreCompStorage
  233. template <nbits_t WIDTH, typename TVA>
  234. static void rdpfstorage_dumpstats(std::ostream &os, TVA &storage,
  235. size_t thread_num, bool incremental)
  236. {
  237. auto &VA = std::get<WIDTH-1>(storage);
  238. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  239. size_t cnt = VA[thread_num][depth-1].get_stats();
  240. if (cnt > 0) {
  241. os << (incremental ? " i" : " r") << int(depth);
  242. if (WIDTH > 1) {
  243. os << "." << int(WIDTH);
  244. }
  245. os << ":" << cnt;
  246. }
  247. }
  248. }
  249. // TVA is a tuple of vectors of arrays of PreCompStorage
  250. template <nbits_t WIDTH, typename TVA>
  251. static void rdpfstorage_resetstats(TVA &storage, size_t thread_num)
  252. {
  253. auto &VA = std::get<WIDTH-1>(storage);
  254. for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
  255. VA[thread_num][depth-1].reset_stats();
  256. }
  257. }
// Construct the I/O state for a computational player (P0 or P1):
// one precomputed-value store per thread for each triple type, the
// RDPF stores for widths 1-5 (both plain and incremental), the CDPF
// stores, and one MPCSingleIO per thread for each of the peer and
// server connections.  One thread is created per socket in peersocks.
MPCPeerIO::MPCPeerIO(unsigned player, ProcessingMode mode,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, mode, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        multtriples.emplace_back(player, mode, "mults", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, mode, "halves", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        andtriples.emplace_back(player, mode, "ands", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        valselecttriples.emplace_back(player, mode, "selects", i);
    }
    rdpfstorage_init<1>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<2>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<3>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<4>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<5>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<1>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<2>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<3>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<4>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<5>(irdpftriples, player, mode, num_threads, true);
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, mode, "cdpf", i);
    }
    // The sockets are moved into the MPCSingleIO wrappers
    for (unsigned i=0; i<num_threads; ++i) {
        peerios.emplace_back(std::move(peersocks[i]), "peer", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        serverios.emplace_back(std::move(serversocks[i]), "srv", i);
    }
}
// Write per-thread usage counts of precomputed values to os on one
// line, as space-separated "T<i>" groups.  Within each group, only
// nonzero counters are printed: m/h/a/s for mult/half/and/select
// triples, r/i entries for (incremental) RDPFs, and c for CDPFs.
void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<multtriples.size(); ++i) {
        size_t cnt;
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        cnt = multtriples[i].get_stats();
        if (cnt > 0) {
            os << " m:" << cnt;
        }
        cnt = halftriples[i].get_stats();
        if (cnt > 0) {
            os << " h:" << cnt;
        }
        cnt = andtriples[i].get_stats();
        if (cnt > 0) {
            os << " a:" << cnt;
        }
        cnt = valselecttriples[i].get_stats();
        if (cnt > 0) {
            os << " s:" << cnt;
        }
        rdpfstorage_dumpstats<1>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<2>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<3>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<4>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<5>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<1>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<2>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<3>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<4>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<5>(os, irdpftriples, i, true);
        cnt = cdpfs[i].get_stats();
        if (cnt > 0) {
            os << " c:" << cnt;
        }
    }
    os << "\n";
}
  337. void MPCPeerIO::reset_precomp_stats()
  338. {
  339. for (size_t i=0; i<multtriples.size(); ++i) {
  340. multtriples[i].reset_stats();
  341. halftriples[i].reset_stats();
  342. andtriples[i].reset_stats();
  343. valselecttriples[i].reset_stats();
  344. rdpfstorage_resetstats<1>(rdpftriples, i);
  345. rdpfstorage_resetstats<2>(rdpftriples, i);
  346. rdpfstorage_resetstats<3>(rdpftriples, i);
  347. rdpfstorage_resetstats<4>(rdpftriples, i);
  348. rdpfstorage_resetstats<5>(rdpftriples, i);
  349. rdpfstorage_resetstats<1>(irdpftriples, i);
  350. rdpfstorage_resetstats<2>(irdpftriples, i);
  351. rdpfstorage_resetstats<3>(irdpftriples, i);
  352. rdpfstorage_resetstats<4>(irdpftriples, i);
  353. rdpfstorage_resetstats<5>(irdpftriples, i);
  354. }
  355. }
// Dump the base-class statistics followed by this player's
// precomputed-value usage counts.
void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Construct the I/O state for the server (player 2): the RDPF-pair
// stores for widths 1-5 (plain and incremental), and one MPCSingleIO
// per thread for each of the P0 and P1 connections.  One thread is
// created per socket in p0socks.
MPCServerIO::MPCServerIO(ProcessingMode mode,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, mode, p0socks.size())
{
    rdpfstorage_init<1>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<2>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<3>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<4>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<5>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<1>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<2>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<3>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<4>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<5>(irdpfpairs, player, mode, num_threads, true);
    // The sockets are moved into the MPCSingleIO wrappers
    for (unsigned i=0; i<num_threads; ++i) {
        p0ios.emplace_back(std::move(p0socks[i]), "p0", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        p1ios.emplace_back(std::move(p1socks[i]), "p1", i);
    }
}
  384. void MPCServerIO::dump_precomp_stats(std::ostream &os)
  385. {
  386. for (size_t i=0; i<std::get<0>(rdpfpairs).size(); ++i) {
  387. if (i > 0) {
  388. os << " ";
  389. }
  390. os << "T" << i;
  391. rdpfstorage_dumpstats<1>(os, rdpfpairs, i, false);
  392. rdpfstorage_dumpstats<2>(os, rdpfpairs, i, false);
  393. rdpfstorage_dumpstats<3>(os, rdpfpairs, i, false);
  394. rdpfstorage_dumpstats<4>(os, rdpfpairs, i, false);
  395. rdpfstorage_dumpstats<5>(os, rdpfpairs, i, false);
  396. rdpfstorage_dumpstats<1>(os, irdpfpairs, i, true);
  397. rdpfstorage_dumpstats<2>(os, irdpfpairs, i, true);
  398. rdpfstorage_dumpstats<3>(os, irdpfpairs, i, true);
  399. rdpfstorage_dumpstats<4>(os, irdpfpairs, i, true);
  400. rdpfstorage_dumpstats<5>(os, irdpfpairs, i, true);
  401. }
  402. os << "\n";
  403. }
  404. void MPCServerIO::reset_precomp_stats()
  405. {
  406. for (size_t i=0; i<std::get<0>(rdpfpairs).size(); ++i) {
  407. rdpfstorage_resetstats<1>(rdpfpairs, i);
  408. rdpfstorage_resetstats<2>(rdpfpairs, i);
  409. rdpfstorage_resetstats<3>(rdpfpairs, i);
  410. rdpfstorage_resetstats<4>(rdpfpairs, i);
  411. rdpfstorage_resetstats<5>(rdpfpairs, i);
  412. rdpfstorage_resetstats<1>(irdpfpairs, i);
  413. rdpfstorage_resetstats<2>(irdpfpairs, i);
  414. rdpfstorage_resetstats<3>(irdpfpairs, i);
  415. rdpfstorage_resetstats<4>(irdpfpairs, i);
  416. rdpfstorage_resetstats<5>(irdpfpairs, i);
  417. }
  418. }
// Dump the base-class statistics followed by the server's
// precomputed-value usage counts.
void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Set up the per-thread I/O state for thread thread_num (of
// num_threads total).  Snapshots the parent MPCIO's Lamport clock
// into thread_lamport, and creates the iostream wrappers around this
// thread's sockets: (peer, server) for computational players, or
// (p0, p1) for the server player.
MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num, int num_threads) :
    thread_num(thread_num), local_cpu_nthreads(num_threads),
    communication_nthreads(num_threads),
    thread_lamport(mpcio.lamport), mpcio(mpcio),
#ifdef VERBOSE_COMMS
    round_num(0),
#endif
    last_andtriple_bits_remaining(0),
    remaining_nodesselecttriples(0)
{
    if (mpcio.player < 2) {
        // Computational players (P0/P1) talk to their peer and to the
        // server; the iostreams share this thread's Lamport clock and
        // message counters
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        // The server (P2) talks to both computational players
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}
// Sync our per-thread lamport clock with the master one in the
// mpcio.  You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before.  We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
    // The next line atomically checks if lamport still has
    // the value old_lamport; if so, it changes its value to
    // new_lamport and returns true (ending the loop). If
    // not, it sets old_lamport to the current value of
    // lamport, and returns false (continuing the loop so
    // that new_lamport can be recomputed based on this new
    // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    // Pull the (possibly larger) agreed value back into our clock
    thread_lamport = new_lamport;
}
  482. // Only call this if you can be sure that there are no outstanding
  483. // messages in flight, you can call it on all existing MPCTIOs, and
  484. // you really want to reset the Lamport clock in the midding of a
  485. // run.
  486. void MPCTIO::reset_lamport()
  487. {
  488. // Reset both our own Lamport clock and the parent MPCIO's
  489. thread_lamport = 0;
  490. mpcio.lamport = 0;
  491. }
  492. // Queue up data to the peer or to the server
  493. void MPCTIO::queue_peer(const void *data, size_t len)
  494. {
  495. if (mpcio.player < 2) {
  496. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  497. size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
  498. mpcpio.msgs_sent[thread_num] += newmsg;
  499. mpcpio.msg_bytes_sent[thread_num] += len;
  500. }
  501. }
  502. void MPCTIO::queue_server(const void *data, size_t len)
  503. {
  504. if (mpcio.player < 2) {
  505. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  506. size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
  507. mpcpio.msgs_sent[thread_num] += newmsg;
  508. mpcpio.msg_bytes_sent[thread_num] += len;
  509. }
  510. }
  511. // Receive data from the peer or to the server
  512. size_t MPCTIO::recv_peer(void *data, size_t len)
  513. {
  514. if (mpcio.player < 2) {
  515. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  516. return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
  517. }
  518. return 0;
  519. }
  520. size_t MPCTIO::recv_server(void *data, size_t len)
  521. {
  522. if (mpcio.player < 2) {
  523. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  524. return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
  525. }
  526. return 0;
  527. }
  528. // Queue up data to p0 or p1
  529. void MPCTIO::queue_p0(const void *data, size_t len)
  530. {
  531. if (mpcio.player == 2) {
  532. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  533. size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
  534. mpcsrvio.msgs_sent[thread_num] += newmsg;
  535. mpcsrvio.msg_bytes_sent[thread_num] += len;
  536. }
  537. }
  538. void MPCTIO::queue_p1(const void *data, size_t len)
  539. {
  540. if (mpcio.player == 2) {
  541. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  542. size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
  543. mpcsrvio.msgs_sent[thread_num] += newmsg;
  544. mpcsrvio.msg_bytes_sent[thread_num] += len;
  545. }
  546. }
  547. // Receive data from p0 or p1
  548. size_t MPCTIO::recv_p0(void *data, size_t len)
  549. {
  550. if (mpcio.player == 2) {
  551. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  552. return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
  553. }
  554. return 0;
  555. }
  556. size_t MPCTIO::recv_p1(void *data, size_t len)
  557. {
  558. if (mpcio.player == 2) {
  559. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  560. return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
  561. }
  562. return 0;
  563. }
  564. // Send all queued data for this thread
  565. void MPCTIO::send()
  566. {
  567. #ifdef VERBOSE_COMMS
  568. printf("Thread %u sending round %lu\n", thread_num, ++round_num);
  569. #endif
  570. if (mpcio.player < 2) {
  571. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  572. mpcpio.peerios[thread_num].send();
  573. mpcpio.serverios[thread_num].send();
  574. } else {
  575. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  576. mpcsrvio.p0ios[thread_num].send();
  577. mpcsrvio.p1ios[thread_num].send();
  578. }
  579. }
// Functions to get precomputed values. If we're in the online
// phase, get them from PreCompStorage. If we're in the
// preprocessing or online-only phase, read them from the server.
//
// Return one multiplication triple share.  Computational players
// either draw from precomputed storage (online) or read their share
// from the server (preprocessing); the server generates a fresh
// correlated pair and sends one share to each player.
MultTriple MPCTIO::multtriple(yield_t &yield)
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Yield so the server's sends for this round complete,
            // then read our share
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.multtriples[thread_num].inc();
        } else {
            mpcpio.multtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create multiplication triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    // On the server, val is returned uninitialized/unused
    return val;
}
// When halftriple() is used internally to another preprocessing
// operation, don't tally it, so that it doesn't appear separately in
// the stats from the preprocessing operation that invoked it
//
// Return one half-triple share: (X0,Z0) for P0, (Y1,Z1) for P1, with
// X0*Y1 = Z0 + Z1.  Same online/preprocessing/server split as
// multtriple().
HalfTriple MPCTIO::halftriple(yield_t &yield, bool tally)
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            if (tally) {
                mpcpio.halftriples[thread_num].inc();
            }
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
        yield();
    }
    return val;
}
  648. MultTriple MPCTIO::andtriple(yield_t &yield)
  649. {
  650. AndTriple val;
  651. if (mpcio.player < 2) {
  652. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  653. if (mpcpio.mode != MODE_ONLINE) {
  654. yield();
  655. recv_server(&val, sizeof(val));
  656. mpcpio.andtriples[thread_num].inc();
  657. } else {
  658. mpcpio.andtriples[thread_num].get(val);
  659. }
  660. } else if (mpcio.mode != MODE_ONLINE) {
  661. // Create AND triples (X0,Y0,Z0),(X1,Y1,Z1) such that
  662. // (X0&Y1 ^ Y0&X1) = (Z0^Z1)
  663. value_t X0, Y0, Z0, X1, Y1, Z1;
  664. arc4random_buf(&X0, sizeof(X0));
  665. arc4random_buf(&Y0, sizeof(Y0));
  666. arc4random_buf(&Z0, sizeof(Z0));
  667. arc4random_buf(&X1, sizeof(X1));
  668. arc4random_buf(&Y1, sizeof(Y1));
  669. Z1 = (X0 & Y1) ^ (X1 & Y0) ^ Z0;
  670. AndTriple T0, T1;
  671. T0 = std::make_tuple(X0, Y0, Z0);
  672. T1 = std::make_tuple(X1, Y1, Z1);
  673. queue_p0(&T0, sizeof(T0));
  674. queue_p1(&T1, sizeof(T1));
  675. yield();
  676. }
  677. return val;
  678. }
// Fetch (computational players) or generate-and-send (server) num
// DPFnode select triples in one communication round, buffering them
// in queued_nodeselecttriples for later consumption by
// nodeselecttriple().  These triples have no precomputed-storage
// path, so requesting them in the online phase is an error.
void MPCTIO::request_nodeselecttriples(yield_t &yield, size_t num)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            for (size_t i=0; i<num; ++i) {
                SelectTriple<DPFnode> v;
                // X is sent as a whole byte; only its low bit matters
                uint8_t Xbyte;
                recv_server(&Xbyte, sizeof(Xbyte));
                v.X = Xbyte & 1;
                recv_server(&v.Y, sizeof(v.Y));
                recv_server(&v.Z, sizeof(v.Z));
                queued_nodeselecttriples.push_back(v);
            }
            remaining_nodesselecttriples += num;
        } else {
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        for (size_t i=0; i<num; ++i) {
            // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
            // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
            bit_t X0, X1;
            DPFnode Y0, Z0, Y1, Z1;
            X0 = arc4random() & 1;
            arc4random_buf(&Y0, sizeof(Y0));
            arc4random_buf(&Z0, sizeof(Z0));
            X1 = arc4random() & 1;
            arc4random_buf(&Y1, sizeof(Y1));
            DPFnode X0ext, X1ext;
            // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
            // 1 -> 1111...1)
            X0ext = if128_mask[X0];
            X1ext = if128_mask[X1];
            Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
            queue_p0(&X0, sizeof(X0));
            queue_p0(&Y0, sizeof(Y0));
            queue_p0(&Z0, sizeof(Z0));
            queue_p1(&X1, sizeof(X1));
            queue_p1(&Y1, sizeof(Y1));
            queue_p1(&Z1, sizeof(Z1));
        }
        yield();
        // The server tracks the count too, to stay round-synchronized
        // with the computational players
        remaining_nodesselecttriples += num;
    }
}
// Return one DPFnode select triple, fetching a batch of one via
// request_nodeselecttriples() if none are buffered.  Only valid in
// the preprocessing phases; on the server the returned value is
// unused (it only decrements its synchronization counter).
SelectTriple<DPFnode> MPCTIO::nodeselecttriple(yield_t &yield)
{
    SelectTriple<DPFnode> val;
    if (remaining_nodesselecttriples == 0) {
        request_nodeselecttriples(yield, 1);
    }
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            val = queued_nodeselecttriples.front();
            queued_nodeselecttriples.pop_front();
            --remaining_nodesselecttriples;
        } else {
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        --remaining_nodesselecttriples;
    }
    return val;
}
// Return one value_t select triple share, with
// (X0*Y1 ^ Y0*X1) = (Z0^Z1) where X is a single bit.  Same
// online/preprocessing/server split as multtriple().
SelectTriple<value_t> MPCTIO::valselecttriple(yield_t &yield)
{
    SelectTriple<value_t> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // X is sent as a whole byte; only its low bit matters
            uint8_t Xbyte;
            yield();
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
            mpcpio.valselecttriples[thread_num].inc();
        } else {
            mpcpio.valselecttriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        value_t Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        value_t X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = -value_t(X0);
        X1ext = -value_t(X1);
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}
  788. SelectTriple<bit_t> MPCTIO::bitselecttriple(yield_t &yield)
  789. {
  790. // Do we need to fetch a new AND triple?
  791. if (last_andtriple_bits_remaining == 0) {
  792. last_andtriple = andtriple(yield);
  793. last_andtriple_bits_remaining = 8*sizeof(value_t);
  794. }
  795. --last_andtriple_bits_remaining;
  796. value_t mask = value_t(1) << last_andtriple_bits_remaining;
  797. SelectTriple<bit_t> val;
  798. val.X = !!(std::get<0>(last_andtriple) & mask);
  799. val.Y = !!(std::get<1>(last_andtriple) & mask);
  800. val.Z = !!(std::get<2>(last_andtriple) & mask);
  801. return val;
  802. }
  803. CDPF MPCTIO::cdpf(yield_t &yield)
  804. {
  805. CDPF val;
  806. if (mpcio.player < 2) {
  807. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  808. if (mpcpio.mode != MODE_ONLINE) {
  809. yield();
  810. iostream_server() >> val;
  811. mpcpio.cdpfs[thread_num].inc();
  812. } else {
  813. mpcpio.cdpfs[thread_num].get(val);
  814. }
  815. } else if (mpcio.mode != MODE_ONLINE) {
  816. auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
  817. iostream_p0() << cdpf0;
  818. iostream_p1() << cdpf1;
  819. yield();
  820. }
  821. return val;
  822. }
// TCP port numbers for the three pairwise connections among the
// parties.  The dialing side is listed first in each name: P1 dials
// P0, and the server P2 dials both P0 and P1.
// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;
// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;
// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
  829. void mpcio_setup_computational(unsigned player,
  830. boost::asio::io_context &io_context,
  831. const char *p0addr, // can be NULL when player=0
  832. int num_threads,
  833. std::deque<tcp::socket> &peersocks,
  834. std::deque<tcp::socket> &serversocks)
  835. {
  836. if (player == 0) {
  837. // Listen for connections from P1 and from P2
  838. tcp::acceptor acceptor_p1(io_context,
  839. tcp::endpoint(tcp::v4(), port_p1_p0));
  840. tcp::acceptor acceptor_p2(io_context,
  841. tcp::endpoint(tcp::v4(), port_p2_p0));
  842. peersocks.clear();
  843. serversocks.clear();
  844. for (int i=0;i<num_threads;++i) {
  845. peersocks.emplace_back(io_context);
  846. serversocks.emplace_back(io_context);
  847. }
  848. for (int i=0;i<num_threads;++i) {
  849. tcp::socket peersock = acceptor_p1.accept();
  850. // Read 2 bytes from the socket, which will be the thread
  851. // number
  852. unsigned short thread_num;
  853. boost::asio::read(peersock,
  854. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  855. if (thread_num >= num_threads) {
  856. std::cerr << "Received bad thread number from peer\n";
  857. } else {
  858. peersocks[thread_num] = std::move(peersock);
  859. }
  860. }
  861. for (int i=0;i<num_threads;++i) {
  862. tcp::socket serversock = acceptor_p2.accept();
  863. // Read 2 bytes from the socket, which will be the thread
  864. // number
  865. unsigned short thread_num;
  866. boost::asio::read(serversock,
  867. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  868. if (thread_num >= num_threads) {
  869. std::cerr << "Received bad thread number from server\n";
  870. } else {
  871. serversocks[thread_num] = std::move(serversock);
  872. }
  873. }
  874. } else if (player == 1) {
  875. // Listen for connections from P2, make num_threads connections to P0
  876. tcp::acceptor acceptor_p2(io_context,
  877. tcp::endpoint(tcp::v4(), port_p2_p1));
  878. tcp::resolver resolver(io_context);
  879. boost::system::error_code err;
  880. peersocks.clear();
  881. serversocks.clear();
  882. for (int i=0;i<num_threads;++i) {
  883. serversocks.emplace_back(io_context);
  884. }
  885. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  886. tcp::socket peersock(io_context);
  887. while(1) {
  888. boost::asio::connect(peersock,
  889. resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
  890. if (!err) break;
  891. std::cerr << "Connection to p0 refused, will retry.\n";
  892. sleep(1);
  893. }
  894. // Write 2 bytes to the socket indicating which thread
  895. // number this socket is for
  896. boost::asio::write(peersock,
  897. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  898. peersocks.push_back(std::move(peersock));
  899. }
  900. for (int i=0;i<num_threads;++i) {
  901. tcp::socket serversock = acceptor_p2.accept();
  902. // Read 2 bytes from the socket, which will be the thread
  903. // number
  904. unsigned short thread_num;
  905. boost::asio::read(serversock,
  906. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  907. if (thread_num >= num_threads) {
  908. std::cerr << "Received bad thread number from server\n";
  909. } else {
  910. serversocks[thread_num] = std::move(serversock);
  911. }
  912. }
  913. } else {
  914. std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
  915. }
  916. }
  917. void mpcio_setup_server(boost::asio::io_context &io_context,
  918. const char *p0addr, const char *p1addr, int num_threads,
  919. std::deque<tcp::socket> &p0socks,
  920. std::deque<tcp::socket> &p1socks)
  921. {
  922. // Make connections to P0 and P1
  923. tcp::resolver resolver(io_context);
  924. boost::system::error_code err;
  925. p0socks.clear();
  926. p1socks.clear();
  927. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  928. tcp::socket p0sock(io_context);
  929. while(1) {
  930. boost::asio::connect(p0sock,
  931. resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
  932. if (!err) break;
  933. std::cerr << "Connection to p0 refused, will retry.\n";
  934. sleep(1);
  935. }
  936. // Write 2 bytes to the socket indicating which thread
  937. // number this socket is for
  938. boost::asio::write(p0sock,
  939. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  940. p0socks.push_back(std::move(p0sock));
  941. }
  942. for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
  943. tcp::socket p1sock(io_context);
  944. while(1) {
  945. boost::asio::connect(p1sock,
  946. resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
  947. if (!err) break;
  948. std::cerr << "Connection to p1 refused, will retry.\n";
  949. sleep(1);
  950. }
  951. // Write 2 bytes to the socket indicating which thread
  952. // number this socket is for
  953. boost::asio::write(p1sock,
  954. boost::asio::buffer(&thread_num, sizeof(thread_num)));
  955. p1socks.push_back(std::move(p1sock));
  956. }
  957. }