mpcio.cpp

#include <sys/time.h>     // getrusage
#include <sys/resource.h> // getrusage

#include "mpcio.hpp"
#include "rdpf.hpp"
#include "cdpf.hpp"
#include "bitutils.hpp"
#include "coroutine.hpp"
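
// Post the message at the head of the message queue to the socket.
// The completion handler pops the sent message off the queue and, if
// more messages were queued while the write was in flight, chains the
// next async_write; messagequeuelock serializes access to the queue.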
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}

size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock. Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

#ifdef VERBOSE_COMMS
    struct timeval tv;
    gettimeofday(&tv, NULL);
    printf("%lu.%06lu: Queue %s.%d len=%lu lamp=%u: ", tv.tv_sec,
        tv.tv_usec, dest.c_str(), thread_num, len,
        message_lamport.value());
    for (size_t i=0;i<len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

    // If we already have some full packets' worth of data, we may
    // as well send it.
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}
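
// (The 28800-byte flush threshold above is presumably sized to hold
// several full packets' worth of queued data; the precise constant
// appears to be a tuning choice.)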

void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
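
// When SEND_LAMPORT_CLOCKS is defined, each message on the wire is a
// header (a uint32_t payload length followed by the sender's Lamport
// clock) and then the payload itself; recv() below parses that framing
// and folds the received clock into our own (taking the max of ours
// and the received clock plus one).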
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef VERBOSE_COMMS
    struct timeval tv;
    gettimeofday(&tv, NULL);
    size_t orig_len = len;
    printf("%lu.%06lu: Recv %s.%d len=%lu lamp=%u ", tv.tv_sec,
        tv.tv_usec, dest.c_str(), thread_num, len, lamport);
#endif

#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    while (len > 0) {
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif

#ifdef VERBOSE_COMMS
    gettimeofday(&tv, NULL);
    printf("nlamp=%u %lu.%06lu: ", lamport, tv.tv_sec, tv.tv_usec);
    for (size_t i=0;i<orig_len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

#ifdef RECORD_IOTRACE
    iotrace.push_back(-(ssize_t(res)));
#endif

    return res;
}

#ifdef RECORD_IOTRACE
void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
{
    if (label) {
        os << label << " ";
    }
    os << "IO trace:";
    for (auto& s: iotrace) {
        os << " " << s;
    }
    os << "\n";
}
#endif

void MPCIO::reset_stats()
{
    msgs_sent.clear();
    msg_bytes_sent.clear();
    aes_ops.clear();
    for (size_t i=0; i<num_threads; ++i) {
        msgs_sent.push_back(0);
        msg_bytes_sent.push_back(0);
        aes_ops.push_back(0);
    }
    steady_start = boost::chrono::steady_clock::now();
    cpu_start = boost::chrono::process_cpu_clock::now();
}

// Report the memory usage
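// (Note: ru_maxrss from getrusage(2) is reported in kilobytes on
// Linux, matching the "KiB" label below; other platforms differ,
// e.g., macOS reports it in bytes.)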
void MPCIO::dump_memusage(std::ostream &os)
{
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    os << "Mem: " << ru.ru_maxrss << " KiB\n";
}

void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << tot_aes_ops << " local AES operations\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
    dump_memusage(os);
}

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_init(TVA &storage, unsigned player,
    ProcessingMode mode, unsigned num_threads, bool incremental)
{
    auto &VA = std::get<WIDTH-1>(storage);
    VA.resize(num_threads);
    char prefix[12];
    strcpy(prefix, incremental ? "irdpf" : "rdpf");
    if (WIDTH > 1) {
        sprintf(prefix+strlen(prefix), "%d_", WIDTH);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            VA[i][depth-1].init(player, mode, prefix, i, depth, WIDTH);
        }
    }
}
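
// (In the storage helpers here, std::get<WIDTH-1>(storage) selects the
// tuple slot for this WIDTH, yielding per-thread arrays with one
// PreCompStorage entry for each depth from 1 through ADDRESS_MAX_BITS.
// The "rdpf"/"irdpf" prefix, width suffix, thread number, and depth
// passed to init() presumably identify the corresponding file of
// precomputed values.)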

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_dumpstats(std::ostream &os, TVA &storage,
    size_t thread_num, bool incremental)
{
    auto &VA = std::get<WIDTH-1>(storage);
    for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
        size_t cnt = VA[thread_num][depth-1].get_stats();
        if (cnt > 0) {
            os << (incremental ? " i" : " r") << int(depth);
            if (WIDTH > 1) {
                os << "." << int(WIDTH);
            }
            os << ":" << cnt;
        }
    }
}

// TVA is a tuple of vectors of arrays of PreCompStorage
template <nbits_t WIDTH, typename TVA>
static void rdpfstorage_resetstats(TVA &storage, size_t thread_num)
{
    auto &VA = std::get<WIDTH-1>(storage);
    for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
        VA[thread_num][depth-1].reset_stats();
    }
}

MPCPeerIO::MPCPeerIO(unsigned player, ProcessingMode mode,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, mode, peersocks.size())
{
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        multtriples.emplace_back(player, mode, "mults", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, mode, "halves", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        andtriples.emplace_back(player, mode, "ands", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        valselecttriples.emplace_back(player, mode, "selects", i);
    }
    rdpfstorage_init<1>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<2>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<3>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<4>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<5>(rdpftriples, player, mode, num_threads, false);
    rdpfstorage_init<1>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<2>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<3>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<4>(irdpftriples, player, mode, num_threads, true);
    rdpfstorage_init<5>(irdpftriples, player, mode, num_threads, true);
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, mode, "cdpf", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        peerios.emplace_back(std::move(peersocks[i]), "peer", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        serverios.emplace_back(std::move(serversocks[i]), "srv", i);
    }
}

void MPCPeerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<multtriples.size(); ++i) {
        size_t cnt;
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        cnt = multtriples[i].get_stats();
        if (cnt > 0) {
            os << " m:" << cnt;
        }
        cnt = halftriples[i].get_stats();
        if (cnt > 0) {
            os << " h:" << cnt;
        }
        cnt = andtriples[i].get_stats();
        if (cnt > 0) {
            os << " a:" << cnt;
        }
        cnt = valselecttriples[i].get_stats();
        if (cnt > 0) {
            os << " s:" << cnt;
        }
        rdpfstorage_dumpstats<1>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<2>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<3>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<4>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<5>(os, rdpftriples, i, false);
        rdpfstorage_dumpstats<1>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<2>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<3>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<4>(os, irdpftriples, i, true);
        rdpfstorage_dumpstats<5>(os, irdpftriples, i, true);
        cnt = cdpfs[i].get_stats();
        if (cnt > 0) {
            os << " c:" << cnt;
        }
    }
    os << "\n";
}

void MPCPeerIO::reset_precomp_stats()
{
    for (size_t i=0; i<multtriples.size(); ++i) {
        multtriples[i].reset_stats();
        halftriples[i].reset_stats();
        andtriples[i].reset_stats();
        valselecttriples[i].reset_stats();
        rdpfstorage_resetstats<1>(rdpftriples, i);
        rdpfstorage_resetstats<2>(rdpftriples, i);
        rdpfstorage_resetstats<3>(rdpftriples, i);
        rdpfstorage_resetstats<4>(rdpftriples, i);
        rdpfstorage_resetstats<5>(rdpftriples, i);
        rdpfstorage_resetstats<1>(irdpftriples, i);
        rdpfstorage_resetstats<2>(irdpftriples, i);
        rdpfstorage_resetstats<3>(irdpftriples, i);
        rdpfstorage_resetstats<4>(irdpftriples, i);
        rdpfstorage_resetstats<5>(irdpftriples, i);
    }
}

void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCServerIO::MPCServerIO(ProcessingMode mode,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, mode, p0socks.size())
{
    rdpfstorage_init<1>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<2>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<3>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<4>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<5>(rdpfpairs, player, mode, num_threads, false);
    rdpfstorage_init<1>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<2>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<3>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<4>(irdpfpairs, player, mode, num_threads, true);
    rdpfstorage_init<5>(irdpfpairs, player, mode, num_threads, true);
    for (unsigned i=0; i<num_threads; ++i) {
        p0ios.emplace_back(std::move(p0socks[i]), "p0", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        p1ios.emplace_back(std::move(p1socks[i]), "p1", i);
    }
}

void MPCServerIO::dump_precomp_stats(std::ostream &os)
{
    for (size_t i=0; i<std::get<0>(rdpfpairs).size(); ++i) {
        if (i > 0) {
            os << " ";
        }
        os << "T" << i;
        rdpfstorage_dumpstats<1>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<2>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<3>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<4>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<5>(os, rdpfpairs, i, false);
        rdpfstorage_dumpstats<1>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<2>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<3>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<4>(os, irdpfpairs, i, true);
        rdpfstorage_dumpstats<5>(os, irdpfpairs, i, true);
    }
    os << "\n";
}

void MPCServerIO::reset_precomp_stats()
{
    for (size_t i=0; i<std::get<0>(rdpfpairs).size(); ++i) {
        rdpfstorage_resetstats<1>(rdpfpairs, i);
        rdpfstorage_resetstats<2>(rdpfpairs, i);
        rdpfstorage_resetstats<3>(rdpfpairs, i);
        rdpfstorage_resetstats<4>(rdpfpairs, i);
        rdpfstorage_resetstats<5>(rdpfpairs, i);
        rdpfstorage_resetstats<1>(irdpfpairs, i);
        rdpfstorage_resetstats<2>(irdpfpairs, i);
        rdpfstorage_resetstats<3>(irdpfpairs, i);
        rdpfstorage_resetstats<4>(irdpfpairs, i);
        rdpfstorage_resetstats<5>(irdpfpairs, i);
    }
}

void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}

MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num, int num_threads) :
    thread_num(thread_num), local_cpu_nthreads(num_threads),
    communication_nthreads(num_threads),
    thread_lamport(mpcio.lamport), mpcio(mpcio),
#ifdef VERBOSE_COMMS
    round_num(0),
#endif
    last_andtriple_bits_remaining(0),
    remaining_nodesselecttriples(0)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}

// Sync our per-thread lamport clock with the master one in the
// mpcio. You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before. We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
        // The next line atomically checks if lamport still has
        // the value old_lamport; if so, it changes its value to
        // new_lamport and returns true (ending the loop). If
        // not, it sets old_lamport to the current value of
        // lamport, and returns false (continuing the loop so
        // that new_lamport can be recomputed based on this new
        // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    thread_lamport = new_lamport;
}
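
// A sketch of the interthread pattern described above (tio, chan, and
// buf are illustrative names, not part of this API):
//
//     // sending thread:            // receiving thread:
//     tio.sync_lamport();           chan.recv(buf);
//     chan.send(buf);               tio.sync_lamport();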

// Only call this if you can be sure that there are no outstanding
// messages in flight, you can call it on all existing MPCTIOs, and
// you really want to reset the Lamport clock in the middle of a
// run.
void MPCTIO::reset_lamport()
{
    // Reset both our own Lamport clock and the parent MPCIO's
    thread_lamport = 0;
    mpcio.lamport = 0;
}

// Queue up data to the peer or to the server
void MPCTIO::queue_peer(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_server(const void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
        mpcpio.msgs_sent[thread_num] += newmsg;
        mpcpio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from the peer or from the server
size_t MPCTIO::recv_peer(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_server(void *data, size_t len)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Queue up data to p0 or p1
void MPCTIO::queue_p0(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

void MPCTIO::queue_p1(const void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
        mpcsrvio.msgs_sent[thread_num] += newmsg;
        mpcsrvio.msg_bytes_sent[thread_num] += len;
    }
}

// Receive data from p0 or p1
size_t MPCTIO::recv_p0(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

size_t MPCTIO::recv_p1(void *data, size_t len)
{
    if (mpcio.player == 2) {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
    }
    return 0;
}

// Send all queued data for this thread
void MPCTIO::send()
{
#ifdef VERBOSE_COMMS
    struct timeval tv;
    gettimeofday(&tv, NULL);
    printf("%lu.%06lu: Thread %u sending round %lu\n", tv.tv_sec,
        tv.tv_usec, thread_num, ++round_num);
#endif
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        mpcpio.peerios[thread_num].send();
        mpcpio.serverios[thread_num].send();
    } else {
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        mpcsrvio.p0ios[thread_num].send();
        mpcsrvio.p1ios[thread_num].send();
    }
}

// Functions to get precomputed values. If we're in the online
// phase, get them from PreCompStorage. If we're in the
// preprocessing or online-only phase, read them from the server.
MultTriple MPCTIO::multtriple(yield_t &yield)
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.multtriples[thread_num].inc();
        } else {
            mpcpio.multtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create multiplication triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}
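
// (Checking the construction above: Z1 is set to X0*Y1 + X1*Y0 - Z0,
// so Z0 + Z1 = X0*Y1 + X1*Y0 with all arithmetic implicitly modulo
// 2^w, where w = 8*sizeof(value_t) is the word size; that is exactly
// the stated invariant.)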

// When halftriple() is used internally to another preprocessing
// operation, don't tally it, so that it doesn't appear separately in
// the stats from the preprocessing operation that invoked it
HalfTriple MPCTIO::halftriple(yield_t &yield, bool tally)
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            if (tally) {
                mpcpio.halftriples[thread_num].inc();
            }
        } else {
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
        yield();
    }
    return val;
}

MultTriple MPCTIO::andtriple(yield_t &yield)
{
    AndTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.andtriples[thread_num].inc();
        } else {
            mpcpio.andtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create AND triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0&Y1 ^ Y0&X1) = (Z0^Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        Z1 = (X0 & Y1) ^ (X1 & Y0) ^ Z0;
        AndTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}
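
// (The AND triple is the bitwise analogue of multtriple: Z1 is set to
// (X0&Y1) ^ (X1&Y0) ^ Z0, so Z0 ^ Z1 = (X0&Y1) ^ (X1&Y0) holds in
// every bit position of value_t independently.)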

void MPCTIO::request_nodeselecttriples(yield_t &yield, size_t num)
{
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            for (size_t i=0; i<num; ++i) {
                SelectTriple<DPFnode> v;
                uint8_t Xbyte;
                recv_server(&Xbyte, sizeof(Xbyte));
                v.X = Xbyte & 1;
                recv_server(&v.Y, sizeof(v.Y));
                recv_server(&v.Z, sizeof(v.Z));
                queued_nodeselecttriples.push_back(v);
            }
            remaining_nodesselecttriples += num;
        } else {
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        for (size_t i=0; i<num; ++i) {
            // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
            // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
            bit_t X0, X1;
            DPFnode Y0, Z0, Y1, Z1;
            X0 = arc4random() & 1;
            arc4random_buf(&Y0, sizeof(Y0));
            arc4random_buf(&Z0, sizeof(Z0));
            X1 = arc4random() & 1;
            arc4random_buf(&Y1, sizeof(Y1));
            DPFnode X0ext, X1ext;
            // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
            // 1 -> 1111...1)
            X0ext = if128_mask[X0];
            X1ext = if128_mask[X1];
            Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
            queue_p0(&X0, sizeof(X0));
            queue_p0(&Y0, sizeof(Y0));
            queue_p0(&Z0, sizeof(Z0));
            queue_p1(&X1, sizeof(X1));
            queue_p1(&Y1, sizeof(Y1));
            queue_p1(&Z1, sizeof(Z1));
        }
        yield();
        remaining_nodesselecttriples += num;
    }
}
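
// (request_nodeselecttriples batches num triples into a single
// communication round: one yield() covers the whole batch, and
// nodeselecttriple() below then serves them one at a time from
// queued_nodeselecttriples with no further network traffic.)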

SelectTriple<DPFnode> MPCTIO::nodeselecttriple(yield_t &yield)
{
    SelectTriple<DPFnode> val;
    if (remaining_nodesselecttriples == 0) {
        request_nodeselecttriples(yield, 1);
    }
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            val = queued_nodeselecttriples.front();
            queued_nodeselecttriples.pop_front();
            --remaining_nodesselecttriples;
        } else {
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        --remaining_nodesselecttriples;
    }
    return val;
}

SelectTriple<value_t> MPCTIO::valselecttriple(yield_t &yield)
{
    SelectTriple<value_t> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            uint8_t Xbyte;
            yield();
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
            mpcpio.valselecttriples[thread_num].inc();
        } else {
            mpcpio.valselecttriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        value_t Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        value_t X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = -value_t(X0);
        X1ext = -value_t(X1);
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}

SelectTriple<bit_t> MPCTIO::bitselecttriple(yield_t &yield)
{
    // Do we need to fetch a new AND triple?
    if (last_andtriple_bits_remaining == 0) {
        last_andtriple = andtriple(yield);
        last_andtriple_bits_remaining = 8*sizeof(value_t);
    }
    --last_andtriple_bits_remaining;
    value_t mask = value_t(1) << last_andtriple_bits_remaining;
    SelectTriple<bit_t> val;
    val.X = !!(std::get<0>(last_andtriple) & mask);
    val.Y = !!(std::get<1>(last_andtriple) & mask);
    val.Z = !!(std::get<2>(last_andtriple) & mask);
    return val;
}
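
// (This works because the AND-triple relation Z0^Z1 = (X0&Y1)^(X1&Y0)
// holds independently in each of the 8*sizeof(value_t) bit positions,
// so every bit position of one AND triple is itself a valid one-bit
// select triple; last_andtriple_bits_remaining counts down the
// positions not yet consumed.)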

CDPF MPCTIO::cdpf(yield_t &yield)
{
    CDPF val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            yield();
            iostream_server() >> val;
            mpcpio.cdpfs[thread_num].inc();
        } else {
            mpcpio.cdpfs[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
        iostream_p0() << cdpf0;
        iostream_p1() << cdpf1;
        yield();
    }
    return val;
}

// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;

// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;

// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
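
// Connection topology implied by the ports above: P0 only listens (for
// P1 and P2), P2 only connects (to P0 and P1), and P1 does one of
// each. On every per-thread socket, the connecting side first writes a
// 2-byte thread number, so connections arriving in arbitrary order can
// be matched to the correct thread's socket slot.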

void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr, // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}

void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;
    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}