#ifndef __MPCIO_HPP__
#define __MPCIO_HPP__

#include <iostream>
#include <fstream>
#include <vector>
#include <deque>
#include <queue>
#include <string>
#include <atomic>
#include <optional>
#include <cstdint>      // uint32_t
#include <cstdio>       // sprintf
#include <cstring>      // memmove

#include <bsd/stdlib.h> // arc4random_buf

#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <boost/chrono.hpp>

#include "types.hpp"

using boost::asio::ip::tcp;

// Classes to represent stored precomputed data (e.g., multiplication triples)

template<typename T>
class PreCompStorage {
public:
    PreCompStorage(unsigned player, bool preprocessing,
        const char *filenameprefix, unsigned thread_num);
    void get(T& nextval);

    inline size_t get_stats() { return count; }
    inline void reset_stats() { count = 0; }
private:
    std::ifstream storage;
    size_t count;
};

template<typename T>
PreCompStorage<T>::PreCompStorage(unsigned player, bool preprocessing,
        const char *filenameprefix, unsigned thread_num) {
    if (preprocessing) return;
    std::string filename(filenameprefix);
    char suffix[20];
    sprintf(suffix, ".p%d.t%u", player%10, thread_num);
    filename.append(suffix);
    storage.open(filename);
    if (storage.fail()) {
        std::cerr << "Failed to open " << filename << "\n";
        exit(1);
    }
    count = 0;
}

template<typename T>
void PreCompStorage<T>::get(T& nextval) {
    storage.read((char *)&nextval, sizeof(T));
    if (storage.gcount() != sizeof(T)) {
        std::cerr << "Failed to read precomputed value from storage\n";
        exit(1);
    }
    ++count;
}
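
// Example (sketch): in the online phase, thread 3 of player 0 reading
// multiplication triples would construct
//     PreCompStorage<MultTriple> triples(0, false, "triples", 3);
// which opens the file "triples.p0.t3" and then returns one stored
// record per call to get().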

// If we want to send Lamport clocks in messages, define this.  It adds
// an 8-byte header to each message (length and Lamport clock), so it
// has a small network cost.  We always define and pass the Lamport
// clock member of MPCIO to the IO functions for simplicity, but they're
// ignored if this isn't defined.

#define SEND_LAMPORT_CLOCKS

using lamport_t = uint32_t;
using atomic_lamport_t = std::atomic<lamport_t>;
using opt_lamport_t = std::optional<lamport_t>;

#ifdef SEND_LAMPORT_CLOCKS
struct MessageWithHeader {
    std::string header;
    std::string message;

    MessageWithHeader(std::string &&msg, lamport_t lamport) :
            message(std::move(msg)) {
        char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
        uint32_t msglen = uint32_t(message.size());
        memmove(hdr, &msglen, sizeof(msglen));
        memmove(hdr+sizeof(msglen), &lamport, sizeof(lamport));
        header.assign(hdr, sizeof(hdr));
    }
};
#endif
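
// With SEND_LAMPORT_CLOCKS defined, each chunk on the wire is therefore:
//     [ uint32_t msglen | lamport_t clock | msglen bytes of payload ]
// with the two header fields written in host byte order (as memmove'd
// above), so both parties are assumed to have the same endianness.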

// A class to wrap a socket to another MPC party.  This wrapping allows
// us to do some useful logging, and perform async_writes transparently
// to the application.

class MPCSingleIO {
    tcp::socket sock;
    size_t totread, totwritten;
#ifdef RECORD_IOTRACE
    std::vector<ssize_t> iotrace;
#endif

    // To avoid blocking if both we and our peer are trying to send
    // something very large, and neither side is receiving, we will send
    // with async_write.  But this has a number of implications:
    // - The data to be sent has to be copied into this MPCSingleIO,
    //   since asio::buffer pointers are not guaranteed to remain valid
    //   after the end of the coroutine that created them
    // - We have to keep a queue of messages to be sent, in case
    //   coroutines call send() before the previous message has finished
    //   being sent
    // - This queue may be accessed from the async_write thread as well
    //   as the work thread that uses this MPCSingleIO directly (there
    //   should be only one of the latter), so we need some locking

    // This is where we accumulate data passed in queue()
    std::string dataqueue;

    // When send() is called, the above dataqueue is appended to this
    // messagequeue, and the dataqueue is reset.  If messagequeue was
    // empty before this append, launch async_write to write the first
    // thing in the messagequeue.  When async_write completes, it will
    // delete the first thing in the messagequeue, and see if there are
    // any more elements.  If so, it will start another async_write.
    // The invariant is that there is an async_write currently running
    // iff messagequeue is nonempty.
#ifdef SEND_LAMPORT_CLOCKS
    std::queue<MessageWithHeader> messagequeue;
#else
    std::queue<std::string> messagequeue;
#endif

    // If a single message is broken into chunks in order to get the
    // first part of it out on the wire while the rest of it is still
    // being computed, we want the Lamport clock of all the chunks to be
    // that of when the message is first created.  This value will be
    // nullopt when there has been no queue() since the last explicit
    // send() (as opposed to the implicit send() called by queue()
    // itself if it wants to get a chunk on its way), and will be set to
    // the current lamport clock when that first queue() after each
    // explicit send() happens.
    opt_lamport_t message_lamport;

#ifdef SEND_LAMPORT_CLOCKS
    // If Lamport clocks are being sent, then the data stream is divided
    // into chunks, each with a header containing the length of the
    // chunk and the Lamport clock.  So when we read, we'll read a whole
    // chunk, and store it here.  Then calls to recv() will read pieces
    // of this buffer until it has all been read, and then read the next
    // header and chunk.
    std::string recvdata;
    size_t recvdataremain = 0;
#endif

    // Never touch the above messagequeue without holding this lock (you
    // _can_ touch the strings it contains, though, if you looked one up
    // while holding the lock).
    boost::mutex messagequeuelock;

    // Asynchronously send the first message from the message queue.
    // * The messagequeuelock must be held when this is called! *
    // This method may be called from either thread (the work thread or
    // the async_write handler thread).
    void async_send_from_msgqueue() {
#ifdef SEND_LAMPORT_CLOCKS
        std::vector<boost::asio::const_buffer> tosend;
        tosend.push_back(boost::asio::buffer(messagequeue.front().header));
        tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
        boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
            tosend,
#else
            boost::asio::buffer(messagequeue.front()),
#endif
            [&](boost::system::error_code ec, std::size_t amt){
                messagequeuelock.lock();
                messagequeue.pop();
                if (messagequeue.size() > 0) {
                    async_send_from_msgqueue();
                }
                messagequeuelock.unlock();
            });
    }

public:
    MPCSingleIO(tcp::socket &&sock) :
        sock(std::move(sock)), totread(0), totwritten(0) {}

    // Returns 1 if a new message is started, 0 otherwise
    size_t queue(const void *data, size_t len, lamport_t lamport) {
        // Is this a new message?
        size_t newmsg = 0;

        dataqueue.append((const char *)data, len);

        // If this is the first queue() since the last explicit send(),
        // which we'll know because message_lamport will be nullopt, set
        // message_lamport to the current Lamport clock.  Note that the
        // boolean test tests whether message_lamport is nullopt, not
        // whether its value is zero.
        if (!message_lamport) {
            message_lamport = lamport;
            newmsg = 1;
        }

        // If we already have some full packets worth of data, may as
        // well send it.
        if (dataqueue.size() > 28800) {
            send(true);
        }

        return newmsg;
    }

    void send(bool implicit_send = false) {
        size_t thissize = dataqueue.size();
        // Ignore spurious calls to send(), except for resetting
        // message_lamport if this was an explicit send().
        if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
            // If this was an explicit send(), reset the message_lamport so
            // that it gets updated at the next queue().
            if (!implicit_send) {
                message_lamport.reset();
            }
#endif
            return;
        }

#ifdef RECORD_IOTRACE
        iotrace.push_back(thissize);
#endif

        messagequeuelock.lock();

        // Move the current message to send into the message queue (this
        // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
        messagequeue.emplace(std::move(dataqueue),
            message_lamport.value());
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#else
        messagequeue.emplace(std::move(dataqueue));
#endif

        // If this is now the first thing in the message queue, launch
        // an async_write to write it
        if (messagequeue.size() == 1) {
            async_send_from_msgqueue();
        }
        messagequeuelock.unlock();
    }

    size_t recv(void *data, size_t len, lamport_t &lamport) {
#ifdef SEND_LAMPORT_CLOCKS
        char *cdata = (char *)data;
        size_t res = 0;
        while (len > 0) {
            while (recvdataremain == 0) {
                // Read a new header
                char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
                uint32_t datalen;
                lamport_t recv_lamport;
                boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
                memmove(&datalen, hdr, sizeof(datalen));
                memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
                lamport_t new_lamport = recv_lamport + 1;
                if (lamport < new_lamport) {
                    lamport = new_lamport;
                }
                if (datalen > 0) {
                    recvdata.resize(datalen, '\0');
                    boost::asio::read(sock, boost::asio::buffer(recvdata));
                    recvdataremain = datalen;
                }
            }
            size_t amttoread = len;
            if (amttoread > recvdataremain) {
                amttoread = recvdataremain;
            }
            memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
                amttoread);
            cdata += amttoread;
            len -= amttoread;
            recvdataremain -= amttoread;
            res += amttoread;
        }
        return res;
#else
        size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#ifdef RECORD_IOTRACE
        iotrace.push_back(-(ssize_t(res)));
#endif
        return res;
#endif
    }

#ifdef RECORD_IOTRACE
    void dumptrace(std::ostream &os, const char *label = NULL) {
        if (label) {
            os << label << " ";
        }
        os << "IO trace:";
        for (auto& s: iotrace) {
            os << " " << s;
        }
        os << "\n";
    }

    void resettrace() {
        iotrace.clear();
    }
#endif
};
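
// Example (sketch): a typical round over one MPCSingleIO "io" connected
// to the other party (the variable names here are illustrative only).
// Each side queues any number of pieces of the round's data, calls
// send() once to flush them as a single message, and then recv()s the
// same total number of bytes, possibly across several recv() calls:
//
//     lamport_t lamport = 0;              // normally MPCIO's shared clock
//     value_t x, y;
//     io.queue(&x, sizeof(x), lamport);
//     io.queue(&y, sizeof(y), lamport);
//     io.send();
//     value_t theirs[2];
//     io.recv(theirs, sizeof(theirs), lamport);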

// A base class to represent all of a computation peer or server's IO,
// either to other parties or to local storage (the computation and
// server cases are separate subclasses below).

struct MPCIO {
    int player;
    bool preprocessing;
    size_t num_threads;
    atomic_lamport_t lamport;
    std::vector<size_t> msgs_sent;
    std::vector<size_t> msg_bytes_sent;
    boost::chrono::steady_clock::time_point steady_start;
    boost::chrono::process_cpu_clock::time_point cpu_start;

    MPCIO(int player, bool preprocessing, size_t num_threads) :
        player(player), preprocessing(preprocessing),
        num_threads(num_threads), lamport(0)
    {
        reset_stats();
    }

    void reset_stats()
    {
        msgs_sent.clear();
        msg_bytes_sent.clear();
        for (size_t i=0; i<num_threads; ++i) {
            msgs_sent.push_back(0);
            msg_bytes_sent.push_back(0);
        }
        steady_start = boost::chrono::steady_clock::now();
        cpu_start = boost::chrono::process_cpu_clock::now();
    }

    void dump_stats(std::ostream &os)
    {
        size_t tot_msgs_sent = 0;
        size_t tot_msg_bytes_sent = 0;
        for (auto& n : msgs_sent) {
            tot_msgs_sent += n;
        }
        for (auto& n : msg_bytes_sent) {
            tot_msg_bytes_sent += n;
        }
        auto steady_elapsed =
            boost::chrono::steady_clock::now() - steady_start;
        auto cpu_elapsed =
            boost::chrono::process_cpu_clock::now() - cpu_start;

        os << tot_msgs_sent << " messages sent\n";
        os << tot_msg_bytes_sent << " message bytes sent\n";
        os << lamport << " Lamport clock (latencies)\n";
        os << boost::chrono::duration_cast
            <boost::chrono::milliseconds>(steady_elapsed) <<
            " wall clock time\n";
        os << cpu_elapsed << " {real;user;system}\n";
    }
};

// A class to represent all of a computation peer's IO, either to other
// parties or to local storage

struct MPCPeerIO : public MPCIO {
    // We use a deque here instead of a vector because you can't have a
    // vector of a type that can be neither copied nor moved
    // (MPCSingleIO's boost::mutex member is the culprit), but you can
    // have a deque of those, since a deque never needs to relocate its
    // existing elements when it grows.
    std::deque<MPCSingleIO> peerios;
    std::deque<MPCSingleIO> serverios;
    std::vector<PreCompStorage<MultTriple>> triples;
    std::vector<PreCompStorage<HalfTriple>> halftriples;

    MPCPeerIO(unsigned player, bool preprocessing,
            std::deque<tcp::socket> &peersocks,
            std::deque<tcp::socket> &serversocks) :
        MPCIO(player, preprocessing, peersocks.size())
    {
        unsigned num_threads = unsigned(peersocks.size());
        for (unsigned i=0; i<num_threads; ++i) {
            triples.emplace_back(player, preprocessing, "triples", i);
        }
        for (unsigned i=0; i<num_threads; ++i) {
            halftriples.emplace_back(player, preprocessing, "halves", i);
        }
        for (auto &&sock : peersocks) {
            peerios.emplace_back(std::move(sock));
        }
        for (auto &&sock : serversocks) {
            serverios.emplace_back(std::move(sock));
        }
    }

    void dump_precomp_stats(std::ostream &os)
    {
        for (size_t i=0; i<triples.size(); ++i) {
            if (i > 0) {
                os << " ";
            }
            os << "T" << i << " t:" << triples[i].get_stats() <<
                " h:" << halftriples[i].get_stats();
        }
        os << "\n";
    }

    void reset_precomp_stats()
    {
        for (size_t i=0; i<triples.size(); ++i) {
            triples[i].reset_stats();
            halftriples[i].reset_stats();
        }
    }

    void dump_stats(std::ostream &os)
    {
        MPCIO::dump_stats(os);
        os << "Precomputed values used: ";
        dump_precomp_stats(os);
    }
};

// A class to represent all of the server party's IO, either to
// computational parties or to local storage

struct MPCServerIO : public MPCIO {
    std::deque<MPCSingleIO> p0ios;
    std::deque<MPCSingleIO> p1ios;

    MPCServerIO(bool preprocessing,
            std::deque<tcp::socket> &p0socks,
            std::deque<tcp::socket> &p1socks) :
        MPCIO(2, preprocessing, p0socks.size())
    {
        for (auto &&sock : p0socks) {
            p0ios.emplace_back(std::move(sock));
        }
        for (auto &&sock : p1socks) {
            p1ios.emplace_back(std::move(sock));
        }
    }
};

// A handle to one thread's sockets and streams in an MPCIO

class MPCTIO {
    int thread_num;
    lamport_t thread_lamport;
    MPCIO &mpcio;

public:
    MPCTIO(MPCIO &mpcio, int thread_num):
        thread_num(thread_num), thread_lamport(mpcio.lamport),
        mpcio(mpcio) {}

    // Sync our per-thread lamport clock with the master one in the
    // mpcio.  You only need to call this explicitly if your MPCTIO
    // outlives your thread (in which case call it after the join), or
    // if your threads do interthread communication amongst themselves
    // (in which case call it in the sending thread before the send, and
    // call it in the receiving thread after the receive).
    void sync_lamport() {
        // Update the mpcio Lamport time to be max of the thread Lamport
        // time and what we thought it was before.  We use this
        // compare_exchange construction in order to atomically
        // do the comparison, computation, and replacement
        lamport_t old_lamport = mpcio.lamport;
        lamport_t new_lamport = thread_lamport;
        do {
            if (new_lamport < old_lamport) {
                new_lamport = old_lamport;
            }
        // The next line atomically checks if lamport still has
        // the value old_lamport; if so, it changes its value to
        // new_lamport and returns true (ending the loop).  If
        // not, it sets old_lamport to the current value of
        // lamport, and returns false (continuing the loop so
        // that new_lamport can be recomputed based on this new
        // value).
        } while (!mpcio.lamport.compare_exchange_weak(
            old_lamport, new_lamport));
        thread_lamport = new_lamport;
    }
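
    // Example (sketch): if worker thread A hands intermediate values to
    // worker thread B over some in-process channel (not provided by this
    // header), the clocks should be propagated along with the data, per
    // the comment above:
    //
    //     // in thread A, before pushing onto the channel:
    //     tioA.sync_lamport();
    //     channel.push(vals);
    //
    //     // in thread B, after popping from the channel:
    //     vals = channel.pop();
    //     tioB.sync_lamport();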

    // The normal case, where the MPCTIO is created inside the thread,
    // and so destructed when the thread ends, is handled automatically
    // here.
    ~MPCTIO() {
        sync_lamport();
    }

    // Queue up data to the peer or to the server

    void queue_peer(const void *data, size_t len) {
        if (mpcio.player < 2) {
            MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
            size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
            mpcpio.msgs_sent[thread_num] += newmsg;
            mpcpio.msg_bytes_sent[thread_num] += len;
        }
    }

    void queue_server(const void *data, size_t len) {
        if (mpcio.player < 2) {
            MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
            size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
            mpcpio.msgs_sent[thread_num] += newmsg;
            mpcpio.msg_bytes_sent[thread_num] += len;
        }
    }

    // Receive data from the peer or from the server

    size_t recv_peer(void *data, size_t len) {
        if (mpcio.player < 2) {
            MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
            return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
        }
        return 0;
    }

    size_t recv_server(void *data, size_t len) {
        if (mpcio.player < 2) {
            MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
            return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
        }
        return 0;
    }

    // Queue up data to p0 or p1

    void queue_p0(const void *data, size_t len) {
        if (mpcio.player == 2) {
            MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
            size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
            mpcsrvio.msgs_sent[thread_num] += newmsg;
            mpcsrvio.msg_bytes_sent[thread_num] += len;
        }
    }

    void queue_p1(const void *data, size_t len) {
        if (mpcio.player == 2) {
            MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
            size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
            mpcsrvio.msgs_sent[thread_num] += newmsg;
            mpcsrvio.msg_bytes_sent[thread_num] += len;
        }
    }

    // Receive data from p0 or p1

    size_t recv_p0(void *data, size_t len) {
        if (mpcio.player == 2) {
            MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
            return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
        }
        return 0;
    }

    size_t recv_p1(void *data, size_t len) {
        if (mpcio.player == 2) {
            MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
            return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
        }
        return 0;
    }

    // Send all queued data for this thread
    void send() {
        if (mpcio.player < 2) {
            MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
            mpcpio.peerios[thread_num].send();
            mpcpio.serverios[thread_num].send();
        } else {
            MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
            mpcsrvio.p0ios[thread_num].send();
            mpcsrvio.p1ios[thread_num].send();
        }
    }

    // Functions to get precomputed values.  If we're in the online
    // phase, get them from PreCompStorage.  If we're in the
    // preprocessing phase, read them from the server.
    MultTriple triple() {
        MultTriple val;
        if (mpcio.player < 2) {
            MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
            if (mpcpio.preprocessing) {
                recv_server(&val, sizeof(val));
            } else {
                mpcpio.triples[thread_num].get(val);
            }
        } else if (mpcio.preprocessing) {
            // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
            // (X0*Y1 + Y0*X1) = (Z0+Z1)
            value_t X0, Y0, Z0, X1, Y1, Z1;
            arc4random_buf(&X0, sizeof(X0));
            arc4random_buf(&Y0, sizeof(Y0));
            arc4random_buf(&Z0, sizeof(Z0));
            arc4random_buf(&X1, sizeof(X1));
            arc4random_buf(&Y1, sizeof(Y1));
            Z1 = X0 * Y1 + X1 * Y0 - Z0;
            MultTriple T0, T1;
            T0 = std::make_tuple(X0, Y0, Z0);
            T1 = std::make_tuple(X1, Y1, Z1);
            queue_p0(&T0, sizeof(T0));
            queue_p1(&T1, sizeof(T1));
        }
        return val;
    }
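
    // Example (sketch, not part of this header): one standard way a
    // computational party can spend a triple.  Parties P0 and P1 hold
    // additive shares x0,x1 and y0,y1 of x and y; each computes xi*yi
    // locally, and one triple gives them additive shares of the cross
    // terms x0*y1 + x1*y0.  From P0's point of view (P1 is symmetric,
    // using (X1,Y1,Z1) and the negated correction):
    //
    //     value_t x0, y0;                       // P0's input shares
    //     auto [X0, Y0, Z0] = tio.triple();
    //     value_t blind[2] = { x0+X0, y0+Y0 }, peer[2];
    //     tio.queue_peer(blind, sizeof(blind));
    //     tio.send();
    //     tio.recv_peer(peer, sizeof(peer));    // { x1+X1, y1+Y1 }
    //     value_t z0 = x0*peer[1] + y0*peer[0] + Z0;
    //     // P1 computes z1 = -Y1*(x0+X0) - X1*(y0+Y0) + Z1, so
    //     // z0+z1 = x0*y1 + x1*y0 because X0*Y1 + Y0*X1 = Z0+Z1.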

    HalfTriple halftriple() {
        HalfTriple val;
        if (mpcio.player < 2) {
            MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
            if (mpcpio.preprocessing) {
                recv_server(&val, sizeof(val));
            } else {
                mpcpio.halftriples[thread_num].get(val);
            }
        } else if (mpcio.preprocessing) {
            // Create half-triples (X0,Z0),(Y1,Z1) such that
            // X0*Y1 = Z0 + Z1
            value_t X0, Z0, Y1, Z1;
            arc4random_buf(&X0, sizeof(X0));
            arc4random_buf(&Z0, sizeof(Z0));
            arc4random_buf(&Y1, sizeof(Y1));
            Z1 = X0 * Y1 - Z0;
            HalfTriple H0, H1;
            H0 = std::make_tuple(X0, Z0);
            H1 = std::make_tuple(Y1, Z1);
            queue_p0(&H0, sizeof(H0));
            queue_p1(&H1, sizeof(H1));
        }
        return val;
    }

    // Accessors

    inline int player() { return mpcio.player; }
    inline bool preprocessing() { return mpcio.preprocessing; }
    inline bool is_server() { return mpcio.player == 2; }
};

// Set up the socket connections between the two computational parties
// (P0 and P1) and the server party (P2).  For each connection, the
// lower-numbered party does the accept() and the higher-numbered party
// does the connect().

// Computational parties call this version with player=0 or 1

void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr,  // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks);

// Server calls this version

void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks);
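
// Example (sketch): bringing up player 1 in the online phase with four
// worker threads, then wrapping the sockets in an MPCPeerIO.  The
// address "p0.example.org" is a placeholder for P0's hostname.
//
//     boost::asio::io_context io_context;
//     std::deque<tcp::socket> peersocks, serversocks;
//     mpcio_setup_computational(1, io_context, "p0.example.org", 4,
//         peersocks, serversocks);
//     MPCPeerIO mpcpio(1, false, peersocks, serversocks);
//     // each worker thread then constructs its own handle:
//     //     MPCTIO tio(mpcpio, thread_num);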

#endif