// mpcio.hpp
  1. #ifndef __MCPIO_HPP__
  2. #define __MCPIO_HPP__
  3. #include <iostream>
  4. #include <fstream>
  5. #include <vector>
  6. #include <deque>
  7. #include <queue>
  8. #include <string>
  9. #include <bsd/stdlib.h> // arc4random_buf
  10. #include <boost/asio.hpp>
  11. #include <boost/thread.hpp>
  12. #include "types.hpp"
  13. using boost::asio::ip::tcp;
  14. // Classes to represent stored precomputed data (e.g., multiplication triples)
  15. template<typename T>
  16. class PreCompStorage {
  17. public:
  18. PreCompStorage(unsigned player, bool preprocessing,
  19. const char *filenameprefix, unsigned thread_num);
  20. void get(T& nextval);
  21. inline size_t get_stats() { return count; }
  22. inline void reset_stats() { count = 0; }
  23. private:
  24. std::ifstream storage;
  25. size_t count;
  26. };
  27. template<typename T>
  28. PreCompStorage<T>::PreCompStorage(unsigned player, bool preprocessing,
  29. const char *filenameprefix, unsigned thread_num) {
  30. if (preprocessing) return;
  31. std::string filename(filenameprefix);
  32. char suffix[20];
  33. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  34. filename.append(suffix);
  35. storage.open(filename);
  36. if (storage.fail()) {
  37. std::cerr << "Failed to open " << filename << "\n";
  38. exit(1);
  39. }
  40. count = 0;
  41. }
  42. template<typename T>
  43. void PreCompStorage<T>::get(T& nextval) {
  44. storage.read((char *)&nextval, sizeof(T));
  45. if (storage.gcount() != sizeof(T)) {
  46. std::cerr << "Failed to read precomputed value from storage\n";
  47. exit(1);
  48. }
  49. ++count;
  50. }
  51. // A class to wrap a socket to another MPC party. This wrapping allows
  52. // us to do some useful logging, and perform async_writes transparently
  53. // to the application.
  54. class MPCSingleIO {
  55. tcp::socket sock;
  56. size_t totread, totwritten;
  57. std::vector<ssize_t> iotrace;
  58. // To avoid blocking if both we and our peer are trying to send
  59. // something very large, and neither side is receiving, we will send
  60. // with async_write. But this has a number of implications:
  61. // - The data to be sent has to be copied into this MPCSingleIO,
  62. // since asio::buffer pointers are not guaranteed to remain valid
  63. // after the end of the coroutine that created them
  64. // - We have to keep a queue of messages to be sent, in case
  65. // coroutines call send() before the previous message has finished
  66. // being sent
  67. // - This queue may be accessed from the async_write thread as well
  68. // as the work thread that uses this MPCSingleIO directly (there
  69. // should be only one of the latter), so we need some locking
  70. // This is where we accumulate data passed in queue()
  71. std::string dataqueue;
  72. // When send() is called, the above dataqueue is appended to this
  73. // messagequeue, and the dataqueue is reset. If messagequeue was
  74. // empty before this append, launch async_write to write the first
  75. // thing in the messagequeue. When async_write completes, it will
  76. // delete the first thing in the messagequeue, and see if there are
  77. // any more elements. If so, it will start another async_write.
  78. // The invariant is that there is an async_write currently running
  79. // iff messagequeue is nonempty.
  80. std::queue<std::string> messagequeue;
  81. // Never touch the above messagequeue without holding this lock (you
  82. // _can_ touch the strings it contains, though, if you looked one up
  83. // while holding the lock).
  84. boost::mutex messagequeuelock;
  85. // Asynchronously send the first message from the message queue.
  86. // * The messagequeuelock must be held when this is called! *
  87. // This method may be called from either thread (the work thread or
  88. // the async_write handler thread).
  89. void async_send_from_msgqueue() {
  90. boost::asio::async_write(sock,
  91. boost::asio::buffer(messagequeue.front()),
  92. [&](boost::system::error_code ec, std::size_t amt){
  93. messagequeuelock.lock();
  94. messagequeue.pop();
  95. if (messagequeue.size() > 0) {
  96. async_send_from_msgqueue();
  97. }
  98. messagequeuelock.unlock();
  99. });
  100. }
  101. public:
  102. MPCSingleIO(tcp::socket &&sock) :
  103. sock(std::move(sock)), totread(0), totwritten(0) {}
  104. void queue(const void *data, size_t len) {
  105. dataqueue.append((const char *)data, len);
  106. // If we already have some full packets worth of data, may as
  107. // well send it.
  108. if (dataqueue.size() > 28800) {
  109. send();
  110. }
  111. }
  112. void send() {
  113. size_t thissize = dataqueue.size();
  114. // Ignore spurious calls to send()
  115. if (thissize == 0) return;
  116. iotrace.push_back(thissize);
  117. messagequeuelock.lock();
  118. // Move the current message to send into the message queue (this
  119. // moves a pointer to the data, not copying the data itself)
  120. messagequeue.emplace(std::move(dataqueue));
  121. // If this is now the first thing in the message queue, launch
  122. // an async_write to write it
  123. if (messagequeue.size() == 1) {
  124. async_send_from_msgqueue();
  125. }
  126. messagequeuelock.unlock();
  127. }
  128. size_t recv(const std::vector<boost::asio::mutable_buffer>& buffers) {
  129. size_t res = boost::asio::read(sock, buffers);
  130. iotrace.push_back(-(ssize_t(res)));
  131. return res;
  132. }
  133. size_t recv(const boost::asio::mutable_buffer& buffer) {
  134. size_t res = boost::asio::read(sock, buffer);
  135. iotrace.push_back(-(ssize_t(res)));
  136. return res;
  137. }
  138. size_t recv(void *data, size_t len) {
  139. size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
  140. iotrace.push_back(-(ssize_t(res)));
  141. return res;
  142. }
  143. void dumptrace(std::ostream &os, const char *label = NULL) {
  144. if (label) {
  145. os << label << " ";
  146. }
  147. os << "IO trace:";
  148. for (auto& s: iotrace) {
  149. os << " " << s;
  150. }
  151. os << "\n";
  152. }
  153. void resettrace() {
  154. iotrace.clear();
  155. }
  156. };
  157. // A base class to represent all of a computation peer or server's IO,
  158. // either to other parties or to local storage (the computation and
  159. // server cases are separate subclasses below).
  160. struct MPCIO {
  161. int player;
  162. bool preprocessing;
  163. MPCIO(int player, bool preprocessing) :
  164. player(player), preprocessing(preprocessing) {}
  165. };
  166. // A class to represent all of a computation peer's IO, either to other
  167. // parties or to local storage
  168. struct MPCPeerIO : public MPCIO {
  169. // We use a deque here instead of a vector because you can't have a
  170. // vector of a type without a copy constructor (tcp::socket is the
  171. // culprit), but you can have a deque of those for some reason.
  172. std::deque<MPCSingleIO> peerios;
  173. std::deque<MPCSingleIO> serverios;
  174. std::vector<PreCompStorage<MultTriple>> triples;
  175. std::vector<PreCompStorage<HalfTriple>> halftriples;
  176. MPCPeerIO(unsigned player, bool preprocessing,
  177. std::deque<tcp::socket> &peersocks,
  178. std::deque<tcp::socket> &serversocks) :
  179. MPCIO(player, preprocessing)
  180. {
  181. unsigned num_threads = unsigned(peersocks.size());
  182. for (unsigned i=0; i<num_threads; ++i) {
  183. triples.emplace_back(player, preprocessing, "triples", i);
  184. }
  185. for (unsigned i=0; i<num_threads; ++i) {
  186. halftriples.emplace_back(player, preprocessing, "halves", i);
  187. }
  188. for (auto &&sock : peersocks) {
  189. peerios.emplace_back(std::move(sock));
  190. }
  191. for (auto &&sock : serversocks) {
  192. serverios.emplace_back(std::move(sock));
  193. }
  194. }
  195. void dump_precomp_stats(std::ostream &os)
  196. {
  197. for (size_t i=0; i<triples.size(); ++i) {
  198. if (i > 0) {
  199. os << " ";
  200. }
  201. os << "T" << i << " t:" << triples[i].get_stats() <<
  202. " h:" << halftriples[i].get_stats();
  203. }
  204. os << "\n";
  205. }
  206. void reset_precomp_stats()
  207. {
  208. for (size_t i=0; i<triples.size(); ++i) {
  209. triples[i].reset_stats();
  210. halftriples[i].reset_stats();
  211. }
  212. }
  213. };
  214. // A class to represent all of the server party's IO, either to
  215. // computational parties or to local storage
  216. struct MPCServerIO : public MPCIO {
  217. std::deque<MPCSingleIO> p0ios;
  218. std::deque<MPCSingleIO> p1ios;
  219. MPCServerIO(bool preprocessing,
  220. std::deque<tcp::socket> &p0socks,
  221. std::deque<tcp::socket> &p1socks) :
  222. MPCIO(2, preprocessing)
  223. {
  224. for (auto &&sock : p0socks) {
  225. p0ios.emplace_back(std::move(sock));
  226. }
  227. for (auto &&sock : p1socks) {
  228. p1ios.emplace_back(std::move(sock));
  229. }
  230. }
  231. };
  232. // A handle to one thread's sockets and streams in a MPCIO
  233. class MPCTIO {
  234. int thread_num;
  235. MPCIO &mpcio;
  236. public:
  237. MPCTIO(MPCIO &mpcio, int thread_num):
  238. thread_num(thread_num), mpcio(mpcio) {}
  239. // Queue up data to the peer or to the server
  240. void queue_peer(const void *data, size_t len) {
  241. if (mpcio.player < 2) {
  242. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  243. mpcpio.peerios[thread_num].queue(data, len);
  244. }
  245. }
  246. void queue_server(const void *data, size_t len) {
  247. if (mpcio.player < 2) {
  248. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  249. mpcpio.serverios[thread_num].queue(data, len);
  250. }
  251. }
  252. // Receive data from the peer or to the server
  253. size_t recv_peer(void *data, size_t len) {
  254. if (mpcio.player < 2) {
  255. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  256. return mpcpio.peerios[thread_num].recv(data, len);
  257. }
  258. return 0;
  259. }
  260. size_t recv_server(void *data, size_t len) {
  261. if (mpcio.player < 2) {
  262. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  263. return mpcpio.serverios[thread_num].recv(data, len);
  264. }
  265. return 0;
  266. }
  267. // Queue up data to p0 or p1
  268. void queue_p0(const void *data, size_t len) {
  269. if (mpcio.player == 2) {
  270. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  271. mpcsrvio.p0ios[thread_num].queue(data, len);
  272. }
  273. }
  274. void queue_p1(const void *data, size_t len) {
  275. if (mpcio.player == 2) {
  276. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  277. mpcsrvio.p1ios[thread_num].queue(data, len);
  278. }
  279. }
  280. // Receive data from p0 or p1
  281. size_t recv_p0(void *data, size_t len) {
  282. if (mpcio.player == 2) {
  283. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  284. return mpcsrvio.p0ios[thread_num].recv(data, len);
  285. }
  286. return 0;
  287. }
  288. size_t recv_p1(void *data, size_t len) {
  289. if (mpcio.player == 2) {
  290. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  291. return mpcsrvio.p1ios[thread_num].recv(data, len);
  292. }
  293. return 0;
  294. }
  295. // Send all queued data for this thread
  296. void send() {
  297. if (mpcio.player < 2) {
  298. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  299. mpcpio.peerios[thread_num].send();
  300. mpcpio.serverios[thread_num].send();
  301. } else {
  302. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  303. mpcsrvio.p0ios[thread_num].send();
  304. mpcsrvio.p1ios[thread_num].send();
  305. }
  306. }
  307. // Functions to get precomputed values. If we're in the online
  308. // phase, get them from PreCompStorage. If we're in the
  309. // preprocessing phase, read them from the server.
  310. MultTriple triple() {
  311. MultTriple val;
  312. if (mpcio.player < 2) {
  313. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  314. if (mpcpio.preprocessing) {
  315. recv_server(&val, sizeof(val));
  316. } else {
  317. mpcpio.triples[thread_num].get(val);
  318. }
  319. } else if (mpcio.preprocessing) {
  320. // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
  321. // (X0*Y1 + Y0*X1) = (Z0+Z1)
  322. value_t X0, Y0, Z0, X1, Y1, Z1;
  323. arc4random_buf(&X0, sizeof(X0));
  324. arc4random_buf(&Y0, sizeof(Y0));
  325. arc4random_buf(&Z0, sizeof(Z0));
  326. arc4random_buf(&X1, sizeof(X1));
  327. arc4random_buf(&Y1, sizeof(Y1));
  328. Z1 = X0 * Y1 + X1 * Y0 - Z0;
  329. MultTriple T0, T1;
  330. T0 = std::make_tuple(X0, Y0, Z0);
  331. T1 = std::make_tuple(X1, Y1, Z1);
  332. queue_p0(&T0, sizeof(T0));
  333. queue_p1(&T1, sizeof(T1));
  334. }
  335. return val;
  336. }
  337. HalfTriple halftriple() {
  338. HalfTriple val;
  339. if (mpcio.player < 2) {
  340. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  341. if (mpcpio.preprocessing) {
  342. mpcpio.serverios[thread_num].recv(boost::asio::buffer(&val, sizeof(val)));
  343. } else {
  344. mpcpio.halftriples[thread_num].get(val);
  345. }
  346. } else if (mpcio.preprocessing) {
  347. // Create half-triples (X0,Z0),(Y1,Z1) such that
  348. // X0*Y1 = Z0 + Z1
  349. value_t X0, Z0, Y1, Z1;
  350. arc4random_buf(&X0, sizeof(X0));
  351. arc4random_buf(&Z0, sizeof(Z0));
  352. arc4random_buf(&Y1, sizeof(Y1));
  353. Z1 = X0 * Y1 - Z0;
  354. HalfTriple H0, H1;
  355. H0 = std::make_tuple(X0, Z0);
  356. H1 = std::make_tuple(Y1, Z1);
  357. queue_p0(&H0, sizeof(H0));
  358. queue_p1(&H1, sizeof(H1));
  359. }
  360. return val;
  361. }
  362. // Accessors
  363. inline int player() { return mpcio.player; }
  364. inline bool preprocessing() { return mpcio.preprocessing; }
  365. inline bool is_server() { return mpcio.player == 2; }
  366. };
// Set up the socket connections between the two computational parties
// (P0 and P1) and the server party (P2).  For each connection, the
// lower-numbered party does the accept() and the higher-numbered party
// does the connect().
// (Definitions live in the corresponding .cpp file.)

// Computational parties call this version with player=0 or 1.
// NOTE(review): the MPCPeerIO constructor above expects peersocks and
// serversocks to come back with one connected socket per thread —
// confirm against the definition.
void mpcio_setup_computational(unsigned player,
boost::asio::io_context &io_context,
const char *p0addr, // can be NULL when player=0
int num_threads,
std::deque<tcp::socket> &peersocks,
std::deque<tcp::socket> &serversocks);

// Server calls this version.
// NOTE(review): the MPCServerIO constructor above expects p0socks and
// p1socks to come back with one connected socket per thread — confirm
// against the definition.
void mpcio_setup_server(boost::asio::io_context &io_context,
const char *p0addr, const char *p1addr, int num_threads,
std::deque<tcp::socket> &p0socks,
std::deque<tcp::socket> &p1socks);
  383. #endif