// mpcio.hpp
  1. #ifndef __MCPIO_HPP__
  2. #define __MCPIO_HPP__
#include <cassert>
#include <cstdio>
#include <cstdlib>

#include <deque>
#include <fstream>
#include <iostream>
#include <queue>
#include <string>
#include <vector>

#include <boost/asio.hpp>
#include <boost/thread.hpp>

#include "types.hpp"
  12. using boost::asio::ip::tcp;
  13. // Classes to represent stored precomputed data (e.g., multiplication triples)
  14. template<typename T>
  15. class PreCompStorage {
  16. public:
  17. PreCompStorage(unsigned player, bool preprocessing,
  18. const char *filenameprefix, unsigned thread_num);
  19. void get(T& nextval);
  20. private:
  21. std::ifstream storage;
  22. };
  23. template<typename T>
  24. PreCompStorage<T>::PreCompStorage(unsigned player, bool preprocessing,
  25. const char *filenameprefix, unsigned thread_num) {
  26. if (preprocessing) return;
  27. std::string filename(filenameprefix);
  28. char suffix[20];
  29. sprintf(suffix, ".p%d.t%u", player%10, thread_num);
  30. filename.append(suffix);
  31. storage.open(filename);
  32. if (storage.fail()) {
  33. std::cerr << "Failed to open " << filename << "\n";
  34. exit(1);
  35. }
  36. }
  37. template<typename T>
  38. void PreCompStorage<T>::get(T& nextval) {
  39. storage.read((char *)&nextval, sizeof(T));
  40. if (storage.gcount() != sizeof(T)) {
  41. std::cerr << "Failed to read precomputed value from storage\n";
  42. exit(1);
  43. }
  44. }
  45. // A class to wrap a socket to another MPC party. This wrapping allows
  46. // us to do some useful logging, and perform async_writes transparently
  47. // to the application.
  48. class MPCSingleIO {
  49. tcp::socket sock;
  50. size_t totread, totwritten;
  51. std::vector<ssize_t> iotrace;
  52. // To avoid blocking if both we and our peer are trying to send
  53. // something very large, and neither side is receiving, we will send
  54. // with async_write. But this has a number of implications:
  55. // - The data to be sent has to be copied into this MPCSingleIO,
  56. // since asio::buffer pointers are not guaranteed to remain valid
  57. // after the end of the coroutine that created them
  58. // - We have to keep a queue of messages to be sent, in case
  59. // coroutines call send() before the previous message has finished
  60. // being sent
  61. // - This queue may be accessed from the async_write thread as well
  62. // as the work thread that uses this MPCSingleIO directly (there
  63. // should be only one of the latter), so we need some locking
  64. // This is where we accumulate data passed in queue()
  65. std::string dataqueue;
  66. // When send() is called, the above dataqueue is appended to this
  67. // messagequeue, and the dataqueue is reset. If messagequeue was
  68. // empty before this append, launch async_write to write the first
  69. // thing in the messagequeue. When async_write completes, it will
  70. // delete the first thing in the messagequeue, and see if there are
  71. // any more elements. If so, it will start another async_write.
  72. // The invariant is that there is an async_write currently running
  73. // iff messagequeue is nonempty.
  74. std::queue<std::string> messagequeue;
  75. // Never touch the above messagequeue without holding this lock (you
  76. // _can_ touch the strings it contains, though, if you looked one up
  77. // while holding the lock).
  78. boost::mutex messagequeuelock;
  79. // Asynchronously send the first message from the message queue.
  80. // * The messagequeuelock must be held when this is called! *
  81. // This method may be called from either thread (the work thread or
  82. // the async_write handler thread).
  83. void async_send_from_msgqueue() {
  84. boost::asio::async_write(sock,
  85. boost::asio::buffer(messagequeue.front()),
  86. [&](boost::system::error_code ec, std::size_t amt){
  87. messagequeuelock.lock();
  88. messagequeue.pop();
  89. if (messagequeue.size() > 0) {
  90. async_send_from_msgqueue();
  91. }
  92. messagequeuelock.unlock();
  93. });
  94. }
  95. public:
  96. MPCSingleIO(tcp::socket &&sock) :
  97. sock(std::move(sock)), totread(0), totwritten(0) {}
  98. void queue(const void *data, size_t len) {
  99. dataqueue.append((const char *)data, len);
  100. // If we already have some full packets worth of data, may as
  101. // well send it.
  102. if (dataqueue.size() > 28800) {
  103. send();
  104. }
  105. }
  106. void send() {
  107. size_t thissize = dataqueue.size();
  108. // Ignore spurious calls to send()
  109. if (thissize == 0) return;
  110. iotrace.push_back(thissize);
  111. messagequeuelock.lock();
  112. // Move the current message to send into the message queue (this
  113. // moves a pointer to the data, not copying the data itself)
  114. messagequeue.emplace(std::move(dataqueue));
  115. // If this is now the first thing in the message queue, launch
  116. // an async_write to write it
  117. if (messagequeue.size() == 1) {
  118. async_send_from_msgqueue();
  119. }
  120. messagequeuelock.unlock();
  121. }
  122. size_t recv(const std::vector<boost::asio::mutable_buffer>& buffers) {
  123. size_t res = boost::asio::read(sock, buffers);
  124. iotrace.push_back(-(ssize_t(res)));
  125. return res;
  126. }
  127. size_t recv(const boost::asio::mutable_buffer& buffer) {
  128. size_t res = boost::asio::read(sock, buffer);
  129. iotrace.push_back(-(ssize_t(res)));
  130. return res;
  131. }
  132. size_t recv(void *data, size_t len) {
  133. size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
  134. iotrace.push_back(-(ssize_t(res)));
  135. return res;
  136. }
  137. void dumptrace(std::ostream &os, const char *label = NULL) {
  138. if (label) {
  139. os << label << " ";
  140. }
  141. os << "IO trace:";
  142. for (auto& s: iotrace) {
  143. os << " " << s;
  144. }
  145. os << "\n";
  146. }
  147. void resettrace() {
  148. iotrace.clear();
  149. }
  150. };
  151. // A base class to represent all of a computation peer or server's IO,
  152. // either to other parties or to local storage (the computation and
  153. // server cases are separate subclasses below).
  154. struct MPCIO {
  155. int player;
  156. bool preprocessing;
  157. MPCIO(int player, bool preprocessing) :
  158. player(player), preprocessing(preprocessing) {}
  159. };
  160. // A class to represent all of a computation peer's IO, either to other
  161. // parties or to local storage
  162. struct MPCPeerIO : public MPCIO {
  163. // We use a deque here instead of a vector because you can't have a
  164. // vector of a type without a copy constructor (tcp::socket is the
  165. // culprit), but you can have a deque of those for some reason.
  166. std::deque<MPCSingleIO> peerios;
  167. std::deque<MPCSingleIO> serverios;
  168. std::vector<PreCompStorage<MultTriple>> triples;
  169. std::vector<PreCompStorage<HalfTriple>> halftriples;
  170. MPCPeerIO(unsigned player, bool preprocessing,
  171. std::deque<tcp::socket> &peersocks,
  172. std::deque<tcp::socket> &serversocks) :
  173. MPCIO(player, preprocessing)
  174. {
  175. unsigned num_threads = unsigned(peersocks.size());
  176. for (unsigned i=0; i<num_threads; ++i) {
  177. triples.emplace_back(player, preprocessing, "triples", i);
  178. }
  179. for (unsigned i=0; i<num_threads; ++i) {
  180. halftriples.emplace_back(player, preprocessing, "halves", i);
  181. }
  182. for (auto &&sock : peersocks) {
  183. peerios.emplace_back(std::move(sock));
  184. }
  185. for (auto &&sock : serversocks) {
  186. serverios.emplace_back(std::move(sock));
  187. }
  188. }
  189. };
  190. // A class to represent all of the server party's IO, either to
  191. // computational parties or to local storage
  192. struct MPCServerIO : public MPCIO {
  193. std::deque<MPCSingleIO> p0ios;
  194. std::deque<MPCSingleIO> p1ios;
  195. MPCServerIO(bool preprocessing,
  196. std::deque<tcp::socket> &p0socks,
  197. std::deque<tcp::socket> &p1socks) :
  198. MPCIO(2, preprocessing)
  199. {
  200. for (auto &&sock : p0socks) {
  201. p0ios.emplace_back(std::move(sock));
  202. }
  203. for (auto &&sock : p1socks) {
  204. p1ios.emplace_back(std::move(sock));
  205. }
  206. }
  207. };
  208. // A handle to one thread's sockets and streams in a MPCIO
  209. class MPCTIO {
  210. int thread_num;
  211. MPCIO &mpcio;
  212. public:
  213. MPCTIO(MPCIO &mpcio, int thread_num):
  214. thread_num(thread_num), mpcio(mpcio) {}
  215. // Queue up data to the peer or to the server
  216. void queue_peer(const void *data, size_t len) {
  217. assert(mpcio.player < 2);
  218. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  219. mpcpio.peerios[thread_num].queue(data, len);
  220. }
  221. void queue_server(const void *data, size_t len) {
  222. assert(mpcio.player < 2);
  223. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  224. mpcpio.serverios[thread_num].queue(data, len);
  225. }
  226. // Receive data from the peer or to the server
  227. size_t recv_peer(void *data, size_t len) {
  228. assert(mpcio.player < 2);
  229. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  230. return mpcpio.peerios[thread_num].recv(data, len);
  231. }
  232. size_t recv_server(void *data, size_t len) {
  233. assert(mpcio.player < 2);
  234. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  235. return mpcpio.serverios[thread_num].recv(data, len);
  236. }
  237. // Queue up data to p0 or p1
  238. void queue_p0(const void *data, size_t len) {
  239. assert(mpcio.player == 2);
  240. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  241. mpcsrvio.p0ios[thread_num].queue(data, len);
  242. }
  243. void queue_p1(const void *data, size_t len) {
  244. assert(mpcio.player == 2);
  245. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  246. mpcsrvio.p1ios[thread_num].queue(data, len);
  247. }
  248. // Receive data from p0 or p1
  249. size_t recv_p0(void *data, size_t len) {
  250. assert(mpcio.player == 2);
  251. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  252. return mpcsrvio.p0ios[thread_num].recv(data, len);
  253. }
  254. size_t recv_p1(void *data, size_t len) {
  255. assert(mpcio.player == 2);
  256. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  257. return mpcsrvio.p1ios[thread_num].recv(data, len);
  258. }
  259. // Send all queued data for this thread
  260. void send() {
  261. if (mpcio.player < 2) {
  262. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  263. mpcpio.peerios[thread_num].send();
  264. mpcpio.serverios[thread_num].send();
  265. } else {
  266. MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
  267. mpcsrvio.p0ios[thread_num].send();
  268. mpcsrvio.p1ios[thread_num].send();
  269. }
  270. }
  271. // Functions to get precomputed values. If we're in the online
  272. // phase, get them from PreCompStorage. If we're in the
  273. // preprocessing phase, read them from the server.
  274. MultTriple triple() {
  275. assert(mpcio.player < 2);
  276. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  277. MultTriple val;
  278. if (mpcpio.preprocessing) {
  279. mpcpio.serverios[thread_num].recv(boost::asio::buffer(&val, sizeof(val)));
  280. } else {
  281. mpcpio.triples[thread_num].get(val);
  282. }
  283. return val;
  284. }
  285. HalfTriple halftriple() {
  286. assert(mpcio.player < 2);
  287. MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
  288. HalfTriple val;
  289. if (mpcpio.preprocessing) {
  290. mpcpio.serverios[thread_num].recv(boost::asio::buffer(&val, sizeof(val)));
  291. } else {
  292. mpcpio.halftriples[thread_num].get(val);
  293. }
  294. return val;
  295. }
  296. // Accessors
  297. inline int player() { return mpcio.player; }
  298. inline bool preprocessing() { return mpcio.preprocessing; }
  299. inline bool is_server() { return mpcio.player == 2; }
  300. };
// Set up the socket connections between the two computational parties
// (P0 and P1) and the server party (P2).  For each connection, the
// lower-numbered party does the accept() and the higher-numbered party
// does the connect().

// Computational parties call this version with player=0 or 1.
// On return, peersocks and serversocks are populated with the
// connected sockets — presumably one per thread (MPCPeerIO above
// derives its thread count from peersocks.size()).
void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr,  // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks);

// Server calls this version.  On return, p0socks and p1socks hold the
// connected sockets to P0 and P1 respectively.
void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks);
  317. #endif