12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006 |
- #include <sys/time.h> // getrusage
- #include <sys/resource.h> // getrusage
- #include "mpcio.hpp"
- #include "rdpf.hpp"
- #include "cdpf.hpp"
- #include "bitutils.hpp"
- #include "coroutine.hpp"
// T is the type being stored
// N is a type whose "name" static member is a string naming the type
// so that we can report something useful to the user if they try
// to read a type that we don't have any more values for
template<typename T, typename N>
PreCompStorage<T,N>::PreCompStorage(unsigned player, ProcessingMode mode,
        const char *filenameprefix, unsigned thread_num) :
    name(N::name), depth(0)
{
    // Delegate to init() with the default depth of 0 (storage that is
    // not indexed by depth)
    init(player, mode, filenameprefix, thread_num);
}
- template<typename T, typename N>
- void PreCompStorage<T,N>::init(unsigned player, ProcessingMode mode,
- const char *filenameprefix, unsigned thread_num, nbits_t depth)
- {
- if (mode != MODE_ONLINE) return;
- std::string filename(filenameprefix);
- char suffix[20];
- if (depth) {
- this->depth = depth;
- sprintf(suffix, "%02d.p%d.t%u", depth, player%10, thread_num);
- } else {
- sprintf(suffix, ".p%d.t%u", player%10, thread_num);
- }
- filename.append(suffix);
- storage.open(filename);
- // It's OK if not every file exists; so don't worry about checking
- // for errors here. We'll report an error in get() if we actually
- // try to use a value for which we don't have a precomputed file.
- count = 0;
- }
// Read the next precomputed value into nextval, terminating the
// program with a diagnostic if the storage stream has run out of
// values (or was never successfully opened)
template<typename T, typename N>
void PreCompStorage<T,N>::get(T& nextval)
{
    storage >> nextval;
    if (!storage.good()) {
        // Report which storage type (and depth, if depth-indexed)
        // ran dry, so the user knows what to regenerate
        std::cerr << "Failed to read precomputed value from " << name;
        if (depth) {
            std::cerr << (int)depth;
        }
        std::cerr << " storage\n";
        exit(1);
    }
    ++count;
}
// Launch an async write of the message at the head of messagequeue.
// Called with messagequeuelock held (by send(), or re-acquired in the
// completion handler below); the handler pops the message it just
// sent and re-arms itself if more messages are waiting.
void MPCSingleIO::async_send_from_msgqueue()
{
#ifdef SEND_LAMPORT_CLOCKS
    // With Lamport clocks, each queued message is a (header, payload)
    // pair; send both with one gathered write
    std::vector<boost::asio::const_buffer> tosend;
    tosend.push_back(boost::asio::buffer(messagequeue.front().header));
    tosend.push_back(boost::asio::buffer(messagequeue.front().message));
#endif
    boost::asio::async_write(sock,
#ifdef SEND_LAMPORT_CLOCKS
        tosend,
#else
        boost::asio::buffer(messagequeue.front()),
#endif
        // NOTE(review): [&] captures `this` by pointer; assumes the
        // MPCSingleIO outlives all outstanding writes — confirm
        [&](boost::system::error_code ec, std::size_t amt){
            messagequeuelock.lock();
            messagequeue.pop();
            if (messagequeue.size() > 0) {
                async_send_from_msgqueue();
            }
            messagequeuelock.unlock();
        });
}
// Append len bytes from data to the current outgoing message, tagging
// the message with the given Lamport clock value if it is a new one.
// Returns 1 if this call started a new message (the first queue()
// since the last explicit send()), else 0.
size_t MPCSingleIO::queue(const void *data, size_t len, lamport_t lamport)
{
    // Is this a new message?
    size_t newmsg = 0;

    dataqueue.append((const char *)data, len);

    // If this is the first queue() since the last explicit send(),
    // which we'll know because message_lamport will be nullopt, set
    // message_lamport to the current Lamport clock.  Note that the
    // boolean test tests whether message_lamport is nullopt, not
    // whether its value is zero.
    if (!message_lamport) {
        message_lamport = lamport;
        newmsg = 1;
    }

#ifdef VERBOSE_COMMS
    printf("Queue %s.%d len=%lu lamp=%u: ", dest.c_str(), thread_num,
        len, message_lamport.value());
    for (size_t i=0;i<len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

    // If we already have some full packets worth of data, may as
    // well send it.
    // (28800 is an empirically-chosen flush threshold — TODO confirm
    // its rationale)
    if (dataqueue.size() > 28800) {
        send(true);
    }

    return newmsg;
}
// Flush the current outgoing message to the socket.  implicit_send is
// true when called automatically from queue() (buffer full), false for
// an explicit end-of-message send(); only an explicit send() resets
// message_lamport so the next queue() starts a new message.
void MPCSingleIO::send(bool implicit_send)
{
    size_t thissize = dataqueue.size();
    // Ignore spurious calls to send(), except for resetting
    // message_lamport if this was an explicit send().
    if (thissize == 0) {
#ifdef SEND_LAMPORT_CLOCKS
        // If this was an explicit send(), reset the message_lamport so
        // that it gets updated at the next queue().
        if (!implicit_send) {
            message_lamport.reset();
        }
#endif
        return;
    }

#ifdef RECORD_IOTRACE
    // Sends are recorded as positive sizes (receives as negative)
    iotrace.push_back(thissize);
#endif

    messagequeuelock.lock();

    // Move the current message to send into the message queue (this
    // moves a pointer to the data, not copying the data itself)
#ifdef SEND_LAMPORT_CLOCKS
    messagequeue.emplace(std::move(dataqueue),
        message_lamport.value());
    // If this was an explicit send(), reset the message_lamport so
    // that it gets updated at the next queue().
    if (!implicit_send) {
        message_lamport.reset();
    }
#else
    messagequeue.emplace(std::move(dataqueue));
#endif

    // If this is now the first thing in the message queue, launch
    // an async_write to write it
    if (messagequeue.size() == 1) {
        async_send_from_msgqueue();
    }
    messagequeuelock.unlock();
}
// Receive exactly len bytes into data, updating the given Lamport
// clock from the clocks carried in incoming message headers.  Returns
// the number of bytes received.
size_t MPCSingleIO::recv(void *data, size_t len, lamport_t &lamport)
{
#ifdef VERBOSE_COMMS
    size_t orig_len = len;
    printf("Recv %s.%d len=%lu lamp=%u ", dest.c_str(), thread_num,
        len, lamport);
#endif

#ifdef SEND_LAMPORT_CLOCKS
    char *cdata = (char *)data;
    size_t res = 0;
    // Messages arrive framed as (uint32 length, lamport clock, bytes);
    // satisfy the caller's request across as many frames as needed
    while (len > 0) {
        while (recvdataremain == 0) {
            // Read a new header
            char hdr[sizeof(uint32_t) + sizeof(lamport_t)];
            uint32_t datalen;
            lamport_t recv_lamport;
            boost::asio::read(sock, boost::asio::buffer(hdr, sizeof(hdr)));
            memmove(&datalen, hdr, sizeof(datalen));
            memmove(&recv_lamport, hdr+sizeof(datalen), sizeof(lamport_t));
            // Lamport receive rule: our clock becomes
            // max(ours, sender's clock + 1)
            lamport_t new_lamport = recv_lamport + 1;
            if (lamport < new_lamport) {
                lamport = new_lamport;
            }
            if (datalen > 0) {
                recvdata.resize(datalen, '\0');
                boost::asio::read(sock, boost::asio::buffer(recvdata));
                recvdataremain = datalen;
            }
        }
        // Copy out as much as the caller wants; the unread tail of the
        // current frame is the last recvdataremain bytes of recvdata
        size_t amttoread = len;
        if (amttoread > recvdataremain) {
            amttoread = recvdataremain;
        }
        memmove(cdata, recvdata.data()+recvdata.size()-recvdataremain,
            amttoread);
        cdata += amttoread;
        len -= amttoread;
        recvdataremain -= amttoread;
        res += amttoread;
    }
#else
    size_t res = boost::asio::read(sock, boost::asio::buffer(data, len));
#endif

#ifdef VERBOSE_COMMS
    printf("nlamp=%u: ", lamport);
    for (size_t i=0;i<orig_len;++i) {
        printf("%02x", ((const unsigned char*)data)[i]);
    }
    printf("\n");
#endif

#ifdef RECORD_IOTRACE
    // Receives are recorded as negative sizes (sends as positive)
    iotrace.push_back(-(ssize_t(res)));
#endif
    return res;
}
#ifdef RECORD_IOTRACE
// Write the recorded I/O trace to the given stream, optionally
// prefixed by a label.  Positive entries are sends, negative entries
// are receives (see send() and recv()).
void MPCSingleIO::dumptrace(std::ostream &os, const char *label)
{
    if (label != NULL) {
        os << label << " ";
    }
    os << "IO trace:";
    for (const auto &entry : iotrace) {
        os << " " << entry;
    }
    os << "\n";
}
#endif
// Reset the per-thread communication and AES statistics counters to
// zero, and restart the wall-clock and CPU-clock timers
void MPCIO::reset_stats()
{
    msgs_sent.clear();
    msg_bytes_sent.clear();
    aes_ops.clear();
    // One counter of each kind per thread
    for (size_t i=0; i<num_threads; ++i) {
        msgs_sent.push_back(0);
        msg_bytes_sent.push_back(0);
        aes_ops.push_back(0);
    }
    steady_start = boost::chrono::steady_clock::now();
    cpu_start = boost::chrono::process_cpu_clock::now();
}
- // Report the memory usage
- void MPCIO::dump_memusage(std::ostream &os)
- {
- struct rusage ru;
- getrusage(RUSAGE_SELF, &ru);
- os << "Mem: " << ru.ru_maxrss << " KiB\n";
- }
// Report communication, AES, timing, and memory statistics, summed
// over all threads, to the given stream
void MPCIO::dump_stats(std::ostream &os)
{
    size_t tot_msgs_sent = 0;
    size_t tot_msg_bytes_sent = 0;
    size_t tot_aes_ops = 0;
    // Sum the per-thread counters
    for (auto& n : msgs_sent) {
        tot_msgs_sent += n;
    }
    for (auto& n : msg_bytes_sent) {
        tot_msg_bytes_sent += n;
    }
    for (auto& n : aes_ops) {
        tot_aes_ops += n;
    }
    // Elapsed time since the last reset_stats()
    auto steady_elapsed =
        boost::chrono::steady_clock::now() - steady_start;
    auto cpu_elapsed =
        boost::chrono::process_cpu_clock::now() - cpu_start;

    os << tot_msgs_sent << " messages sent\n";
    os << tot_msg_bytes_sent << " message bytes sent\n";
    os << lamport << " Lamport clock (latencies)\n";
    os << tot_aes_ops << " local AES operations\n";
    os << boost::chrono::duration_cast
        <boost::chrono::milliseconds>(steady_elapsed) <<
        " wall clock time\n";
    os << cpu_elapsed << " {real;user;system}\n";
    dump_memusage(os);
}
// Construct the I/O state for a computational peer (P0 or P1): open
// the precomputed-value storage of each kind for each thread, and
// wrap the per-thread sockets to the other peer and to the server
MPCPeerIO::MPCPeerIO(unsigned player, ProcessingMode mode,
        std::deque<tcp::socket> &peersocks,
        std::deque<tcp::socket> &serversocks) :
    MPCIO(player, mode, peersocks.size())
{
    // Note: this local shadows the MPCIO member of the same name
    unsigned num_threads = unsigned(peersocks.size());
    for (unsigned i=0; i<num_threads; ++i) {
        multtriples.emplace_back(player, mode, "mults", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        halftriples.emplace_back(player, mode, "halves", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        andtriples.emplace_back(player, mode, "ands", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        valselecttriples.emplace_back(player, mode, "selects", i);
    }
    // RDPF storage is additionally indexed by depth (1..ADDRESS_MAX_BITS)
    rdpftriples.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpftriples[i][depth-1].init(player, mode,
                "rdpf", i, depth);
        }
    }
    for (unsigned i=0; i<num_threads; ++i) {
        cdpfs.emplace_back(player, mode, "cdpf", i);
    }
    // Take ownership of the per-thread sockets
    for (unsigned i=0; i<num_threads; ++i) {
        peerios.emplace_back(std::move(peersocks[i]), "peer", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        serverios.emplace_back(std::move(serversocks[i]), "srv", i);
    }
}
- void MPCPeerIO::dump_precomp_stats(std::ostream &os)
- {
- for (size_t i=0; i<multtriples.size(); ++i) {
- size_t cnt;
- if (i > 0) {
- os << " ";
- }
- os << "T" << i;
- cnt = multtriples[i].get_stats();
- if (cnt > 0) {
- os << " m:" << cnt;
- }
- cnt = halftriples[i].get_stats();
- if (cnt > 0) {
- os << " h:" << cnt;
- }
- cnt = andtriples[i].get_stats();
- if (cnt > 0) {
- os << " a:" << cnt;
- }
- cnt = valselecttriples[i].get_stats();
- if (cnt > 0) {
- os << " s:" << cnt;
- }
- for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
- cnt = rdpftriples[i][depth-1].get_stats();
- if (cnt > 0) {
- os << " r" << int(depth) << ":" << cnt;
- }
- }
- cnt = cdpfs[i].get_stats();
- if (cnt > 0) {
- os << " c:" << cnt;
- }
- }
- os << "\n";
- }
- void MPCPeerIO::reset_precomp_stats()
- {
- for (size_t i=0; i<multtriples.size(); ++i) {
- multtriples[i].reset_stats();
- halftriples[i].reset_stats();
- andtriples[i].reset_stats();
- valselecttriples[i].reset_stats();
- for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
- rdpftriples[i][depth-1].reset_stats();
- }
- }
- }
// Report the generic MPCIO statistics, followed by the counts of
// precomputed values this peer has consumed
void MPCPeerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Construct the I/O state for the server: open the per-thread,
// per-depth RDPF-pair storage, and wrap the per-thread sockets to P0
// and P1.  The server's player number is always 2.
MPCServerIO::MPCServerIO(ProcessingMode mode,
        std::deque<tcp::socket> &p0socks,
        std::deque<tcp::socket> &p1socks) :
    MPCIO(2, mode, p0socks.size())
{
    rdpfpairs.resize(num_threads);
    for (unsigned i=0; i<num_threads; ++i) {
        // One storage per (thread, depth) pair
        for (unsigned depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
            rdpfpairs[i][depth-1].init(player, mode,
                "rdpf", i, depth);
        }
    }
    // Take ownership of the per-thread sockets
    for (unsigned i=0; i<num_threads; ++i) {
        p0ios.emplace_back(std::move(p0socks[i]), "p0", i);
    }
    for (unsigned i=0; i<num_threads; ++i) {
        p1ios.emplace_back(std::move(p1socks[i]), "p1", i);
    }
}
- void MPCServerIO::dump_precomp_stats(std::ostream &os)
- {
- for (size_t i=0; i<rdpfpairs.size(); ++i) {
- if (i > 0) {
- os << " ";
- }
- os << "T" << i;
- for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
- size_t cnt = rdpfpairs[i][depth-1].get_stats();
- if (cnt > 0) {
- os << " r" << int(depth) << ":" << cnt;
- }
- }
- }
- os << "\n";
- }
- void MPCServerIO::reset_precomp_stats()
- {
- for (size_t i=0; i<rdpfpairs.size(); ++i) {
- for (nbits_t depth=1; depth<=ADDRESS_MAX_BITS; ++depth) {
- rdpfpairs[i][depth-1].reset_stats();
- }
- }
- }
// Report the generic MPCIO statistics, followed by the counts of
// precomputed values the server has consumed
void MPCServerIO::dump_stats(std::ostream &os)
{
    MPCIO::dump_stats(os);
    os << "Precomputed values used: ";
    dump_precomp_stats(os);
}
// Per-thread I/O state: a thread-local Lamport clock (seeded from the
// parent MPCIO's) and iostreams layered over this thread's sockets
// (peer + server for computational players, p0 + p1 for the server)
MPCTIO::MPCTIO(MPCIO &mpcio, int thread_num, int num_threads) :
    thread_num(thread_num), local_cpu_nthreads(num_threads),
    communication_nthreads(num_threads),
    thread_lamport(mpcio.lamport), mpcio(mpcio),
#ifdef VERBOSE_COMMS
    round_num(0),
#endif
    last_andtriple_bits_remaining(0)
{
    if (mpcio.player < 2) {
        // Computational player: streams to our peer and to the server
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        peer_iostream.emplace(mpcpio.peerios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
        server_iostream.emplace(mpcpio.serverios[thread_num],
            thread_lamport, mpcpio.msgs_sent[thread_num],
            mpcpio.msg_bytes_sent[thread_num]);
    } else {
        // Server: streams to each of the computational players
        MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
        p0_iostream.emplace(mpcsrvio.p0ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
        p1_iostream.emplace(mpcsrvio.p1ios[thread_num],
            thread_lamport, mpcsrvio.msgs_sent[thread_num],
            mpcsrvio.msg_bytes_sent[thread_num]);
    }
}
// Sync our per-thread lamport clock with the master one in the
// mpcio.  You only need to call this explicitly if your MPCTIO
// outlives your thread (in which case call it after the join), or
// if your threads do interthread communication amongst themselves
// (in which case call it in the sending thread before the send, and
// call it in the receiving thread after the receive).
void MPCTIO::sync_lamport()
{
    // Update the mpcio Lamport time to be max of the thread Lamport
    // time and what we thought it was before.  We use this
    // compare_exchange construction in order to atomically
    // do the comparison, computation, and replacement
    lamport_t old_lamport = mpcio.lamport;
    lamport_t new_lamport = thread_lamport;
    do {
        if (new_lamport < old_lamport) {
            new_lamport = old_lamport;
        }
        // The next line atomically checks if lamport still has
        // the value old_lamport; if so, it changes its value to
        // new_lamport and returns true (ending the loop).  If
        // not, it sets old_lamport to the current value of
        // lamport, and returns false (continuing the loop so
        // that new_lamport can be recomputed based on this new
        // value).
    } while (!mpcio.lamport.compare_exchange_weak(
        old_lamport, new_lamport));
    // Adopt the agreed-upon global maximum as our own clock too
    thread_lamport = new_lamport;
}
// Only call this if you can be sure that there are no outstanding
// messages in flight, you can call it on all existing MPCTIOs, and
// you really want to reset the Lamport clock in the middle of a
// run.
void MPCTIO::reset_lamport()
{
    // Reset both our own Lamport clock and the parent MPCIO's
    thread_lamport = 0;
    mpcio.lamport = 0;
}
- // Queue up data to the peer or to the server
- void MPCTIO::queue_peer(const void *data, size_t len)
- {
- if (mpcio.player < 2) {
- MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
- size_t newmsg = mpcpio.peerios[thread_num].queue(data, len, thread_lamport);
- mpcpio.msgs_sent[thread_num] += newmsg;
- mpcpio.msg_bytes_sent[thread_num] += len;
- }
- }
- void MPCTIO::queue_server(const void *data, size_t len)
- {
- if (mpcio.player < 2) {
- MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
- size_t newmsg = mpcpio.serverios[thread_num].queue(data, len, thread_lamport);
- mpcpio.msgs_sent[thread_num] += newmsg;
- mpcpio.msg_bytes_sent[thread_num] += len;
- }
- }
- // Receive data from the peer or to the server
- size_t MPCTIO::recv_peer(void *data, size_t len)
- {
- if (mpcio.player < 2) {
- MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
- return mpcpio.peerios[thread_num].recv(data, len, thread_lamport);
- }
- return 0;
- }
- size_t MPCTIO::recv_server(void *data, size_t len)
- {
- if (mpcio.player < 2) {
- MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
- return mpcpio.serverios[thread_num].recv(data, len, thread_lamport);
- }
- return 0;
- }
- // Queue up data to p0 or p1
- void MPCTIO::queue_p0(const void *data, size_t len)
- {
- if (mpcio.player == 2) {
- MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
- size_t newmsg = mpcsrvio.p0ios[thread_num].queue(data, len, thread_lamport);
- mpcsrvio.msgs_sent[thread_num] += newmsg;
- mpcsrvio.msg_bytes_sent[thread_num] += len;
- }
- }
- void MPCTIO::queue_p1(const void *data, size_t len)
- {
- if (mpcio.player == 2) {
- MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
- size_t newmsg = mpcsrvio.p1ios[thread_num].queue(data, len, thread_lamport);
- mpcsrvio.msgs_sent[thread_num] += newmsg;
- mpcsrvio.msg_bytes_sent[thread_num] += len;
- }
- }
- // Receive data from p0 or p1
- size_t MPCTIO::recv_p0(void *data, size_t len)
- {
- if (mpcio.player == 2) {
- MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
- return mpcsrvio.p0ios[thread_num].recv(data, len, thread_lamport);
- }
- return 0;
- }
- size_t MPCTIO::recv_p1(void *data, size_t len)
- {
- if (mpcio.player == 2) {
- MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
- return mpcsrvio.p1ios[thread_num].recv(data, len, thread_lamport);
- }
- return 0;
- }
- // Send all queued data for this thread
- void MPCTIO::send()
- {
- #ifdef VERBOSE_COMMS
- printf("Thread %u sending round %lu\n", thread_num, ++round_num);
- #endif
- if (mpcio.player < 2) {
- MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
- mpcpio.peerios[thread_num].send();
- mpcpio.serverios[thread_num].send();
- } else {
- MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
- mpcsrvio.p0ios[thread_num].send();
- mpcsrvio.p1ios[thread_num].send();
- }
- }
// Functions to get precomputed values.  If we're in the online
// phase, get them from PreCompStorage.  If we're in the
// preprocessing or online-only phase, read them from the server.

// Get one multiplication triple share.  On the server side (in
// preprocessing), generate and distribute a fresh triple instead, and
// return val unmodified.
MultTriple MPCTIO::multtriple(yield_t &yield)
{
    MultTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Preprocessing: yield so the server's generation round
            // can run, then read our share from the server
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.multtriples[thread_num].inc();
        } else {
            // Online: consume one triple from precomputed storage
            mpcpio.multtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create multiplication triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 + Y0*X1) = (Z0+Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        // Z1 is determined by the other five random values
        Z1 = X0 * Y1 + X1 * Y0 - Z0;
        MultTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}
// When halftriple() is used internally to another preprocessing
// operation, don't tally it, so that it doesn't appear separately in
// the stats from the preprocessing operation that invoked it
HalfTriple MPCTIO::halftriple(yield_t &yield, bool tally)
{
    HalfTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Preprocessing: read our share from the server
            yield();
            recv_server(&val, sizeof(val));
            if (tally) {
                mpcpio.halftriples[thread_num].inc();
            }
        } else {
            // Online: consume one half-triple from precomputed storage
            mpcpio.halftriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create half-triples (X0,Z0),(Y1,Z1) such that
        // X0*Y1 = Z0 + Z1
        value_t X0, Z0, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&Y1, sizeof(Y1));
        // Z1 is determined by the other three random values
        Z1 = X0 * Y1 - Z0;
        HalfTriple H0, H1;
        H0 = std::make_tuple(X0, Z0);
        H1 = std::make_tuple(Y1, Z1);
        queue_p0(&H0, sizeof(H0));
        queue_p1(&H1, sizeof(H1));
        yield();
    }
    return val;
}
// Get one AND triple share: like multtriple, but over bitwise AND/XOR
// instead of multiplication/addition.
// NOTE(review): the declared return type is MultTriple while the
// local is AndTriple; this compiles only if the two tuple types
// coincide — confirm, and consider declaring AndTriple here.
MultTriple MPCTIO::andtriple(yield_t &yield)
{
    AndTriple val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Preprocessing: read our share from the server
            yield();
            recv_server(&val, sizeof(val));
            mpcpio.andtriples[thread_num].inc();
        } else {
            // Online: consume one AND triple from precomputed storage
            mpcpio.andtriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create AND triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0&Y1 ^ Y0&X1) = (Z0^Z1)
        value_t X0, Y0, Z0, X1, Y1, Z1;
        arc4random_buf(&X0, sizeof(X0));
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        arc4random_buf(&X1, sizeof(X1));
        arc4random_buf(&Y1, sizeof(Y1));
        // Z1 is determined by the other five random values
        Z1 = (X0 & Y1) ^ (X1 & Y0) ^ Z0;
        AndTriple T0, T1;
        T0 = std::make_tuple(X0, Y0, Z0);
        T1 = std::make_tuple(X1, Y1, Z1);
        queue_p0(&T0, sizeof(T0));
        queue_p1(&T1, sizeof(T1));
        yield();
    }
    return val;
}
// Get a select triple over DPFnodes: shares (X,Y,Z) with X a bit and
// Y,Z DPFnodes such that (X0*Y1 ^ X1*Y0) = (Z0^Z1).  These are not
// backed by file storage, so they are only available when not in
// online mode.
SelectTriple<DPFnode> MPCTIO::nodeselecttriple(yield_t &yield)
{
    SelectTriple<DPFnode> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            uint8_t Xbyte;
            yield();
            // X is sent as a whole byte; keep only its low bit
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
        } else {
            // NOTE(review): this only reports the error and returns an
            // unset val, unlike PreCompStorage::get which exits —
            // confirm whether this should be fatal
            std::cerr << "Attempted to read SelectTriple<DPFnode> in online phase\n";
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        DPFnode Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        DPFnode X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = if128_mask[X0];
        X1ext = if128_mask[X1];
        // Z1 is determined by the other five random values
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}
// Get a select triple over value_t: shares (X,Y,Z) with X a bit and
// Y,Z value_t such that (X0*Y1 ^ X1*Y0) = (Z0^Z1)
SelectTriple<value_t> MPCTIO::valselecttriple(yield_t &yield)
{
    SelectTriple<value_t> val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            uint8_t Xbyte;
            yield();
            // X is sent as a whole byte; keep only its low bit
            recv_server(&Xbyte, sizeof(Xbyte));
            val.X = Xbyte & 1;
            recv_server(&val.Y, sizeof(val.Y));
            recv_server(&val.Z, sizeof(val.Z));
            mpcpio.valselecttriples[thread_num].inc();
        } else {
            // Online: consume one select triple from storage
            mpcpio.valselecttriples[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Create triples (X0,Y0,Z0),(X1,Y1,Z1) such that
        // (X0*Y1 ^ Y0*X1) = (Z0^Z1)
        bit_t X0, X1;
        value_t Y0, Z0, Y1, Z1;
        X0 = arc4random() & 1;
        arc4random_buf(&Y0, sizeof(Y0));
        arc4random_buf(&Z0, sizeof(Z0));
        X1 = arc4random() & 1;
        arc4random_buf(&Y1, sizeof(Y1));
        value_t X0ext, X1ext;
        // Sign-extend X0 and X1 (so that 0 -> 0000...0 and
        // 1 -> 1111...1)
        X0ext = -value_t(X0);
        X1ext = -value_t(X1);
        // Z1 is determined by the other five random values
        Z1 = ((X0ext & Y1) ^ (X1ext & Y0)) ^ Z0;
        queue_p0(&X0, sizeof(X0));
        queue_p0(&Y0, sizeof(Y0));
        queue_p0(&Z0, sizeof(Z0));
        queue_p1(&X1, sizeof(X1));
        queue_p1(&Y1, sizeof(Y1));
        queue_p1(&Z1, sizeof(Z1));
        yield();
    }
    return val;
}
- SelectTriple<bit_t> MPCTIO::bitselecttriple(yield_t &yield)
- {
- // Do we need to fetch a new AND triple?
- if (last_andtriple_bits_remaining == 0) {
- last_andtriple = andtriple(yield);
- last_andtriple_bits_remaining = 8*sizeof(value_t);
- }
- --last_andtriple_bits_remaining;
- value_t mask = value_t(1) << last_andtriple_bits_remaining;
- SelectTriple<bit_t> val;
- val.X = !!(std::get<0>(last_andtriple) & mask);
- val.Y = !!(std::get<1>(last_andtriple) & mask);
- val.Z = !!(std::get<2>(last_andtriple) & mask);
- return val;
- }
// Only computational peers call this; the server should be calling
// rdpfpair() at the same time
RDPFTriple<1> MPCTIO::rdpftriple(yield_t &yield, nbits_t depth,
    bool keep_expansion)
{
    assert(mpcio.player < 2);
    RDPFTriple<1> val;

    MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
    if (mpcio.mode == MODE_ONLINE) {
        // Online: consume a precomputed triple of this depth
        mpcpio.rdpftriples[thread_num][depth-1].get(val);
    } else {
        // Preprocessing: generate the triple interactively, then send
        // the server its component (P0 sends dpf[1], P1 sends dpf[2])
        val = RDPFTriple<1>(*this, yield, depth,
            keep_expansion);
        iostream_server() <<
            val.dpf[(mpcio.player == 0) ? 1 : 2];
        mpcpio.rdpftriples[thread_num][depth-1].inc();
        yield();
    }
    return val;
}
// Only the server calls this; the computational peers should be calling
// rdpftriple() at the same time
RDPFPair<1> MPCTIO::rdpfpair(yield_t &yield, nbits_t depth)
{
    assert(mpcio.player == 2);
    RDPFPair<1> val;

    MPCServerIO &mpcsrvio = static_cast<MPCServerIO&>(mpcio);
    if (mpcio.mode == MODE_ONLINE) {
        // Online: consume a precomputed pair of this depth
        mpcsrvio.rdpfpairs[thread_num][depth-1].get(val);
    } else {
        // Preprocessing: participate in the peers' triple generation
        // (trip is needed only for its side effects), then receive
        // one DPF from each peer to assemble the pair
        RDPFTriple<1> trip(*this, yield, depth, true);
        yield();
        iostream_p0() >> val.dpf[0];
        iostream_p1() >> val.dpf[1];
        mpcsrvio.rdpfpairs[thread_num][depth-1].inc();
    }
    return val;
}
// Get one CDPF share: peers read it from precomputed storage (online)
// or from the server (preprocessing); the server generates and
// distributes the pair during preprocessing.
CDPF MPCTIO::cdpf(yield_t &yield)
{
    CDPF val;
    if (mpcio.player < 2) {
        MPCPeerIO &mpcpio = static_cast<MPCPeerIO&>(mpcio);
        if (mpcpio.mode != MODE_ONLINE) {
            // Preprocessing: receive our half from the server
            yield();
            iostream_server() >> val;
            mpcpio.cdpfs[thread_num].inc();
        } else {
            // Online: consume one CDPF from precomputed storage
            mpcpio.cdpfs[thread_num].get(val);
        }
    } else if (mpcio.mode != MODE_ONLINE) {
        // Server: generate a fresh pair and send one half to each peer
        auto [ cdpf0, cdpf1 ] = CDPF::generate(aes_ops());
        iostream_p0() << cdpf0;
        iostream_p1() << cdpf1;
        yield();
    }
    return val;
}
// Well-known TCP ports for the three pairwise connections; the
// higher-numbered player always connects to the lower-numbered one

// The port number for the P1 -> P0 connection
static const unsigned short port_p1_p0 = 2115;
// The port number for the P2 -> P0 connection
static const unsigned short port_p2_p0 = 2116;
// The port number for the P2 -> P1 connection
static const unsigned short port_p2_p1 = 2117;
// Set up the sockets for a computational player (P0 or P1):
// num_threads connections to the other peer and num_threads
// connections to the server P2.  P0 listens for both P1 and P2;
// P1 connects to P0 and listens for P2.  Each connecting side sends
// its 2-byte thread number right after connecting, so accepted
// sockets can be slotted into the deques by thread number regardless
// of arrival order.
void mpcio_setup_computational(unsigned player,
    boost::asio::io_context &io_context,
    const char *p0addr,  // can be NULL when player=0
    int num_threads,
    std::deque<tcp::socket> &peersocks,
    std::deque<tcp::socket> &serversocks)
{
    if (player == 0) {
        // Listen for connections from P1 and from P2
        tcp::acceptor acceptor_p1(io_context,
            tcp::endpoint(tcp::v4(), port_p1_p0));
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p0));

        // Pre-create placeholder sockets so incoming connections can
        // be stored at their thread-number index
        peersocks.clear();
        serversocks.clear();
        for (int i=0;i<num_threads;++i) {
            peersocks.emplace_back(io_context);
            serversocks.emplace_back(io_context);
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket peersock = acceptor_p1.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                // NOTE(review): the bad connection is dropped, but its
                // slot remains an unconnected placeholder socket —
                // confirm callers can cope
                std::cerr << "Received bad thread number from peer\n";
            } else {
                peersocks[thread_num] = std::move(peersock);
            }
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else if (player == 1) {
        // Listen for connections from P2, make num_threads connections to P0
        tcp::acceptor acceptor_p2(io_context,
            tcp::endpoint(tcp::v4(), port_p2_p1));

        tcp::resolver resolver(io_context);
        boost::system::error_code err;
        peersocks.clear();
        serversocks.clear();
        // Placeholders only for the accepted (P2) side; the P0 side is
        // connected in thread order below
        for (int i=0;i<num_threads;++i) {
            serversocks.emplace_back(io_context);
        }
        for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
            tcp::socket peersock(io_context);
            // Retry until P0's listener is accepting connections
            while(1) {
                boost::asio::connect(peersock,
                    resolver.resolve(p0addr, std::to_string(port_p1_p0)), err);
                if (!err) break;
                std::cerr << "Connection to p0 refused, will retry.\n";
                sleep(1);
            }
            // Write 2 bytes to the socket indicating which thread
            // number this socket is for
            boost::asio::write(peersock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            peersocks.push_back(std::move(peersock));
        }
        for (int i=0;i<num_threads;++i) {
            tcp::socket serversock = acceptor_p2.accept();
            // Read 2 bytes from the socket, which will be the thread
            // number
            unsigned short thread_num;
            boost::asio::read(serversock,
                boost::asio::buffer(&thread_num, sizeof(thread_num)));
            if (thread_num >= num_threads) {
                std::cerr << "Received bad thread number from server\n";
            } else {
                serversocks[thread_num] = std::move(serversock);
            }
        }
    } else {
        std::cerr << "Invalid player number passed to mpcio_setup_computational\n";
    }
}
// Set up the server (P2) sockets: make num_threads connections to
// each of P0 and P1, retrying once per second until each listener is
// up, and tag every connection with its 2-byte thread number.
void mpcio_setup_server(boost::asio::io_context &io_context,
    const char *p0addr, const char *p1addr, int num_threads,
    std::deque<tcp::socket> &p0socks,
    std::deque<tcp::socket> &p1socks)
{
    // Make connections to P0 and P1
    tcp::resolver resolver(io_context);
    boost::system::error_code err;

    p0socks.clear();
    p1socks.clear();
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p0sock(io_context);
        // Retry until P0's listener is accepting connections
        while(1) {
            boost::asio::connect(p0sock,
                resolver.resolve(p0addr, std::to_string(port_p2_p0)), err);
            if (!err) break;
            std::cerr << "Connection to p0 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p0sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p0socks.push_back(std::move(p0sock));
    }
    for (unsigned short thread_num = 0; thread_num < num_threads; ++thread_num) {
        tcp::socket p1sock(io_context);
        // Retry until P1's listener is accepting connections
        while(1) {
            boost::asio::connect(p1sock,
                resolver.resolve(p1addr, std::to_string(port_p2_p1)), err);
            if (!err) break;
            std::cerr << "Connection to p1 refused, will retry.\n";
            sleep(1);
        }
        // Write 2 bytes to the socket indicating which thread
        // number this socket is for
        boost::asio::write(p1sock,
            boost::asio::buffer(&thread_num, sizeof(thread_num)));
        p1socks.push_back(std::move(p1sock));
    }
}
|