// NOTE(review): this extract is corrupted. The file has been collapsed onto a
// few physical lines, and in many places the text between a '<' and the next
// '>' has been stripped (include headers such as the one for arc4random_buf,
// template arguments like std::vector<coro_t>, and loop bounds like
// "i < as_memsize"), so chunks of code are missing outright. Recover the
// original from version control before attempting to compile; the comments
// below describe only what is visible here.
//
// online_test: smoke-tests the basic MPC primitives. Non-server players
// (player != 2) randomize additive (RegAS), XOR (RegXS), and bit (RegBS)
// shares, then one coroutine per primitive runs mpc_mul, mpc_valuemul,
// mpc_flagmult, mpc_oswap, mpc_xs_to_as, mpc_select, mpc_and, and mpc_or
// concurrently via run_coroutines; afterwards the resulting shares are
// printed. The tail of this physical line is the start of the RDPF test:
// player 2 obtains an RDPFPair<1>, players 0/1 an RDPFTriple<1>, and each
// DPF is evaluated leaf by leaf (the evaluation loop body is partly lost to
// the angle-bracket stripping noted above).
#include // arc4random_buf #include "online.hpp" #include "mpcops.hpp" #include "rdpf.hpp" #include "duoram.hpp" #include "cdpf.hpp" #include "cell.hpp" static void online_test(MPCIO &mpcio, const PRACOptions &opts, char **args) { nbits_t nbits = VALUE_BITS; if (*args) { nbits = atoi(*args); } size_t as_memsize = 9; size_t xs_memsize = 3; MPCTIO tio(mpcio, 0); bool is_server = (mpcio.player == 2); RegAS *A = new RegAS[as_memsize]; RegXS *AX = new RegXS[xs_memsize]; value_t V; RegBS F0, F1; RegBS FA, FO; RegXS X; if (!is_server) { A[0].randomize(); A[1].randomize(); F0.randomize(); A[4].randomize(); F1.randomize(); A[6].randomize(); A[7].randomize(); X.randomize(); AX[0].randomize(); AX[1].randomize(); arc4random_buf(&V, sizeof(V)); printf("A:\n"); for (size_t i=0; i coroutines; coroutines.emplace_back( [&tio, &A, nbits](yield_t &yield) { mpc_mul(tio, yield, A[2], A[0], A[1], nbits); }); coroutines.emplace_back( [&tio, &A, V, nbits](yield_t &yield) { mpc_valuemul(tio, yield, A[3], V, nbits); }); coroutines.emplace_back( [&tio, &A, &F0, nbits](yield_t &yield) { mpc_flagmult(tio, yield, A[5], F0, A[4], nbits); }); coroutines.emplace_back( [&tio, &A, &F1, nbits](yield_t &yield) { mpc_oswap(tio, yield, A[6], A[7], F1, nbits); }); coroutines.emplace_back( [&tio, &A, &X, nbits](yield_t &yield) { mpc_xs_to_as(tio, yield, A[8], X, nbits); }); coroutines.emplace_back( [&tio, &AX, &F0, nbits](yield_t &yield) { mpc_select(tio, yield, AX[2], F0, AX[0], AX[1], nbits); }); coroutines.emplace_back( [&tio, &FA, &F0, &F1, nbits](yield_t &yield) { mpc_and(tio, yield, FA, F0, F1); }); coroutines.emplace_back( [&tio, &FO, &F0, &F1, nbits](yield_t &yield) { mpc_or(tio, yield, FO, F0, F1); }); run_coroutines(tio, coroutines); if (!is_server) { printf("\n"); printf("A:\n"); for (size_t i=0; i dp = tio.rdpfpair(yield, depth); for (int i=0;i<2;++i) { const RDPF<1> &dpf = dp.dpf[i]; for (address_t x=0;x<(address_t(1)< dt = tio.rdpftriple(yield, depth); for (int i=0;i<3;++i) { const RDPF<1> 
// (continuation of the RDPF correctness test from the previous line)
// Per-DPF consistency check: player 1 sends its scaled XOR/sum shares to
// player 0, who folds them together; every leaf is then evaluated and any
// nonzero reconstructed unit/scaled share triggers a "****" diagnostic line.
// Then rdpf_timing: per-thread, expands each DPF (pair for player 2, triple
// for players 0/1) with dpf.expand(aes_ops) and accumulates a scaled XOR
// over all leaves — a throughput benchmark, no correctness output.
// Then parallel_streameval_rdpf: splits the 2^depth leaf range across
// num_threads boost::asio threads, each accumulating a per-thread RegXS,
// and reduces to a single value_t. rdpfeval_timing (start) uses it to
// compare the streamed scaled XOR against dpf.scaled_xor.xshare.
// NOTE(review): loop bounds/template arguments between '<' and '>' are
// missing here too (same extraction damage) — restore from VCS.
&dpf = dt.dpf[i]; RegXS peer_scaled_xor; RegAS peer_scaled_sum; if (tio.player() == 1) { tio.iostream_peer() << dpf.scaled_xor << dpf.scaled_sum; } else { tio.iostream_peer() >> peer_scaled_xor >> peer_scaled_sum; peer_scaled_sum += dpf.scaled_sum; peer_scaled_xor ^= dpf.scaled_xor; } for (address_t x=0;x<(address_t(1)<> peer_ub >> peer_ua >> peer_sx >> peer_sa; ub ^= peer_ub; ua += peer_ua; sx ^= peer_sx; sa += peer_sa; if (ub.bshare || ua.ashare || sx.xshare || sa.ashare) { printf("**** %x %016lx %016lx %016lx\n", ub.bshare, ua.ashare, sx.xshare, sa.ashare); printf("SCALE %016lx %016lx\n", peer_scaled_xor.xshare, peer_scaled_sum.ashare); } } } printf("\n"); } } } }); }); } pool.join(); } static void rdpf_timing(MPCIO &mpcio, const PRACOptions &opts, char **args) { nbits_t depth=6; if (*args) { depth = atoi(*args); ++args; } int num_threads = opts.num_threads; boost::asio::thread_pool pool(num_threads); for (int thread_num = 0; thread_num < num_threads; ++thread_num) { boost::asio::post(pool, [&mpcio, thread_num, depth] { MPCTIO tio(mpcio, thread_num); run_coroutines(tio, [&tio, depth] (yield_t &yield) { size_t &aes_ops = tio.aes_ops(); if (tio.player() == 2) { RDPFPair<1> dp = tio.rdpfpair(yield, depth); for (int i=0;i<2;++i) { RDPF<1> &dpf = dp.dpf[i]; dpf.expand(aes_ops); RegXS scaled_xor; for (address_t x=0;x<(address_t(1)< dt = tio.rdpftriple(yield, depth); for (int i=0;i<3;++i) { RDPF<1> &dpf = dt.dpf[i]; dpf.expand(aes_ops); RegXS scaled_xor; for (address_t x=0;x<(address_t(1)< &dpf, address_t start, int num_threads) { RegXS scaled_xor[num_threads]; boost::asio::thread_pool pool(num_threads); address_t totsize = (address_t(1)< dp = tio.rdpfpair(yield, depth); for (int i=0;i<2;++i) { RDPF<1> &dpf = dp.dpf[i]; value_t scaled_xor = parallel_streameval_rdpf(mpcio, dpf, start, num_threads); printf("%016lx\n%016lx\n", scaled_xor, dpf.scaled_xor.xshare); printf("\n"); } } else { RDPFTriple<1> dt = tio.rdpftriple(yield, depth); for (int i=0;i<3;++i) { RDPF<1> &dpf 
// (continuation of rdpfeval_timing's triple branch from the previous line)
// par_rdpfeval_timing: like rdpfeval_timing but uses ParallelEval with a
// reduce() callback returning dpf.scaled_xs(leaf); prints the reduced
// result.xshare next to dpf.scaled_xor.xshare so the two can be compared.
// Note the inner "nbits_t depth = dpf.depth();" intentionally shadows the
// outer depth with the DPF's own depth.
// tupleeval_timing: single-pass StreamEval over the pair (player 2) or
// triple (players 0/1), XOR-accumulating each component's scaled leaf
// share, then printing each accumulator against the stored scaled_xor.
// par_tupleeval_timing (start): the ParallelEval equivalent of the above.
// NOTE(review): 'start' is parsed as hex (strtoull base 16) in
// par_rdpfeval_timing but as atoi (base 10) in tupleeval_timing — possibly
// intentional, but worth confirming. Template args/loop bounds between '<'
// and '>' are again missing from this extract.
= dt.dpf[i]; value_t scaled_xor = parallel_streameval_rdpf(mpcio, dpf, start, num_threads); printf("%016lx\n%016lx\n", scaled_xor, dpf.scaled_xor.xshare); printf("\n"); } } }); } static void par_rdpfeval_timing(MPCIO &mpcio, const PRACOptions &opts, char **args) { nbits_t depth=6; address_t start=0; if (*args) { depth = atoi(*args); ++args; } if (*args) { start = strtoull(*args, NULL, 16); ++args; } int num_threads = opts.num_threads; MPCTIO tio(mpcio, 0, num_threads); run_coroutines(tio, [&tio, depth, start, num_threads] (yield_t &yield) { if (tio.player() == 2) { RDPFPair<1> dp = tio.rdpfpair(yield, depth); for (int i=0;i<2;++i) { RDPF<1> &dpf = dp.dpf[i]; nbits_t depth = dpf.depth(); auto pe = ParallelEval(dpf, start, 0, address_t(1)<::node &leaf) { return dpf.scaled_xs(leaf); }); printf("%016lx\n%016lx\n", result.xshare, dpf.scaled_xor.xshare); printf("\n"); } } else { RDPFTriple<1> dt = tio.rdpftriple(yield, depth); for (int i=0;i<3;++i) { RDPF<1> &dpf = dt.dpf[i]; nbits_t depth = dpf.depth(); auto pe = ParallelEval(dpf, start, 0, address_t(1)<::node &leaf) { return dpf.scaled_xs(leaf); }); printf("%016lx\n%016lx\n", result.xshare, dpf.scaled_xor.xshare); printf("\n"); } } }); } static void tupleeval_timing(MPCIO &mpcio, const PRACOptions &opts, char **args) { nbits_t depth=6; address_t start=0; if (*args) { depth = atoi(*args); ++args; } if (*args) { start = atoi(*args); ++args; } int num_threads = opts.num_threads; MPCTIO tio(mpcio, 0, num_threads); run_coroutines(tio, [&tio, depth, start] (yield_t &yield) { size_t &aes_ops = tio.aes_ops(); if (tio.player() == 2) { RDPFPair<1> dp = tio.rdpfpair(yield, depth); RegXS scaled_xor0, scaled_xor1; auto ev = StreamEval(dp, start, 0, aes_ops, false); for (address_t x=0;x<(address_t(1)< dt = tio.rdpftriple(yield, depth); RegXS scaled_xor0, scaled_xor1, scaled_xor2; auto ev = StreamEval(dt, start, 0, aes_ops, false); for (address_t x=0;x<(address_t(1)< dp = tio.rdpfpair(yield, depth); auto pe = ParallelEval(dp, start, 
// (continuation of par_tupleeval_timing from the previous line)
// The pair branch reduces a std::tuple of scaled leaf shares via
// pe.reduce() and prints each tuple component's xshare against the
// corresponding dp.dpf[k].scaled_xor.xshare; the triple branch does the
// same for three components. Then duoram_test<T> begins: T is RegAS or
// RegXS (additive vs XOR shared database); it builds a Duoram of size
// 2^depth, masks the random 'share' index to the address width, and
// exercises updates/reads via additively shared (aidx), XOR shared (xidx),
// and explicit indices, with player-dependent constants M/N forming
// recognizable reconstructed values (e.g. 0xbabb0000 + 0x0000a66e).
// NOTE(review): because the file was collapsed onto single physical lines,
// the original end-of-line comments ("// T is RegAS or ...",
// "// Writing and reading ...") now comment out the rest of this physical
// line — further evidence this extract cannot compile as-is; the tuple
// type aliases ("using V = std::tuple<...>") were also eaten by the
// angle-bracket stripping. Restore from VCS.
0, address_t(1)<; V result, init; result = pe.reduce(init, [&dp] (int thread_num, address_t i, const RDPFPair<1>::node &leaf) { std::tuple scaled; dp.scaled(scaled, leaf); return scaled; }); printf("%016lx\n%016lx\n", std::get<0>(result).xshare, dp.dpf[0].scaled_xor.xshare); printf("\n"); printf("%016lx\n%016lx\n", std::get<1>(result).xshare, dp.dpf[1].scaled_xor.xshare); printf("\n"); } else { RDPFTriple<1> dt = tio.rdpftriple(yield, depth); auto pe = ParallelEval(dt, start, 0, address_t(1)<; V result, init; result = pe.reduce(init, [&dt] (int thread_num, address_t i, const RDPFTriple<1>::node &leaf) { std::tuple scaled; dt.scaled(scaled, leaf); return scaled; }); printf("%016lx\n%016lx\n", std::get<0>(result).xshare, dt.dpf[0].scaled_xor.xshare); printf("\n"); printf("%016lx\n%016lx\n", std::get<1>(result).xshare, dt.dpf[1].scaled_xor.xshare); printf("\n"); printf("%016lx\n%016lx\n", std::get<2>(result).xshare, dt.dpf[2].scaled_xor.xshare); printf("\n"); } }); } // T is RegAS or RegXS for additive or XOR shared database respectively template static void duoram_test(MPCIO &mpcio, const PRACOptions &opts, char **args) { nbits_t depth=6; address_t share=arc4random(); if (*args) { depth = atoi(*args); ++args; } if (*args) { share = atoi(*args); ++args; } share &= ((address_t(1)< oram(tio.player(), size); auto A = oram.flat(tio, yield); RegAS aidx, aidx2, aidx3; aidx.ashare = share; aidx2.ashare = share + tio.player(); aidx3.ashare = share + 1; T M; if (tio.player() == 0) { M.set(0xbabb0000); } else { M.set(0x0000a66e); } RegXS xidx; xidx.xshare = share; T N; if (tio.player() == 0) { N.set(0xdead0000); } else { N.set(0x0000beef); } // Writing and reading with additively shared indices printf("Additive Updating\n"); A[aidx] += M; printf("Additive Reading\n"); T Aa = A[aidx]; // Writing and reading with XOR shared indices printf("XOR Updating\n"); A[xidx] += N; printf("XOR Reading\n"); T Ax = A[xidx]; T Ae; // Writing and reading with explicit indices if (depth > 2) { 
// (continuation of duoram_test<T> from the previous line)
// Explicit-index update/read (only when depth > 2 so indices 5/6 exist),
// then batched independent reads (A[std::array{aidx, aidx2, aidx3}]) and
// batched independent updates with player-scaled constants Aw1..Aw3; for
// small depths (<= 10) the ORAM is dumped and reconstructed so player 0
// can print/verify the plaintext contents.
// Then duoram<T>: a statistics benchmark. It builds a random linked list
// of 'items' nodes, times dependent (pointer-chasing) updates and reads,
// resets Lamport clocks / MPCIO stats around each phase, then repeats the
// workload as independent batched updates and reads over index/value
// vectors offset by the shared constant 'one'.
// The tail of this line is the share-reconstruction half of the CDPF
// comparison test: lt/eq/gt/eeq bit shares and xsh are exchanged with the
// peer, reconstructed, and printed.
// NOTE(review): same extraction damage as above — template parameter lists
// and '<'-bounded loop conditions are missing, and original end-of-line
// comments swallow the rest of this collapsed physical line.
printf("Explicit Updating\n"); A[5] += Aa; printf("Explicit Reading\n"); Ae = A[6]; } // Simultaneous independent reads printf("3 independent reading\n"); std::vector Av = A[std::array { aidx, aidx2, aidx3 }]; // Simultaneous independent updates T Aw1, Aw2, Aw3; Aw1.set(0x101010101010101 * tio.player()); Aw2.set(0x202020202020202 * tio.player()); Aw3.set(0x303030303030303 * tio.player()); printf("3 independent updating\n"); A[std::array { aidx, aidx2, aidx3 }] -= std::array { Aw1, Aw2, Aw3 }; if (depth <= 10) { oram.dump(); auto check = A.reconstruct(); if (tio.player() == 0) { for (address_t i=0;i static void duoram(MPCIO &mpcio, const PRACOptions &opts, char **args) { nbits_t depth = 6; int items = 4; if (*args) { depth = atoi(*args); ++args; } if (*args) { items = atoi(*args); ++args; } MPCTIO tio(mpcio, 0, opts.num_threads); run_coroutines(tio, [&mpcio, &tio, depth, items] (yield_t &yield) { size_t size = size_t(1)< oram(tio.player(), size); auto A = oram.flat(tio, yield); std::cout << "===== DEPENDENT UPDATES =====\n"; mpcio.reset_stats(); tio.reset_lamport(); // Make a linked list of length items std::vector list_indices; T prev_index, next_index; prev_index.randomize(depth); for (int i=0;i read_outputs = A[list_indices]; tio.sync_lamport(); mpcio.dump_stats(std::cout); std::cout << "\n===== INDEPENDENT UPDATES =====\n"; mpcio.reset_stats(); tio.reset_lamport(); // Make a vector of indices 1 larger than those in list_indices, // and a vector of values 1 larger than those in outputs std::vector indep_indices, indep_values; T one; one.set(tio.player()); // Sets the shared value to 1 for (int i=0;i> peer_xsh >> peer_lt >> peer_eq >> peer_gt >> peer_eeq; lt ^= peer_lt; eq ^= peer_eq; gt ^= peer_gt; eeq ^= peer_eeq; xsh += peer_xsh; int lti = int(lt.bshare); int eqi = int(eq.bshare); int gti = int(gt.bshare); int eeqi = int(eeq.bshare); x = xsh.share(); printf(": %d %d %d %d ", lti, eqi, gti, eeqi); bool signbit = (x >> 63); if (lti + eqi + gti != 1 || eqi != 
// (continuation of compare_test_one's verification from the previous line)
// Exactly one of lt/eq/gt must reconstruct to 1 and eq must agree with the
// standalone equality eeq; otherwise "INCONSISTENT". The reconstructed
// difference x's sign bit then selects which single flag is correct
// ("<", "=", ">"), anything else prints "INCORRECT". res collects pass/fail.
// compare_test_target: runs compare_test_one for one target against 13
// probe values chosen around comparison edge cases: x itself, 0, 1,
// 15/16/17 (around a nibble boundary), their negations, and 2^63, 2^63±1
// (around the sign-bit boundary).
// compare_test (start): picks a random target and x (overridable as hex
// CLI args), then per thread runs compare_test_target for the same 13
// edge-case values used as targets.
eeqi) { printf("INCONSISTENT"); res = 0; } else if (x == 0 && eqi) { printf("="); } else if (!signbit && gti) { printf(">"); } else if (signbit && lti) { printf("<"); } else { printf("INCORRECT"); res = 0; } } printf("\n"); } return res; } static int compare_test_target(MPCTIO &tio, yield_t &yield, value_t target, value_t x) { int res = 1; res &= compare_test_one(tio, yield, target, x); res &= compare_test_one(tio, yield, target, 0); res &= compare_test_one(tio, yield, target, 1); res &= compare_test_one(tio, yield, target, 15); res &= compare_test_one(tio, yield, target, 16); res &= compare_test_one(tio, yield, target, 17); res &= compare_test_one(tio, yield, target, -1); res &= compare_test_one(tio, yield, target, -15); res &= compare_test_one(tio, yield, target, -16); res &= compare_test_one(tio, yield, target, -17); res &= compare_test_one(tio, yield, target, (value_t(1)<<63)); res &= compare_test_one(tio, yield, target, (value_t(1)<<63)+1); res &= compare_test_one(tio, yield, target, (value_t(1)<<63)-1); return res; } static void compare_test(MPCIO &mpcio, const PRACOptions &opts, char **args) { value_t target, x; arc4random_buf(&target, sizeof(target)); arc4random_buf(&x, sizeof(x)); if (*args) { target = strtoull(*args, NULL, 16); ++args; } if (*args) { x = strtoull(*args, NULL, 16); ++args; } int num_threads = opts.num_threads; boost::asio::thread_pool pool(num_threads); for (int thread_num = 0; thread_num < num_threads; ++thread_num) { boost::asio::post(pool, [&mpcio, thread_num, target, x] { MPCTIO tio(mpcio, thread_num); run_coroutines(tio, [&tio, target, x] (yield_t &yield) { int res = 1; res &= compare_test_target(tio, yield, target, x); res &= compare_test_target(tio, yield, 0, x); res &= compare_test_target(tio, yield, 1, x); res &= compare_test_target(tio, yield, 15, x); res &= compare_test_target(tio, yield, 16, x); res &= compare_test_target(tio, yield, 17, x); res &= compare_test_target(tio, yield, -1, x); res &= compare_test_target(tio, yield, 
// (continuation of compare_test's probe list from the previous line)
// Player 0 summarizes with "All tests passed!" or "TEST FAILURES".
// sort_test: per thread, builds a Duoram of size 2^depth, switches the
// flat view to explicit-only mode, initializes the memory to random values
// with one coroutine per cell, then (per the code visible after the
// extraction damage) sorts and reconstructs for verification at small
// depths.
// bsearch_test: target is random (or a hex CLI arg) masked to the address
// width; a random database is created, sorted, and binary-searched for the
// target; the resulting shared index tindex is exchanged with the peer and
// reconstructed, and at depth <= 10 the reconstructed database is printed
// for manual checking. The tail of this line begins the command
// dispatcher, selecting duoram_test over RegXS or RegAS depending on
// opts.use_xor_db.
// NOTE(review): substantial code between '<' and '>' is missing on this
// line as well (loop bodies, template arguments, the sort invocation) —
// restore from VCS; comments above describe only what remains visible.
-15, x); res &= compare_test_target(tio, yield, -16, x); res &= compare_test_target(tio, yield, -17, x); res &= compare_test_target(tio, yield, (value_t(1)<<63), x); res &= compare_test_target(tio, yield, (value_t(1)<<63)+1, x); res &= compare_test_target(tio, yield, (value_t(1)<<63)-1, x); if (tio.player() == 0) { if (res == 1) { printf("All tests passed!\n"); } else { printf("TEST FAILURES\n"); } } }); }); } pool.join(); } static void sort_test(MPCIO &mpcio, const PRACOptions &opts, char **args) { nbits_t depth=6; if (*args) { depth = atoi(*args); ++args; } int num_threads = opts.num_threads; boost::asio::thread_pool pool(num_threads); for (int thread_num = 0; thread_num < num_threads; ++thread_num) { boost::asio::post(pool, [&mpcio, thread_num, depth] { MPCTIO tio(mpcio, thread_num); run_coroutines(tio, [&tio, depth] (yield_t &yield) { address_t size = address_t(1)< oram(tio.player(), size); auto A = oram.flat(tio, yield); A.explicitonly(true); // Initialize the memory to random values in parallel std::vector coroutines; for (address_t i=0; i>= 1; nbits_t depth=6; if (*args) { depth = atoi(*args); ++args; } if (*args) { target = strtoull(*args, NULL, 16); ++args; } int num_threads = opts.num_threads; boost::asio::thread_pool pool(num_threads); for (int thread_num = 0; thread_num < num_threads; ++thread_num) { boost::asio::post(pool, [&mpcio, thread_num, depth, target] { MPCTIO tio(mpcio, thread_num); run_coroutines(tio, [&tio, depth, target] (yield_t &yield) { address_t size = address_t(1)<> tshare; } // Create a random database and sort it // size_t &aes_ops = tio.aes_ops(); Duoram oram(tio.player(), size); auto A = oram.flat(tio, yield); A.explicitonly(true); // Initialize the memory to random values in parallel std::vector coroutines; for (address_t i=0; i> peer_tindex; tindex += peer_tindex; } if (depth <= 10) { auto check = A.reconstruct(); if (tio.player() == 0) { for (address_t i=0;i(mpcio, opts, args); } else { duoram_test(mpcio, opts, args); } } else if 
// (tail of the mode dispatcher whose head is on the previous line)
// Maps the first CLI argument to a test entry point: "cdpftest" ->
// cdpf_test, "cmptest" -> compare_test, "sorttest" -> sort_test,
// "bsearch" -> bsearch_test, "duoram" -> the duoram benchmark (the
// XOR-shared vs additive-shared variant is chosen by opts.use_xor_db;
// the template argument lists were lost to the extraction damage noted
// earlier in the file), "cell" -> cell. Unrecognized modes report
// "Unknown mode" on stderr. ++args consumes the mode word before the
// handler sees the remaining arguments.
(!strcmp(*args, "cdpftest")) { ++args; cdpf_test(mpcio, opts, args); } else if (!strcmp(*args, "cmptest")) { ++args; compare_test(mpcio, opts, args); } else if (!strcmp(*args, "sorttest")) { ++args; sort_test(mpcio, opts, args); } else if (!strcmp(*args, "bsearch")) { ++args; bsearch_test(mpcio, opts, args); } else if (!strcmp(*args, "duoram")) { ++args; if (opts.use_xor_db) { duoram(mpcio, opts, args); } else { duoram(mpcio, opts, args); } } else if (!strcmp(*args, "cell")) { ++args; cell(mpcio, opts, args); } else { std::cerr << "Unknown mode " << *args << "\n"; } }