#include <bsd/stdlib.h> // arc4random_buf

#include "online.hpp"
#include "mpcops.hpp"
#include "rdpf.hpp"
#include "duoram.hpp"
#include "cdpf.hpp"
#include "cell.hpp"
#include "heap.hpp"
#include "shapes.hpp"
#include "bst.hpp"
#include "avl.hpp"
#include "heapsampler.hpp"

// Test the basic MPC operations (multiplication, value multiplication,
// flag multiplication, oblivious swap, XOR-to-additive conversion, and
// bit operations), all run concurrently as coroutines.
static void online_test(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t nbits = VALUE_BITS;

    if (*args) { nbits = atoi(*args); }

    size_t as_memsize = 9;
    size_t xs_memsize = 3;

    MPCTIO tio(mpcio, 0);
    bool is_server = (mpcio.player == 2);

    RegAS *A = new RegAS[as_memsize];
    RegXS *AX = new RegXS[xs_memsize];
    value_t V;
    RegBS F0, F1, F2;
    RegBS FA, FO, FS;
    RegXS X;

    if (!is_server) {
        A[0].randomize();
        A[1].randomize();
        F0.randomize();
        A[4].randomize();
        F1.randomize();
        F2.randomize();
        A[6].randomize();
        A[7].randomize();
        X.randomize();
        AX[0].randomize();
        AX[1].randomize();
        arc4random_buf(&V, sizeof(V));
        printf("A:\n");
        for (size_t i=0; i<as_memsize; ++i) {
            printf("%016lx\n", A[i].ashare);
        }
        /* ... (remaining dump of the input shares) ... */
    }

    std::vector<coro_t> coroutines;
    coroutines.emplace_back(
        [&tio, &A, nbits](yield_t &yield) {
            mpc_mul(tio, yield, A[2], A[0], A[1], nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, V, nbits](yield_t &yield) {
            mpc_valuemul(tio, yield, A[3], V, nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &F0, nbits](yield_t &yield) {
            mpc_flagmult(tio, yield, A[5], F0, A[4], nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &F1, nbits](yield_t &yield) {
            mpc_oswap(tio, yield, A[6], A[7], F1, nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &X, nbits](yield_t &yield) {
            mpc_xs_to_as(tio, yield, A[8], X, nbits);
        });
    coroutines.emplace_back(
        [&tio, &AX, &F0, nbits](yield_t &yield) {
            mpc_select(tio, yield, AX[2], F0, AX[0], AX[1], nbits);
        });
    coroutines.emplace_back(
        [&tio, &FA, &F0, &F1](yield_t &yield) {
            mpc_and(tio, yield, FA, F0, F1);
        });
    coroutines.emplace_back(
        [&tio, &FO, &F0, &F1](yield_t &yield) {
            mpc_or(tio, yield, FO, F0, F1);
        });
    coroutines.emplace_back(
        [&tio, &FS, &F0, &F1, &F2](yield_t &yield) {
            mpc_select(tio, yield, FS, F2, F0, F1);
        });
    run_coroutines(tio, coroutines);

    if (!is_server) {
        printf("\n");
        printf("A:\n");
        for (size_t i=0; i<as_memsize; ++i) {
            printf("%016lx\n", A[i].ashare);
        }
        /* ... (remaining dump of the output shares) ... */
    }

    delete[] A;
    delete[] AX;
}
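// Test RDPF generation and evaluation.  The server (player 2) gets an
// RDPFPair and the computational players get an RDPFTriple; every DPF
// is evaluated at every point of its domain, and the players print
// their shares of the unit and scaled outputs so the reconstructed
// values can be checked.  If incremental is true, incremental RDPFs
// are exercised at every level from 1 to depth.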
template <nbits_t WIDTH>
static void rdpf_test(MPCIO &mpcio, const PRACOptions &opts,
    char **args, bool incremental)
{
    nbits_t depth=6;
    size_t num_iters = 1;

    if (*args) { depth = atoi(*args); ++args; }
    if (*args) { num_iters = atoi(*args); ++args; }

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&tio, depth, num_iters, incremental] (yield_t &yield) {
        size_t &aes_ops = tio.aes_ops();
        nbits_t min_level = incremental ? 1 : depth;
        for (size_t iter=0; iter < num_iters; ++iter) {
            if (tio.player() == 2) {
                RDPFPair<WIDTH> dp = tio.rdpfpair<WIDTH>(yield, depth, incremental);
                for (int i=0;i<2;++i) {
                    RDPF<WIDTH> &dpf = dp.dpf[i];
                    for (nbits_t level=min_level; level<=depth; ++level) {
                        if (incremental) {
                            printf("Level = %u\n\n", level);
                            dpf.depth(level);
                        }
                        for (address_t x=0;x<(address_t(1)<<level);++x) {
                            typename RDPF<WIDTH>::LeafNode leaf = dpf.leaf(x, aes_ops);
                            RegBS ub = dpf.unit_bs(leaf);
                            RegAS ua = dpf.unit_as(leaf);
                            typename RDPF<WIDTH>::RegXSW sx = dpf.scaled_xs(leaf);
                            typename RDPF<WIDTH>::RegASW sa = dpf.scaled_as(leaf);
                            printf("%04x %x %016lx", x, ub.bshare, ua.ashare);
                            for (nbits_t j=0;j<WIDTH;++j) {
                                printf(" %016lx %016lx", sx[j].xshare, sa[j].ashare);
                            }
                            printf("\n");
                        }
                        printf("\n");
                    }
                }
            } else {
                RDPFTriple<WIDTH> dt = tio.rdpftriple<WIDTH>(yield, depth, incremental);
                for (int i=0;i<3;++i) {
                    RDPF<WIDTH> &dpf = dt.dpf[i];
                    for (nbits_t level=min_level; level<=depth; ++level) {
                        if (incremental) {
                            printf("Level = %u\n", level);
                            dt.depth(level);
                            RegXS tshare;
                            dt.get_target(tshare);
                            printf("Target share = %lx\n\n", tshare.share());
                        }
                        typename RDPF<WIDTH>::RegXSW peer_scaled_xor;
                        typename RDPF<WIDTH>::RegASW peer_scaled_sum;
                        if (tio.player() == 1) {
                            tio.iostream_peer() <<
                                dpf.li[depth-level].scaled_xor <<
                                dpf.li[depth-level].scaled_sum;
                        } else {
                            tio.iostream_peer() >> peer_scaled_xor >> peer_scaled_sum;
                            peer_scaled_sum += dpf.li[depth-level].scaled_sum;
                            peer_scaled_xor ^= dpf.li[depth-level].scaled_xor;
                        }
                        for (address_t x=0;x<(address_t(1)<<level);++x) {
                            typename RDPF<WIDTH>::LeafNode leaf = dpf.leaf(x, aes_ops);
                            RegBS ub = dpf.unit_bs(leaf);
                            RegAS ua = dpf.unit_as(leaf);
                            typename RDPF<WIDTH>::RegXSW sx = dpf.scaled_xs(leaf);
                            typename RDPF<WIDTH>::RegASW sa = dpf.scaled_as(leaf);
                            printf("%04x %x %016lx", x, ub.bshare, ua.ashare);
                            for (nbits_t j=0;j<WIDTH;++j) {
                                printf(" %016lx %016lx", sx[j].xshare, sa[j].ashare);
                            }
                            printf("\n");
                            if (tio.player() == 1) {
                                tio.iostream_peer() << ub << ua << sx << sa;
                            } else {
                                RegBS peer_ub;
                                RegAS peer_ua;
                                typename RDPF<WIDTH>::RegXSW peer_sx;
                                typename RDPF<WIDTH>::RegASW peer_sa;
                                tio.iostream_peer() >> peer_ub >> peer_ua >>
                                    peer_sx >> peer_sa;
                                ub ^= peer_ub;
                                ua += peer_ua;
                                sx ^= peer_sx;
                                sa += peer_sa;
                                bool is_nonzero = ub.bshare || ua.ashare;
                                for (nbits_t j=0;j<WIDTH;++j) {
                                    is_nonzero |= (sx[j].xshare || sa[j].ashare);
                                }
                                if (is_nonzero) {
                                    /* ... (report the reconstructed leaf) ... */
                                }
                            }
                        }
                        /* ... (check the accumulated scaled values) ... */
                    }
                }
            }
        }
    });
}

// Time the full expansion and leaf-by-leaf evaluation of RDPFs of the
// given depth, using one MPCTIO per communication thread.
static void rdpf_timing(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth=6;

    if (*args) { depth = atoi(*args); ++args; }

    int num_threads = opts.num_comm_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, depth] (yield_t &yield) {
                size_t &aes_ops = tio.aes_ops();
                if (tio.player() == 2) {
                    RDPFPair<1> dp = tio.rdpfpair(yield, depth);
                    for (int i=0;i<2;++i) {
                        RDPF<1> &dpf = dp.dpf[i];
                        dpf.expand(aes_ops);
                        RDPF<1>::RegXSW scaled_xor;
                        for (address_t x=0;x<(address_t(1)<<depth);++x) {
                            RDPF<1>::LeafNode leaf = dpf.leaf(x, aes_ops);
                            RDPF<1>::RegXSW sx = dpf.scaled_xs(leaf);
                            scaled_xor ^= sx;
                        }
                        printf("%016lx\n%016lx\n", scaled_xor[0].xshare,
                            dpf.li[0].scaled_xor[0].xshare);
                        printf("\n");
                    }
                } else {
                    RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
                    for (int i=0;i<3;++i) {
                        RDPF<1> &dpf = dt.dpf[i];
                        dpf.expand(aes_ops);
                        RDPF<1>::RegXSW scaled_xor;
                        for (address_t x=0;x<(address_t(1)<<depth);++x) {
                            RDPF<1>::LeafNode leaf = dpf.leaf(x, aes_ops);
                            RDPF<1>::RegXSW sx = dpf.scaled_xs(leaf);
                            scaled_xor ^= sx;
                        }
                        printf("%016lx\n%016lx\n", scaled_xor[0].xshare,
                            dpf.li[0].scaled_xor[0].xshare);
                        printf("\n");
                    }
                }
            });
        });
    }
    pool.join();
}

// XOR together the scaled XOR-shared outputs of the given RDPF over
// its entire domain, splitting the work across num_threads threads
// that stream-evaluate the DPF starting at index start.
static value_t parallel_streameval_rdpf(MPCTIO &tio, const RDPF<1> &dpf,
    address_t start, int num_threads)
{
    RDPF<1>::RegXSW scaled_xor[num_threads];
    size_t aes_ops[num_threads];
    boost::asio::thread_pool pool(num_threads);
    address_t totsize = (address_t(1)<<dpf.depth());
    address_t threadstart = start;
    address_t threadchunk = totsize / num_threads;
    address_t threadextra = totsize % num_threads;
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        address_t threadsize = threadchunk + (address_t(thread_num) < threadextra);
        boost::asio::post(pool, [&dpf, &scaled_xor, &aes_ops, thread_num,
                threadstart, threadsize] {
            RDPF<1>::RegXSW local_xor;
            size_t local_aes_ops = 0;
            auto ev = StreamEval(dpf, threadstart, 0, local_aes_ops);
            for (address_t x=0;x<threadsize;++x) {
                RDPF<1>::LeafNode leaf = ev.next();
                local_xor ^= dpf.scaled_xs(leaf);
            }
            scaled_xor[thread_num] = local_xor;
            aes_ops[thread_num] = local_aes_ops;
            //printf("Thread %d complete\n", thread_num);
        });
        threadstart = (threadstart + threadsize) % totsize;
    }
    pool.join();
    RDPF<1>::RegXSW res;
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        res ^= scaled_xor[thread_num];
        tio.aes_ops() += aes_ops[thread_num];
    }
    return res[0].xshare;
}
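// Time a streaming evaluation of an RDPF over its whole domain using
// parallel_streameval_rdpf, and compare the accumulated scaled output
// to the precomputed scaled_xor value stored with the DPF.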
static void rdpfeval_timing(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;

    if (*args) { depth = atoi(*args); ++args; }
    if (*args) { start = strtoull(*args, NULL, 16); ++args; }

    int num_threads = opts.num_cpu_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start, num_threads] (yield_t &yield) {
        if (tio.player() == 2) {
            RDPFPair<1> dp = tio.rdpfpair(yield, depth);
            for (int i=0;i<2;++i) {
                RDPF<1> &dpf = dp.dpf[i];
                value_t scaled_xor =
                    parallel_streameval_rdpf(tio, dpf, start, num_threads);
                printf("%016lx\n%016lx\n", scaled_xor,
                    dpf.li[0].scaled_xor[0].xshare);
                printf("\n");
            }
        } else {
            RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
            for (int i=0;i<3;++i) {
                RDPF<1> &dpf = dt.dpf[i];
                value_t scaled_xor =
                    parallel_streameval_rdpf(tio, dpf, start, num_threads);
                printf("%016lx\n%016lx\n", scaled_xor,
                    dpf.li[0].scaled_xor[0].xshare);
                printf("\n");
            }
        }
    });
}

// Like rdpfeval_timing, but use the ParallelEval mechanism to spread
// the evaluation over the CPU threads.
static void par_rdpfeval_timing(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;

    if (*args) { depth = atoi(*args); ++args; }
    if (*args) { start = strtoull(*args, NULL, 16); ++args; }

    int num_threads = opts.num_cpu_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start, num_threads] (yield_t &yield) {
        if (tio.player() == 2) {
            RDPFPair<1> dp = tio.rdpfpair(yield, depth);
            for (int i=0;i<2;++i) {
                RDPF<1> &dpf = dp.dpf[i];
                nbits_t depth = dpf.depth();
                auto pe = ParallelEval(dpf, start, 0, address_t(1)<<depth,
                    num_threads, tio.aes_ops());
                RDPF<1>::RegXSW result, init;
                result = pe.reduce(init, [&dpf] (int thread_num,
                        address_t i, const RDPF<1>::LeafNode &leaf) {
                    return dpf.scaled_xs(leaf);
                });
                printf("%016lx\n%016lx\n", result[0].xshare,
                    dpf.li[0].scaled_xor[0].xshare);
                printf("\n");
            }
        } else {
            RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
            for (int i=0;i<3;++i) {
                RDPF<1> &dpf = dt.dpf[i];
                nbits_t depth = dpf.depth();
                auto pe = ParallelEval(dpf, start, 0, address_t(1)<<depth,
                    num_threads, tio.aes_ops());
                RDPF<1>::RegXSW result, init;
                result = pe.reduce(init, [&dpf] (int thread_num,
                        address_t i, const RDPF<1>::LeafNode &leaf) {
                    return dpf.scaled_xs(leaf);
                });
                printf("%016lx\n%016lx\n", result[0].xshare,
                    dpf.li[0].scaled_xor[0].xshare);
                printf("\n");
            }
        }
    });
}

// Time the streaming evaluation of an RDPF pair (or triple) over the
// whole domain, accumulating the scaled XOR-shared output of each DPF
// in the tuple.
static void tupleeval_timing(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;

    if (*args) { depth = atoi(*args); ++args; }
    if (*args) { start = atoi(*args); ++args; }

    int num_threads = opts.num_cpu_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start] (yield_t &yield) {
        size_t &aes_ops = tio.aes_ops();
        if (tio.player() == 2) {
            RDPFPair<1> dp = tio.rdpfpair(yield, depth);
            RDPF<1>::RegXSW scaled_xor0, scaled_xor1;
            auto ev = StreamEval(dp, start, 0, aes_ops, false);
            for (address_t x=0;x<(address_t(1)<<depth);++x) {
                auto [L0, L1] = ev.next();
                RDPF<1>::RegXSW sx0 = dp.dpf[0].scaled_xs(L0);
                RDPF<1>::RegXSW sx1 = dp.dpf[1].scaled_xs(L1);
                scaled_xor0 ^= sx0;
                scaled_xor1 ^= sx1;
            }
            printf("%016lx\n%016lx\n", scaled_xor0[0].xshare,
                dp.dpf[0].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor1[0].xshare,
                dp.dpf[1].li[0].scaled_xor[0].xshare);
            printf("\n");
        } else {
            RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
            RDPF<1>::RegXSW scaled_xor0, scaled_xor1, scaled_xor2;
            auto ev = StreamEval(dt, start, 0, aes_ops, false);
            for (address_t x=0;x<(address_t(1)<<depth);++x) {
                auto [L0, L1, L2] = ev.next();
                RDPF<1>::RegXSW sx0 = dt.dpf[0].scaled_xs(L0);
                RDPF<1>::RegXSW sx1 = dt.dpf[1].scaled_xs(L1);
                RDPF<1>::RegXSW sx2 = dt.dpf[2].scaled_xs(L2);
                scaled_xor0 ^= sx0;
                scaled_xor1 ^= sx1;
                scaled_xor2 ^= sx2;
            }
            printf("%016lx\n%016lx\n", scaled_xor0[0].xshare,
                dt.dpf[0].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor1[0].xshare,
                dt.dpf[1].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor2[0].xshare,
                dt.dpf[2].li[0].scaled_xor[0].xshare);
            printf("\n");
        }
    });
}
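// Like tupleeval_timing above, but evaluate the pair (or triple) of
// RDPFs with ParallelEval across the available CPU threads, reducing
// the scaled XOR-shared outputs of each DPF in the tuple.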
static void par_tupleeval_timing(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;

    if (*args) { depth = atoi(*args); ++args; }
    if (*args) { start = atoi(*args); ++args; }

    int num_threads = opts.num_cpu_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start, num_threads] (yield_t &yield) {
        size_t &aes_ops = tio.aes_ops();
        if (tio.player() == 2) {
            RDPFPair<1> dp = tio.rdpfpair(yield, depth);
            auto pe = ParallelEval(dp, start, 0, address_t(1)<<depth,
                num_threads, aes_ops);
            RDPFPair<1>::RegXSWP result, init;
            result = pe.reduce(init, [&dp] (int thread_num, address_t i,
                    const RDPFPair<1>::LeafNode &leaf) {
                RDPFPair<1>::RegXSWP scaled;
                dp.scaled(scaled, leaf);
                return scaled;
            });
            printf("%016lx\n%016lx\n", std::get<0>(result)[0].xshare,
                dp.dpf[0].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<1>(result)[0].xshare,
                dp.dpf[1].li[0].scaled_xor[0].xshare);
            printf("\n");
        } else {
            RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
            auto pe = ParallelEval(dt, start, 0, address_t(1)<<depth,
                num_threads, aes_ops);
            RDPFTriple<1>::RegXSWT result, init;
            result = pe.reduce(init, [&dt] (int thread_num, address_t i,
                    const RDPFTriple<1>::LeafNode &leaf) {
                RDPFTriple<1>::RegXSWT scaled;
                dt.scaled(scaled, leaf);
                return scaled;
            });
            printf("%016lx\n%016lx\n", std::get<0>(result)[0].xshare,
                dt.dpf[0].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<1>(result)[0].xshare,
                dt.dpf[1].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<2>(result)[0].xshare,
                dt.dpf[2].li[0].scaled_xor[0].xshare);
            printf("\n");
        }
    });
}

// T is RegAS or RegXS for additive or XOR shared database respectively
template <typename T>
static void duoram_test(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t share=arc4random();

    if (*args) { depth = atoi(*args); ++args; }
    if (*args) { share = atoi(*args); ++args; }
    share &= ((address_t(1)<<depth)-1);
    address_t len = (address_t(1)<<depth);

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&tio, depth, share, len] (yield_t &yield) {
        Duoram<T> oram(tio.player(), len);
        auto A = oram.flat(tio, yield);
        RegAS aidx, aidx2, aidx3;
        aidx.ashare = share;
        aidx2.ashare = share + tio.player();
        aidx3.ashare = share + 1;
        T M;
        if (tio.player() == 0) {
            M.set(0xbabb0000);
        } else {
            M.set(0x0000a66e);
        }
        RegXS xidx;
        xidx.xshare = share;
        T N;
        if (tio.player() == 0) {
            N.set(0xdead0000);
        } else {
            N.set(0x0000beef);
        }
        RegXS oxidx;
        oxidx.xshare = share+3*tio.player();
        T O;
        if (tio.player() == 0) {
            O.set(0x31410000);
        } else {
            O.set(0x00005926);
        }

        // Writing and reading with additively shared indices
        printf("Additive Updating\n");
        A[aidx] += M;
        printf("Additive Reading\n");
        T Aa = A[aidx];

        // Writing and reading with XOR shared indices
        printf("XOR Updating\n");
        A[xidx] += N;
        printf("XOR Reading\n");
        T Ax = A[xidx];

        // Writing and reading with OblivIndex indices
        auto oidx = A.oblivindex(oxidx);
        printf("OblivIndex Updating\n");
        A[oidx] += O;
        printf("OblivIndex Reading\n");
        T Ox = A[oidx];

        // Writing and reading with explicit indices
        T Ae;
        if (depth > 2) {
            printf("Explicit Updating\n");
            A[5] += Aa;
            printf("Explicit Reading\n");
            Ae = A[6];
        }

        // Simultaneous independent reads
        printf("3 independent reading\n");
        std::vector Av = A[std::array { aidx, aidx2, aidx3 }];

        // Simultaneous independent updates
        T Aw1, Aw2, Aw3;
        Aw1.set(0x101010101010101 * tio.player());
        Aw2.set(0x202020202020202 * tio.player());
        Aw3.set(0x303030303030303 * tio.player());
        printf("3 independent updating\n");
        A[std::array { aidx, aidx2, aidx3 }] -=
            std::array { Aw1, Aw2, Aw3 };

        if (depth <= 10) {
            oram.dump();
            auto check = A.reconstruct();
            if (tio.player() == 0) {
                for (address_t i=0;i<len;++i) {
                    printf("%04x %016lx\n", i, check[i].share());
                }
            }
        }
    });
}
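// Benchmark Duoram access patterns: a chain of dependent updates
// building a randomly linked list, batched reads of the list entries,
// and a batch of independent updates, dumping the communication and
// Lamport-clock statistics after each phase.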
template <typename T>
static void duoram(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth = 6;
    int items = 4;

    if (*args) { depth = atoi(*args); ++args; }
    if (*args) { items = atoi(*args); ++args; }

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, items] (yield_t &yield) {
        size_t size = size_t(1)<<depth;
        Duoram<T> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);

        std::cout << "===== DEPENDENT UPDATES =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Make a linked list of length items
        std::vector<T> list_indices;
        T prev_index, next_index;
        prev_index.randomize(depth);
        for (int i=0;i<items;++i) {
            next_index.randomize(depth);
            /* ... (link the new entry to the previous one with a
               dependent update) ... */
            list_indices.push_back(next_index);
            prev_index = next_index;
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== INDEPENDENT READS =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Read all the list entries at once
        std::vector<T> read_outputs = A[list_indices];
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== INDEPENDENT UPDATES =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Make a vector of indices 1 larger than those in list_indices,
        // and a vector of values 1 larger than those in outputs
        std::vector<T> indep_indices, indep_values;
        T one;
        one.set(tio.player()); // Sets the shared value to 1
        for (int i=0;i<items;++i) {
            indep_indices.push_back(list_indices[i]+one);
            indep_values.push_back(read_outputs[i]+one);
        }
        // Update all the indep_indices entries at once
        A[indep_indices] += indep_values;
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);
    });
}

// Measure the cost of simple sequential reads from a Duoram.
template <typename T>
static void read_test(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth = 6;
    int items = 4;

    if (*args) { depth = atoi(*args); ++args; }
    if (*args) { items = atoi(*args); ++args; }

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, items] (yield_t &yield) {
        size_t size = size_t(1)<<depth;
        Duoram<T> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);

        std::cout << "\n===== SEQUENTIAL READS =====\n";
        T totval;
        for (int i=0;i<items;++i) {
            /* ... (read the next entry and accumulate it into
               totval) ... */
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);
    });
}

/* ... (cdpf_test, exercised by the "cdpftest" mode below) ... */

// Check the comparison and equality outputs of a CDPF with the given
// target, evaluated at x.  Returns 1 if the reconstructed results are
// consistent, 0 otherwise.
static int compare_test_one(MPCTIO &tio, yield_t &yield,
    value_t target, value_t x)
{
    int res = 1;
    if (tio.player() < 2) {
        RegAS xsh;
        RegBS lt, eq, gt, eeq;
        /* ... (obtain a CDPF for the target, compute additive shares
           xsh of the difference between x and the target, flag shares
           lt, eq, gt of the comparison, and an independent equality
           flag eeq, and print the values being compared) ... */
        if (tio.player() == 1) {
            // Send our shares to player 0 for checking
            tio.iostream_peer() << xsh << lt << eq << gt << eeq;
        } else {
            RegAS peer_xsh;
            RegBS peer_lt, peer_eq, peer_gt, peer_eeq;
            tio.iostream_peer() >> peer_xsh >> peer_lt >> peer_eq >>
                peer_gt >> peer_eeq;
            lt ^= peer_lt;
            eq ^= peer_eq;
            gt ^= peer_gt;
            eeq ^= peer_eeq;
            xsh += peer_xsh;
            int lti = int(lt.bshare);
            int eqi = int(eq.bshare);
            int gti = int(gt.bshare);
            int eeqi = int(eeq.bshare);
            x = xsh.share();
            printf(": %d %d %d %d ", lti, eqi, gti, eeqi);
            bool signbit = (x >> 63);
            if (lti + eqi + gti != 1 || eqi != eeqi) {
                printf("INCONSISTENT");
                res = 0;
            } else if (x == 0 && eqi) {
                printf("=");
            } else if (!signbit && gti) {
                printf(">");
            } else if (signbit && lti) {
                printf("<");
            } else {
                printf("INCORRECT");
                res = 0;
            }
        }
        printf("\n");
    }
    return res;
}

static int compare_test_target(MPCTIO &tio, yield_t &yield, value_t target,
    value_t x)
{
    int res = 1;
    res &= compare_test_one(tio, yield, target, x);
    res &= compare_test_one(tio, yield, target, 0);
    res &= compare_test_one(tio, yield, target, 1);
    res &= compare_test_one(tio, yield, target, 15);
    res &= compare_test_one(tio, yield, target, 16);
    res &= compare_test_one(tio, yield, target, 17);
    res &= compare_test_one(tio, yield, target, -1);
    res &= compare_test_one(tio, yield, target, -15);
    res &= compare_test_one(tio, yield, target, -16);
    res &= compare_test_one(tio, yield, target, -17);
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63));
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63)+1);
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63)-1);
    return res;
}
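// Drive compare_test_target over a battery of targets and evaluation
// points (values near 0, near 2^63, and small negative values), one
// copy per communication thread, and report whether every comparison
// reconstructed consistently.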
static void compare_test(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    value_t target, x;
    arc4random_buf(&target, sizeof(target));
    arc4random_buf(&x, sizeof(x));

    if (*args) { target = strtoull(*args, NULL, 16); ++args; }
    if (*args) { x = strtoull(*args, NULL, 16); ++args; }

    int num_threads = opts.num_comm_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, target, x] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, target, x] (yield_t &yield) {
                int res = 1;
                res &= compare_test_target(tio, yield, target, x);
                res &= compare_test_target(tio, yield, 0, x);
                res &= compare_test_target(tio, yield, 1, x);
                res &= compare_test_target(tio, yield, 15, x);
                res &= compare_test_target(tio, yield, 16, x);
                res &= compare_test_target(tio, yield, 17, x);
                res &= compare_test_target(tio, yield, -1, x);
                res &= compare_test_target(tio, yield, -15, x);
                res &= compare_test_target(tio, yield, -16, x);
                res &= compare_test_target(tio, yield, -17, x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63), x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63)+1, x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63)-1, x);
                if (tio.player() == 0) {
                    if (res == 1) {
                        printf("All tests passed!\n");
                    } else {
                        printf("TEST FAILURES\n");
                    }
                }
            });
        });
    }
    pool.join();
}

// Test explicit parallel writes and the oblivious bitonic sort on a
// Duoram of random values.
static void sort_test(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth=6;

    if (*args) { depth = atoi(*args); ++args; }
    address_t len = (1<<depth);
    size_t size = size_t(1)<<depth;

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&tio, depth, len, size] (yield_t &yield) {
        Duoram<RegAS> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);
        A.explicitonly(true);
        // Initialize the memory to random values in parallel
        std::vector<coro_t> coroutines;
        for (address_t i=0; i<size; ++i) {
            coroutines.emplace_back([&A, i] (yield_t &yield) {
                /* ... (write a random value into A[i]) ... */
            });
        }
        run_coroutines(yield, coroutines);
        A.explicitonly(false);
        A.bitonic_sort(0, len);
        if (depth <= 10) {
            oram.dump();
            auto check = A.reconstruct();
            if (tio.player() == 0) {
                for (address_t i=0;i<size;++i) {
                    printf("%04x %016lx\n", i, check[i].share());
                    if (i > 0 && i < len &&
                            check[i].share() < check[i-1].share()) {
                        /* ... (report the out-of-order pair) ... */
                    }
                }
            }
        }
    });
}

// Test the Pad shape, which presents a Duoram as if it had been
// padded out to a larger size.
static void pad_test(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth=6;

    if (*args) { depth = atoi(*args); ++args; }
    address_t len = (1<<depth);

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&tio, len] (yield_t &yield) {
        int player = tio.player();
        Duoram<RegAS> oram(player, len);
        auto A = oram.flat(tio, yield);
        // Initialize the ORAM in explicit mode
        A.explicitonly(true);
        for (address_t i=0; i<len; ++i) {
            /* ... (store a recognizable per-player value into A[i]) ... */
        }
        A.explicitonly(false);
        // Pad the ORAM out to a larger size; 2*len is an assumed value
        address_t maxsize = 2*len;
        Duoram<RegAS>::Pad P(A, tio, yield, maxsize);
        for (address_t i=0; i<maxsize; ++i) {
            /* ... (read P[i] and print the reconstructed value) ... */
        }
    });
}
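// Test binary search on a Duoram of sorted values.  The server deals
// additive shares of a random target to the players; the database is
// either created already sorted or filled with random values and
// explicitly sorted; and the reconstructed answer is checked against
// the reconstructed database.  The bbsearch and bsearch modes below
// select the two variants of the search.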
template <bool basic>
static void bsearch_test(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    value_t target;
    arc4random_buf(&target, sizeof(target));
    target >>= 1;
    nbits_t depth=6;
    bool is_presorted = true;

    // Use a random array (which we explicitly sort) instead of a
    // presorted array
    if (*args && !strcmp(args[0], "-r")) {
        is_presorted = false;
        ++args;
    }
    if (*args) { depth = atoi(*args); ++args; }
    address_t len = (1<<depth);

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, len, target, is_presorted]
            (yield_t &yield) {
        std::cout << "\n===== SET TARGET =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        RegAS tshare;
        if (tio.player() == 2) {
            // Send additive shares of the target to the two
            // computational players
            RegAS tshare0, tshare1;
            tshare0.randomize();
            tshare1.set(target - tshare0.share());
            tio.iostream_p0() << tshare0;
            tio.iostream_p1() << tshare1;
        } else {
            // Receive our share of the target from the server
            tio.iostream_server() >> tshare;
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== " << (is_presorted ? "CREATE" : "SORT RANDOM")
            << " DATABASE =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // If is_presorted is true, create a database of presorted
        // values.  If is_presorted is false, create a database of
        // random values and explicitly sort it.
        Duoram<RegAS> oram(tio.player(), len);
        auto A = oram.flat(tio, yield);
        // Initialize the memory to sorted or random values, depending
        // on the is_presorted flag
        if (is_presorted) {
            A.init([](size_t i) {
                return value_t(i) << 16;
            });
        } else {
            A.explicitonly(true);
            for (address_t i=0; i<len; ++i) {
                /* ... (store a random value into A[i]) ... */
            }
            A.explicitonly(false);
            A.bitonic_sort(0, len);
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== BINARY SEARCH =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Binary search for the target
        RegXS tindex;
        /* ... (run the basic (if basic is true) or fully oblivious
           binary search of A for tshare, leaving the shared result
           index in tindex) ... */

        if (depth > 25) { return; }

        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== CHECK ANSWER =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Check the answer
        size_t size = size_t(1) << depth;
        value_t checkindex = mpc_reconstruct(tio, yield, tindex);
        value_t checktarget = mpc_reconstruct(tio, yield, tshare);
        auto check = A.reconstruct();
        bool fail = false;
        if (tio.player() == 0) {
            for (address_t i=0;i<size;++i) {
                printf("%04x %016lx\n", i, check[i].share());
                if (i > 0 && i == checkindex) {
                    // check[i] should be >= target, and check[i-1]
                    // should be < target
                    if ((i < len && check[i].share() < checktarget) ||
                            (i > 0 && check[i-1].share() >= checktarget)) {
                        fail = true;
                    }
                }
            }
            if (checkindex == len && check[len-1].share() >= checktarget) {
                fail = true;
            }
            printf("Target = %016lx\n", checktarget);
            printf("Found index = %02lx\n", checkindex);
            if (checkindex > size) {
                fail = true;
            }
            if (fail) {
                printf("FAIL\n");
            } else {
                printf("PASS\n");
            }
        }
    });
}
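// Test reading and updating three related locations in a complete
// binary tree stored in a Duoram: a random node at a given layer and
// its two children.  A single OblivIndex (and so a single wide DPF)
// is shared by all three reads and all three updates.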
template <typename T>
static void related(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth = 5;  // The depth of the (complete) binary tree

    if (*args) { depth = atoi(*args); ++args; }
    // The layer at which to choose a random parent node (and its two
    // children along with it)
    nbits_t layer = depth-1;
    if (*args) { layer = atoi(*args); ++args; }
    assert(layer < depth);

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, layer] (yield_t &yield) {
        size_t size = size_t(1)<<(depth+1);
        Duoram<T> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);

        // Initialize A with words with sequential top and bottom halves
        // (just so we can more easily eyeball the right answers)
        A.init([] (size_t i) { return i * 0x100000001; } );

        // We use this layout for the tree:
        // A[0] is unused
        // A[1] is the root (layer 0)
        // A[2..3] is layer 1
        // A[4..7] is layer 2
        // ...
        // A[(1<<depth)..(2<<depth)-1] is layer depth

        // Pick a random XOR share of a node within the chosen layer;
        // the OblivIndex will be used three times (parent, left child,
        // right child), so only one wide DPF is created
        RegXS idx;
        idx.randomize(layer);
        typename Duoram<T>::template OblivIndex<RegXS,3>
            oidx(tio, yield, idx, layer);

        // This is the (known) layer containing the (unknown) parent
        // node
        typename Duoram<T>::Flat P(A, tio, yield, 1<<layer, 1<<layer);
        // This is the layer below it, containing both children of
        // every node in the parent layer
        typename Duoram<T>::Flat C(A, tio, yield, 2<<layer, 2<<layer);
        // These strides pick out the left and right children within
        // the child layer
        typename Duoram<T>::Stride L(C, tio, yield, 0, 2);
        typename Duoram<T>::Stride R(C, tio, yield, 1, 2);

        T parent, left, right;

        // Do three related reads.  In this version, only one DPF will
        // be used, but it will still be _evaluated_ three times.
        parent = P[oidx];
        left = L[oidx];
        right = R[oidx];

        // The operation is just a simple rotation: the value in the
        // parent moves to the left child, the left child moves to the
        // right child, and the right child becomes the parent

        // Do three related updates.  As above, only one (wide) DPF will
        // be used (the same one as for the reads in fact), but it will
        // still be _evaluated_ three more times.
        P[oidx] += right-parent;
        L[oidx] += parent-left;
        R[oidx] += left-right;

        // Check the answer
        auto check = A.reconstruct();
        if (depth <= 10) {
            oram.dump();
            if (tio.player() == 0) {
                for (address_t i=0;i<size;++i) {
                    printf("%04x %016lx\n", i, check[i].share());
                }
            }
        }
    });
}

// Test the Path shape: oblivious reads, updates, and binary search
// restricted to the path from the root to a given target node of a
// complete binary tree stored in a Duoram.
template <typename T>
static void path(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    nbits_t depth = 5;  // The depth of the (complete) binary tree

    if (*args) { depth = atoi(*args); ++args; }
    // The target node
    size_t target_node = 3 << (depth-1);
    if (*args) { target_node = atoi(*args); ++args; }

    MPCTIO tio(mpcio, 0, opts.num_cpu_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, target_node] (yield_t &yield) {
        size_t size = size_t(1)<<(depth+1);
        Duoram<T> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);

        // Initialize A with words with sequential top and bottom halves
        // (just so we can more easily eyeball the right answers)
        A.init([] (size_t i) { return i * 0x100000001; } );

        // We use this layout for the tree:
        // A[0] is unused
        // A[1] is the root (layer 0)
        // A[2..3] is layer 1
        // A[4..7] is layer 2
        // ...
        // A[(1<<depth)..(2<<depth)-1] is layer depth

        // The path from the root to the target node
        typename Duoram<T>::Path P(A, tio, yield, target_node);

        // Re-initialize that path to something recognizable
        P.init([] (size_t i) { return 0xff + i * 0x1000000010000; } );

        // ORAM update along that path
        RegXS idx;
        idx.set(tio.player() * arc4random_uniform(P.size()));
        T val;
        val.set(tio.player() * 0xaaaa00000000);
        P[idx] += val;

        // Binary search along that path
        T lookup;
        lookup.set(tio.player() * 0x3000000000000);
        RegXS foundidx = P.binary_search(lookup);

        // Check the answer
        auto check = A.reconstruct();
        if (depth <= 10) {
            oram.dump();
            if (tio.player() == 0) {
                for (address_t i=0;i<size;++i) {
                    printf("%04x %016lx\n", i, check[i].share());
                }
            }
        }
    });
}
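// Dispatch the mode named on the command line to the matching test or
// benchmark above, or to the Cell, BST, AVL, Heap, and heap-sampler
// routines defined in their own source files.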
if (!strcmp(*args, "bsearch")) { ++args; bsearch_test(mpcio, opts, args); } else if (!strcmp(*args, "duoram")) { ++args; if (opts.use_xor_db) { duoram(mpcio, opts, args); } else { duoram(mpcio, opts, args); } } else if (!strcmp(*args, "related")) { ++args; if (opts.use_xor_db) { related(mpcio, opts, args); } else { related(mpcio, opts, args); } } else if (!strcmp(*args, "path")) { ++args; path(mpcio, opts, args); } else if (!strcmp(*args, "cell")) { ++args; cell(mpcio, opts, args); } else if (!strcmp(*args, "bst")) { ++args; bst(mpcio, opts, args); } else if (!strcmp(*args, "avl")) { ++args; avl(mpcio, opts, args); } else if (!strcmp(*args, "avl_tests")) { ++args; avl_tests(mpcio, opts, args); } else if (!strcmp(*args, "heap")) { ++args; Heap(mpcio, opts, args); } else if (!strcmp(*args, "heapsampler")) { ++args; heapsampler_test(mpcio, opts, args); } else if (!strcmp(*args, "weightedcoin")) { ++args; weighted_coin_test(mpcio, opts, args); } else { std::cerr << "Unknown mode " << *args << "\n"; } }