// online.cpp

#include <bsd/stdlib.h> // arc4random_buf

#include "online.hpp"
#include "mpcops.hpp"
#include "rdpf.hpp"
#include "duoram.hpp"
#include "cdpf.hpp"
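// Test the basic MPC operations: multiplication of two additively
// shared values, a multiplication where each computational player
// holds one factor in the clear, multiplication by a shared flag bit,
// oblivious swap controlled by a shared bit, and conversion of XOR
// shares to additive shares.  Player 2 acts as the server; players 0
// and 1 reconstruct the outputs, and the final printed check values
// should all be 0 if the operations were computed correctly.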
static void online_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t nbits = VALUE_BITS;

    if (*args) {
        nbits = atoi(*args);
    }

    size_t memsize = 9;

    MPCTIO tio(mpcio, 0);
    bool is_server = (mpcio.player == 2);

    RegAS *A = new RegAS[memsize];
    value_t V;
    RegBS F0, F1;
    RegXS X;

    if (!is_server) {
        A[0].randomize();
        A[1].randomize();
        F0.randomize();
        A[4].randomize();
        F1.randomize();
        A[6].randomize();
        A[7].randomize();
        X.randomize();
        arc4random_buf(&V, sizeof(V));
        printf("A:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, A[i].ashare);
        printf("V  : %016lX\n", V);
        printf("F0 : %01X\n", F0.bshare);
        printf("F1 : %01X\n", F1.bshare);
        printf("X  : %016lX\n", X.xshare);
    }

    std::vector<coro_t> coroutines;
    coroutines.emplace_back(
        [&tio, &A, nbits](yield_t &yield) {
            mpc_mul(tio, yield, A[2], A[0], A[1], nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, V, nbits](yield_t &yield) {
            mpc_valuemul(tio, yield, A[3], V, nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &F0, nbits](yield_t &yield) {
            mpc_flagmult(tio, yield, A[5], F0, A[4], nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &F1, nbits](yield_t &yield) {
            mpc_oswap(tio, yield, A[6], A[7], F1, nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &X, nbits](yield_t &yield) {
            mpc_xs_to_as(tio, yield, A[8], X, nbits);
        });
    run_coroutines(tio, coroutines);

    if (!is_server) {
        printf("\n");
        printf("A:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, A[i].ashare);
    }

    // Check the answers
    if (mpcio.player == 1) {
        tio.queue_peer(A, memsize*sizeof(RegAS));
        tio.queue_peer(&V, sizeof(V));
        tio.queue_peer(&F0, sizeof(RegBS));
        tio.queue_peer(&F1, sizeof(RegBS));
        tio.queue_peer(&X, sizeof(RegXS));
        tio.send();
    } else if (mpcio.player == 0) {
        RegAS *B = new RegAS[memsize];
        RegBS BF0, BF1;
        RegXS BX;
        value_t BV;
        value_t *S = new value_t[memsize];
        bit_t SF0, SF1;
        value_t SX;
        tio.recv_peer(B, memsize*sizeof(RegAS));
        tio.recv_peer(&BV, sizeof(BV));
        tio.recv_peer(&BF0, sizeof(RegBS));
        tio.recv_peer(&BF1, sizeof(RegBS));
        tio.recv_peer(&BX, sizeof(RegXS));
        for (size_t i=0; i<memsize; ++i) S[i] = A[i].ashare + B[i].ashare;
        SF0 = F0.bshare ^ BF0.bshare;
        SF1 = F1.bshare ^ BF1.bshare;
        SX = X.xshare ^ BX.xshare;
        printf("S:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, S[i]);
        printf("SF0: %01X\n", SF0);
        printf("SF1: %01X\n", SF1);
        printf("SX : %016lX\n", SX);
        // Each of the following values should reconstruct to 0 if the
        // corresponding operation computed the right answer
        printf("\n%016lx\n", S[0]*S[1]-S[2]);
        printf("%016lx\n", (V*BV)-S[3]);
        printf("%016lx\n", (SF0*S[4])-S[5]);
        printf("%016lx\n", S[8]-SX);
        delete[] B;
        delete[] S;
    }
    delete[] A;
}
static void lamport_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    // Create a bunch of threads and send a bunch of data to the other
    // peer, and receive their data.  If an arg is specified, repeat
    // that many times.  The Lamport clock at the end should be just
    // the number of repetitions.  Subsequent args are the chunk size
    // and the number of chunks per message.
    size_t niters = 1;
    size_t chunksize = 1<<20;
    size_t numchunks = 1;
    if (*args) {
        niters = atoi(*args);
        ++args;
    }
    if (*args) {
        chunksize = atoi(*args);
        ++args;
    }
    if (*args) {
        numchunks = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, niters, chunksize, numchunks] {
            MPCTIO tio(mpcio, thread_num);
            char *sendbuf = new char[chunksize];
            char *recvbuf = new char[chunksize*numchunks];
            for (size_t i=0; i<niters; ++i) {
                for (size_t chunk=0; chunk<numchunks; ++chunk) {
                    arc4random_buf(sendbuf, chunksize);
                    tio.queue_peer(sendbuf, chunksize);
                }
                tio.send();
                tio.recv_peer(recvbuf, chunksize*numchunks);
            }
            delete[] recvbuf;
            delete[] sendbuf;
        });
    }
    pool.join();
}
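// Test RDPF (random distributed point function) generation and
// evaluation.  The server (player 2) evaluates its RDPF pair locally
// and prints every leaf; players 0 and 1 obtain an RDPF triple,
// exchange their leaf shares, and verify that the reconstructed unit
// and scaled values are nonzero only at the hidden target index.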
static void rdpf_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    size_t num_iters = 1;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        num_iters = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth, num_iters] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, depth, num_iters] (yield_t &yield) {
                size_t &aes_ops = tio.aes_ops();
                for (size_t iter=0; iter < num_iters; ++iter) {
                    if (tio.player() == 2) {
                        RDPFPair dp = tio.rdpfpair(yield, depth);
                        for (int i=0;i<2;++i) {
                            const RDPF &dpf = dp.dpf[i];
                            for (address_t x=0;x<(address_t(1)<<depth);++x) {
                                DPFnode leaf = dpf.leaf(x, aes_ops);
                                RegBS ub = dpf.unit_bs(leaf);
                                RegAS ua = dpf.unit_as(leaf);
                                RegXS sx = dpf.scaled_xs(leaf);
                                RegAS sa = dpf.scaled_as(leaf);
                                printf("%04x %x %016lx %016lx %016lx\n", x,
                                    ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                            }
                            printf("\n");
                        }
                    } else {
                        RDPFTriple dt = tio.rdpftriple(yield, depth);
                        for (int i=0;i<3;++i) {
                            const RDPF &dpf = dt.dpf[i];
                            RegXS peer_scaled_xor;
                            RegAS peer_scaled_sum;
                            if (tio.player() == 1) {
                                tio.iostream_peer() << dpf.scaled_xor << dpf.scaled_sum;
                            } else {
                                tio.iostream_peer() >> peer_scaled_xor >> peer_scaled_sum;
                                peer_scaled_sum += dpf.scaled_sum;
                                peer_scaled_xor ^= dpf.scaled_xor;
                            }
                            for (address_t x=0;x<(address_t(1)<<depth);++x) {
                                DPFnode leaf = dpf.leaf(x, aes_ops);
                                RegBS ub = dpf.unit_bs(leaf);
                                RegAS ua = dpf.unit_as(leaf);
                                RegXS sx = dpf.scaled_xs(leaf);
                                RegAS sa = dpf.scaled_as(leaf);
                                printf("%04x %x %016lx %016lx %016lx\n", x,
                                    ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                                if (tio.player() == 1) {
                                    tio.iostream_peer() << ub << ua << sx << sa;
                                } else {
                                    RegBS peer_ub;
                                    RegAS peer_ua;
                                    RegXS peer_sx;
                                    RegAS peer_sa;
                                    tio.iostream_peer() >> peer_ub >> peer_ua >>
                                        peer_sx >> peer_sa;
                                    ub ^= peer_ub;
                                    ua += peer_ua;
                                    sx ^= peer_sx;
                                    sa += peer_sa;
                                    // The reconstructed values should be
                                    // nonzero only at the DPF's target index
                                    if (ub.bshare || ua.ashare || sx.xshare || sa.ashare) {
                                        printf("**** %x %016lx %016lx %016lx\n",
                                            ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                                        printf("SCALE %016lx %016lx\n",
                                            peer_scaled_xor.xshare, peer_scaled_sum.ashare);
                                    }
                                }
                            }
                            printf("\n");
                        }
                    }
                }
            });
        });
    }
    pool.join();
}
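// Time RDPF expansion and full-domain evaluation: expand each DPF,
// evaluate every leaf, and XOR the scaled values together.  The
// accumulated XOR should equal the precomputed scaled_xor stored in
// the DPF, so the two printed values per DPF should match.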
static void rdpf_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, depth] (yield_t &yield) {
                size_t &aes_ops = tio.aes_ops();
                if (tio.player() == 2) {
                    RDPFPair dp = tio.rdpfpair(yield, depth);
                    for (int i=0;i<2;++i) {
                        RDPF &dpf = dp.dpf[i];
                        dpf.expand(aes_ops);
                        RegXS scaled_xor;
                        for (address_t x=0;x<(address_t(1)<<depth);++x) {
                            DPFnode leaf = dpf.leaf(x, aes_ops);
                            RegXS sx = dpf.scaled_xs(leaf);
                            scaled_xor ^= sx;
                        }
                        printf("%016lx\n%016lx\n", scaled_xor.xshare,
                            dpf.scaled_xor.xshare);
                        printf("\n");
                    }
                } else {
                    RDPFTriple dt = tio.rdpftriple(yield, depth);
                    for (int i=0;i<3;++i) {
                        RDPF &dpf = dt.dpf[i];
                        dpf.expand(aes_ops);
                        RegXS scaled_xor;
                        for (address_t x=0;x<(address_t(1)<<depth);++x) {
                            DPFnode leaf = dpf.leaf(x, aes_ops);
                            RegXS sx = dpf.scaled_xs(leaf);
                            scaled_xor ^= sx;
                        }
                        printf("%016lx\n%016lx\n", scaled_xor.xshare,
                            dpf.scaled_xor.xshare);
                        printf("\n");
                    }
                }
            });
        });
    }
    pool.join();
}
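// Helper: evaluate the full domain of a single RDPF across
// num_threads threads using StreamEval, each thread handling a
// contiguous chunk of the (cyclically shifted) domain starting at
// start, and return the XOR of all the scaled values.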
static value_t parallel_streameval_rdpf(MPCIO &mpcio, const RDPF &dpf,
    address_t start, int num_threads)
{
    // One accumulator per thread; use a vector rather than a
    // variable-length array, which is not standard C++
    std::vector<RegXS> scaled_xor(num_threads);
    boost::asio::thread_pool pool(num_threads);
    address_t totsize = (address_t(1)<<dpf.depth());
    address_t threadstart = start;
    address_t threadchunk = totsize / num_threads;
    address_t threadextra = totsize % num_threads;
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        address_t threadsize = threadchunk + (address_t(thread_num) < threadextra);
        boost::asio::post(pool,
            [&mpcio, &dpf, &scaled_xor, thread_num, threadstart, threadsize] {
                MPCTIO tio(mpcio, thread_num);
                //printf("Thread %d from %X for %X\n", thread_num, threadstart, threadsize);
                RegXS local_xor;
                size_t local_aes_ops = 0;
                auto ev = StreamEval(dpf, threadstart, 0, local_aes_ops);
                for (address_t x=0;x<threadsize;++x) {
                    //if (x%0x10000 == 0) printf("%d", thread_num);
                    DPFnode leaf = ev.next();
                    local_xor ^= dpf.scaled_xs(leaf);
                }
                scaled_xor[thread_num] = local_xor;
                tio.aes_ops() += local_aes_ops;
                //printf("Thread %d complete\n", thread_num);
            });
        threadstart = (threadstart + threadsize) % totsize;
    }
    pool.join();
    RegXS res;
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        res ^= scaled_xor[thread_num];
    }
    return res.xshare;
}
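// Time full-domain RDPF evaluation using the multithreaded streaming
// evaluator above.  The optional arguments are the depth and the
// (hexadecimal) starting point for the evaluation.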
static void rdpfeval_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, start, num_threads] (yield_t &yield) {
        if (tio.player() == 2) {
            RDPFPair dp = tio.rdpfpair(yield, depth);
            for (int i=0;i<2;++i) {
                RDPF &dpf = dp.dpf[i];
                value_t scaled_xor =
                    parallel_streameval_rdpf(mpcio, dpf, start, num_threads);
                printf("%016lx\n%016lx\n", scaled_xor,
                    dpf.scaled_xor.xshare);
                printf("\n");
            }
        } else {
            RDPFTriple dt = tio.rdpftriple(yield, depth);
            for (int i=0;i<3;++i) {
                RDPF &dpf = dt.dpf[i];
                value_t scaled_xor =
                    parallel_streameval_rdpf(mpcio, dpf, start, num_threads);
                printf("%016lx\n%016lx\n", scaled_xor,
                    dpf.scaled_xor.xshare);
                printf("\n");
            }
        }
    });
}
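// The same measurement as rdpfeval_timing, but using the ParallelEval
// wrapper, which handles the thread management and the final
// reduction itself.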
static void par_rdpfeval_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start, num_threads] (yield_t &yield) {
        if (tio.player() == 2) {
            RDPFPair dp = tio.rdpfpair(yield, depth);
            for (int i=0;i<2;++i) {
                RDPF &dpf = dp.dpf[i];
                nbits_t depth = dpf.depth();
                auto pe = ParallelEval(dpf, start, 0,
                    address_t(1)<<depth, num_threads, tio.aes_ops());
                RegXS result, init;
                result = pe.reduce(init, [&dpf] (int thread_num,
                        address_t i, const RDPF::node &leaf) {
                    return dpf.scaled_xs(leaf);
                });
                printf("%016lx\n%016lx\n", result.xshare,
                    dpf.scaled_xor.xshare);
                printf("\n");
            }
        } else {
            RDPFTriple dt = tio.rdpftriple(yield, depth);
            for (int i=0;i<3;++i) {
                RDPF &dpf = dt.dpf[i];
                nbits_t depth = dpf.depth();
                auto pe = ParallelEval(dpf, start, 0,
                    address_t(1)<<depth, num_threads, tio.aes_ops());
                RegXS result, init;
                result = pe.reduce(init, [&dpf] (int thread_num,
                        address_t i, const RDPF::node &leaf) {
                    return dpf.scaled_xs(leaf);
                });
                printf("%016lx\n%016lx\n", result.xshare,
                    dpf.scaled_xor.xshare);
                printf("\n");
            }
        }
    });
}
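// Time full-domain evaluation of an RDPF pair or triple traversed
// together in a single pass with the tuple variant of StreamEval,
// rather than one DPF at a time.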
static void tupleeval_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start] (yield_t &yield) {
        size_t &aes_ops = tio.aes_ops();
        if (tio.player() == 2) {
            RDPFPair dp = tio.rdpfpair(yield, depth);
            RegXS scaled_xor0, scaled_xor1;
            auto ev = StreamEval(dp, start, 0, aes_ops, false);
            for (address_t x=0;x<(address_t(1)<<depth);++x) {
                auto [L0, L1] = ev.next();
                RegXS sx0 = dp.dpf[0].scaled_xs(L0);
                RegXS sx1 = dp.dpf[1].scaled_xs(L1);
                scaled_xor0 ^= sx0;
                scaled_xor1 ^= sx1;
            }
            printf("%016lx\n%016lx\n", scaled_xor0.xshare,
                dp.dpf[0].scaled_xor.xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor1.xshare,
                dp.dpf[1].scaled_xor.xshare);
            printf("\n");
        } else {
            RDPFTriple dt = tio.rdpftriple(yield, depth);
            RegXS scaled_xor0, scaled_xor1, scaled_xor2;
            auto ev = StreamEval(dt, start, 0, aes_ops, false);
            for (address_t x=0;x<(address_t(1)<<depth);++x) {
                auto [L0, L1, L2] = ev.next();
                RegXS sx0 = dt.dpf[0].scaled_xs(L0);
                RegXS sx1 = dt.dpf[1].scaled_xs(L1);
                RegXS sx2 = dt.dpf[2].scaled_xs(L2);
                scaled_xor0 ^= sx0;
                scaled_xor1 ^= sx1;
                scaled_xor2 ^= sx2;
            }
            printf("%016lx\n%016lx\n", scaled_xor0.xshare,
                dt.dpf[0].scaled_xor.xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor1.xshare,
                dt.dpf[1].scaled_xor.xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor2.xshare,
                dt.dpf[2].scaled_xor.xshare);
            printf("\n");
        }
    });
}
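// The parallel version of tupleeval_timing, using ParallelEval to
// evaluate the pair or triple across multiple threads and reduce the
// per-thread results into a tuple of RegXS accumulators.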
static void par_tupleeval_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start, num_threads] (yield_t &yield) {
        size_t &aes_ops = tio.aes_ops();
        if (tio.player() == 2) {
            RDPFPair dp = tio.rdpfpair(yield, depth);
            auto pe = ParallelEval(dp, start, 0, address_t(1)<<depth,
                num_threads, aes_ops);
            using V = std::tuple<RegXS,RegXS>;
            V result, init;
            result = pe.reduce(init, [&dp] (int thread_num, address_t i,
                    const RDPFPair::node &leaf) {
                return dp.scaled<RegXS>(leaf);
            });
            printf("%016lx\n%016lx\n", std::get<0>(result).xshare,
                dp.dpf[0].scaled_xor.xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<1>(result).xshare,
                dp.dpf[1].scaled_xor.xshare);
            printf("\n");
        } else {
            RDPFTriple dt = tio.rdpftriple(yield, depth);
            auto pe = ParallelEval(dt, start, 0, address_t(1)<<depth,
                num_threads, aes_ops);
            using V = std::tuple<RegXS,RegXS,RegXS>;
            V result, init;
            result = pe.reduce(init, [&dt] (int thread_num, address_t i,
                    const RDPFTriple::node &leaf) {
                return dt.scaled<RegXS>(leaf);
            });
            printf("%016lx\n%016lx\n", std::get<0>(result).xshare,
                dt.dpf[0].scaled_xor.xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<1>(result).xshare,
                dt.dpf[1].scaled_xor.xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<2>(result).xshare,
                dt.dpf[2].scaled_xor.xshare);
            printf("\n");
        }
    });
}
// T is RegAS or RegXS for additive or XOR shared database respectively
template <typename T>
static void duoram_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t share=arc4random();
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        share = atoi(*args);
        ++args;
    }
    share &= ((address_t(1)<<depth)-1);
    MPCTIO tio(mpcio, 0, opts.num_threads);
    run_coroutines(tio, [&tio, depth, share] (yield_t &yield) {
        size_t size = size_t(1)<<depth;
        // size_t &aes_ops = tio.aes_ops();
        Duoram<T> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);
        RegAS aidx, aidx2, aidx3;
        aidx.ashare = share;
        aidx2.ashare = share + tio.player();
        aidx3.ashare = share + 1;
        T M;
        if (tio.player() == 0) {
            M.set(0xbabb0000);
        } else {
            M.set(0x0000a66e);
        }
        RegXS xidx;
        xidx.xshare = share;
        T N;
        if (tio.player() == 0) {
            N.set(0xdead0000);
        } else {
            N.set(0x0000beef);
        }
        // Writing and reading with additively shared indices
        printf("Additive Updating\n");
        A[aidx] += M;
        printf("Additive Reading\n");
        T Aa = A[aidx];
        // Writing and reading with XOR shared indices
        printf("XOR Updating\n");
        A[xidx] += N;
        printf("XOR Reading\n");
        T Ax = A[xidx];
        T Ae;
        // Writing and reading with explicit indices
        if (depth > 2) {
            printf("Explicit Updating\n");
            A[5] += Aa;
            printf("Explicit Reading\n");
            Ae = A[6];
        }
        // Simultaneous independent reads
        printf("3 independent reading\n");
        std::vector<T> Av = A.indep(std::array {
            aidx, aidx2, aidx3
        });
        // Simultaneous independent updates
        T Aw1, Aw2, Aw3;
        Aw1.set(0x101010101010101 * tio.player());
        Aw2.set(0x202020202020202 * tio.player());
        Aw3.set(0x303030303030303 * tio.player());
        printf("3 independent updating\n");
        A.indep(std::array { aidx, aidx2, aidx3 }) -=
            std::array { Aw1, Aw2, Aw3 };
        if (depth <= 10) {
            oram.dump();
            auto check = A.reconstruct();
            if (tio.player() == 0) {
                for (address_t i=0;i<size;++i) {
                    printf("%04x %016lx\n", i, check[i].share());
                }
            }
        }
        auto checkread = A.reconstruct(Aa);
        auto checkreade = A.reconstruct(Ae);
        auto checkreadx = A.reconstruct(Ax);
        if (tio.player() == 0) {
            printf("Read AS value = %016lx\n", checkread.share());
            printf("Read AX value = %016lx\n", checkreadx.share());
            printf("Read Ex value = %016lx\n", checkreade.share());
        }
        for (auto &v : Av) {
            auto checkv = A.reconstruct(v);
            if (tio.player() == 0) {
                printf("Read Av value = %016lx\n", checkv.share());
            }
        }
    });
}
// This measures the same things as the Duoram paper: dependent and
// independent reads, updates, writes, and interleaves
// T is RegAS or RegXS for additive or XOR shared database respectively
template <typename T>
static void duoram(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth = 6;
    int items = 4;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        items = atoi(*args);
        ++args;
    }
    MPCTIO tio(mpcio, 0, opts.num_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, items] (yield_t &yield) {
        size_t size = size_t(1)<<depth;
        address_t mask = (depth < ADDRESS_MAX_BITS ?
            ((address_t(1)<<depth) - 1) : ~0);
        Duoram<T> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);

        std::cout << "===== DEPENDENT UPDATES =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Make a linked list of length items
        std::vector<T> list_indices;
        T prev_index, next_index;
        prev_index.randomize(depth);
        for (int i=0;i<items;++i) {
            next_index.randomize(depth);
            A[next_index] += prev_index;
            list_indices.push_back(next_index);
            prev_index = next_index;
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== DEPENDENT READS =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Read the linked list starting with prev_index
        T cur_index = prev_index;
        for (int i=0;i<items;++i) {
            cur_index = A[cur_index];
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== INDEPENDENT READS =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Read all the entries in the list at once
        std::vector<T> read_outputs = A.indep(list_indices);
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== INDEPENDENT UPDATES =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Make a vector of indices 1 larger than those in list_indices,
        // and a vector of values 1 larger than those in outputs
        std::vector<T> indep_indices, indep_values;
        T one;
        one.set(tio.player()); // Sets the shared value to 1
        for (int i=0;i<items;++i) {
            indep_indices.push_back(list_indices[i]+one);
            indep_values.push_back(read_outputs[i]+one);
        }
        // Update all the indices at once
        A.indep(indep_indices) += indep_values;
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== DEPENDENT WRITES =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        T two;
        two.set(2*tio.player()); // Sets the shared value to 2
        // For each address addr that's number i from the end of the
        // linked list, write i+1 into location addr+2
        for (int i=0;i<items;++i) {
            T val;
            val.set((i+1)*tio.player());
            A[list_indices[i]+two] = val;
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n===== DEPENDENT INTERLEAVED =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        T three;
        three.set(3*tio.player()); // Sets the shared value to 3
        // Follow the linked list and whenever A[addr]=val, set
        // A[addr+3]=val+3
        cur_index = prev_index;
        for (int i=0;i<items;++i) {
            T next_index = A[cur_index];
            A[cur_index+three] = next_index+three;
            cur_index = next_index;
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);

        std::cout << "\n";
        mpcio.reset_stats();
        tio.reset_lamport();

        if (depth <= 30) {
            auto check = A.reconstruct();
            auto head = A.reconstruct(prev_index);
            if (tio.player() == 0) {
                int width = (depth+3)/4;
                printf("Head of linked list: %0*lx\n\n", width,
                    head.share() & mask);
                std::cout << "Non-zero reconstructed database entries:\n";
                for (address_t i=0;i<size;++i) {
                    value_t share = check[i].share() & mask;
                    if (share) printf("%0*x: %0*lx\n", width, i, width, share);
                }
            }
        }
    });
}
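// Test CDPFs (comparison DPFs).  The server generates a CDPF pair
// with a known target and evaluates both halves at the query point;
// players 0 and 1 evaluate their precomputed CDPFs at the query and
// reconstruct the leaf by exchanging shares.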
static void cdpf_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    value_t query, target;
    int iters = 1;
    arc4random_buf(&query, sizeof(query));
    arc4random_buf(&target, sizeof(target));
    if (*args) {
        query = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        iters = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, query, target, iters] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, query, target, iters] (yield_t &yield) {
                size_t &aes_ops = tio.aes_ops();
                for (int i=0;i<iters;++i) {
                    if (tio.player() == 2) {
                        tio.cdpf(yield);
                        auto [ dpf0, dpf1 ] = CDPF::generate(target, aes_ops);
                        DPFnode leaf0 = dpf0.leaf(query, aes_ops);
                        DPFnode leaf1 = dpf1.leaf(query, aes_ops);
                        printf("DPFXOR_{%016lx}(%016lx) = ", target, query);
                        dump_node(leaf0 ^ leaf1);
                    } else {
                        CDPF dpf = tio.cdpf(yield);
                        printf("ashare = %016lX\nxshare = %016lX\n",
                            dpf.as_target.ashare, dpf.xs_target.xshare);
                        DPFnode leaf = dpf.leaf(query, aes_ops);
                        printf("DPF(%016lx) = ", query);
                        dump_node(leaf);
                        if (tio.player() == 1) {
                            tio.iostream_peer() << leaf;
                        } else {
                            DPFnode peerleaf;
                            tio.iostream_peer() >> peerleaf;
                            printf("XOR = ");
                            dump_node(leaf ^ peerleaf);
                        }
                    }
                }
            });
        });
    }
    pool.join();
}
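// Run one comparison: the server creates a CDPF pair with the given
// target and distributes it along with additive shares of x; the
// computational players run the comparison protocol, reconstruct the
// (lt, eq, gt) flags, and check them against the sign of the
// reconstructed value.  Returns 1 on success, 0 on failure.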
static int compare_test_one(MPCTIO &tio, yield_t &yield,
    value_t target, value_t x)
{
    int player = tio.player();
    size_t &aes_ops = tio.aes_ops();
    int res = 1;
    if (player == 2) {
        // Create a CDPF pair with the given target
        auto [dpf0, dpf1] = CDPF::generate(target, aes_ops);
        // Send it and a share of x to the computational parties
        RegAS x0, x1;
        x0.randomize();
        x1.set(x-x0.share());
        tio.iostream_p0() << dpf0 << x0;
        tio.iostream_p1() << dpf1 << x1;
    } else {
        CDPF dpf;
        RegAS xsh;
        tio.iostream_server() >> dpf >> xsh;
        auto [lt, eq, gt] = dpf.compare(tio, yield, xsh, aes_ops);
        printf("%016lx %016lx %d %d %d ", target, x, lt.bshare,
            eq.bshare, gt.bshare);
        // Check the answer
        if (player == 1) {
            tio.iostream_peer() << xsh << lt << eq << gt;
        } else {
            RegAS peer_xsh;
            RegBS peer_lt, peer_eq, peer_gt;
            tio.iostream_peer() >> peer_xsh >> peer_lt >> peer_eq >> peer_gt;
            lt ^= peer_lt;
            eq ^= peer_eq;
            gt ^= peer_gt;
            xsh += peer_xsh;
            int lti = int(lt.bshare);
            int eqi = int(eq.bshare);
            int gti = int(gt.bshare);
            x = xsh.share();
            printf(": %d %d %d ", lti, eqi, gti);
            bool signbit = (x >> 63);
            if (lti + eqi + gti != 1) {
                printf("INCONSISTENT");
                res = 0;
            } else if (x == 0 && eqi) {
                printf("=");
            } else if (!signbit && gti) {
                printf(">");
            } else if (signbit && lti) {
                printf("<");
            } else {
                printf("INCORRECT");
                res = 0;
            }
        }
        printf("\n");
    }
    return res;
}
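// Exercise compare_test_one with a fixed target and a collection of
// boundary values for x: the given x, 0, small values around ±16, and
// values around 2^63 (the sign boundary).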
static int compare_test_target(MPCTIO &tio, yield_t &yield,
    value_t target, value_t x)
{
    int res = 1;
    res &= compare_test_one(tio, yield, target, x);
    res &= compare_test_one(tio, yield, target, 0);
    res &= compare_test_one(tio, yield, target, 1);
    res &= compare_test_one(tio, yield, target, 15);
    res &= compare_test_one(tio, yield, target, 16);
    res &= compare_test_one(tio, yield, target, 17);
    res &= compare_test_one(tio, yield, target, -1);
    res &= compare_test_one(tio, yield, target, -15);
    res &= compare_test_one(tio, yield, target, -16);
    res &= compare_test_one(tio, yield, target, -17);
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63));
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63)+1);
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63)-1);
    return res;
}
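// Test the comparison protocol over a grid of cases: each thread runs
// compare_test_target for the given (or random) target and x, plus
// the same set of boundary targets used above for the values.
// Player 0 reports overall success or failure.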
static void compare_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    value_t target, x;
    arc4random_buf(&target, sizeof(target));
    arc4random_buf(&x, sizeof(x));
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        x = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, target, x] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, target, x] (yield_t &yield) {
                int res = 1;
                res &= compare_test_target(tio, yield, target, x);
                res &= compare_test_target(tio, yield, 0, x);
                res &= compare_test_target(tio, yield, 1, x);
                res &= compare_test_target(tio, yield, 15, x);
                res &= compare_test_target(tio, yield, 16, x);
                res &= compare_test_target(tio, yield, 17, x);
                res &= compare_test_target(tio, yield, -1, x);
                res &= compare_test_target(tio, yield, -15, x);
                res &= compare_test_target(tio, yield, -16, x);
                res &= compare_test_target(tio, yield, -17, x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63), x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63)+1, x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63)-1, x);
                if (tio.player() == 0) {
                    if (res == 1) {
                        printf("All tests passed!\n");
                    } else {
                        printf("TEST FAILURES\n");
                    }
                }
            });
        });
    }
    pool.join();
}
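// Test oblivious bitonic sort: fill a Duoram database with random
// 62-bit values (initialized in parallel using explicit indexing),
// sort it obliviously, and for small depths dump the reconstructed
// result so it can be checked by inspection.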
static void sort_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, depth] (yield_t &yield) {
                address_t size = address_t(1)<<depth;
                // size_t &aes_ops = tio.aes_ops();
                Duoram<RegAS> oram(tio.player(), size);
                auto A = oram.flat(tio, yield);
                A.explicitonly(true);
                // Initialize the memory to random values in parallel
                std::vector<coro_t> coroutines;
                for (address_t i=0; i<size; ++i) {
                    coroutines.emplace_back(
                        [&A, i](yield_t &yield) {
                            auto Acoro = A.context(yield);
                            RegAS v;
                            v.randomize(62);
                            Acoro[i] += v;
                        });
                }
                run_coroutines(yield, coroutines);
                A.bitonic_sort(0, depth);
                if (depth <= 10) {
                    oram.dump();
                    auto check = A.reconstruct();
                    if (tio.player() == 0) {
                        for (address_t i=0;i<size;++i) {
                            printf("%04x %016lx\n", i, check[i].share());
                        }
                    }
                }
            });
        });
    }
    pool.join();
}
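// Test oblivious binary search: the server deals out additive shares
// of a target value, the computational players build and sort a
// random database, then run obliv_binary_search to locate the target,
// reconstructing the index (and, for small depths, the database) to
// check the result.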
static void bsearch_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    value_t target;
    arc4random_buf(&target, sizeof(target));
    target >>= 1;
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth, target] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, depth, target] (yield_t &yield) {
                address_t size = address_t(1)<<depth;
                RegAS tshare;
                if (tio.player() == 2) {
                    // Send shares of the target to the computational
                    // players
                    RegAS tshare0, tshare1;
                    tshare0.randomize();
                    tshare1.set(target-tshare0.share());
                    tio.iostream_p0() << tshare0;
                    tio.iostream_p1() << tshare1;
                    printf("Using target = %016lx\n", target);
                    yield();
                } else {
                    // Get the share of the target
                    tio.iostream_server() >> tshare;
                }
                // Create a random database and sort it
                // size_t &aes_ops = tio.aes_ops();
                Duoram<RegAS> oram(tio.player(), size);
                auto A = oram.flat(tio, yield);
                A.explicitonly(true);
                // Initialize the memory to random values in parallel
                std::vector<coro_t> coroutines;
                for (address_t i=0; i<size; ++i) {
                    coroutines.emplace_back(
                        [&A, i](yield_t &yield) {
                            auto Acoro = A.context(yield);
                            RegAS v;
                            v.randomize(62);
                            Acoro[i] += v;
                        });
                }
                run_coroutines(yield, coroutines);
                A.bitonic_sort(0, depth);
                // Binary search for the target
                RegAS tindex = A.obliv_binary_search(tshare);
                // Check the answer
                if (tio.player() == 1) {
                    tio.iostream_peer() << tindex;
                } else if (tio.player() == 0) {
                    RegAS peer_tindex;
                    tio.iostream_peer() >> peer_tindex;
                    tindex += peer_tindex;
                }
                if (depth <= 10) {
                    auto check = A.reconstruct();
                    if (tio.player() == 0) {
                        for (address_t i=0;i<size;++i) {
                            printf("%04x %016lx\n", i, check[i].share());
                        }
                    }
                }
                if (tio.player() == 0) {
                    printf("Found index = %lx\n", tindex.share());
                }
            });
        });
    }
    pool.join();
}
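// Dispatch to the handler for the mode named by the first
// command-line argument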
void online_main(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    MPCTIO tio(mpcio, 0);
    if (!*args) {
        std::cerr << "Mode is required as the first argument when not preprocessing.\n";
        return;
    } else if (!strcmp(*args, "test")) {
        ++args;
        online_test(mpcio, opts, args);
    } else if (!strcmp(*args, "lamporttest")) {
        ++args;
        lamport_test(mpcio, opts, args);
    } else if (!strcmp(*args, "rdpftest")) {
        ++args;
        rdpf_test(mpcio, opts, args);
    } else if (!strcmp(*args, "rdpftime")) {
        ++args;
        rdpf_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "evaltime")) {
        ++args;
        rdpfeval_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "parevaltime")) {
        ++args;
        par_rdpfeval_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "tupletime")) {
        ++args;
        tupleeval_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "partupletime")) {
        ++args;
        par_tupleeval_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "duotest")) {
        ++args;
        if (opts.use_xor_db) {
            duoram_test<RegXS>(mpcio, opts, args);
        } else {
            duoram_test<RegAS>(mpcio, opts, args);
        }
    } else if (!strcmp(*args, "cdpftest")) {
        ++args;
        cdpf_test(mpcio, opts, args);
    } else if (!strcmp(*args, "cmptest")) {
        ++args;
        compare_test(mpcio, opts, args);
    } else if (!strcmp(*args, "sorttest")) {
        ++args;
        sort_test(mpcio, opts, args);
    } else if (!strcmp(*args, "bsearch")) {
        ++args;
        bsearch_test(mpcio, opts, args);
    } else if (!strcmp(*args, "duoram")) {
        ++args;
        if (opts.use_xor_db) {
            duoram<RegXS>(mpcio, opts, args);
        } else {
            duoram<RegAS>(mpcio, opts, args);
        }
    } else {
        std::cerr << "Unknown mode " << *args << "\n";
    }
}