// online.cpp

#include <bsd/stdlib.h> // arc4random_buf

#include "online.hpp"
#include "mpcops.hpp"
#include "rdpf.hpp"
#include "duoram.hpp"
#include "cdpf.hpp"

// Test the basic MPC operations: multiplication of two shared values,
// multiplication of the two players' private values, flag-conditional
// multiplication, oblivious swap, and XOR-share to additive-share
// conversion.  Player 0 reconstructs the results; the four differences
// printed at the end should all be zero.
static void online_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    nbits_t nbits = VALUE_BITS;
    if (*args) {
        nbits = atoi(*args);
    }
    size_t memsize = 9;
    MPCTIO tio(mpcio, 0);
    bool is_server = (mpcio.player == 2);
    RegAS *A = new RegAS[memsize];
    value_t V;
    RegBS F0, F1;
    RegXS X;
    if (!is_server) {
        A[0].randomize();
        A[1].randomize();
        F0.randomize();
        A[4].randomize();
        F1.randomize();
        A[6].randomize();
        A[7].randomize();
        X.randomize();
        arc4random_buf(&V, sizeof(V));
        printf("A:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, A[i].ashare);
        printf("V : %016lX\n", V);
        printf("F0 : %01X\n", F0.bshare);
        printf("F1 : %01X\n", F1.bshare);
        printf("X : %016lX\n", X.xshare);
    }
    std::vector<coro_t> coroutines;
    coroutines.emplace_back(
        [&](yield_t &yield) {
            mpc_mul(tio, yield, A[2], A[0], A[1], nbits);
        });
    coroutines.emplace_back(
        [&](yield_t &yield) {
            mpc_valuemul(tio, yield, A[3], V, nbits);
        });
    coroutines.emplace_back(
        [&](yield_t &yield) {
            mpc_flagmult(tio, yield, A[5], F0, A[4], nbits);
        });
    coroutines.emplace_back(
        [&](yield_t &yield) {
            mpc_oswap(tio, yield, A[6], A[7], F1, nbits);
        });
    coroutines.emplace_back(
        [&](yield_t &yield) {
            mpc_xs_to_as(tio, yield, A[8], X, nbits);
        });
    run_coroutines(yield, coroutines);
    if (!is_server) {
        printf("\n");
        printf("A:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, A[i].ashare);
    }
    // Check the answers
    if (mpcio.player == 1) {
        tio.queue_peer(A, memsize*sizeof(RegAS));
        tio.queue_peer(&V, sizeof(V));
        tio.queue_peer(&F0, sizeof(RegBS));
        tio.queue_peer(&F1, sizeof(RegBS));
        tio.queue_peer(&X, sizeof(RegXS));
        tio.send();
    } else if (mpcio.player == 0) {
        RegAS *B = new RegAS[memsize];
        RegBS BF0, BF1;
        RegXS BX;
        value_t BV;
        value_t *S = new value_t[memsize];
        bit_t SF0, SF1;
        value_t SX;
        tio.recv_peer(B, memsize*sizeof(RegAS));
        tio.recv_peer(&BV, sizeof(BV));
        tio.recv_peer(&BF0, sizeof(RegBS));
        tio.recv_peer(&BF1, sizeof(RegBS));
        tio.recv_peer(&BX, sizeof(RegXS));
        for(size_t i=0; i<memsize; ++i) S[i] = A[i].ashare+B[i].ashare;
        SF0 = F0.bshare ^ BF0.bshare;
        SF1 = F1.bshare ^ BF1.bshare;
        SX = X.xshare ^ BX.xshare;
        printf("S:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, S[i]);
        printf("SF0: %01X\n", SF0);
        printf("SF1: %01X\n", SF1);
        printf("SX : %016lX\n", SX);
        // Each of the following differences should be zero if the
        // corresponding MPC operation computed the right result
        printf("\n%016lx\n", S[0]*S[1]-S[2]);
        printf("%016lx\n", (V*BV)-S[3]);
        printf("%016lx\n", (SF0*S[4])-S[5]);
        printf("%016lx\n", S[8]-SX);
        delete[] B;
        delete[] S;
    }
    delete[] A;
}

static void lamport_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    // Create a bunch of threads and send a bunch of data to the other
    // peer, and receive their data.  If an arg is specified, repeat
    // that many times.  The Lamport clock at the end should be just the
    // number of repetitions.  Subsequent args are the chunk size and
    // the number of chunks per message.
    size_t niters = 1;
    size_t chunksize = 1<<20;
    size_t numchunks = 1;
    if (*args) {
        niters = atoi(*args);
        ++args;
    }
    if (*args) {
        chunksize = atoi(*args);
        ++args;
    }
    if (*args) {
        numchunks = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, niters, chunksize, numchunks] {
            MPCTIO tio(mpcio, thread_num);
            char *sendbuf = new char[chunksize];
            char *recvbuf = new char[chunksize*numchunks];
            for (size_t i=0; i<niters; ++i) {
                for (size_t chunk=0; chunk<numchunks; ++chunk) {
                    arc4random_buf(sendbuf, chunksize);
                    tio.queue_peer(sendbuf, chunksize);
                }
                tio.send();
                tio.recv_peer(recvbuf, chunksize*numchunks);
            }
            delete[] recvbuf;
            delete[] sendbuf;
        });
    }
    pool.join();
}

// Test RDPF evaluation: each party evaluates its RDPFs at every point of
// the domain; the computational players additionally exchange their shares
// and reconstruct to check consistency.
static void rdpf_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    size_t num_iters = 1;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        num_iters = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, &yield, thread_num, depth, num_iters] {
            MPCTIO tio(mpcio, thread_num);
            size_t &aes_ops = tio.aes_ops();
            for (size_t iter=0; iter < num_iters; ++iter) {
                if (mpcio.player == 2) {
                    RDPFPair dp = tio.rdpfpair(yield, depth);
                    for (int i=0;i<2;++i) {
                        const RDPF &dpf = dp.dpf[i];
                        for (address_t x=0;x<(address_t(1)<<depth);++x) {
                            DPFnode leaf = dpf.leaf(x, aes_ops);
                            RegBS ub = dpf.unit_bs(leaf);
                            RegAS ua = dpf.unit_as(leaf);
                            RegXS sx = dpf.scaled_xs(leaf);
                            RegAS sa = dpf.scaled_as(leaf);
                            printf("%04x %x %016lx %016lx %016lx\n", x,
                                ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                        }
                        printf("\n");
                    }
                } else {
                    RDPFTriple dt = tio.rdpftriple(yield, depth);
                    for (int i=0;i<3;++i) {
                        const RDPF &dpf = dt.dpf[i];
                        RegXS peer_scaled_xor;
                        RegAS peer_scaled_sum;
                        if (mpcio.player == 1) {
                            tio.iostream_peer() << dpf.scaled_xor << dpf.scaled_sum;
                        } else {
                            tio.iostream_peer() >> peer_scaled_xor >> peer_scaled_sum;
                            peer_scaled_sum += dpf.scaled_sum;
                            peer_scaled_xor ^= dpf.scaled_xor;
                        }
                        for (address_t x=0;x<(address_t(1)<<depth);++x) {
                            DPFnode leaf = dpf.leaf(x, aes_ops);
                            RegBS ub = dpf.unit_bs(leaf);
                            RegAS ua = dpf.unit_as(leaf);
                            RegXS sx = dpf.scaled_xs(leaf);
                            RegAS sa = dpf.scaled_as(leaf);
                            printf("%04x %x %016lx %016lx %016lx\n", x,
                                ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                            if (mpcio.player == 1) {
                                tio.iostream_peer() << ub << ua << sx << sa;
                            } else {
                                RegBS peer_ub;
                                RegAS peer_ua;
                                RegXS peer_sx;
                                RegAS peer_sa;
                                tio.iostream_peer() >> peer_ub >> peer_ua >>
                                    peer_sx >> peer_sa;
                                ub ^= peer_ub;
                                ua += peer_ua;
                                sx ^= peer_sx;
                                sa += peer_sa;
                                // The reconstructed leaf values should be
                                // nonzero at exactly one x: the (random)
                                // target point of this RDPF, flagged below
                                if (ub.bshare || ua.ashare || sx.xshare || sa.ashare) {
                                    printf("**** %x %016lx %016lx %016lx\n",
                                        ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                                    printf("SCALE %016lx %016lx\n",
                                        peer_scaled_xor.xshare, peer_scaled_sum.ashare);
                                }
                            }
                        }
                        printf("\n");
                    }
                }
                tio.send();
            }
        });
    }
    pool.join();
}

// Time RDPF evaluation by fully expanding each DPF and then evaluating
// every leaf; the XOR of the scaled leaf shares is printed next to the
// precomputed scaled_xor, and the two values should match.
static void rdpf_timing(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, &yield, thread_num, depth] {
            MPCTIO tio(mpcio, thread_num);
            size_t &aes_ops = tio.aes_ops();
            if (mpcio.player == 2) {
                RDPFPair dp = tio.rdpfpair(yield, depth);
                for (int i=0;i<2;++i) {
                    RDPF &dpf = dp.dpf[i];
                    dpf.expand(aes_ops);
                    RegXS scaled_xor;
                    for (address_t x=0;x<(address_t(1)<<depth);++x) {
                        DPFnode leaf = dpf.leaf(x, aes_ops);
                        RegXS sx = dpf.scaled_xs(leaf);
                        scaled_xor ^= sx;
                    }
                    printf("%016lx\n%016lx\n", scaled_xor.xshare,
                        dpf.scaled_xor.xshare);
                    printf("\n");
                }
            } else {
                RDPFTriple dt = tio.rdpftriple(yield, depth);
                for (int i=0;i<3;++i) {
                    RDPF &dpf = dt.dpf[i];
                    dpf.expand(aes_ops);
                    RegXS scaled_xor;
                    for (address_t x=0;x<(address_t(1)<<depth);++x) {
                        DPFnode leaf = dpf.leaf(x, aes_ops);
                        RegXS sx = dpf.scaled_xs(leaf);
                        scaled_xor ^= sx;
                    }
                    printf("%016lx\n%016lx\n", scaled_xor.xshare,
                        dpf.scaled_xor.xshare);
                    printf("\n");
                }
            }
            tio.send();
        });
    }
    pool.join();
}

// Time RDPF evaluation using a StreamEval iterator over each DPF
// individually (optionally starting at a nonzero offset) instead of a
// full expansion; the two printed values per DPF should again match.
static void rdpfeval_timing(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, &yield, thread_num, depth, start] {
            MPCTIO tio(mpcio, thread_num);
            size_t &aes_ops = tio.aes_ops();
            if (mpcio.player == 2) {
                RDPFPair dp = tio.rdpfpair(yield, depth);
                for (int i=0;i<2;++i) {
                    RDPF &dpf = dp.dpf[i];
                    RegXS scaled_xor;
                    auto ev = StreamEval(dpf, start, 0, aes_ops, false);
                    for (address_t x=0;x<(address_t(1)<<depth);++x) {
                        DPFnode leaf = ev.next();
                        RegXS sx = dpf.scaled_xs(leaf);
                        scaled_xor ^= sx;
                    }
                    printf("%016lx\n%016lx\n", scaled_xor.xshare,
                        dpf.scaled_xor.xshare);
                    printf("\n");
                }
            } else {
                RDPFTriple dt = tio.rdpftriple(yield, depth);
                for (int i=0;i<3;++i) {
                    RDPF &dpf = dt.dpf[i];
                    RegXS scaled_xor;
                    auto ev = StreamEval(dpf, start, 0, aes_ops, false);
                    for (address_t x=0;x<(address_t(1)<<depth);++x) {
                        DPFnode leaf = ev.next();
                        RegXS sx = dpf.scaled_xs(leaf);
                        scaled_xor ^= sx;
                    }
                    printf("%016lx\n%016lx\n", scaled_xor.xshare,
                        dpf.scaled_xor.xshare);
                    printf("\n");
                }
            }
            tio.send();
        });
    }
    pool.join();
}

// Time RDPF evaluation using a single StreamEval iterator over the whole
// pair (or triple), so that all member DPFs are evaluated in lockstep at
// each point of the domain.
static void tupleeval_timing(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, &yield, thread_num, depth, start] {
            MPCTIO tio(mpcio, thread_num);
            size_t &aes_ops = tio.aes_ops();
            if (mpcio.player == 2) {
                RDPFPair dp = tio.rdpfpair(yield, depth);
                RegXS scaled_xor0, scaled_xor1;
                auto ev = StreamEval(dp, start, 0, aes_ops, false);
                for (address_t x=0;x<(address_t(1)<<depth);++x) {
                    auto [L0, L1] = ev.next();
                    RegXS sx0 = dp.dpf[0].scaled_xs(L0);
                    RegXS sx1 = dp.dpf[1].scaled_xs(L1);
                    scaled_xor0 ^= sx0;
                    scaled_xor1 ^= sx1;
                }
                printf("%016lx\n%016lx\n", scaled_xor0.xshare,
                    dp.dpf[0].scaled_xor.xshare);
                printf("\n");
                printf("%016lx\n%016lx\n", scaled_xor1.xshare,
                    dp.dpf[1].scaled_xor.xshare);
                printf("\n");
            } else {
                RDPFTriple dt = tio.rdpftriple(yield, depth);
                RegXS scaled_xor0, scaled_xor1, scaled_xor2;
                auto ev = StreamEval(dt, start, 0, aes_ops, false);
                for (address_t x=0;x<(address_t(1)<<depth);++x) {
                    auto [L0, L1, L2] = ev.next();
                    RegXS sx0 = dt.dpf[0].scaled_xs(L0);
                    RegXS sx1 = dt.dpf[1].scaled_xs(L1);
                    RegXS sx2 = dt.dpf[2].scaled_xs(L2);
                    scaled_xor0 ^= sx0;
                    scaled_xor1 ^= sx1;
                    scaled_xor2 ^= sx2;
                }
                printf("%016lx\n%016lx\n", scaled_xor0.xshare,
                    dt.dpf[0].scaled_xor.xshare);
                printf("\n");
                printf("%016lx\n%016lx\n", scaled_xor1.xshare,
                    dt.dpf[1].scaled_xor.xshare);
                printf("\n");
                printf("%016lx\n%016lx\n", scaled_xor2.xshare,
                    dt.dpf[2].scaled_xor.xshare);
                printf("\n");
            }
            tio.send();
        });
    }
    pool.join();
}

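// A note on the three timing benchmarks above (summarized from the code in
// this file rather than from any external documentation):
//
//   rdpf_timing      fully expands each DPF with dpf.expand(aes_ops) and
//                    then evaluates leaves with dpf.leaf(x, aes_ops)
//   rdpfeval_timing  walks each DPF with its own streaming evaluator,
//                    StreamEval(dpf, start, 0, aes_ops, false)
//   tupleeval_timing walks the whole pair/triple with a single
//                    StreamEval(dp, ...) / StreamEval(dt, ...), so all
//                    member DPFs advance in lockstep
//
// Each benchmark XORs the scaled leaf shares over the entire domain and
// prints the result next to the precomputed scaled_xor member; the two
// values are expected to match.
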
// T is RegAS or RegXS for additive or XOR shared database respectively
template <typename T>
static void duoram_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t share=arc4random();
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        share = atoi(*args);
        ++args;
    }
    share &= ((address_t(1)<<depth)-1);
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, &yield, thread_num, depth, share] {
            size_t size = size_t(1)<<depth;
            MPCTIO tio(mpcio, thread_num);
            // size_t &aes_ops = tio.aes_ops();
            Duoram<T> oram(mpcio.player, size);
            auto A = oram.flat(tio, yield);
            RegAS aidx;
            aidx.ashare = share;
            T M;
            if (tio.player() == 0) {
                M.set(0xbabb0000);
            } else {
                M.set(0x0000a66e);
            }
            RegXS xidx;
            xidx.xshare = share;
            T N;
            if (tio.player() == 0) {
                N.set(0xdead0000);
            } else {
                N.set(0x0000beef);
            }
            // Writing and reading with additively shared indices
            printf("Updating\n");
            A[aidx] += M;
            printf("Reading\n");
            T Aa = A[aidx];
            // Writing and reading with XOR shared indices
            printf("Updating\n");
            A[xidx] += N;
            printf("Reading\n");
            T Ax = A[xidx];
            T Ae;
            // Writing and reading with explicit indices
            if (depth > 2) {
                A[5] += Aa;
                Ae = A[6];
            }
            if (depth <= 10) {
                oram.dump();
                auto check = A.reconstruct();
                if (tio.player() == 0) {
                    for (address_t i=0;i<size;++i) {
                        printf("%04x %016lx\n", i, check[i].share());
                    }
                }
            }
            auto checkread = A.reconstruct(Aa);
            auto checkreade = A.reconstruct(Ae);
            auto checkreadx = A.reconstruct(Ax);
            if (tio.player() == 0) {
                printf("Read AS value = %016lx\n", checkread.share());
                printf("Read AX value = %016lx\n", checkreadx.share());
                printf("Read Ex value = %016lx\n", checkreade.share());
            }
            tio.send();
        });
    }
    pool.join();
}

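// The Duoram access pattern exercised by duoram_test above, for reference
// (this is a summary of the calls made in this file; duoram.hpp is the
// authoritative interface):
//
//     Duoram<T> oram(player, size);      // T is RegAS or RegXS
//     auto A = oram.flat(tio, yield);    // flat view bound to this MPCTIO
//     A[aidx] += M;                      // aidx: RegAS (additively shared index)
//     T v = A[xidx];                     // xidx: RegXS (XOR-shared index)
//     A[5] += v;                         // explicit (public) index
//     auto clear = A.reconstruct();      // open the whole database
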
// Test CDPF (comparison DPF) generation and evaluation at a single query
// point; the computational players exchange their leaves and print the XOR.
static void cdpf_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    value_t query, target;
    int iters = 1;
    arc4random_buf(&query, sizeof(query));
    arc4random_buf(&target, sizeof(target));
    if (*args) {
        query = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        iters = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, &query, &target, &iters] {
            MPCTIO tio(mpcio, thread_num);
            size_t &aes_ops = tio.aes_ops();
            for (int i=0;i<iters;++i) {
                if (mpcio.player == 2) {
                    tio.cdpf();
                    auto [ dpf0, dpf1 ] = CDPF::generate(target, aes_ops);
                    DPFnode leaf0 = dpf0.leaf(query, aes_ops);
                    DPFnode leaf1 = dpf1.leaf(query, aes_ops);
                    printf("DPFXOR_{%016lx}(%016lx) = ", target, query);
                    dump_node(leaf0 ^ leaf1);
                } else {
                    CDPF dpf = tio.cdpf();
                    printf("ashare = %016lX\nxshare = %016lX\n",
                        dpf.as_target.ashare, dpf.xs_target.xshare);
                    DPFnode leaf = dpf.leaf(query, aes_ops);
                    printf("DPF(%016lx) = ", query);
                    dump_node(leaf);
                    if (mpcio.player == 1) {
                        tio.iostream_peer() << leaf;
                    } else {
                        DPFnode peerleaf;
                        tio.iostream_peer() >> peerleaf;
                        printf("XOR = ");
                        dump_node(leaf ^ peerleaf);
                    }
                }
            }
            tio.send();
        });
    }
    pool.join();
}

static int compare_test_one(MPCTIO &tio, yield_t &yield,
    value_t target, value_t x)
{
    int player = tio.player();
    size_t &aes_ops = tio.aes_ops();
    int res = 1;
    if (player == 2) {
        // Create a CDPF pair with the given target
        auto [dpf0, dpf1] = CDPF::generate(target, aes_ops);
        // Send it and a share of x to the computational parties
        RegAS x0, x1;
        x0.randomize();
        x1.set(x-x0.share());
        tio.iostream_p0() << dpf0 << x0;
        tio.iostream_p1() << dpf1 << x1;
    } else {
        CDPF dpf;
        RegAS xsh;
        tio.iostream_server() >> dpf >> xsh;
        auto [lt, eq, gt] = dpf.compare(tio, yield, xsh, aes_ops);
        printf("%016lx %016lx %d %d %d ", target, x, lt.bshare,
            eq.bshare, gt.bshare);
        // Check the answer
        if (player == 1) {
            tio.iostream_peer() << xsh << lt << eq << gt;
        } else {
            RegAS peer_xsh;
            RegBS peer_lt, peer_eq, peer_gt;
            tio.iostream_peer() >> peer_xsh >> peer_lt >> peer_eq >> peer_gt;
            lt ^= peer_lt;
            eq ^= peer_eq;
            gt ^= peer_gt;
            xsh += peer_xsh;
            int lti = int(lt.bshare);
            int eqi = int(eq.bshare);
            int gti = int(gt.bshare);
            x = xsh.share();
            printf(": %d %d %d ", lti, eqi, gti);
            // Exactly one of lt/eq/gt should be set, and it should match
            // the sign of the reconstructed x, interpreted as a signed
            // 64-bit value
            bool signbit = (x >> 63);
            if (lti + eqi + gti != 1) {
                printf("INCONSISTENT");
                res = 0;
            } else if (x == 0 && eqi) {
                printf("=");
            } else if (!signbit && gti) {
                printf(">");
            } else if (signbit && lti) {
                printf("<");
            } else {
                printf("INCORRECT");
                res = 0;
            }
        }
        printf("\n");
    }
    return res;
}

static int compare_test_target(MPCTIO &tio, yield_t &yield,
    value_t target, value_t x)
{
    int res = 1;
    res &= compare_test_one(tio, yield, target, x);
    res &= compare_test_one(tio, yield, target, 0);
    res &= compare_test_one(tio, yield, target, 1);
    res &= compare_test_one(tio, yield, target, 15);
    res &= compare_test_one(tio, yield, target, 16);
    res &= compare_test_one(tio, yield, target, 17);
    res &= compare_test_one(tio, yield, target, -1);
    res &= compare_test_one(tio, yield, target, -15);
    res &= compare_test_one(tio, yield, target, -16);
    res &= compare_test_one(tio, yield, target, -17);
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63));
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63)+1);
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63)-1);
    return res;
}

// Test the CDPF-based comparison protocol over a spread of targets and
// values, including boundary cases around 0 and 2^63
static void compare_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    value_t target, x;
    arc4random_buf(&target, sizeof(target));
    arc4random_buf(&x, sizeof(x));
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        x = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, &yield, thread_num, &target, &x] {
            MPCTIO tio(mpcio, thread_num);
            int res = 1;
            res &= compare_test_target(tio, yield, target, x);
            res &= compare_test_target(tio, yield, 0, x);
            res &= compare_test_target(tio, yield, 1, x);
            res &= compare_test_target(tio, yield, 15, x);
            res &= compare_test_target(tio, yield, 16, x);
            res &= compare_test_target(tio, yield, 17, x);
            res &= compare_test_target(tio, yield, -1, x);
            res &= compare_test_target(tio, yield, -15, x);
            res &= compare_test_target(tio, yield, -16, x);
            res &= compare_test_target(tio, yield, -17, x);
            res &= compare_test_target(tio, yield, (value_t(1)<<63), x);
            res &= compare_test_target(tio, yield, (value_t(1)<<63)+1, x);
            res &= compare_test_target(tio, yield, (value_t(1)<<63)-1, x);
            tio.send();
            if (tio.player() == 0) {
                if (res == 1) {
                    printf("All tests passed!\n");
                } else {
                    printf("TEST FAILURES\n");
                }
            }
        });
    }
    pool.join();
}

// Test the oblivious bitonic sort on a database of random values
static void sort_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, &yield, thread_num, depth] {
            address_t size = address_t(1)<<depth;
            MPCTIO tio(mpcio, thread_num);
            // size_t &aes_ops = tio.aes_ops();
            Duoram<RegAS> oram(mpcio.player, size);
            auto A = oram.flat(tio, yield);
            A.explicitonly(true);
            // Initialize the memory to random values in parallel
            std::vector<coro_t> coroutines;
            for (address_t i=0; i<size; ++i) {
                coroutines.emplace_back(
                    [&A, i](yield_t &yield) {
                        auto Acoro = A.context(yield);
                        RegAS v;
                        v.randomize(62);
                        Acoro[i] += v;
                    });
            }
            run_coroutines(yield, coroutines);
            A.bitonic_sort(0, depth);
            if (depth <= 10) {
                oram.dump();
                auto check = A.reconstruct();
                if (tio.player() == 0) {
                    for (address_t i=0;i<size;++i) {
                        printf("%04x %016lx\n", i, check[i].share());
                    }
                }
            }
            tio.send();
        });
    }
    pool.join();
}

// Test oblivious binary search for a target value in a sorted random
// database
static void bsearch_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    value_t target;
    arc4random_buf(&target, sizeof(target));
    target >>= 1;
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, &yield, thread_num, depth, target] {
            address_t size = address_t(1)<<depth;
            MPCTIO tio(mpcio, thread_num);
            RegAS tshare;
            if (tio.player() == 2) {
                // Send shares of the target to the computational
                // players
                RegAS tshare0, tshare1;
                tshare0.randomize();
                tshare1.set(target-tshare0.share());
                tio.iostream_p0() << tshare0;
                tio.iostream_p1() << tshare1;
                printf("Using target = %016lx\n", target);
                yield();
            } else {
                // Get the share of the target
                tio.iostream_server() >> tshare;
            }
            // Create a random database and sort it
            // size_t &aes_ops = tio.aes_ops();
            Duoram<RegAS> oram(mpcio.player, size);
            auto A = oram.flat(tio, yield);
            A.explicitonly(true);
            // Initialize the memory to random values in parallel
            std::vector<coro_t> coroutines;
            for (address_t i=0; i<size; ++i) {
                coroutines.emplace_back(
                    [&A, i](yield_t &yield) {
                        auto Acoro = A.context(yield);
                        RegAS v;
                        v.randomize(62);
                        Acoro[i] += v;
                    });
            }
            run_coroutines(yield, coroutines);
            A.bitonic_sort(0, depth);
            // Binary search for the target
            RegAS tindex = A.obliv_binary_search(tshare);
            // Check the answer
            if (tio.player() == 1) {
                tio.iostream_peer() << tindex;
            } else if (tio.player() == 0) {
                RegAS peer_tindex;
                tio.iostream_peer() >> peer_tindex;
                tindex += peer_tindex;
            }
            if (depth <= 10) {
                auto check = A.reconstruct();
                if (tio.player() == 0) {
                    for (address_t i=0;i<size;++i) {
                        printf("%04x %016lx\n", i, check[i].share());
                    }
                }
            }
            if (tio.player() == 0) {
                printf("Found index = %lx\n", tindex.share());
            }
            tio.send();
        });
    }
    pool.join();
}

void online_main(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    // Run everything inside a coroutine so that simple tests don't have
    // to start one themselves
    MPCTIO tio(mpcio, 0);
    run_coroutines(tio,
        [&](yield_t &yield) {
            if (!*args) {
                std::cerr << "Mode is required as the first argument when not preprocessing.\n";
                return;
            } else if (!strcmp(*args, "test")) {
                ++args;
                online_test(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "lamporttest")) {
                ++args;
                lamport_test(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "rdpftest")) {
                ++args;
                rdpf_test(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "rdpftime")) {
                ++args;
                rdpf_timing(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "evaltime")) {
                ++args;
                rdpfeval_timing(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "tupletime")) {
                ++args;
                tupleeval_timing(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "duotest")) {
                ++args;
                if (opts.use_xor_db) {
                    duoram_test<RegXS>(mpcio, yield, opts, args);
                } else {
                    duoram_test<RegAS>(mpcio, yield, opts, args);
                }
            } else if (!strcmp(*args, "cdpftest")) {
                ++args;
                cdpf_test(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "cmptest")) {
                ++args;
                compare_test(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "sorttest")) {
                ++args;
                sort_test(mpcio, yield, opts, args);
            } else if (!strcmp(*args, "bsearch")) {
                ++args;
                bsearch_test(mpcio, yield, opts, args);
            } else {
                std::cerr << "Unknown mode " << *args << "\n";
            }
        });
}

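// To hook an additional mode into the dispatcher above, the existing
// pattern is (sketch only; "newtest" and newtest_fn are hypothetical names,
// not part of this file):
//
//     } else if (!strcmp(*args, "newtest")) {
//         ++args;
//         newtest_fn(mpcio, yield, opts, args);
//     }
//
// where newtest_fn takes the same arguments as the other handlers:
//
//     static void newtest_fn(MPCIO &mpcio, yield_t &yield,
//         const PRACOptions &opts, char **args);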