// online.cpp
#include <bsd/stdlib.h> // arc4random_buf

#include <vector>

#include "online.hpp"
#include "mpcops.hpp"
#include "rdpf.hpp"
#include "duoram.hpp"
  6. static void online_test(MPCIO &mpcio, yield_t &yield,
  7. const PRACOptions &opts, char **args)
  8. {
  9. nbits_t nbits = VALUE_BITS;
  10. if (*args) {
  11. nbits = atoi(*args);
  12. }
  13. size_t memsize = 9;
  14. MPCTIO tio(mpcio, 0);
  15. bool is_server = (mpcio.player == 2);
  16. RegAS *A = new RegAS[memsize];
  17. value_t V;
  18. RegBS F0, F1;
  19. RegXS X;
  20. if (!is_server) {
  21. A[0].randomize();
  22. A[1].randomize();
  23. F0.randomize();
  24. A[4].randomize();
  25. F1.randomize();
  26. A[6].randomize();
  27. A[7].randomize();
  28. X.randomize();
  29. arc4random_buf(&V, sizeof(V));
  30. printf("A:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, A[i].ashare);
  31. printf("V : %016lX\n", V);
  32. printf("F0 : %01X\n", F0.bshare);
  33. printf("F1 : %01X\n", F1.bshare);
  34. printf("X : %016lX\n", X.xshare);
  35. }
  36. std::vector<coro_t> coroutines;
  37. coroutines.emplace_back(
  38. [&](yield_t &yield) {
  39. mpc_mul(tio, yield, A[2], A[0], A[1], nbits);
  40. });
  41. coroutines.emplace_back(
  42. [&](yield_t &yield) {
  43. mpc_valuemul(tio, yield, A[3], V, nbits);
  44. });
  45. coroutines.emplace_back(
  46. [&](yield_t &yield) {
  47. mpc_flagmult(tio, yield, A[5], F0, A[4], nbits);
  48. });
  49. coroutines.emplace_back(
  50. [&](yield_t &yield) {
  51. mpc_oswap(tio, yield, A[6], A[7], F1, nbits);
  52. });
  53. coroutines.emplace_back(
  54. [&](yield_t &yield) {
  55. mpc_xs_to_as(tio, yield, A[8], X, nbits);
  56. });
  57. run_coroutines(yield, coroutines);
  58. if (!is_server) {
  59. printf("\n");
  60. printf("A:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, A[i].ashare);
  61. }
  62. // Check the answers
  63. if (mpcio.player == 1) {
  64. tio.queue_peer(A, memsize*sizeof(RegAS));
  65. tio.queue_peer(&V, sizeof(V));
  66. tio.queue_peer(&F0, sizeof(RegBS));
  67. tio.queue_peer(&F1, sizeof(RegBS));
  68. tio.queue_peer(&X, sizeof(RegXS));
  69. tio.send();
  70. } else if (mpcio.player == 0) {
  71. RegAS *B = new RegAS[memsize];
  72. RegBS BF0, BF1;
  73. RegXS BX;
  74. value_t BV;
  75. value_t *S = new value_t[memsize];
  76. bit_t SF0, SF1;
  77. value_t SX;
  78. tio.recv_peer(B, memsize*sizeof(RegAS));
  79. tio.recv_peer(&BV, sizeof(BV));
  80. tio.recv_peer(&BF0, sizeof(RegBS));
  81. tio.recv_peer(&BF1, sizeof(RegBS));
  82. tio.recv_peer(&BX, sizeof(RegXS));
  83. for(size_t i=0; i<memsize; ++i) S[i] = A[i].ashare+B[i].ashare;
  84. SF0 = F0.bshare ^ BF0.bshare;
  85. SF1 = F1.bshare ^ BF1.bshare;
  86. SX = X.xshare ^ BX.xshare;
  87. printf("S:\n"); for (size_t i=0; i<memsize; ++i) printf("%3lu: %016lX\n", i, S[i]);
  88. printf("SF0: %01X\n", SF0);
  89. printf("SF1: %01X\n", SF1);
  90. printf("SX : %016lX\n", SX);
  91. printf("\n%016lx\n", S[0]*S[1]-S[2]);
  92. printf("%016lx\n", (V*BV)-S[3]);
  93. printf("%016lx\n", (SF0*S[4])-S[5]);
  94. printf("%016lx\n", S[8]-SX);
  95. delete[] B;
  96. delete[] S;
  97. }
  98. delete[] A;
  99. }
  100. static void lamport_test(MPCIO &mpcio, yield_t &yield,
  101. const PRACOptions &opts, char **args)
  102. {
  103. // Create a bunch of threads and send a bunch of data to the other
  104. // peer, and receive their data. If an arg is specified, repeat
  105. // that many times. The Lamport clock at the end should be just the
  106. // number of repetitions. Subsequent args are the chunk size and
  107. // the number of chunks per message
  108. size_t niters = 1;
  109. size_t chunksize = 1<<20;
  110. size_t numchunks = 1;
  111. if (*args) {
  112. niters = atoi(*args);
  113. ++args;
  114. }
  115. if (*args) {
  116. chunksize = atoi(*args);
  117. ++args;
  118. }
  119. if (*args) {
  120. numchunks = atoi(*args);
  121. ++args;
  122. }
  123. int num_threads = opts.num_threads;
  124. boost::asio::thread_pool pool(num_threads);
  125. for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
  126. boost::asio::post(pool, [&mpcio, thread_num, niters, chunksize, numchunks] {
  127. MPCTIO tio(mpcio, thread_num);
  128. char *sendbuf = new char[chunksize];
  129. char *recvbuf = new char[chunksize*numchunks];
  130. for (size_t i=0; i<niters; ++i) {
  131. for (size_t chunk=0; chunk<numchunks; ++chunk) {
  132. arc4random_buf(sendbuf, chunksize);
  133. tio.queue_peer(sendbuf, chunksize);
  134. }
  135. tio.send();
  136. tio.recv_peer(recvbuf, chunksize*numchunks);
  137. }
  138. delete[] recvbuf;
  139. delete[] sendbuf;
  140. });
  141. }
  142. pool.join();
  143. }
// Test correctness of RDPF (random distributed point function) shares.
// The server (player 2) evaluates its RDPFPair locally and prints every
// leaf.  Players 0 and 1 each evaluate their RDPFTriple; player 1 sends
// its per-leaf shares to player 0, who reconstructs them.  For a correct
// RDPF, the reconstructed values should be 0 everywhere except the
// target point, so player 0 prints a "****" line only where the
// reconstruction is nonzero.  Note the send/recv ordering between the
// two players is part of the protocol; do not reorder the iostream
// operations.
static void rdpf_test(MPCIO &mpcio, yield_t &yield,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth] {
            MPCTIO tio(mpcio, thread_num);
            // Per-thread AES operation counter, updated by leaf evals
            size_t &aes_ops = tio.aes_ops();
            if (mpcio.player == 2) {
                // Server side: just evaluate and dump both DPFs of the pair
                RDPFPair dp = tio.rdpfpair(depth);
                for (int i=0;i<2;++i) {
                    const RDPF &dpf = dp.dpf[i];
                    for (address_t x=0;x<(address_t(1)<<depth);++x) {
                        DPFnode leaf = dpf.leaf(x, aes_ops);
                        // Unit (bit/additive) and scaled (XOR/additive)
                        // shares extracted from the same leaf node
                        RegBS ub = dpf.unit_bs(leaf);
                        RegAS ua = dpf.unit_as(leaf);
                        RegXS sx = dpf.scaled_xs(leaf);
                        RegAS sa = dpf.scaled_as(leaf);
                        printf("%04x %x %016lx %016lx %016lx\n", x,
                            ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                    }
                    printf("\n");
                }
            } else {
                RDPFTriple dt = tio.rdpftriple(depth);
                for (int i=0;i<3;++i) {
                    const RDPF &dpf = dt.dpf[i];
                    // First exchange the precomputed whole-domain scaled
                    // sums: player 1 sends, player 0 reconstructs
                    RegXS peer_scaled_xor;
                    RegAS peer_scaled_sum;
                    if (mpcio.player == 1) {
                        tio.iostream_peer() << dpf.scaled_xor << dpf.scaled_sum;
                    } else {
                        tio.iostream_peer() >> peer_scaled_xor >> peer_scaled_sum;
                        peer_scaled_sum += dpf.scaled_sum;
                        peer_scaled_xor ^= dpf.scaled_xor;
                    }
                    for (address_t x=0;x<(address_t(1)<<depth);++x) {
                        DPFnode leaf = dpf.leaf(x, aes_ops);
                        RegBS ub = dpf.unit_bs(leaf);
                        RegAS ua = dpf.unit_as(leaf);
                        RegXS sx = dpf.scaled_xs(leaf);
                        RegAS sa = dpf.scaled_as(leaf);
                        printf("%04x %x %016lx %016lx %016lx\n", x,
                            ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                        // Per-leaf exchange: player 1 ships its shares,
                        // player 0 folds them in and checks for nonzero
                        // reconstructions (expected only at the point)
                        if (mpcio.player == 1) {
                            tio.iostream_peer() << ub << ua << sx << sa;
                        } else {
                            RegBS peer_ub;
                            RegAS peer_ua;
                            RegXS peer_sx;
                            RegAS peer_sa;
                            tio.iostream_peer() >> peer_ub >> peer_ua >>
                                peer_sx >> peer_sa;
                            ub ^= peer_ub;
                            ua += peer_ua;
                            sx ^= peer_sx;
                            sa += peer_sa;
                            if (ub.bshare || ua.ashare || sx.xshare || sa.ashare) {
                                printf("**** %x %016lx %016lx %016lx\n",
                                    ub.bshare, ua.ashare, sx.xshare, sa.ashare);
                                printf("SCALE %016lx %016lx\n",
                                    peer_scaled_xor.xshare, peer_scaled_sum.ashare);
                            }
                        }
                    }
                    printf("\n");
                }
            }
            tio.send();
        });
    }
    pool.join();
}
  223. static void rdpf_timing(MPCIO &mpcio, yield_t &yield,
  224. const PRACOptions &opts, char **args)
  225. {
  226. nbits_t depth=6;
  227. if (*args) {
  228. depth = atoi(*args);
  229. ++args;
  230. }
  231. int num_threads = opts.num_threads;
  232. boost::asio::thread_pool pool(num_threads);
  233. for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
  234. boost::asio::post(pool, [&mpcio, thread_num, depth] {
  235. MPCTIO tio(mpcio, thread_num);
  236. size_t &aes_ops = tio.aes_ops();
  237. if (mpcio.player == 2) {
  238. RDPFPair dp = tio.rdpfpair(depth);
  239. for (int i=0;i<2;++i) {
  240. RDPF &dpf = dp.dpf[i];
  241. dpf.expand(aes_ops);
  242. RegXS scaled_xor;
  243. for (address_t x=0;x<(address_t(1)<<depth);++x) {
  244. DPFnode leaf = dpf.leaf(x, aes_ops);
  245. RegXS sx = dpf.scaled_xs(leaf);
  246. scaled_xor ^= sx;
  247. }
  248. printf("%016lx\n%016lx\n", scaled_xor.xshare,
  249. dpf.scaled_xor.xshare);
  250. printf("\n");
  251. }
  252. } else {
  253. RDPFTriple dt = tio.rdpftriple(depth);
  254. for (int i=0;i<3;++i) {
  255. RDPF &dpf = dt.dpf[i];
  256. dpf.expand(aes_ops);
  257. RegXS scaled_xor;
  258. for (address_t x=0;x<(address_t(1)<<depth);++x) {
  259. DPFnode leaf = dpf.leaf(x, aes_ops);
  260. RegXS sx = dpf.scaled_xs(leaf);
  261. scaled_xor ^= sx;
  262. }
  263. printf("%016lx\n%016lx\n", scaled_xor.xshare,
  264. dpf.scaled_xor.xshare);
  265. printf("\n");
  266. }
  267. }
  268. tio.send();
  269. });
  270. }
  271. pool.join();
  272. }
  273. static void rdpfeval_timing(MPCIO &mpcio, yield_t &yield,
  274. const PRACOptions &opts, char **args)
  275. {
  276. nbits_t depth=6;
  277. address_t start=0;
  278. if (*args) {
  279. depth = atoi(*args);
  280. ++args;
  281. }
  282. if (*args) {
  283. start = atoi(*args);
  284. ++args;
  285. }
  286. int num_threads = opts.num_threads;
  287. boost::asio::thread_pool pool(num_threads);
  288. for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
  289. boost::asio::post(pool, [&mpcio, thread_num, depth, start] {
  290. MPCTIO tio(mpcio, thread_num);
  291. size_t &aes_ops = tio.aes_ops();
  292. if (mpcio.player == 2) {
  293. RDPFPair dp = tio.rdpfpair(depth);
  294. for (int i=0;i<2;++i) {
  295. RDPF &dpf = dp.dpf[i];
  296. RegXS scaled_xor;
  297. auto ev = StreamEval(dpf, start, 0, aes_ops, false);
  298. for (address_t x=0;x<(address_t(1)<<depth);++x) {
  299. DPFnode leaf = ev.next();
  300. RegXS sx = dpf.scaled_xs(leaf);
  301. scaled_xor ^= sx;
  302. }
  303. printf("%016lx\n%016lx\n", scaled_xor.xshare,
  304. dpf.scaled_xor.xshare);
  305. printf("\n");
  306. }
  307. } else {
  308. RDPFTriple dt = tio.rdpftriple(depth);
  309. for (int i=0;i<3;++i) {
  310. RDPF &dpf = dt.dpf[i];
  311. RegXS scaled_xor;
  312. auto ev = StreamEval(dpf, start, 0, aes_ops, false);
  313. for (address_t x=0;x<(address_t(1)<<depth);++x) {
  314. DPFnode leaf = ev.next();
  315. RegXS sx = dpf.scaled_xs(leaf);
  316. scaled_xor ^= sx;
  317. }
  318. printf("%016lx\n%016lx\n", scaled_xor.xshare,
  319. dpf.scaled_xor.xshare);
  320. printf("\n");
  321. }
  322. }
  323. tio.send();
  324. });
  325. }
  326. pool.join();
  327. }
  328. static void tupleeval_timing(MPCIO &mpcio, yield_t &yield,
  329. const PRACOptions &opts, char **args)
  330. {
  331. nbits_t depth=6;
  332. address_t start=0;
  333. if (*args) {
  334. depth = atoi(*args);
  335. ++args;
  336. }
  337. if (*args) {
  338. start = atoi(*args);
  339. ++args;
  340. }
  341. int num_threads = opts.num_threads;
  342. boost::asio::thread_pool pool(num_threads);
  343. for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
  344. boost::asio::post(pool, [&mpcio, thread_num, depth, start] {
  345. MPCTIO tio(mpcio, thread_num);
  346. size_t &aes_ops = tio.aes_ops();
  347. if (mpcio.player == 2) {
  348. RDPFPair dp = tio.rdpfpair(depth);
  349. RegXS scaled_xor0, scaled_xor1;
  350. auto ev = StreamEval(dp, start, 0, aes_ops, false);
  351. for (address_t x=0;x<(address_t(1)<<depth);++x) {
  352. auto [L0, L1] = ev.next();
  353. RegXS sx0 = dp.dpf[0].scaled_xs(L0);
  354. RegXS sx1 = dp.dpf[1].scaled_xs(L1);
  355. scaled_xor0 ^= sx0;
  356. scaled_xor1 ^= sx1;
  357. }
  358. printf("%016lx\n%016lx\n", scaled_xor0.xshare,
  359. dp.dpf[0].scaled_xor.xshare);
  360. printf("\n");
  361. printf("%016lx\n%016lx\n", scaled_xor1.xshare,
  362. dp.dpf[1].scaled_xor.xshare);
  363. printf("\n");
  364. } else {
  365. RDPFTriple dt = tio.rdpftriple(depth);
  366. RegXS scaled_xor0, scaled_xor1, scaled_xor2;
  367. auto ev = StreamEval(dt, start, 0, aes_ops, false);
  368. for (address_t x=0;x<(address_t(1)<<depth);++x) {
  369. auto [L0, L1, L2] = ev.next();
  370. RegXS sx0 = dt.dpf[0].scaled_xs(L0);
  371. RegXS sx1 = dt.dpf[1].scaled_xs(L1);
  372. RegXS sx2 = dt.dpf[2].scaled_xs(L2);
  373. scaled_xor0 ^= sx0;
  374. scaled_xor1 ^= sx1;
  375. scaled_xor2 ^= sx2;
  376. }
  377. printf("%016lx\n%016lx\n", scaled_xor0.xshare,
  378. dt.dpf[0].scaled_xor.xshare);
  379. printf("\n");
  380. printf("%016lx\n%016lx\n", scaled_xor1.xshare,
  381. dt.dpf[1].scaled_xor.xshare);
  382. printf("\n");
  383. printf("%016lx\n%016lx\n", scaled_xor2.xshare,
  384. dt.dpf[2].scaled_xor.xshare);
  385. printf("\n");
  386. }
  387. tio.send();
  388. });
  389. }
  390. pool.join();
  391. }
  392. // T is RegAS or RegXS for additive or XOR shared database respectively
  393. template <typename T>
  394. static void duoram_test(MPCIO &mpcio, yield_t &yield,
  395. const PRACOptions &opts, char **args)
  396. {
  397. nbits_t depth=6;
  398. address_t share=arc4random();
  399. if (*args) {
  400. depth = atoi(*args);
  401. ++args;
  402. }
  403. if (*args) {
  404. share = atoi(*args);
  405. ++args;
  406. }
  407. share &= ((address_t(1)<<depth)-1);
  408. int num_threads = opts.num_threads;
  409. boost::asio::thread_pool pool(num_threads);
  410. for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
  411. boost::asio::post(pool, [&mpcio, &yield, thread_num, depth, share] {
  412. size_t size = size_t(1)<<depth;
  413. MPCTIO tio(mpcio, thread_num);
  414. // size_t &aes_ops = tio.aes_ops();
  415. Duoram<T> oram(mpcio.player, size);
  416. auto A = oram.flat(tio, yield);
  417. RegAS aidx;
  418. aidx.ashare = share;
  419. T M;
  420. if (tio.player() == 0) {
  421. M.set(0xbabb0000);
  422. } else {
  423. M.set(0x0000a66e);
  424. }
  425. RegXS xidx;
  426. xidx.xshare = share;
  427. T N;
  428. if (tio.player() == 0) {
  429. N.set(0xdead0000);
  430. } else {
  431. N.set(0x0000beef);
  432. }
  433. // Writing and reading with additively shared indices
  434. printf("Updating\n");
  435. A[aidx] += M;
  436. printf("Reading\n");
  437. T Aa = A[aidx];
  438. // Writing and reading with XOR shared indices
  439. printf("Updating\n");
  440. A[xidx] += N;
  441. printf("Reading\n");
  442. T Ax = A[xidx];
  443. T Ae;
  444. // Writing and reading with explicit indices
  445. if (depth > 2) {
  446. A[5] += Aa;
  447. Ae = A[6];
  448. }
  449. if (depth <= 10) {
  450. oram.dump();
  451. auto check = A.reconstruct();
  452. if (tio.player() == 0) {
  453. for (address_t i=0;i<size;++i) {
  454. printf("%04x %016lx\n", i, check[i].share());
  455. }
  456. }
  457. }
  458. auto checkread = A.reconstruct(Aa);
  459. auto checkreade = A.reconstruct(Ae);
  460. auto checkreadx = A.reconstruct(Ax);
  461. if (tio.player() == 0) {
  462. printf("Read AS value = %016lx\n", checkread.share());
  463. printf("Read AX value = %016lx\n", checkreadx.share());
  464. printf("Read Ex value = %016lx\n", checkreade.share());
  465. }
  466. tio.send();
  467. });
  468. }
  469. pool.join();
  470. }
  471. void online_main(MPCIO &mpcio, const PRACOptions &opts, char **args)
  472. {
  473. // Run everything inside a coroutine so that simple tests don't have
  474. // to start one themselves
  475. MPCTIO tio(mpcio, 0);
  476. std::vector<coro_t> coroutines;
  477. coroutines.emplace_back(
  478. [&](yield_t &yield) {
  479. if (!*args) {
  480. std::cerr << "Mode is required as the first argument when not preprocessing.\n";
  481. return;
  482. } else if (!strcmp(*args, "test")) {
  483. ++args;
  484. online_test(mpcio, yield, opts, args);
  485. } else if (!strcmp(*args, "lamporttest")) {
  486. ++args;
  487. lamport_test(mpcio, yield, opts, args);
  488. } else if (!strcmp(*args, "rdpftest")) {
  489. ++args;
  490. rdpf_test(mpcio, yield, opts, args);
  491. } else if (!strcmp(*args, "rdpftime")) {
  492. ++args;
  493. rdpf_timing(mpcio, yield, opts, args);
  494. } else if (!strcmp(*args, "evaltime")) {
  495. ++args;
  496. rdpfeval_timing(mpcio, yield, opts, args);
  497. } else if (!strcmp(*args, "tupletime")) {
  498. ++args;
  499. tupleeval_timing(mpcio, yield, opts, args);
  500. } else if (!strcmp(*args, "duotest")) {
  501. ++args;
  502. if (opts.use_xor_db) {
  503. duoram_test<RegXS>(mpcio, yield, opts, args);
  504. } else {
  505. duoram_test<RegAS>(mpcio, yield, opts, args);
  506. }
  507. } else {
  508. std::cerr << "Unknown mode " << *args << "\n";
  509. }
  510. });
  511. run_coroutines(tio, coroutines);
  512. }