// online.cpp

#include <bsd/stdlib.h> // arc4random_buf

#include "online.hpp"
#include "mpcops.hpp"
#include "rdpf.hpp"
#include "duoram.hpp"
#include "cdpf.hpp"
#include "cell.hpp"
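
// Test the basic MPC operations (multiplication, value multiplication,
// flag multiplication, oblivious swap, XOR-to-additive conversion,
// select, AND, OR) on random inputs.  Player 1 sends its output shares
// to player 0, who reconstructs the results and prints difference
// values that should all be zero if the operations are correct.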
static void online_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t nbits = VALUE_BITS;
    if (*args) {
        nbits = atoi(*args);
    }
    size_t as_memsize = 9;
    size_t xs_memsize = 3;
    MPCTIO tio(mpcio, 0);
    bool is_server = (mpcio.player == 2);
    RegAS *A = new RegAS[as_memsize];
    RegXS *AX = new RegXS[xs_memsize];
    value_t V;
    RegBS F0, F1, F2;
    RegBS FA, FO, FS;
    RegXS X;
    if (!is_server) {
        A[0].randomize();
        A[1].randomize();
        F0.randomize();
        A[4].randomize();
        F1.randomize();
        F2.randomize();
        A[6].randomize();
        A[7].randomize();
        X.randomize();
        AX[0].randomize();
        AX[1].randomize();
        arc4random_buf(&V, sizeof(V));
        printf("A:\n"); for (size_t i=0; i<as_memsize; ++i) printf("%3lu: %016lX\n", i, A[i].ashare);
        printf("AX:\n"); for (size_t i=0; i<xs_memsize; ++i) printf("%3lu: %016lX\n", i, AX[i].xshare);
        printf("V : %016lX\n", V);
        printf("F0 : %01X\n", F0.bshare);
        printf("F1 : %01X\n", F1.bshare);
        printf("F2 : %01X\n", F2.bshare);
        printf("X : %016lX\n", X.xshare);
    }
    std::vector<coro_t> coroutines;
    coroutines.emplace_back(
        [&tio, &A, nbits](yield_t &yield) {
            mpc_mul(tio, yield, A[2], A[0], A[1], nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, V, nbits](yield_t &yield) {
            mpc_valuemul(tio, yield, A[3], V, nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &F0, nbits](yield_t &yield) {
            mpc_flagmult(tio, yield, A[5], F0, A[4], nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &F1, nbits](yield_t &yield) {
            mpc_oswap(tio, yield, A[6], A[7], F1, nbits);
        });
    coroutines.emplace_back(
        [&tio, &A, &X, nbits](yield_t &yield) {
            mpc_xs_to_as(tio, yield, A[8], X, nbits);
        });
    coroutines.emplace_back(
        [&tio, &AX, &F0, nbits](yield_t &yield) {
            mpc_select(tio, yield, AX[2], F0, AX[0], AX[1], nbits);
        });
    coroutines.emplace_back(
        [&tio, &FA, &F0, &F1](yield_t &yield) {
            mpc_and(tio, yield, FA, F0, F1);
        });
    coroutines.emplace_back(
        [&tio, &FO, &F0, &F1](yield_t &yield) {
            mpc_or(tio, yield, FO, F0, F1);
        });
    coroutines.emplace_back(
        [&tio, &FS, &F0, &F1, &F2](yield_t &yield) {
            mpc_select(tio, yield, FS, F2, F0, F1);
        });
    run_coroutines(tio, coroutines);
    if (!is_server) {
        printf("\n");
        printf("A:\n"); for (size_t i=0; i<as_memsize; ++i) printf("%3lu: %016lX\n", i, A[i].ashare);
        printf("AX:\n"); for (size_t i=0; i<xs_memsize; ++i) printf("%3lu: %016lX\n", i, AX[i].xshare);
    }
    // Check the answers
    if (mpcio.player == 1) {
        tio.queue_peer(A, as_memsize*sizeof(RegAS));
        tio.queue_peer(AX, xs_memsize*sizeof(RegXS));
        tio.queue_peer(&V, sizeof(V));
        tio.queue_peer(&F0, sizeof(RegBS));
        tio.queue_peer(&F1, sizeof(RegBS));
        tio.queue_peer(&F2, sizeof(RegBS));
        tio.queue_peer(&FA, sizeof(RegBS));
        tio.queue_peer(&FO, sizeof(RegBS));
        tio.queue_peer(&FS, sizeof(RegBS));
        tio.queue_peer(&X, sizeof(RegXS));
        tio.send();
    } else if (mpcio.player == 0) {
        RegAS *B = new RegAS[as_memsize];
        RegXS *BAX = new RegXS[xs_memsize];
        RegBS BF0, BF1, BF2;
        RegBS BFA, BFO, BFS;
        RegXS BX;
        value_t BV;
        value_t *S = new value_t[as_memsize];
        value_t *Y = new value_t[xs_memsize];
        bit_t SF0, SF1, SF2;
        bit_t SFA, SFO, SFS;
        value_t SX;
        tio.recv_peer(B, as_memsize*sizeof(RegAS));
        tio.recv_peer(BAX, xs_memsize*sizeof(RegXS));
        tio.recv_peer(&BV, sizeof(BV));
        tio.recv_peer(&BF0, sizeof(RegBS));
        tio.recv_peer(&BF1, sizeof(RegBS));
        tio.recv_peer(&BF2, sizeof(RegBS));
        tio.recv_peer(&BFA, sizeof(RegBS));
        tio.recv_peer(&BFO, sizeof(RegBS));
        tio.recv_peer(&BFS, sizeof(RegBS));
        tio.recv_peer(&BX, sizeof(RegXS));
        for(size_t i=0; i<as_memsize; ++i) S[i] = A[i].ashare+B[i].ashare;
        for(size_t i=0; i<xs_memsize; ++i) Y[i] = AX[i].xshare^BAX[i].xshare;
        SF0 = F0.bshare ^ BF0.bshare;
        SF1 = F1.bshare ^ BF1.bshare;
        SF2 = F2.bshare ^ BF2.bshare;
        SFA = FA.bshare ^ BFA.bshare;
        SFO = FO.bshare ^ BFO.bshare;
        SFS = FS.bshare ^ BFS.bshare;
        SX = X.xshare ^ BX.xshare;
        printf("S:\n"); for (size_t i=0; i<as_memsize; ++i) printf("%3lu: %016lX\n", i, S[i]);
        printf("Y:\n"); for (size_t i=0; i<xs_memsize; ++i) printf("%3lu: %016lX\n", i, Y[i]);
        printf("SF0: %01X\n", SF0);
        printf("SF1: %01X\n", SF1);
        printf("SF2: %01X\n", SF2);
        printf("SFA: %01X\n", SFA);
        printf("SFO: %01X\n", SFO);
        printf("SFS: %01X\n", SFS);
        printf("SX : %016lX\n", SX);
        printf("\n%016lx\n", S[0]*S[1]-S[2]);
        printf("%016lx\n", (V*BV)-S[3]);
        printf("%016lx\n", (SF0*S[4])-S[5]);
        printf("%016lx\n", S[8]-SX);
        delete[] B;
        delete[] BAX;
        delete[] S;
        delete[] Y;
    }
    delete[] A;
    delete[] AX;
}

static void lamport_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    // Create a bunch of threads and send a bunch of data to the other
    // peer, and receive their data. If an arg is specified, repeat
    // that many times. The Lamport clock at the end should be just the
    // number of repetitions. Subsequent args are the chunk size and
    // the number of chunks per message
    size_t niters = 1;
    size_t chunksize = 1<<20;
    size_t numchunks = 1;
    if (*args) {
        niters = atoi(*args);
        ++args;
    }
    if (*args) {
        chunksize = atoi(*args);
        ++args;
    }
    if (*args) {
        numchunks = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, niters, chunksize, numchunks] {
            MPCTIO tio(mpcio, thread_num);
            char *sendbuf = new char[chunksize];
            char *recvbuf = new char[chunksize*numchunks];
            for (size_t i=0; i<niters; ++i) {
                for (size_t chunk=0; chunk<numchunks; ++chunk) {
                    arc4random_buf(sendbuf, chunksize);
                    tio.queue_peer(sendbuf, chunksize);
                }
                tio.send();
                tio.recv_peer(recvbuf, chunksize*numchunks);
            }
            delete[] recvbuf;
            delete[] sendbuf;
        });
    }
    pool.join();
}
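
// Evaluate RDPFs (with WIDTH-wide leaves) at every point of the domain
// and print the unit and scaled shares.  The server (player 2) expands
// its RDPFPair locally; the computational players expand their
// RDPFTriple, exchange shares, and mark with "****" the positions where
// the reconstructed values are nonzero, which should happen only at the
// DPF's hidden target point.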
template <nbits_t WIDTH>
static void rdpf_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    size_t num_iters = 1;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        num_iters = atoi(*args);
        ++args;
    }
    MPCTIO tio(mpcio, 0, opts.num_threads);
    run_coroutines(tio, [&tio, depth, num_iters] (yield_t &yield) {
        size_t &aes_ops = tio.aes_ops();
        for (size_t iter=0; iter < num_iters; ++iter) {
            if (tio.player() == 2) {
                RDPFPair<WIDTH> dp = tio.rdpfpair<WIDTH>(yield, depth);
                for (int i=0;i<2;++i) {
                    const RDPF<WIDTH> &dpf = dp.dpf[i];
                    for (address_t x=0;x<(address_t(1)<<depth);++x) {
                        typename RDPF<WIDTH>::LeafNode leaf = dpf.leaf(x, aes_ops);
                        RegBS ub = dpf.unit_bs(leaf);
                        RegAS ua = dpf.unit_as(leaf);
                        typename RDPF<WIDTH>::RegXSW sx = dpf.scaled_xs(leaf);
                        typename RDPF<WIDTH>::RegASW sa = dpf.scaled_as(leaf);
                        printf("%04x %x %016lx", x, ub.bshare, ua.ashare);
                        for (nbits_t j=0;j<WIDTH;++j) {
                            printf(" %016lx %016lx", sx[j].xshare, sa[j].ashare);
                        }
                        printf("\n");
                    }
                    printf("\n");
                }
            } else {
                RDPFTriple<WIDTH> dt = tio.rdpftriple<WIDTH>(yield, depth);
                for (int i=0;i<3;++i) {
                    const RDPF<WIDTH> &dpf = dt.dpf[i];
                    typename RDPF<WIDTH>::RegXSW peer_scaled_xor;
                    typename RDPF<WIDTH>::RegASW peer_scaled_sum;
                    if (tio.player() == 1) {
                        tio.iostream_peer() << dpf.li[0].scaled_xor << dpf.li[0].scaled_sum;
                    } else {
                        tio.iostream_peer() >> peer_scaled_xor >> peer_scaled_sum;
                        peer_scaled_sum += dpf.li[0].scaled_sum;
                        peer_scaled_xor ^= dpf.li[0].scaled_xor;
                    }
                    for (address_t x=0;x<(address_t(1)<<depth);++x) {
                        typename RDPF<WIDTH>::LeafNode leaf = dpf.leaf(x, aes_ops);
                        RegBS ub = dpf.unit_bs(leaf);
                        RegAS ua = dpf.unit_as(leaf);
                        typename RDPF<WIDTH>::RegXSW sx = dpf.scaled_xs(leaf);
                        typename RDPF<WIDTH>::RegASW sa = dpf.scaled_as(leaf);
                        printf("%04x %x %016lx", x, ub.bshare, ua.ashare);
                        for (nbits_t j=0;j<WIDTH;++j) {
                            printf(" %016lx %016lx", sx[j].xshare, sa[j].ashare);
                        }
                        printf("\n");
                        if (tio.player() == 1) {
                            tio.iostream_peer() << ub << ua << sx << sa;
                        } else {
                            RegBS peer_ub;
                            RegAS peer_ua;
                            typename RDPF<WIDTH>::RegXSW peer_sx;
                            typename RDPF<WIDTH>::RegASW peer_sa;
                            tio.iostream_peer() >> peer_ub >> peer_ua >>
                                peer_sx >> peer_sa;
                            ub ^= peer_ub;
                            ua += peer_ua;
                            sx ^= peer_sx;
                            sa += peer_sa;
                            bool is_nonzero = ub.bshare || ua.ashare;
                            for (nbits_t j=0;j<WIDTH;++j) {
                                is_nonzero |= (sx[j].xshare || sa[j].ashare);
                            }
                            if (is_nonzero) {
                                printf("**** %x %016lx", ub.bshare, ua.ashare);
                                for (nbits_t j=0;j<WIDTH;++j) {
                                    printf(" %016lx %016lx", sx[j].xshare, sa[j].ashare);
                                }
                                printf("\nSCALE ");
                                for (nbits_t j=0;j<WIDTH;++j) {
                                    printf(" %016lx %016lx",
                                        peer_scaled_xor[j].xshare,
                                        peer_scaled_sum[j].ashare);
                                }
                                printf("\n");
                            }
                        }
                    }
                    printf("\n");
                }
            }
        }
    });
}
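
// Time full-domain expansion of RDPFs.  Each thread gets its own MPCTIO,
// expands its RDPFs (a pair for the server, a triple for the
// computational players), XORs together the scaled leaf shares, and
// prints that value next to the DPF's precomputed scaled_xor, which
// should agree.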
static void rdpf_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, depth] (yield_t &yield) {
                size_t &aes_ops = tio.aes_ops();
                if (tio.player() == 2) {
                    RDPFPair<1> dp = tio.rdpfpair(yield, depth);
                    for (int i=0;i<2;++i) {
                        RDPF<1> &dpf = dp.dpf[i];
                        dpf.expand(aes_ops);
                        RDPF<1>::RegXSW scaled_xor;
                        for (address_t x=0;x<(address_t(1)<<depth);++x) {
                            RDPF<1>::LeafNode leaf = dpf.leaf(x, aes_ops);
                            RDPF<1>::RegXSW sx = dpf.scaled_xs(leaf);
                            scaled_xor ^= sx;
                        }
                        printf("%016lx\n%016lx\n", scaled_xor[0].xshare,
                            dpf.li[0].scaled_xor[0].xshare);
                        printf("\n");
                    }
                } else {
                    RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
                    for (int i=0;i<3;++i) {
                        RDPF<1> &dpf = dt.dpf[i];
                        dpf.expand(aes_ops);
                        RDPF<1>::RegXSW scaled_xor;
                        for (address_t x=0;x<(address_t(1)<<depth);++x) {
                            RDPF<1>::LeafNode leaf = dpf.leaf(x, aes_ops);
                            RDPF<1>::RegXSW sx = dpf.scaled_xs(leaf);
                            scaled_xor ^= sx;
                        }
                        printf("%016lx\n%016lx\n", scaled_xor[0].xshare,
                            dpf.li[0].scaled_xor[0].xshare);
                        printf("\n");
                    }
                }
            });
        });
    }
    pool.join();
}
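
// Stream-evaluate a single RDPF over its whole domain using num_threads
// threads, each evaluating a contiguous chunk of the (cyclically
// rotated) domain starting at start, and return the first word of the
// XOR of all the scaled leaf shares.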
static value_t parallel_streameval_rdpf(MPCIO &mpcio, const RDPF<1> &dpf,
    address_t start, int num_threads)
{
    RDPF<1>::RegXSW scaled_xor[num_threads];
    boost::asio::thread_pool pool(num_threads);
    address_t totsize = (address_t(1)<<dpf.depth());
    address_t threadstart = start;
    address_t threadchunk = totsize / num_threads;
    address_t threadextra = totsize % num_threads;
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        address_t threadsize = threadchunk + (address_t(thread_num) < threadextra);
        boost::asio::post(pool,
            [&mpcio, &dpf, &scaled_xor, thread_num, threadstart, threadsize] {
                MPCTIO tio(mpcio, thread_num);
                //printf("Thread %d from %X for %X\n", thread_num, threadstart, threadsize);
                RDPF<1>::RegXSW local_xor;
                size_t local_aes_ops = 0;
                auto ev = StreamEval(dpf, threadstart, 0, local_aes_ops);
                for (address_t x=0;x<threadsize;++x) {
                    //if (x%0x10000 == 0) printf("%d", thread_num);
                    RDPF<1>::LeafNode leaf = ev.next();
                    local_xor ^= dpf.scaled_xs(leaf);
                }
                scaled_xor[thread_num] = local_xor;
                tio.aes_ops() += local_aes_ops;
                //printf("Thread %d complete\n", thread_num);
            });
        threadstart = (threadstart + threadsize) % totsize;
    }
    pool.join();
    RDPF<1>::RegXSW res;
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        res ^= scaled_xor[thread_num];
    }
    return res[0].xshare;
}
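
// Time parallel_streameval_rdpf: evaluate each received RDPF over the
// full domain, starting at an optional offset given in hex, and print
// the computed scaled XOR next to the DPF's stored scaled_xor value.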
static void rdpfeval_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, start, num_threads] (yield_t &yield) {
        if (tio.player() == 2) {
            RDPFPair<1> dp = tio.rdpfpair(yield, depth);
            for (int i=0;i<2;++i) {
                RDPF<1> &dpf = dp.dpf[i];
                value_t scaled_xor =
                    parallel_streameval_rdpf(mpcio, dpf, start, num_threads);
                printf("%016lx\n%016lx\n", scaled_xor,
                    dpf.li[0].scaled_xor[0].xshare);
                printf("\n");
            }
        } else {
            RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
            for (int i=0;i<3;++i) {
                RDPF<1> &dpf = dt.dpf[i];
                value_t scaled_xor =
                    parallel_streameval_rdpf(mpcio, dpf, start, num_threads);
                printf("%016lx\n%016lx\n", scaled_xor,
                    dpf.li[0].scaled_xor[0].xshare);
                printf("\n");
            }
        }
    });
}
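
// The same timing test as rdpfeval_timing, but using the ParallelEval
// wrapper and its reduce() interface instead of partitioning the domain
// across threads by hand.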
static void par_rdpfeval_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start, num_threads] (yield_t &yield) {
        if (tio.player() == 2) {
            RDPFPair<1> dp = tio.rdpfpair(yield, depth);
            for (int i=0;i<2;++i) {
                RDPF<1> &dpf = dp.dpf[i];
                nbits_t depth = dpf.depth();
                auto pe = ParallelEval(dpf, start, 0,
                    address_t(1)<<depth, num_threads, tio.aes_ops());
                RDPF<1>::RegXSW result, init;
                result = pe.reduce(init, [&dpf] (int thread_num,
                        address_t i, const RDPF<1>::LeafNode &leaf) {
                    return dpf.scaled_xs(leaf);
                });
                printf("%016lx\n%016lx\n", result[0].xshare,
                    dpf.li[0].scaled_xor[0].xshare);
                printf("\n");
            }
        } else {
            RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
            for (int i=0;i<3;++i) {
                RDPF<1> &dpf = dt.dpf[i];
                nbits_t depth = dpf.depth();
                auto pe = ParallelEval(dpf, start, 0,
                    address_t(1)<<depth, num_threads, tio.aes_ops());
                RDPF<1>::RegXSW result, init;
                result = pe.reduce(init, [&dpf] (int thread_num,
                        address_t i, const RDPF<1>::LeafNode &leaf) {
                    return dpf.scaled_xs(leaf);
                });
                printf("%016lx\n%016lx\n", result[0].xshare,
                    dpf.li[0].scaled_xor[0].xshare);
                printf("\n");
            }
        }
    });
}
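
// Time StreamEval over a whole RDPF pair or triple at once (evaluating
// all member DPFs at each point of the domain in one pass), and print
// each computed scaled XOR next to the corresponding stored value.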
static void tupleeval_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start] (yield_t &yield) {
        size_t &aes_ops = tio.aes_ops();
        if (tio.player() == 2) {
            RDPFPair<1> dp = tio.rdpfpair(yield, depth);
            RDPF<1>::RegXSW scaled_xor0, scaled_xor1;
            auto ev = StreamEval(dp, start, 0, aes_ops, false);
            for (address_t x=0;x<(address_t(1)<<depth);++x) {
                auto [L0, L1] = ev.next();
                RDPF<1>::RegXSW sx0 = dp.dpf[0].scaled_xs(L0);
                RDPF<1>::RegXSW sx1 = dp.dpf[1].scaled_xs(L1);
                scaled_xor0 ^= sx0;
                scaled_xor1 ^= sx1;
            }
            printf("%016lx\n%016lx\n", scaled_xor0[0].xshare,
                dp.dpf[0].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor1[0].xshare,
                dp.dpf[1].li[0].scaled_xor[0].xshare);
            printf("\n");
        } else {
            RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
            RDPF<1>::RegXSW scaled_xor0, scaled_xor1, scaled_xor2;
            auto ev = StreamEval(dt, start, 0, aes_ops, false);
            for (address_t x=0;x<(address_t(1)<<depth);++x) {
                auto [L0, L1, L2] = ev.next();
                RDPF<1>::RegXSW sx0 = dt.dpf[0].scaled_xs(L0);
                RDPF<1>::RegXSW sx1 = dt.dpf[1].scaled_xs(L1);
                RDPF<1>::RegXSW sx2 = dt.dpf[2].scaled_xs(L2);
                scaled_xor0 ^= sx0;
                scaled_xor1 ^= sx1;
                scaled_xor2 ^= sx2;
            }
            printf("%016lx\n%016lx\n", scaled_xor0[0].xshare,
                dt.dpf[0].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor1[0].xshare,
                dt.dpf[1].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", scaled_xor2[0].xshare,
                dt.dpf[2].li[0].scaled_xor[0].xshare);
            printf("\n");
        }
    });
}
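
// The parallel version of tupleeval_timing: use ParallelEval over the
// pair or triple and reduce the per-leaf scaled values across threads.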
static void par_tupleeval_timing(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t start=0;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        start = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    MPCTIO tio(mpcio, 0, num_threads);
    run_coroutines(tio, [&tio, depth, start, num_threads] (yield_t &yield) {
        size_t &aes_ops = tio.aes_ops();
        if (tio.player() == 2) {
            RDPFPair<1> dp = tio.rdpfpair(yield, depth);
            auto pe = ParallelEval(dp, start, 0, address_t(1)<<depth,
                num_threads, aes_ops);
            RDPFPair<1>::RegXSWP result, init;
            result = pe.reduce(init, [&dp] (int thread_num, address_t i,
                    const RDPFPair<1>::LeafNode &leaf) {
                RDPFPair<1>::RegXSWP scaled;
                dp.scaled(scaled, leaf);
                return scaled;
            });
            printf("%016lx\n%016lx\n", std::get<0>(result)[0].xshare,
                dp.dpf[0].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<1>(result)[0].xshare,
                dp.dpf[1].li[0].scaled_xor[0].xshare);
            printf("\n");
        } else {
            RDPFTriple<1> dt = tio.rdpftriple(yield, depth);
            auto pe = ParallelEval(dt, start, 0, address_t(1)<<depth,
                num_threads, aes_ops);
            RDPFTriple<1>::RegXSWT result, init;
            result = pe.reduce(init, [&dt] (int thread_num, address_t i,
                    const RDPFTriple<1>::LeafNode &leaf) {
                RDPFTriple<1>::RegXSWT scaled;
                dt.scaled(scaled, leaf);
                return scaled;
            });
            printf("%016lx\n%016lx\n", std::get<0>(result)[0].xshare,
                dt.dpf[0].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<1>(result)[0].xshare,
                dt.dpf[1].li[0].scaled_xor[0].xshare);
            printf("\n");
            printf("%016lx\n%016lx\n", std::get<2>(result)[0].xshare,
                dt.dpf[2].li[0].scaled_xor[0].xshare);
            printf("\n");
        }
    });
}
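
// Exercise the Duoram oblivious RAM: reads and updates with additively
// shared, XOR-shared, and explicit indices, plus simultaneous
// independent reads and updates, then reconstruct the database and the
// individual read values to check the results.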
// T is RegAS or RegXS for additive or XOR shared database respectively
template <typename T>
static void duoram_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    address_t share=arc4random();
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        share = atoi(*args);
        ++args;
    }
    share &= ((address_t(1)<<depth)-1);
    MPCTIO tio(mpcio, 0, opts.num_threads);
    run_coroutines(tio, [&tio, depth, share] (yield_t &yield) {
        size_t size = size_t(1)<<depth;
        // size_t &aes_ops = tio.aes_ops();
        Duoram<T> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);
        RegAS aidx, aidx2, aidx3;
        aidx.ashare = share;
        aidx2.ashare = share + tio.player();
        aidx3.ashare = share + 1;
        T M;
        if (tio.player() == 0) {
            M.set(0xbabb0000);
        } else {
            M.set(0x0000a66e);
        }
        RegXS xidx;
        xidx.xshare = share;
        T N;
        if (tio.player() == 0) {
            N.set(0xdead0000);
        } else {
            N.set(0x0000beef);
        }
        // Writing and reading with additively shared indices
        printf("Additive Updating\n");
        A[aidx] += M;
        printf("Additive Reading\n");
        T Aa = A[aidx];
        // Writing and reading with XOR shared indices
        printf("XOR Updating\n");
        A[xidx] += N;
        printf("XOR Reading\n");
        T Ax = A[xidx];
        T Ae;
        // Writing and reading with explicit indices
        if (depth > 2) {
            printf("Explicit Updating\n");
            A[5] += Aa;
            printf("Explicit Reading\n");
            Ae = A[6];
        }
        // Simultaneous independent reads
        printf("3 independent reading\n");
        std::vector<T> Av = A[std::array {
            aidx, aidx2, aidx3
        }];
        // Simultaneous independent updates
        T Aw1, Aw2, Aw3;
        Aw1.set(0x101010101010101 * tio.player());
        Aw2.set(0x202020202020202 * tio.player());
        Aw3.set(0x303030303030303 * tio.player());
        printf("3 independent updating\n");
        A[std::array { aidx, aidx2, aidx3 }] -=
            std::array { Aw1, Aw2, Aw3 };
        if (depth <= 10) {
            oram.dump();
            auto check = A.reconstruct();
            if (tio.player() == 0) {
                for (address_t i=0;i<size;++i) {
                    printf("%04x %016lx\n", i, check[i].share());
                }
            }
        }
        auto checkread = A.reconstruct(Aa);
        auto checkreade = A.reconstruct(Ae);
        auto checkreadx = A.reconstruct(Ax);
        if (tio.player() == 0) {
            printf("Read AS value = %016lx\n", checkread.share());
            printf("Read AX value = %016lx\n", checkreadx.share());
            printf("Read Ex value = %016lx\n", checkreade.share());
        }
        for (auto &v : Av) {
            auto checkv = A.reconstruct(v);
            if (tio.player() == 0) {
                printf("Read Av value = %016lx\n", checkv.share());
            }
        }
    });
}

// This measures the same things as the Duoram paper: dependent and
// independent reads, updates, writes, and interleaves
// T is RegAS or RegXS for additive or XOR shared database respectively
template <typename T>
static void duoram(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth = 6;
    int items = 4;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        items = atoi(*args);
        ++args;
    }
    MPCTIO tio(mpcio, 0, opts.num_threads);
    run_coroutines(tio, [&mpcio, &tio, depth, items] (yield_t &yield) {
        size_t size = size_t(1)<<depth;
        address_t mask = (depth < ADDRESS_MAX_BITS ?
            ((address_t(1)<<depth) - 1) : ~0);
        Duoram<T> oram(tio.player(), size);
        auto A = oram.flat(tio, yield);
        std::cout << "===== DEPENDENT UPDATES =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Make a linked list of length items
        std::vector<T> list_indices;
        T prev_index, next_index;
        prev_index.randomize(depth);
        for (int i=0;i<items;++i) {
            next_index.randomize(depth);
            A[next_index] += prev_index;
            list_indices.push_back(next_index);
            prev_index = next_index;
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);
        std::cout << "\n===== DEPENDENT READS =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Read the linked list starting with prev_index
        T cur_index = prev_index;
        for (int i=0;i<items;++i) {
            cur_index = A[cur_index];
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);
        std::cout << "\n===== INDEPENDENT READS =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Read all the entries in the list at once
        std::vector<T> read_outputs = A[list_indices];
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);
        std::cout << "\n===== INDEPENDENT UPDATES =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        // Make a vector of indices 1 larger than those in list_indices,
        // and a vector of values 1 larger than those in outputs
        std::vector<T> indep_indices, indep_values;
        T one;
        one.set(tio.player()); // Sets the shared value to 1
        for (int i=0;i<items;++i) {
            indep_indices.push_back(list_indices[i]+one);
            indep_values.push_back(read_outputs[i]+one);
        }
        // Update all the indices at once
        A[indep_indices] += indep_values;
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);
        std::cout << "\n===== DEPENDENT WRITES =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        T two;
        two.set(2*tio.player()); // Sets the shared value to 2
        // For each address addr that's number i from the end of the
        // linked list, write i+1 into location addr+2
        for (int i=0;i<items;++i) {
            T val;
            val.set((i+1)*tio.player());
            A[list_indices[i]+two] = val;
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);
        std::cout << "\n===== DEPENDENT INTERLEAVED =====\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        T three;
        three.set(3*tio.player()); // Sets the shared value to 3
        // Follow the linked list and whenever A[addr]=val, set
        // A[addr+3]=val+3
        cur_index = prev_index;
        for (int i=0;i<items;++i) {
            T next_index = A[cur_index];
            A[cur_index+three] = next_index+three;
            cur_index = next_index;
        }
        tio.sync_lamport();
        mpcio.dump_stats(std::cout);
        std::cout << "\n";
        mpcio.reset_stats();
        tio.reset_lamport();
        if (depth <= 30) {
            auto check = A.reconstruct();
            auto head = A.reconstruct(prev_index);
            if (tio.player() == 0) {
                int width = (depth+3)/4;
                printf("Head of linked list: %0*lx\n\n", width,
                    head.share() & mask);
                std::cout << "Non-zero reconstructed database entries:\n";
                for (address_t i=0;i<size;++i) {
                    value_t share = check[i].share() & mask;
                    if (share) printf("%0*x: %0*lx\n", width, i, width, share);
                }
            }
        }
    });
}
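
// Test CDPF evaluation.  The server locally generates a CDPF pair for
// the given target and prints the XOR of its two leaves at the query
// point, while the computational players evaluate the precomputed CDPF
// they obtain from tio.cdpf() at the query and reconstruct their leaf
// XOR by exchanging shares.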
static void cdpf_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    value_t query, target;
    int iters = 1;
    arc4random_buf(&query, sizeof(query));
    arc4random_buf(&target, sizeof(target));
    if (*args) {
        query = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        iters = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, query, target, iters] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, query, target, iters] (yield_t &yield) {
                size_t &aes_ops = tio.aes_ops();
                for (int i=0;i<iters;++i) {
                    if (tio.player() == 2) {
                        tio.cdpf(yield);
                        auto [ dpf0, dpf1 ] = CDPF::generate(target, aes_ops);
                        DPFnode leaf0 = dpf0.leaf(query, aes_ops);
                        DPFnode leaf1 = dpf1.leaf(query, aes_ops);
                        printf("DPFXOR_{%016lx}(%016lx) = ", target, query);
                        dump_node(leaf0 ^ leaf1);
                    } else {
                        CDPF dpf = tio.cdpf(yield);
                        printf("ashare = %016lX\nxshare = %016lX\n",
                            dpf.as_target.ashare, dpf.xs_target.xshare);
                        DPFnode leaf = dpf.leaf(query, aes_ops);
                        printf("DPF(%016lx) = ", query);
                        dump_node(leaf);
                        if (tio.player() == 1) {
                            tio.iostream_peer() << leaf;
                        } else {
                            DPFnode peerleaf;
                            tio.iostream_peer() >> peerleaf;
                            printf("XOR = ");
                            dump_node(leaf ^ peerleaf);
                        }
                    }
                }
            });
        });
    }
    pool.join();
}
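
// Run one comparison test.  The server deals a CDPF pair for the given
// target along with additive shares of x; the computational players
// compute shares of the lt/eq/gt flags and the is_zero flag for x, and
// player 0 reconstructs them and checks them against the sign of x.
// Returns 1 on success, 0 on failure.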
static int compare_test_one(MPCTIO &tio, yield_t &yield,
    value_t target, value_t x)
{
    int player = tio.player();
    size_t &aes_ops = tio.aes_ops();
    int res = 1;
    if (player == 2) {
        // Create a CDPF pair with the given target
        auto [dpf0, dpf1] = CDPF::generate(target, aes_ops);
        // Send it and a share of x to the computational parties
        RegAS x0, x1;
        x0.randomize();
        x1.set(x-x0.share());
        tio.iostream_p0() << dpf0 << x0;
        tio.iostream_p1() << dpf1 << x1;
    } else {
        CDPF dpf;
        RegAS xsh;
        tio.iostream_server() >> dpf >> xsh;
        auto [lt, eq, gt] = dpf.compare(tio, yield, xsh, aes_ops);
        RegBS eeq = dpf.is_zero(tio, yield, xsh, aes_ops);
        printf("%016lx %016lx %d %d %d %d ", target, x, lt.bshare,
            eq.bshare, gt.bshare, eeq.bshare);
        // Check the answer
        if (player == 1) {
            tio.iostream_peer() << xsh << lt << eq << gt << eeq;
        } else {
            RegAS peer_xsh;
            RegBS peer_lt, peer_eq, peer_gt, peer_eeq;
            tio.iostream_peer() >> peer_xsh >> peer_lt >> peer_eq >>
                peer_gt >> peer_eeq;
            lt ^= peer_lt;
            eq ^= peer_eq;
            gt ^= peer_gt;
            eeq ^= peer_eeq;
            xsh += peer_xsh;
            int lti = int(lt.bshare);
            int eqi = int(eq.bshare);
            int gti = int(gt.bshare);
            int eeqi = int(eeq.bshare);
            x = xsh.share();
            printf(": %d %d %d %d ", lti, eqi, gti, eeqi);
            bool signbit = (x >> 63);
            if (lti + eqi + gti != 1 || eqi != eeqi) {
                printf("INCONSISTENT");
                res = 0;
            } else if (x == 0 && eqi) {
                printf("=");
            } else if (!signbit && gti) {
                printf(">");
            } else if (signbit && lti) {
                printf("<");
            } else {
                printf("INCORRECT");
                res = 0;
            }
        }
        printf("\n");
    }
    return res;
}
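
// Run compare_test_one for a fixed target against x and a set of
// boundary values (0, ±1, ±15, ±16, ±17, and values around 2^63).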
static int compare_test_target(MPCTIO &tio, yield_t &yield,
    value_t target, value_t x)
{
    int res = 1;
    res &= compare_test_one(tio, yield, target, x);
    res &= compare_test_one(tio, yield, target, 0);
    res &= compare_test_one(tio, yield, target, 1);
    res &= compare_test_one(tio, yield, target, 15);
    res &= compare_test_one(tio, yield, target, 16);
    res &= compare_test_one(tio, yield, target, 17);
    res &= compare_test_one(tio, yield, target, -1);
    res &= compare_test_one(tio, yield, target, -15);
    res &= compare_test_one(tio, yield, target, -16);
    res &= compare_test_one(tio, yield, target, -17);
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63));
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63)+1);
    res &= compare_test_one(tio, yield, target, (value_t(1)<<63)-1);
    return res;
}
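
// Drive the comparison tests: for the given (or random) target and x,
// run compare_test_target over the same set of boundary targets in
// every thread, and have player 0 report overall success or failure.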
static void compare_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    value_t target, x;
    arc4random_buf(&target, sizeof(target));
    arc4random_buf(&x, sizeof(x));
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    if (*args) {
        x = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, target, x] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, target, x] (yield_t &yield) {
                int res = 1;
                res &= compare_test_target(tio, yield, target, x);
                res &= compare_test_target(tio, yield, 0, x);
                res &= compare_test_target(tio, yield, 1, x);
                res &= compare_test_target(tio, yield, 15, x);
                res &= compare_test_target(tio, yield, 16, x);
                res &= compare_test_target(tio, yield, 17, x);
                res &= compare_test_target(tio, yield, -1, x);
                res &= compare_test_target(tio, yield, -15, x);
                res &= compare_test_target(tio, yield, -16, x);
                res &= compare_test_target(tio, yield, -17, x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63), x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63)+1, x);
                res &= compare_test_target(tio, yield, (value_t(1)<<63)-1, x);
                if (tio.player() == 0) {
                    if (res == 1) {
                        printf("All tests passed!\n");
                    } else {
                        printf("TEST FAILURES\n");
                    }
                }
            });
        });
    }
    pool.join();
}
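
// Fill a Duoram database with random values (explicit-index writes in
// parallel coroutines), run the oblivious bitonic sort, and for small
// depths dump and reconstruct the database so the sorted order can be
// checked.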
static void sort_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, depth] (yield_t &yield) {
                address_t size = address_t(1)<<depth;
                // size_t &aes_ops = tio.aes_ops();
                Duoram<RegAS> oram(tio.player(), size);
                auto A = oram.flat(tio, yield);
                A.explicitonly(true);
                // Initialize the memory to random values in parallel
                std::vector<coro_t> coroutines;
                for (address_t i=0; i<size; ++i) {
                    coroutines.emplace_back(
                        [&A, i](yield_t &yield) {
                            auto Acoro = A.context(yield);
                            RegAS v;
                            v.randomize(62);
                            Acoro[i] += v;
                        });
                }
                run_coroutines(yield, coroutines);
                A.bitonic_sort(0, depth);
                if (depth <= 10) {
                    oram.dump();
                    auto check = A.reconstruct();
                    if (tio.player() == 0) {
                        for (address_t i=0;i<size;++i) {
                            printf("%04x %016lx\n", i, check[i].share());
                        }
                    }
                }
            });
        });
    }
    pool.join();
}
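
// Test oblivious binary search: build and sort a random Duoram
// database, share a search target from the server, run
// obliv_binary_search, and reconstruct the resulting index (and, for
// small depths, the database) to check the answer.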
static void bsearch_test(MPCIO &mpcio,
    const PRACOptions &opts, char **args)
{
    value_t target;
    arc4random_buf(&target, sizeof(target));
    target >>= 1;
    nbits_t depth=6;
    if (*args) {
        depth = atoi(*args);
        ++args;
    }
    if (*args) {
        target = strtoull(*args, NULL, 16);
        ++args;
    }
    int num_threads = opts.num_threads;
    boost::asio::thread_pool pool(num_threads);
    for (int thread_num = 0; thread_num < num_threads; ++thread_num) {
        boost::asio::post(pool, [&mpcio, thread_num, depth, target] {
            MPCTIO tio(mpcio, thread_num);
            run_coroutines(tio, [&tio, depth, target] (yield_t &yield) {
                address_t size = address_t(1)<<depth;
                RegAS tshare;
                if (tio.player() == 2) {
                    // Send shares of the target to the computational
                    // players
                    RegAS tshare0, tshare1;
                    tshare0.randomize();
                    tshare1.set(target-tshare0.share());
                    tio.iostream_p0() << tshare0;
                    tio.iostream_p1() << tshare1;
                    printf("Using target = %016lx\n", target);
                    yield();
                } else {
                    // Get the share of the target
                    tio.iostream_server() >> tshare;
                }
                // Create a random database and sort it
                // size_t &aes_ops = tio.aes_ops();
                Duoram<RegAS> oram(tio.player(), size);
                auto A = oram.flat(tio, yield);
                A.explicitonly(true);
                // Initialize the memory to random values in parallel
                std::vector<coro_t> coroutines;
                for (address_t i=0; i<size; ++i) {
                    coroutines.emplace_back(
                        [&A, i](yield_t &yield) {
                            auto Acoro = A.context(yield);
                            RegAS v;
                            v.randomize(62);
                            Acoro[i] += v;
                        });
                }
                run_coroutines(yield, coroutines);
                A.bitonic_sort(0, depth);
                // Binary search for the target
                RegAS tindex = A.obliv_binary_search(tshare);
                // Check the answer
                if (tio.player() == 1) {
                    tio.iostream_peer() << tindex;
                } else if (tio.player() == 0) {
                    RegAS peer_tindex;
                    tio.iostream_peer() >> peer_tindex;
                    tindex += peer_tindex;
                }
                if (depth <= 10) {
                    auto check = A.reconstruct();
                    if (tio.player() == 0) {
                        for (address_t i=0;i<size;++i) {
                            printf("%04x %016lx\n", i, check[i].share());
                        }
                    }
                }
                if (tio.player() == 0) {
                    printf("Found index = %lx\n", tindex.share());
                }
            });
        });
    }
    pool.join();
}
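
// Dispatch to the various online-phase tests and benchmarks based on
// the mode given as the first argument.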
void online_main(MPCIO &mpcio, const PRACOptions &opts, char **args)
{
    MPCTIO tio(mpcio, 0);
    if (!*args) {
        std::cerr << "Mode is required as the first argument when not preprocessing.\n";
        return;
    } else if (!strcmp(*args, "test")) {
        ++args;
        online_test(mpcio, opts, args);
    } else if (!strcmp(*args, "lamporttest")) {
        ++args;
        lamport_test(mpcio, opts, args);
    } else if (!strcmp(*args, "rdpftest")) {
        ++args;
        rdpf_test<1>(mpcio, opts, args);
    } else if (!strcmp(*args, "rdpftest2")) {
        ++args;
        rdpf_test<2>(mpcio, opts, args);
    } else if (!strcmp(*args, "rdpftest3")) {
        ++args;
        rdpf_test<3>(mpcio, opts, args);
    } else if (!strcmp(*args, "rdpftest4")) {
        ++args;
        rdpf_test<4>(mpcio, opts, args);
    } else if (!strcmp(*args, "rdpftest5")) {
        ++args;
        rdpf_test<5>(mpcio, opts, args);
    } else if (!strcmp(*args, "rdpftime")) {
        ++args;
        rdpf_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "evaltime")) {
        ++args;
        rdpfeval_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "parevaltime")) {
        ++args;
        par_rdpfeval_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "tupletime")) {
        ++args;
        tupleeval_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "partupletime")) {
        ++args;
        par_tupleeval_timing(mpcio, opts, args);
    } else if (!strcmp(*args, "duotest")) {
        ++args;
        if (opts.use_xor_db) {
            duoram_test<RegXS>(mpcio, opts, args);
        } else {
            duoram_test<RegAS>(mpcio, opts, args);
        }
    } else if (!strcmp(*args, "cdpftest")) {
        ++args;
        cdpf_test(mpcio, opts, args);
    } else if (!strcmp(*args, "cmptest")) {
        ++args;
        compare_test(mpcio, opts, args);
    } else if (!strcmp(*args, "sorttest")) {
        ++args;
        sort_test(mpcio, opts, args);
    } else if (!strcmp(*args, "bsearch")) {
        ++args;
        bsearch_test(mpcio, opts, args);
    } else if (!strcmp(*args, "duoram")) {
        ++args;
        if (opts.use_xor_db) {
            duoram<RegXS>(mpcio, opts, args);
        } else {
            duoram<RegAS>(mpcio, opts, args);
        }
    } else if (!strcmp(*args, "cell")) {
        ++args;
        cell(mpcio, opts, args);
    } else {
        std::cerr << "Unknown mode " << *args << "\n";
    }
}