// duoram.tcc
// Templated method implementations for duoram.hpp

#include <stdio.h>
#include <cassert>

#include "mpcops.hpp"
#include "cdpf.hpp"
#include "rdpf.hpp"

// Pass the player number and desired size
template <typename T>
Duoram<T>::Duoram(int player, size_t size) : player(player),
        oram_size(size), p0_blind(blind), p1_blind(peer_blinded_db) {
    if (player < 2) {
        database.resize(size);
        blind.resize(size);
        peer_blinded_db.resize(size);
    } else {
        p0_blind.resize(size);
        p1_blind.resize(size);
    }
}

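// Usage sketch (illustrative, not part of this file): each player
// constructs the same Duoram, then views it through a Flat shape;
// `tio` and `yield` are assumed to come from the MPC I/O layer.
//
//   Duoram<RegAS> oram(tio.player(), size);
//   Duoram<RegAS>::Flat A(oram, tio, yield, 0, 0);  // len=0: whole ORAM
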
// For debugging; print the contents of the Duoram to stdout
template <typename T>
void Duoram<T>::dump() const
{
    for (size_t i=0; i<oram_size; ++i) {
        if (player < 2) {
            printf("%04lx ", i);
            database[i].dump();
            printf(" ");
            blind[i].dump();
            printf(" ");
            peer_blinded_db[i].dump();
            printf("\n");
        } else {
            printf("%04lx ", i);
            p0_blind[i].dump();
            printf(" ");
            p1_blind[i].dump();
            printf("\n");
        }
    }
    printf("\n");
}

// Enable or disable explicit-only mode. Only [] accesses with
// explicit (address_t) indices are allowed in this mode. Using []
// with RegAS or RegXS indices will automatically turn off this
// mode, or you can turn it off explicitly. In explicit-only mode,
// updates to the memory in the Shape will not induce communication
// to the server or peer, but when it turns off, a message of the
// size of the entire Shape will be sent to each of the server and
// the peer. This is useful if you're going to be doing multiple
// explicit writes to every element of the Shape before you do your
// next oblivious read or write. Bitonic sort is a prime example.
template <typename T>
void Duoram<T>::Shape::explicitonly(bool enable)
{
    if (enable) {
        explicitmode = true;
    } else if (explicitmode) {
        explicitmode = false;
        // Reblind the whole Shape
        int player = tio.player();
        if (player < 2) {
            for (size_t i=0; i<shape_size; ++i) {
                auto [ DB, BL, PBD ] = get_comp(i);
                BL.randomize();
                tio.iostream_server() << BL;
                tio.iostream_peer() << (DB + BL);
            }
            yield();
            for (size_t i=0; i<shape_size; ++i) {
                auto [ DB, BL, PBD ] = get_comp(i);
                tio.iostream_peer() >> PBD;
            }
        } else {
            yield();
            for (size_t i=0; i<shape_size; ++i) {
                auto [BL0, BL1] = get_server(i);
                tio.iostream_p0() >> BL0;
                tio.iostream_p1() >> BL1;
            }
        }
    }
}

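// Usage sketch (illustrative; `A` is assumed to be a Flat over this
// Duoram, `n` its size, and `deltas` a local array of updates):
//
//   A.explicitonly(true);
//   for (address_t i=0; i<n; ++i) {
//       A[i] += deltas[i];     // local-only explicit updates
//   }
//   A.explicitonly(false);     // one Shape-sized reblinding exchange
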
// For debugging or checking your answers (using this in general is
// of course insecure)
// This one reconstructs the whole Shape
template <typename T>
std::vector<T> Duoram<T>::Shape::reconstruct() const
{
    int player = tio.player();
    std::vector<T> res;
    res.resize(shape_size);
    // Player 1 sends their share of the database to player 0
    if (player == 1) {
        for (size_t i=0; i<shape_size; ++i) {
            T elt = std::get<0>(get_comp(i));
            tio.queue_peer(&elt, sizeof(T));
        }
        yield();
    } else if (player == 0) {
        yield();
        for (size_t i=0; i<shape_size; ++i) {
            tio.recv_peer(&res[i], sizeof(T));
            T myelt = std::get<0>(get_comp(i));
            res[i] += myelt;
        }
    } else if (player == 2) {
        // The server (player 2) only syncs with the yield
        yield();
    }
    // Players 1 and 2 will get a vector of default-constructed
    // (zero) elements here
    return res;
}

// This one reconstructs a single database value
template <typename T>
T Duoram<T>::Shape::reconstruct(const T& share) const
{
    int player = tio.player();
    T res;
    // Player 1 sends their share of the value to player 0
    if (player == 1) {
        tio.queue_peer(&share, sizeof(T));
        yield();
    } else if (player == 0) {
        yield();
        tio.recv_peer(&res, sizeof(T));
        res += share;
    } else if (player == 2) {
        // The server (player 2) only syncs with the yield
        yield();
    }
    // Players 1 and 2 will get 0 here
    return res;
}

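// Usage sketch (illustrative): spot-check a single value while
// debugging.
//
//   RegAS share = A[idx];                // oblivious read
//   RegAS clear = A.reconstruct(share);  // meaningful only for player 0
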
// Function to set the shape_size of a shape and compute the number of
// bits you need to address a shape of that size (which is the number of
// bits in sz-1). This is typically called by subclass constructors.
template <typename T>
void Duoram<T>::Shape::set_shape_size(size_t sz)
{
    shape_size = sz;
    // Compute the number of bits in (sz-1), using 0 bits if sz is 0
    // or 1 (a single element needs no address bits)
    if (sz > 1) {
        addr_size = 64-__builtin_clzll(sz-1);
        addr_mask = address_t((size_t(1)<<addr_size)-1);
    } else {
        addr_size = 0;
        addr_mask = 0;
    }
}

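// Worked example: sz = 5 gives sz-1 = 4 = 0b100, so addr_size =
// 64 - __builtin_clzll(4) = 3 and addr_mask = (1<<3)-1 = 0x7, enough
// to address indices 0 through 4.
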
// Constructor for the Flat shape. len=0 means the maximum size (the
// parent's size minus start).
template <typename T>
Duoram<T>::Flat::Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield,
    size_t start, size_t len) : Shape(*this, duoram, tio, yield)
{
    size_t parentsize = duoram.size();
    if (start > parentsize) {
        start = parentsize;
    }
    this->start = start;
    size_t maxshapesize = parentsize - start;
    if (len > maxshapesize || len == 0) {
        len = maxshapesize;
    }
    this->len = len;
    this->set_shape_size(len);
}

// Constructor for the Flat shape viewing a range of a parent Shape.
// len=0 means the maximum size (the parent's size minus start).
template <typename T>
Duoram<T>::Flat::Flat(const Shape &parent, MPCTIO &tio, yield_t &yield,
    size_t start, size_t len) : Shape(parent, parent.duoram, tio, yield)
{
    size_t parentsize = parent.size();
    if (start > parentsize) {
        start = parentsize;
    }
    this->start = start;
    size_t maxshapesize = parentsize - start;
    if (len > maxshapesize || len == 0) {
        len = maxshapesize;
    }
    this->len = len;
    this->set_shape_size(len);
}

// Bitonic sort the elements from start to start+len-1, in
// increasing order if dir=0 or decreasing order if dir=1. Note that
// the elements must be at most 63 bits long each for the notion of
// ">" to make consistent sense.
template <typename T>
void Duoram<T>::Flat::bitonic_sort(address_t start, address_t len, bool dir)
{
    if (len < 2) return;
    if (len == 2) {
        osort(start, start+1, dir);
        return;
    }
    address_t leftlen, rightlen;
    leftlen = (len+1) >> 1;
    rightlen = len >> 1;
    // Recurse on the first half (opposite to the desired order)
    // and the second half (desired order) in parallel
    run_coroutines(this->yield,
        [this, start, leftlen, dir](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.bitonic_sort(start, leftlen, !dir);
        },
        [this, start, leftlen, rightlen, dir](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.bitonic_sort(start+leftlen, rightlen, dir);
        });
    // Merge the two into the desired order
    butterfly(start, len, dir);
}

// Internal function to aid bitonic_sort
template <typename T>
void Duoram<T>::Flat::butterfly(address_t start, address_t len, bool dir)
{
    if (len < 2) return;
    if (len == 2) {
        osort(start, start+1, dir);
        return;
    }
    address_t leftlen, rightlen, offset, num_swaps;
    // For the merge step to work on non-power-of-two lengths, leftlen
    // must be the largest power of two strictly less than len
    leftlen = 1;
    while (2*leftlen < len) {
        leftlen *= 2;
    }
    rightlen = len - leftlen;
    offset = leftlen;
    num_swaps = rightlen;
    // Sort pairs of elements offset apart in parallel
    std::vector<coro_t> coroutines;
    for (address_t i=0; i<num_swaps; ++i) {
        coroutines.emplace_back(
            [this, start, offset, dir, i](yield_t &yield) {
                Flat Acoro = context(yield);
                Acoro.osort(start+i, start+i+offset, dir);
            });
    }
    run_coroutines(this->yield, coroutines);
    // Recurse on each half in parallel
    run_coroutines(this->yield,
        [this, start, leftlen, dir](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.butterfly(start, leftlen, dir);
        },
        [this, start, leftlen, rightlen, dir](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.butterfly(start+leftlen, rightlen, dir);
        });
}

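// Worked example: len = 6 gives leftlen = 4 and rightlen = 2, so the
// merge step compares elements i and i+4 for i in {0,1}, then
// recursively butterflies the halves of length 4 and 2.
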
// Helper functions to specialize the read and update operations for
// RegAS and RegXS shared indices
template <typename U>
inline address_t IfRegAS(address_t val);
template <typename U>
inline address_t IfRegXS(address_t val);

template <>
inline address_t IfRegAS<RegAS>(address_t val) { return val; }
template <>
inline address_t IfRegAS<RegXS>(address_t val) { return 0; }
template <>
inline address_t IfRegXS<RegAS>(address_t val) { return 0; }
template <>
inline address_t IfRegXS<RegXS>(address_t val) { return val; }

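// That is, IfRegAS<U>(shift) passes the shift through exactly when
// the index type U is additively shared (and returns 0 for XOR-shared
// indices), and IfRegXS<U> does the reverse, so the ParallelEval
// calls below receive the reconstructed shift on the matching
// rotation argument and 0 on the other.
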
// Oblivious read from an additively or XOR shared index of Duoram memory
// T is the sharing type of the _values_ in the database; U is the
// sharing type of the _indices_ in the database. If we are referencing
// an entire entry of type T, then the field type FT will equal T, and
// the field selector type FST will be nullopt_t. If we are referencing
// a particular field of T, then FT will be the type of the field (RegAS
// or RegXS) and FST will be a pointer-to-member T::* type pointing to
// that field. Sh is the specific Shape subtype used to create the
// MemRefS. WIDTH is the RDPF width to use.
template <typename T>
template <typename U,typename FT,typename FST,typename Sh,nbits_t WIDTH>
Duoram<T>::Shape::MemRefS<U,FT,FST,Sh,WIDTH>::operator FT()
{
    FT res;
    Sh &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        const RDPFTriple<WIDTH> &dt = *(oblividx->dt);
        const nbits_t depth = dt.depth();
        // Compute the index offset
        U indoffset;
        dt.get_target(indoffset);
        indoffset -= oblividx->idx;
        // We only need two of the DPFs for reading
        RDPF2of3<WIDTH> dp(dt, 0, player == 0 ? 2 : 1);
        // Send it to the peer and the server
        shape.tio.queue_peer(&indoffset, BITBYTES(depth));
        shape.tio.queue_server(&indoffset, BITBYTES(depth));
        shape.yield();
        // Receive the above from the peer
        U peerindoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(depth));
        // Reconstruct the total offset
        auto indshift = combine(indoffset, peerindoffset, depth);
        // Evaluate the DPFs and compute the dotproducts
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        FT init;
        res = pe.reduce(init, [this, &dp, &shape] (int thread_num,
                address_t i, const typename RDPFPair<WIDTH>::LeafNode &leaf) {
            // The values from the two DPFs, which will each be of type FT
            std::tuple<FT,FT> V;
            dp.unit(V, leaf);
            auto [V0, V1] = V;
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i, fieldsel);
            return (DB + PBD).mulshare(V0) - BL.mulshare(V1-V0);
        });
        shape.yield();
        // Receive the cancellation term from the server
        FT gamma;
        shape.tio.iostream_server() >> gamma;
        res += gamma;
    } else {
        // The server does this
        const RDPFPair<WIDTH> &dp = *(oblividx->dp);
        const nbits_t depth = dp.depth();
        U p0indoffset, p1indoffset;
        shape.yield();
        // Receive the index offset from the computational players and
        // combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(depth));
        shape.tio.recv_p1(&p1indoffset, BITBYTES(depth));
        auto indshift = combine(p0indoffset, p1indoffset, depth);
        // Evaluate the DPFs to compute the cancellation terms
        std::tuple<FT,FT> init, gamma;
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        gamma = pe.reduce(init, [this, &dp, &shape] (int thread_num,
                address_t i, const typename RDPFPair<WIDTH>::LeafNode &leaf) {
            // The values from the two DPFs, each of type FT
            std::tuple<FT,FT> V;
            dp.unit(V, leaf);
            auto [V0, V1] = V;
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases
            auto [BL0, BL1] = shape.get_server(i, fieldsel);
            return std::make_tuple(-BL0.mulshare(V1), -BL1.mulshare(V0));
        });
        // Choose a random blinding factor
        FT rho;
        rho.randomize();
        std::get<0>(gamma) += rho;
        std::get<1>(gamma) -= rho;
        // Send the cancellation terms to the computational players
        shape.tio.iostream_p0() << std::get<0>(gamma);
        shape.tio.iostream_p1() << std::get<1>(gamma);
        shape.yield();
    }
    return res; // The server will always get 0
}

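// Correctness sketch (informal): the shifted DPFs evaluate to secret
// shares of a unit vector at the target index, so the reduce() above
// computes shares of the dot product of that unit vector with the
// database; the contributions of the blinds cancel against the
// server's gamma terms, leaving the two computational players with
// additive shares of the selected entry.
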
// Oblivious update to a shared index of Duoram memory, only for
// FT = RegAS or RegXS. The template parameters are as above.
template <typename T>
template <typename U, typename FT, typename FST, typename Sh, nbits_t WIDTH>
typename Duoram<T>::Shape::template MemRefS<U,FT,FST,Sh,WIDTH>
    &Duoram<T>::Shape::MemRefS<U,FT,FST,Sh,WIDTH>::oram_update(const FT& M,
    const prac_template_true &)
{
    Sh &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        const RDPFTriple<WIDTH> &dt = *(oblividx->dt);
        const nbits_t windex = oblividx->windex();
        const nbits_t depth = dt.depth();
        // Compute the index and message offsets
        U indoffset;
        dt.get_target(indoffset);
        indoffset -= oblividx->idx;
        typename RDPF<WIDTH>::template W<FT> MW;
        MW[windex] = M;
        auto Moffset = std::make_tuple(MW, MW, MW);
        typename RDPFTriple<WIDTH>::template WTriple<FT> scaled_val;
        dt.scaled_value(scaled_val);
        Moffset -= scaled_val;
        // Send them to the peer, and everything except the first offset
        // to the server
        shape.tio.queue_peer(&indoffset, BITBYTES(depth));
        shape.tio.iostream_peer() << Moffset;
        shape.tio.queue_server(&indoffset, BITBYTES(depth));
        shape.tio.iostream_server() << std::get<1>(Moffset) <<
            std::get<2>(Moffset);
        shape.yield();
        // Receive the above from the peer
        U peerindoffset;
        typename RDPFTriple<WIDTH>::template WTriple<FT> peerMoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(depth));
        shape.tio.iostream_peer() >> peerMoffset;
        // Reconstruct the total offsets
        auto indshift = combine(indoffset, peerindoffset, depth);
        auto Mshift = combine(Moffset, peerMoffset);
        // Evaluate the DPFs and add them to the database
        ParallelEval pe(dt, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        int init = 0;
        pe.reduce(init, [this, &dt, &shape, &Mshift, player, windex] (int thread_num,
                address_t i, const typename RDPFTriple<WIDTH>::LeafNode &leaf) {
            // The values from the three DPFs
            typename RDPFTriple<WIDTH>::template WTriple<FT> scaled;
            std::tuple<FT,FT,FT> unit;
            dt.scaled(scaled, leaf);
            dt.unit(unit, leaf);
            auto [V0, V1, V2] = scaled + unit * Mshift;
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i, fieldsel);
            DB += V0[windex];
            if (player == 0) {
                BL -= V1[windex];
                PBD += V2[windex]-V0[windex];
            } else {
                BL -= V2[windex];
                PBD += V1[windex]-V0[windex];
            }
            return 0;
        });
    } else {
        // The server does this
        const RDPFPair<WIDTH> &dp = *(oblividx->dp);
        const nbits_t windex = oblividx->windex();
        const nbits_t depth = dp.depth();
        U p0indoffset, p1indoffset;
        typename RDPFPair<WIDTH>::template WPair<FT> p0Moffset, p1Moffset;
        shape.yield();
        // Receive the index and message offsets from the computational
        // players and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(depth));
        shape.tio.iostream_p0() >> p0Moffset;
        shape.tio.recv_p1(&p1indoffset, BITBYTES(depth));
        shape.tio.iostream_p1() >> p1Moffset;
        auto indshift = combine(p0indoffset, p1indoffset, depth);
        auto Mshift = combine(p0Moffset, p1Moffset);
        // Evaluate the DPFs and subtract them from the blinds
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        int init = 0;
        pe.reduce(init, [this, &dp, &shape, &Mshift, windex] (int thread_num,
                address_t i, const typename RDPFPair<WIDTH>::LeafNode &leaf) {
            // The values from the two DPFs
            typename RDPFPair<WIDTH>::template WPair<FT> scaled;
            std::tuple<FT,FT> unit;
            dp.scaled(scaled, leaf);
            dp.unit(unit, leaf);
            auto [V0, V1] = scaled + unit * Mshift;
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases, so we can
            // subtract the pair directly.
            auto [BL0, BL1] = shape.get_server(i, fieldsel);
            BL0 -= V0[windex];
            BL1 -= V1[windex];
            return 0;
        });
    }
    return *this;
}

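// Correctness sketch (informal): scaled + unit * Mshift evaluates to
// secret shares of a vector that is M at the target index and 0
// elsewhere, so the computational players add M into the (shared)
// target entry, while the matching adjustments to BL and PBD (and the
// server's updates to BL0 and BL1) keep every party's blinded copies
// consistent.
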
// Oblivious update to a shared index of Duoram memory, only for
// FT not RegAS or RegXS. The template parameters are as above.
template <typename T>
template <typename U, typename FT, typename FST, typename Sh, nbits_t WIDTH>
typename Duoram<T>::Shape::template MemRefS<U,FT,FST,Sh,WIDTH>
    &Duoram<T>::Shape::MemRefS<U,FT,FST,Sh,WIDTH>::oram_update(const FT& M,
    const prac_template_false &)
{
    T::update(shape, shape.yield, oblividx->idx, M);
    return *this;
}

// Oblivious update to an additively or XOR shared index of Duoram
// memory. The template parameters are as above.
template <typename T>
template <typename U, typename FT, typename FST, typename Sh, nbits_t WIDTH>
typename Duoram<T>::Shape::template MemRefS<U,FT,FST,Sh,WIDTH>
    &Duoram<T>::Shape::MemRefS<U,FT,FST,Sh,WIDTH>::operator+=(const FT& M)
{
    return oram_update(M, prac_basic_Reg_S<FT>());
}

// Oblivious write to an additively or XOR shared index of Duoram
// memory. The template parameters are as above.
template <typename T>
template <typename U, typename FT, typename FST, typename Sh, nbits_t WIDTH>
typename Duoram<T>::Shape::template MemRefS<U,FT,FST,Sh,WIDTH>
    &Duoram<T>::Shape::MemRefS<U,FT,FST,Sh,WIDTH>::operator=(const FT& M)
{
    FT oldval = *this;
    FT update = M - oldval;
    *this += update;
    return *this;
}

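// Note the cost implied above: an oblivious write is an oblivious
// read (to obtain oldval) followed by an oblivious update of the
// difference, so it is roughly twice the cost of either primitive
// alone.
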
// Oblivious sort of the elements at the two given indices. Without
// reconstructing the values, the element at idx1 will become a share
// of the smaller of the reconstructed values, and the element at idx2
// a share of the larger (or the reverse, if dir=1).
//
// Note: this only works for additively shared databases
template <> template <typename U,typename V>
void Duoram<RegAS>::Flat::osort(const U &idx1, const V &idx2, bool dir)
{
    // Load the values in parallel
    RegAS val1, val2;
    run_coroutines(yield,
        [this, &idx1, &val1](yield_t &yield) {
            Flat Acoro = context(yield);
            val1 = Acoro[idx1];
        },
        [this, &idx2, &val2](yield_t &yield) {
            Flat Acoro = context(yield);
            val2 = Acoro[idx2];
        });
    // Get a CDPF
    CDPF cdpf = tio.cdpf(yield);
    // Use it to compare the values
    RegAS diff = val1-val2;
    auto [lt, eq, gt] = cdpf.compare(tio, yield, diff, tio.aes_ops());
    RegBS cmp = dir ? lt : gt;
    // Get additive shares of cmp*diff
    RegAS cmp_diff;
    mpc_flagmult(tio, yield, cmp_diff, cmp, diff);
    // Update the two locations in parallel
    run_coroutines(yield,
        [this, &idx1, &cmp_diff](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro[idx1] -= cmp_diff;
        },
        [this, &idx2, &cmp_diff](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro[idx2] += cmp_diff;
        });
}

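// Worked example: with dir=0 (increasing), cmp is a share of
// [val1 > val2] and cmp_diff a share of cmp*(val1-val2). If
// val1 > val2, the updates leave idx1 holding val1-(val1-val2) = val2
// and idx2 holding val2+(val1-val2) = val1; otherwise cmp_diff is 0
// and the values stay put.
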
// Explicit read from a given index of Duoram memory
template <typename T> template <typename FT, typename FST>
Duoram<T>::Shape::MemRefExpl<FT,FST>::operator FT()
{
    Shape &shape = this->shape;
    FT res;
    int player = shape.tio.player();
    if (player < 2) {
        res = std::get<0>(shape.get_comp(idx, fieldsel));
    }
    return res; // The server will always get 0
}

// Explicit update to a given index of Duoram memory
template <typename T> template <typename FT, typename FST>
typename Duoram<T>::Shape::template MemRefExpl<FT,FST>
    &Duoram<T>::Shape::MemRefExpl<FT,FST>::operator+=(const FT& M)
{
    Shape &shape = this->shape;
    int player = shape.tio.player();
    // In explicit-only mode, just update the local DB; we'll sync the
    // blinds and the blinded DB when we leave explicit-only mode.
    if (shape.explicitmode) {
        if (player < 2) {
            auto [ DB, BL, PBD ] = shape.get_comp(idx, fieldsel);
            DB += M;
        }
        return *this;
    }
    if (player < 2) {
        // Computational players do this
        // Pick a blinding factor
        FT blind;
        blind.randomize();
        // Send the blind to the server, and the blinded value to the
        // peer
        shape.tio.iostream_server() << blind;
        shape.tio.iostream_peer() << (M + blind);
        shape.yield();
        // Receive the peer's blinded value
        FT peerblinded;
        shape.tio.iostream_peer() >> peerblinded;
        // Our database, our blind, the peer's blinded database
        auto [ DB, BL, PBD ] = shape.get_comp(idx, fieldsel);
        DB += M;
        BL += blind;
        PBD += peerblinded;
    } else if (player == 2) {
        // The server does this
        shape.yield();
        // Receive the updates to the blinds
        FT p0blind, p1blind;
        shape.tio.iostream_p0() >> p0blind;
        shape.tio.iostream_p1() >> p1blind;
        // The two computational parties' blinds
        auto [ BL0, BL1 ] = shape.get_server(idx, fieldsel);
        BL0 += p0blind;
        BL1 += p1blind;
    }
    return *this;
}

// Explicit write to a given index of Duoram memory
template <typename T> template <typename FT, typename FST>
typename Duoram<T>::Shape::template MemRefExpl<FT,FST>
    &Duoram<T>::Shape::MemRefExpl<FT,FST>::operator=(const FT& M)
{
    FT oldval = *this;
    FT update = M - oldval;
    *this += update;
    return *this;
}

// Independent U-shared reads into a Shape of subtype Sh on a Duoram
// with values of sharing type T
template <typename T> template <typename U, typename Sh>
Duoram<T>::Shape::MemRefInd<U,Sh>::operator std::vector<T>()
{
    std::vector<T> res;
    size_t size = indcs.size();
    res.resize(size);
    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &res, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            res[i] = Sh_coro[indcs[i]];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return res;
}

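// Usage sketch (illustrative, assuming the index-vector operator[]
// overload declared in duoram.hpp):
//
//   std::vector<RegAS> idxs = { i0, i1, i2 };  // shared indices
//   std::vector<RegAS> vals = A[idxs];         // three parallel reads
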
// Independent U-shared updates into a Shape of subtype Sh on a Duoram
// with values of sharing type T (vector version)
template <typename T> template <typename U, typename Sh>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator+=(const std::vector<T>& M)
{
    size_t size = indcs.size();
    assert(M.size() == size);
    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] += M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared updates into a Shape of subtype Sh on a Duoram
// with values of sharing type T (array version)
template <typename T> template <typename U, typename Sh> template <size_t N>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator+=(const std::array<T,N>& M)
{
    size_t size = indcs.size();
    assert(N == size);
    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] += M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared writes into a Shape of subtype Sh on a Duoram
// with values of sharing type T (vector version)
template <typename T> template <typename U, typename Sh>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator=(const std::vector<T>& M)
{
    size_t size = indcs.size();
    assert(M.size() == size);
    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] = M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared writes into a Shape of subtype Sh on a Duoram
// with values of sharing type T (array version)
template <typename T> template <typename U, typename Sh> template <size_t N>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator=(const std::array<T,N>& M)
{
    size_t size = indcs.size();
    assert(N == size);
    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] = M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}