// Templated method implementations for duoram.hpp

#include <stdio.h>

#include "cdpf.hpp"

// Pass the player number and desired size
template <typename T>
Duoram<T>::Duoram(int player, size_t size) : player(player),
        oram_size(size), p0_blind(blind), p1_blind(peer_blinded_db) {
    if (player < 2) {
        database.resize(size);
        blind.resize(size);
        peer_blinded_db.resize(size);
    } else {
        p0_blind.resize(size);
        p1_blind.resize(size);
    }
}

// For debugging; print the contents of the Duoram to stdout
template <typename T>
void Duoram<T>::dump() const
{
    for (size_t i=0; i<oram_size; ++i) {
        if (player < 2) {
            printf("%04lx %016lx %016lx %016lx\n",
                i, database[i].share(), blind[i].share(),
                peer_blinded_db[i].share());
        } else {
            printf("%04lx %016lx %016lx\n",
                i, p0_blind[i].share(), p1_blind[i].share());
        }
    }
    printf("\n");
}

// Enable or disable explicit-only mode. Only the use of [] with
// explicit (address_t) indices is allowed in this mode. Using []
// with RegAS or RegXS indices will automatically turn off this
// mode, or you can turn it off explicitly. In explicit-only mode,
// updates to the memory in the Shape will not induce communication
// to the server or peer, but when it turns off, a message of the
// size of the entire Shape will be sent to each of the server and
// the peer. This is useful if you're going to be doing multiple
// explicit writes to every element of the Shape before you do your
// next oblivious read or write. Bitonic sort is a prime example.
template <typename T>
void Duoram<T>::Shape::explicitonly(bool enable)
{
    if (enable == true) {
        explicitmode = true;
    } else if (explicitmode == true) {
        explicitmode = false;
        // Reblind the whole Shape
        int player = tio.player();
        if (player < 2) {
            for (size_t i=0; i<shape_size; ++i) {
                auto [ DB, BL, PBD ] = get_comp(i);
                BL.randomize();
                tio.iostream_server() << BL;
                tio.iostream_peer() << (DB + BL);
            }
            yield();
            for (size_t i=0; i<shape_size; ++i) {
                auto [ DB, BL, PBD ] = get_comp(i);
                tio.iostream_peer() >> PBD;
            }
        } else {
            yield();
            for (size_t i=0; i<shape_size; ++i) {
                auto [BL0, BL1] = get_server(i);
                tio.iostream_p0() >> BL0;
                tio.iostream_p1() >> BL1;
            }
        }
    }
}

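// A minimal usage sketch (hypothetical caller code; the shape A, the
// bound n, and the array initvals are illustrative assumptions, not
// part of this file):
//
//     A.explicitonly(true);
//     for (address_t i = 0; i < n; ++i) {
//         A[i] += initvals[i];   // local-only update, no communication
//     }
//     A.explicitonly(false);     // one reblinding pass over the Shape
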
// For debugging or checking your answers (using this in general is
// of course insecure)

// This one reconstructs the whole database
template <typename T>
std::vector<T> Duoram<T>::Shape::reconstruct() const
{
    int player = tio.player();
    std::vector<T> res;
    res.resize(duoram.size());
    // Player 1 sends their share of the database to player 0
    if (player == 1) {
        tio.queue_peer(duoram.database.data(), duoram.size()*sizeof(T));
        yield();
    } else if (player == 0) {
        yield();
        tio.recv_peer(res.data(), duoram.size()*sizeof(T));
        for(size_t i=0;i<duoram.size();++i) {
            res[i] += duoram.database[i];
        }
    } else if (player == 2) {
        // The server (player 2) only syncs with the yield
        yield();
    }
    // Players 1 and 2 will get an empty vector here
    return res;
}

// This one reconstructs a single database value
template <typename T>
T Duoram<T>::Shape::reconstruct(const T& share) const
{
    int player = tio.player();
    T res;
    // Player 1 sends their share of the value to player 0
    if (player == 1) {
        tio.queue_peer(&share, sizeof(T));
        yield();
    } else if (player == 0) {
        yield();
        tio.recv_peer(&res, sizeof(T));
        res += share;
    } else if (player == 2) {
        // The server (player 2) only syncs with the yield
        yield();
    }
    // Players 1 and 2 will get 0 here
    return res;
}

// Function to set the shape_size of a shape and compute the number of
// bits you need to address a shape of that size (which is the number
// of bits in sz-1). This is typically called by subclass constructors.
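//
// For example, set_shape_size(5) makes indices 0..4 valid: sz-1 = 4
// needs 3 bits, so addr_size becomes 3 and addr_mask becomes 0x7.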
template <typename T>
void Duoram<T>::Shape::set_shape_size(size_t sz)
{
    shape_size = sz;
    // Compute the number of bits in (sz-1)
    // But use 0 if sz is 0 or 1 (a one-element shape needs no address
    // bits, and sz=0 should never happen anyway)
    if (sz > 1) {
        addr_size = 64-__builtin_clzll(sz-1);
        addr_mask = address_t((size_t(1)<<addr_size)-1);
    } else {
        addr_size = 0;
        addr_mask = 0;
    }
}

// Constructor for the Flat shape. len=0 means the maximum size (the
// parent's size minus start).
template <typename T>
Duoram<T>::Flat::Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield,
    size_t start, size_t len) : Shape(*this, duoram, tio, yield)
{
    size_t parentsize = duoram.size();
    if (start > parentsize) {
        start = parentsize;
    }
    this->start = start;
    size_t maxshapesize = parentsize - start;
    if (len > maxshapesize || len == 0) {
        len = maxshapesize;
    }
    this->len = len;
    this->set_shape_size(len);
}

// Bitonic sort the elements from start to start+(1<<depth)-1, in
// increasing order if dir=0 or decreasing order if dir=1. Note that
// the elements must be at most 63 bits long each for the notion of
// ">" to make consistent sense.
template <typename T>
void Duoram<T>::Flat::bitonic_sort(address_t start, nbits_t depth, bool dir)
{
    if (depth == 0) return;
    if (depth == 1) {
        osort(start, start+1, dir);
        return;
    }
    // Recurse on the first half (increasing order) and the second half
    // (decreasing order) in parallel
    run_coroutines(this->yield,
        [this, start, depth](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.bitonic_sort(start, depth-1, 0);
        },
        [this, start, depth](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.bitonic_sort(start+(1<<(depth-1)), depth-1, 1);
        });
    // Merge the two into the desired order
    butterfly(start, depth, dir);
}

// Internal function to aid bitonic_sort
template <typename T>
void Duoram<T>::Flat::butterfly(address_t start, nbits_t depth, bool dir)
{
    if (depth == 0) return;
    if (depth == 1) {
        osort(start, start+1, dir);
        return;
    }
    // Sort pairs of elements half the width apart in parallel
    address_t halfwidth = address_t(1)<<(depth-1);
    std::vector<coro_t> coroutines;
    for (address_t i=0; i<halfwidth;++i) {
        coroutines.emplace_back(
            [this, start, halfwidth, dir, i](yield_t &yield) {
                Flat Acoro = context(yield);
                Acoro.osort(start+i, start+i+halfwidth, dir);
            });
    }
    run_coroutines(this->yield, coroutines);
    // Recurse on each half in parallel
    run_coroutines(this->yield,
        [this, start, depth, dir](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.butterfly(start, depth-1, dir);
        },
        [this, start, halfwidth, depth, dir](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.butterfly(start+halfwidth, depth-1, dir);
        });
}

// Assuming the memory is already sorted, do an oblivious binary
// search for the largest index whose value is at most the given
// target. (The answer will be 0 if all of the memory elements are
// greater than the target.) This Flat must have a power-of-2 size.
// Only available for additively shared databases for now.
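//
// For example, if the (reconstructed) memory holds [1,3,3,7,9,12,20,30]
// and the target is 10, the result is a sharing of index 4 (the 9).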
template <>
RegAS Duoram<RegAS>::Flat::obliv_binary_search(RegAS &target)
{
    nbits_t depth = this->addr_size;
    // Start in the middle
    RegAS index;
    index.set(this->tio.player() ? 0 : 1<<(depth-1));
    // Invariant: index points to the first element of the right half of
    // the remaining possible range
    while (depth > 0) {
        // Obliviously read the value there
        RegAS val = operator[](index);
        // Compare it to the target
        CDPF cdpf = tio.cdpf(this->yield);
        auto [lt, eq, gt] = cdpf.compare(this->tio, this->yield,
            val-target, tio.aes_ops());
        if (depth > 1) {
            // If val > target, the answer is strictly to the left
            // and we should subtract 2^{depth-2} from index
            // If val <= target, the answer is here or to the right
            // and we should add 2^{depth-2} to index
            // So we unconditionally subtract 2^{depth-2} from index, and
            // add (lt+eq)*2^{depth-1}.
            RegAS uncond;
            uncond.set(tio.player() ? 0 : address_t(1)<<(depth-2));
            RegAS cond;
            cond.set(tio.player() ? 0 : address_t(1)<<(depth-1));
            RegAS condprod;
            RegBS le = lt ^ eq;
            mpc_flagmult(this->tio, this->yield, condprod, le, cond);
            index -= uncond;
            index += condprod;
        } else {
            // If val > target, the answer is strictly to the left
            // If val <= target, the answer is here or to the right
            // so subtract gt from index
            RegAS cond;
            cond.set(tio.player() ? 0 : 1);
            RegAS condprod;
            mpc_flagmult(this->tio, this->yield, condprod, gt, cond);
            index -= condprod;
        }
        --depth;
    }
    return index;
}

// Helper functions to specialize the read and update operations for
// RegAS and RegXS shared indices
template <typename U>
inline address_t IfRegAS(address_t val);
template <typename U>
inline address_t IfRegXS(address_t val);

template <>
inline address_t IfRegAS<RegAS>(address_t val) { return val; }
template <>
inline address_t IfRegAS<RegXS>(address_t val) { return 0; }
template <>
inline address_t IfRegXS<RegAS>(address_t val) { return 0; }
template <>
inline address_t IfRegXS<RegXS>(address_t val) { return val; }

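// That is, IfRegAS<U>(shift) yields the shift when the index sharing
// type U is additive (RegAS) and 0 when it is RegXS, and IfRegXS<U>
// does the opposite; the ParallelEval constructions below use them to
// pass the reconstructed offset as either an additive or an XOR shift.
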
// Oblivious read from an additively or XOR shared index of Duoram memory
// T is the sharing type of the _values_ in the database; U is the
// sharing type of the _indices_ into the database.
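//
// The flow: the computational players obtain an RDPFTriple targeting a
// random point, publish the offset between that point and the shared
// index, evaluate two of the three DPFs as a dot product over the Shape
// against their local database, blind, and copy of the peer's blinded
// database, and finally add a cancellation term supplied by the server,
// leaving each player with an additive share of the requested word.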
template <typename T> template <typename U>
Duoram<T>::Shape::MemRefS<U>::operator T()
{
    T res;
    Shape &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.yield, shape.addr_size);
        // Compute the index offset
        U indoffset = dt.target<U>();
        indoffset -= idx;
        // We only need two of the DPFs for reading
        RDPFPair dp(std::move(dt), 0, player == 0 ? 2 : 1);
        // The RDPFTriple dt is now broken, since we've moved things out
        // of it.
        // Send it to the peer and the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.yield();
        // Receive the above from the peer
        U peerindoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        // Reconstruct the total offset
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        // Evaluate the DPFs and compute the dotproducts
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        T init;
        res = pe.reduce(init, [&dp, &shape] (int thread_num, address_t i,
                const RDPFPair::node &leaf) {
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(leaf);
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            return (DB + PBD) * V0.share() - BL * (V1-V0).share();
        });
        shape.yield();
        // Receive the cancellation term from the server
        T gamma;
        shape.tio.iostream_server() >> gamma;
        res += gamma;
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.yield, shape.addr_size);
        U p0indoffset, p1indoffset;
        shape.yield();
        // Receive the index offset from the computational players and
        // combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        // Evaluate the DPFs to compute the cancellation terms
        std::tuple<T,T> init, gamma;
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        gamma = pe.reduce(init, [&dp, &shape] (int thread_num, address_t i,
                const RDPFPair::node &leaf) {
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(leaf);
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases
            auto [BL0, BL1] = shape.get_server(i);
            return std::make_tuple(-BL0 * V1.share(), -BL1 * V0.share());
        });
        // Choose a random blinding factor
        T rho;
        rho.randomize();
        std::get<0>(gamma) += rho;
        std::get<1>(gamma) -= rho;
        // Send the cancellation terms to the computational players
        shape.tio.iostream_p0() << std::get<0>(gamma);
        shape.tio.iostream_p1() << std::get<1>(gamma);
        shape.yield();
    }
    return res;  // The server will always get 0
}

// Oblivious update to an additively or XOR shared index of Duoram memory
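//
// The structure mirrors the oblivious read above: the players publish
// index and message offsets, all three DPFs of the RDPFTriple are
// evaluated across the Shape, and the scaled outputs are applied to the
// database, the blind, and the copy of the peer's blinded database
// (while the server applies its pair of DPFs to the data it holds for
// the two players), keeping all of the copies consistent.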
template <typename T> template <typename U>
typename Duoram<T>::Shape::template MemRefS<U>
    &Duoram<T>::Shape::MemRefS<U>::operator+=(const T& M)
{
    Shape &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.yield, shape.addr_size);
        // Compute the index and message offsets
        U indoffset = dt.target<U>();
        indoffset -= idx;
        auto Moffset = std::make_tuple(M, M, M);
        Moffset -= dt.scaled_value<T>();
        // Send them to the peer, and everything except the first offset
        // to the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() << Moffset;
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_server() << std::get<1>(Moffset) <<
            std::get<2>(Moffset);
        shape.yield();
        // Receive the above from the peer
        U peerindoffset;
        std::tuple<T,T,T> peerMoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() >> peerMoffset;
        // Reconstruct the total offsets
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        auto Mshift = combine(Moffset, peerMoffset);
        // Evaluate the DPFs and add them to the database
        ParallelEval pe(dt, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        int init = 0;
        pe.reduce(init, [&dt, &shape, &Mshift, player] (int thread_num,
                address_t i, const RDPFTriple::node &leaf) {
            // The values from the three DPFs
            auto [V0, V1, V2] = dt.scaled<T>(leaf) + dt.unit<T>(leaf) * Mshift;
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            DB += V0;
            if (player == 0) {
                BL -= V1;
                PBD += V2-V0;
            } else {
                BL -= V2;
                PBD += V1-V0;
            }
            return 0;
        });
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.yield, shape.addr_size);
        U p0indoffset, p1indoffset;
        std::tuple<T,T> p0Moffset, p1Moffset;
        shape.yield();
        // Receive the index and message offsets from the computational
        // players and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p0() >> p0Moffset;
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p1() >> p1Moffset;
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        auto Mshift = combine(p0Moffset, p1Moffset);
        // Evaluate the DPFs and subtract them from the blinds
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        int init = 0;
        pe.reduce(init, [&dp, &shape, &Mshift] (int thread_num,
                address_t i, const RDPFPair::node &leaf) {
            // The values from the two DPFs
            auto V = dp.scaled<T>(leaf) + dp.unit<T>(leaf) * Mshift;
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases, so we can
            // subtract the pair directly.
            shape.get_server(i) -= V;
            return 0;
        });
    }
    return *this;
}

// Oblivious write to an additively or XOR shared index of Duoram memory
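//
// This is implemented as an oblivious read of the old value followed by
// an oblivious update with the difference, so a write costs one
// oblivious read plus one oblivious update.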
template <typename T> template <typename U>
typename Duoram<T>::Shape::template MemRefS<U>
    &Duoram<T>::Shape::MemRefS<U>::operator=(const T& M)
{
    T oldval = *this;
    T update = M - oldval;
    *this += update;
    return *this;
}

// Obliviously sort the elements at the two given indices. Without
// reconstructing the values, the element at idx1 will become a share
// of the smaller of the two values and the element at idx2 a share of
// the larger if dir=0, or the reverse if dir=1.
//
// Note: this only works for additively shared databases
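//
// For example, if the values at idx1 and idx2 reconstruct to 10 and 3
// and dir=0, then diff=7, gt=1, cmp_diff=7, and after the updates the
// two locations hold shares of 3 and 10 respectively.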
template <> template <typename U,typename V>
void Duoram<RegAS>::Flat::osort(const U &idx1, const V &idx2, bool dir)
{
    // Load the values in parallel
    RegAS val1, val2;
    run_coroutines(yield,
        [this, &idx1, &val1](yield_t &yield) {
            Flat Acoro = context(yield);
            val1 = Acoro[idx1];
        },
        [this, &idx2, &val2](yield_t &yield) {
            Flat Acoro = context(yield);
            val2 = Acoro[idx2];
        });
    // Get a CDPF
    CDPF cdpf = tio.cdpf(yield);
    // Use it to compare the values
    RegAS diff = val1-val2;
    auto [lt, eq, gt] = cdpf.compare(tio, yield, diff, tio.aes_ops());
    RegBS cmp = dir ? lt : gt;
    // Get additive shares of cmp*diff
    RegAS cmp_diff;
    mpc_flagmult(tio, yield, cmp_diff, cmp, diff);
    // Update the two locations in parallel
    run_coroutines(yield,
        [this, &idx1, &cmp_diff](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro[idx1] -= cmp_diff;
        },
        [this, &idx2, &cmp_diff](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro[idx2] += cmp_diff;
        });
}

// Explicit read from a given index of Duoram memory
template <typename T>
Duoram<T>::Shape::MemRefExpl::operator T()
{
    Shape &shape = this->shape;
    T res;
    int player = shape.tio.player();
    if (player < 2) {
        res = std::get<0>(shape.get_comp(idx));
    }
    return res;  // The server will always get 0
}

// Explicit update to a given index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefExpl
    &Duoram<T>::Shape::MemRefExpl::operator+=(const T& M)
{
    Shape &shape = this->shape;
    int player = shape.tio.player();
    // In explicit-only mode, just update the local DB; we'll sync the
    // blinds and the blinded DB when we leave explicit-only mode.
    if (shape.explicitmode) {
        if (player < 2) {
            auto [ DB, BL, PBD ] = shape.get_comp(idx);
            DB += M;
        }
        return *this;
    }
    if (player < 2) {
        // Computational players do this
        // Pick a blinding factor
        T blind;
        blind.randomize();
        // Send the blind to the server, and the blinded value to the
        // peer
        shape.tio.iostream_server() << blind;
        shape.tio.iostream_peer() << (M + blind);
        shape.yield();
        // Receive the peer's blinded value
        T peerblinded;
        shape.tio.iostream_peer() >> peerblinded;
        // Our database, our blind, the peer's blinded database
        auto [ DB, BL, PBD ] = shape.get_comp(idx);
        DB += M;
        BL += blind;
        PBD += peerblinded;
    } else if (player == 2) {
        // The server does this
        shape.yield();
        // Receive the updates to the blinds
        T p0blind, p1blind;
        shape.tio.iostream_p0() >> p0blind;
        shape.tio.iostream_p1() >> p1blind;
        // The two computational parties' blinds
        auto [ BL0, BL1 ] = shape.get_server(idx);
        BL0 += p0blind;
        BL1 += p1blind;
    }
    return *this;
}

// Explicit write to a given index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefExpl
    &Duoram<T>::Shape::MemRefExpl::operator=(const T& M)
{
    T oldval = *this;
    T update = M - oldval;
    *this += update;
    return *this;
}

// Independent U-shared reads into a Shape of subtype Sh on a Duoram
// with values of sharing type T
template <typename T> template <typename U, typename Sh>
Duoram<T>::Shape::MemRefInd<U,Sh>::operator std::vector<T>()
{
    std::vector<T> res;
    size_t size = indcs.size();
    res.resize(size);
    std::vector<coro_t> coroutines;
    for (size_t i=0;i<size;++i) {
        coroutines.emplace_back([this, &res, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            res[i] = Sh_coro[indcs[i]];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return res;
}

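// A minimal usage sketch (hypothetical caller code; it assumes a Flat
// shape A over a Duoram<RegAS> whose operator[] accepts a vector of
// shared indices, as this conversion suggests, and RegAS indices i0
// and i1):
//
//     std::vector<RegAS> idxs = { i0, i1 };
//     std::vector<RegAS> vals = A[idxs];  // the two oblivious reads
//                                         // run as parallel coroutines
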
// Independent U-shared updates into a Shape of subtype Sh on a Duoram
// with values of sharing type T (vector version)
template <typename T> template <typename U, typename Sh>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator+=(const std::vector<T>& M)
{
    size_t size = indcs.size();
    assert(M.size() == size);
    std::vector<coro_t> coroutines;
    for (size_t i=0;i<size;++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] += M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared updates into a Shape of subtype Sh on a Duoram
// with values of sharing type T (array version)
template <typename T> template <typename U, typename Sh> template <size_t N>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator+=(const std::array<T,N>& M)
{
    size_t size = indcs.size();
    assert(N == size);
    std::vector<coro_t> coroutines;
    for (size_t i=0;i<size;++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] += M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared writes into a Shape of subtype Sh on a Duoram
// with values of sharing type T (vector version)
template <typename T> template <typename U, typename Sh>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator=(const std::vector<T>& M)
{
    size_t size = indcs.size();
    assert(M.size() == size);
    std::vector<coro_t> coroutines;
    for (size_t i=0;i<size;++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] = M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared writes into a Shape of subtype Sh on a Duoram
// with values of sharing type T (array version)
template <typename T> template <typename U, typename Sh> template <size_t N>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator=(const std::array<T,N>& M)
{
    size_t size = indcs.size();
    assert(N == size);
    std::vector<coro_t> coroutines;
    for (size_t i=0;i<size;++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] = M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}