// Templated method implementations for duoram.hpp

#include <stdio.h>

#include "cdpf.hpp"

// Pass the player number and desired size
template <typename T>
Duoram<T>::Duoram(int player, size_t size) : player(player),
        oram_size(size), p0_blind(blind), p1_blind(peer_blinded_db) {
    if (player < 2) {
        database.resize(size);
        blind.resize(size);
        peer_blinded_db.resize(size);
    } else {
        p0_blind.resize(size);
        p1_blind.resize(size);
    }
}
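
// Data layout (as implied by the constructor above and the explicit
// update operator at the bottom of this file): each computational
// player holds an additive share of the data in database[], its own
// random blinding values in blind[], and, in peer_blinded_db[], the
// peer's database share plus the peer's blind.  The server holds
// blind state for both players in p0_blind[] and p1_blind[], which
// are references that reuse the storage of the otherwise-unused
// blind and peer_blinded_db members.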

// For debugging; print the contents of the Duoram to stdout
template <typename T>
void Duoram<T>::dump() const
{
    for (size_t i=0; i<oram_size; ++i) {
        if (player < 2) {
            printf("%04lx %016lx %016lx %016lx\n",
                i, database[i].share(), blind[i].share(),
                peer_blinded_db[i].share());
        } else {
            printf("%04lx %016lx %016lx\n",
                i, p0_blind[i].share(), p1_blind[i].share());
        }
    }
    printf("\n");
}

// For debugging or checking your answers (using this in general is
// of course insecure)

// This one reconstructs the whole database
template <typename T>
std::vector<T> Duoram<T>::Shape::reconstruct() const
{
    int player = tio.player();
    std::vector<T> res;
    res.resize(duoram.size());
    // Player 1 sends their share of the database to player 0
    if (player == 1) {
        tio.queue_peer(duoram.database.data(), duoram.size()*sizeof(T));
    } else if (player == 0) {
        tio.recv_peer(res.data(), duoram.size()*sizeof(T));
        for (size_t i=0; i<duoram.size(); ++i) {
            res[i] += duoram.database[i];
        }
    }
    // The server (player 2) does nothing
    // Only player 0 gets the reconstructed database; players 1 and 2
    // will return a vector of zero (default) values here
    return res;
}

// This one reconstructs a single database value
template <typename T>
T Duoram<T>::Shape::reconstruct(const T& share) const
{
    int player = tio.player();
    T res;
    // Player 1 sends their share of the value to player 0
    if (player == 1) {
        tio.queue_peer(&share, sizeof(T));
    } else if (player == 0) {
        tio.recv_peer(&res, sizeof(T));
        res += share;
    }
    // The server (player 2) does nothing
    // Players 1 and 2 will get 0 here
    return res;
}
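
// Illustrative usage (a sketch only; this assumes a surrounding MPC
// context providing an MPCTIO tio and a yield_t yield, as elsewhere
// in this codebase):
//
//     Duoram<RegAS> oram(tio.player(), 16);
//     Duoram<RegAS>::Flat F(oram, tio, yield, 0, 0);  // whole ORAM
//     std::vector<RegAS> clear = F.reconstruct();
//     // clear holds the reconstructed values at player 0 only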

// Function to set the shape_size of a shape and compute the number of
// bits you need to address a shape of that size (which is the number
// of bits in sz-1).  This is typically called by subclass
// constructors.
template <typename T>
void Duoram<T>::Shape::set_shape_size(size_t sz)
{
    shape_size = sz;
    // Compute the number of bits in (sz-1)
    // But use 0 if sz is 0 or 1 (a shape of size 1 needs no address
    // bits, and sz=0 should never happen)
    if (sz > 1) {
        addr_size = 64-__builtin_clzll(sz-1);
        addr_mask = address_t((size_t(1)<<addr_size)-1);
    } else {
        addr_size = 0;
        addr_mask = 0;
    }
}
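
// Worked example: for sz=5, sz-1 = 4 = 0b100, so addr_size =
// 64 - clz(0b100) = 3 and addr_mask = 0b111; indices 0..4 all fit in
// 3 bits.  An exact power of two behaves the same way: sz=8 gives
// sz-1 = 0b111, so addr_size = 3 and addr_mask = 0b111 as well.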

// Constructor for the Flat shape.  len=0 means the maximum size (the
// parent's size minus start).
template <typename T>
Duoram<T>::Flat::Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield,
    size_t start, size_t len) : Shape(*this, duoram, tio, yield)
{
    size_t parentsize = duoram.size();
    if (start > parentsize) {
        start = parentsize;
    }
    this->start = start;
    size_t maxshapesize = parentsize - start;
    if (len > maxshapesize || len == 0) {
        len = maxshapesize;
    }
    this->len = len;
    this->set_shape_size(len);
}
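
// For example, over a parent Duoram of size 10, constructing
// Flat(duoram, tio, yield, 4, 0) covers indices 4..9: start is kept
// and len becomes 10-4 = 6.  A start beyond the parent's size is
// clamped to it, yielding an empty shape.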

// Bitonic sort the elements from start to start+(1<<depth)-1, in
// increasing order if dir=0 or decreasing order if dir=1.  Note that
// the elements must be at most 63 bits long each for the notion of
// ">" to make consistent sense.
template <typename T>
void Duoram<T>::Flat::bitonic_sort(address_t start, nbits_t depth, bool dir)
{
    if (depth == 0) return;
    if (depth == 1) {
        osort(start, start+1, dir);
        return;
    }
    // Recurse on the first half (increasing order) and the second half
    // (decreasing order) in parallel
    std::vector<coro_t> coroutines;
    coroutines.emplace_back([&](yield_t &yield) {
        Flat Acoro = context(yield);
        Acoro.bitonic_sort(start, depth-1, 0);
    });
    coroutines.emplace_back([&](yield_t &yield) {
        Flat Acoro = context(yield);
        Acoro.bitonic_sort(start+(1<<(depth-1)), depth-1, 1);
    });
    run_coroutines(this->yield, coroutines);
    // Merge the two into the desired order
    butterfly(start, depth, dir);
}
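
// Worked example for depth=2 (four elements a,b,c,d at
// start..start+3): the two recursive calls obliviously sort (a,b)
// ascending and (c,d) descending, so the four values form a bitonic
// sequence; butterfly then compare-exchanges (a,c) and (b,d) and
// finally each adjacent pair, leaving all four sorted in direction
// dir.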

// Internal function to aid bitonic_sort
template <typename T>
void Duoram<T>::Flat::butterfly(address_t start, nbits_t depth, bool dir)
{
    if (depth == 0) return;
    if (depth == 1) {
        osort(start, start+1, dir);
        return;
    }
    // Sort pairs of elements half the width apart in parallel
    address_t halfwidth = address_t(1)<<(depth-1);
    std::vector<coro_t> coroutines;
    for (address_t i=0; i<halfwidth; ++i) {
        // Capture i by value; the coroutine body can run after the
        // loop variable has changed
        coroutines.emplace_back([&, i](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.osort(start+i, start+i+halfwidth, dir);
        });
    }
    run_coroutines(this->yield, coroutines);
    // Recurse on each half in parallel
    coroutines.clear();
    coroutines.emplace_back([&](yield_t &yield) {
        Flat Acoro = context(yield);
        Acoro.butterfly(start, depth-1, dir);
    });
    coroutines.emplace_back([&](yield_t &yield) {
        Flat Acoro = context(yield);
        Acoro.butterfly(start+halfwidth, depth-1, dir);
    });
    run_coroutines(this->yield, coroutines);
}
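
// The coroutine pattern above recurs throughout this file: the
// compare-exchanges in each round are independent of one another, so
// running them as coroutines lets their network messages be batched
// into shared communication rounds rather than serialized one osort
// at a time.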

// Assuming the memory is already sorted, do an oblivious binary
// search for the largest index containing a value at most the
// given one.  (The answer will be 0 if all of the memory elements
// are greater than the target.)  This Flat must be of power-of-2
// size.  Only available for additively shared databases for now.
template <>
RegAS Duoram<RegAS>::Flat::obliv_binary_search(RegAS &target)
{
    nbits_t depth = this->addr_size;
    // Start in the middle
    RegAS index;
    index.set(this->tio.player() ? 0 : 1<<(depth-1));
    // Invariant: index points to the first element of the right half
    // of the remaining possible range
    while (depth > 0) {
        // Obliviously read the value there
        RegAS val = operator[](index);
        // Compare it to the target
        CDPF cdpf = tio.cdpf();
        auto [lt, eq, gt] = cdpf.compare(this->tio, this->yield,
            val-target, tio.aes_ops());
        if (depth > 1) {
            // If val > target, the answer is strictly to the left
            // and we should subtract 2^{depth-2} from index
            // If val <= target, the answer is here or to the right
            // and we should add 2^{depth-2} to index
            // So we unconditionally subtract 2^{depth-2} from index,
            // and add (lt+eq)*2^{depth-1}.
            RegAS uncond;
            uncond.set(tio.player() ? 0 : address_t(1)<<(depth-2));
            RegAS cond;
            cond.set(tio.player() ? 0 : address_t(1)<<(depth-1));
            RegAS condprod;
            RegBS le = lt ^ eq;
            mpc_flagmult(this->tio, this->yield, condprod, le, cond);
            index -= uncond;
            index += condprod;
        } else {
            // If val > target, the answer is strictly to the left
            // If val <= target, the answer is here or to the right
            // so subtract gt from index
            RegAS cond;
            cond.set(tio.player() ? 0 : 1);
            RegAS condprod;
            mpc_flagmult(this->tio, this->yield, condprod, gt, cond);
            index -= condprod;
        }
        --depth;
    }
    return index;
}
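
// Note the sharing idiom above: x.set(tio.player() ? 0 : c) gives the
// two computational players an additive sharing of the public
// constant c.  Worked trace (illustrative, values shown in the
// clear): size 8 (depth 3), memory {1,3,5,7,9,11,13,15}, target 7.
// index starts at 4 (value 9 > 7), so the first step moves it to
// 4 - 2 + 0 = 2; value 5 <= 7 moves it to 2 - 1 + 2 = 3; value 7 <= 7
// makes the final step's gt = 0, so the result is index 3, the
// largest index holding a value at most 7.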

// Oblivious read from an additively shared index of Duoram memory
template <typename T>
Duoram<T>::Shape::MemRefAS::operator T()
{
    T res;
    const Shape &shape = this->shape;
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.addr_size);

        // Compute the index offset
        RegAS indoffset = dt.as_target;
        indoffset -= idx;

        // We only need two of the DPFs for reading
        RDPFPair dp(std::move(dt), 0, player == 0 ? 2 : 1);
        // The RDPFTriple dt is now broken, since we've moved things
        // out of it.

        // Send it to the peer and the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));

        shape.yield();

        // Receive the above from the peer
        RegAS peerindoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));

        // Reconstruct the total offset
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);

        // Evaluate the DPFs and compute the dotproducts
        StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(L);
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            res += (DB + PBD) * V0.share() - BL * (V1-V0).share();
        }

        // Receive the cancellation term from the server
        T gamma;
        shape.tio.iostream_server() >> gamma;
        res += gamma;
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.addr_size);
        RegAS p0indoffset, p1indoffset;

        // Receive the index offset from the computational players and
        // combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);

        // Evaluate the DPFs to compute the cancellation terms
        T gamma0, gamma1;
        StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(L);
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases
            auto [BL0, BL1] = shape.get_server(i);
            gamma0 -= BL0 * V1.share();
            gamma1 -= BL1 * V0.share();
        }

        // Choose a random blinding factor
        T rho;
        rho.randomize();
        gamma0 += rho;
        gamma1 -= rho;

        // Send the cancellation terms to the computational players
        shape.tio.iostream_p0() << gamma0;
        shape.tio.iostream_p1() << gamma1;

        shape.yield();
    }
    return res;  // The server will always get 0
}
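
// Informal correctness sketch: the V0 values held by the two
// computational players are shares of a vector that is 1 at the
// target index and 0 elsewhere, so summing the two players' res
// values reconstructs the target database entry plus some
// blind-dependent terms; those terms are what the server's gamma0 and
// gamma1 cancel, and rho merely re-randomizes how that correction is
// split between the two players (their sum is unchanged).  See the
// Duoram paper for the full algebra.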

// Oblivious update to an additively shared index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefAS
    &Duoram<T>::Shape::MemRefAS::operator+=(const T& M)
{
    const Shape &shape = this->shape;
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.addr_size);

        // Compute the index and message offsets
        RegAS indoffset = dt.as_target;
        indoffset -= idx;
        auto Moffset = std::make_tuple(M, M, M);
        Moffset -= dt.scaled_value<T>();

        // Send them to the peer, and everything except the first
        // offset to the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() << Moffset;
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_server() << std::get<1>(Moffset) <<
            std::get<2>(Moffset);

        shape.yield();

        // Receive the above from the peer
        RegAS peerindoffset;
        std::tuple<T,T,T> peerMoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() >> peerMoffset;

        // Reconstruct the total offsets
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        auto Mshift = combine(Moffset, peerMoffset);

        // Evaluate the DPFs and add them to the database
        StreamEval ev(dt, indshift, 0, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the three DPFs
            auto [V0, V1, V2] = dt.scaled<T>(L) + dt.unit<T>(L) * Mshift;
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            DB += V0;
            if (player == 0) {
                BL -= V1;
                PBD += V2-V0;
            } else {
                BL -= V2;
                PBD += V1-V0;
            }
        }
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.addr_size);
        RegAS p0indoffset, p1indoffset;
        std::tuple<T,T> p0Moffset, p1Moffset;

        // Receive the index and message offsets from the computational
        // players and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p0() >> p0Moffset;
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p1() >> p1Moffset;
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        auto Mshift = combine(p0Moffset, p1Moffset);

        // Evaluate the DPFs and subtract them from the blinds
        StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto V = dp.scaled<T>(L) + dp.unit<T>(L) * Mshift;
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases, so we
            // can subtract the pair directly.
            shape.get_server(i) -= V;
        }
    }
    return *this;
}
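
// Note the asymmetry with reading: an update evaluates all three DPFs
// in the RDPFTriple, so that each player can consistently update its
// database share (V0), its blind, and its copy of the peer's blinded
// database (with the roles of V1 and V2 swapped between the two
// players, as above), while the server applies the matching
// corrections to the blind state it holds for both players; a read
// needed only two of the three DPFs.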

// Oblivious sort of the elements at the two provided indices.
// Without reconstructing the values, if dir=0, the element at idx1
// will become a share of the smaller of the reconstructed values and
// the element at idx2 a share of the larger (and vice versa if
// dir=1).
//
// Note: this only works for additively shared databases
template <> template <typename U, typename V>
void Duoram<RegAS>::Flat::osort(const U &idx1, const V &idx2, bool dir)
{
    // Load the values in parallel
    std::vector<coro_t> coroutines;
    RegAS val1, val2;
    coroutines.emplace_back([&](yield_t &yield) {
        Flat Acoro = context(yield);
        val1 = Acoro[idx1];
    });
    coroutines.emplace_back([&](yield_t &yield) {
        Flat Acoro = context(yield);
        val2 = Acoro[idx2];
    });
    run_coroutines(yield, coroutines);
    // Get a CDPF
    CDPF cdpf = tio.cdpf();
    // Use it to compare the values
    RegAS diff = val1-val2;
    auto [lt, eq, gt] = cdpf.compare(tio, yield, diff, tio.aes_ops());
    RegBS cmp = dir ? lt : gt;
    // Get additive shares of cmp*diff
    RegAS cmp_diff;
    mpc_flagmult(tio, yield, cmp_diff, cmp, diff);
    // Update the two locations in parallel
    coroutines.clear();
    coroutines.emplace_back([&](yield_t &yield) {
        Flat Acoro = context(yield);
        Acoro[idx1] -= cmp_diff;
    });
    coroutines.emplace_back([&](yield_t &yield) {
        Flat Acoro = context(yield);
        Acoro[idx2] += cmp_diff;
    });
    run_coroutines(yield, coroutines);
}
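
// Worked example (values shown in the clear, dir=0): if the
// reconstructed values are val1 = 5 and val2 = 3, then diff = 2,
// gt = 1, and cmp_diff = 2, so the updates leave 5-2 = 3 at idx1 and
// 3+2 = 5 at idx2.  If the values were already in order, cmp = 0
// makes cmp_diff = 0 and the updates change nothing, but they are
// still performed, so the access pattern reveals nothing.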

// The MemRefXS routines are almost identical to the MemRefAS
// routines, but I couldn't figure out how to get them to be two
// instances of a template.  Sorry for the code duplication.

// Oblivious read from an XOR shared index of Duoram memory
template <typename T>
Duoram<T>::Shape::MemRefXS::operator T()
{
    const Shape &shape = this->shape;
    T res;
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.addr_size);

        // Compute the index offset
        RegXS indoffset = dt.xs_target;
        indoffset -= idx;

        // We only need two of the DPFs for reading
        RDPFPair dp(std::move(dt), 0, player == 0 ? 2 : 1);

        // Send it to the peer and the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));

        shape.yield();

        // Receive the above from the peer
        RegXS peerindoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));

        // Reconstruct the total offset
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);

        // Evaluate the DPFs and compute the dotproducts
        StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(L);
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            res += (DB + PBD) * V0.share() - BL * (V1-V0).share();
        }

        // Receive the cancellation term from the server
        T gamma;
        shape.tio.iostream_server() >> gamma;
        res += gamma;
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.addr_size);
        RegXS p0indoffset, p1indoffset;

        // Receive the index offset from the computational players and
        // combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);

        // Evaluate the DPFs to compute the cancellation terms
        T gamma0, gamma1;
        StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(L);
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases
            auto [BL0, BL1] = shape.get_server(i);
            gamma0 -= BL0 * V1.share();
            gamma1 -= BL1 * V0.share();
        }

        // Choose a random blinding factor
        T rho;
        rho.randomize();
        gamma0 += rho;
        gamma1 -= rho;

        // Send the cancellation terms to the computational players
        shape.tio.iostream_p0() << gamma0;
        shape.tio.iostream_p1() << gamma1;

        shape.yield();
    }
    return res;  // The server will always get 0
}

// Oblivious update to an XOR shared index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefXS
    &Duoram<T>::Shape::MemRefXS::operator+=(const T& M)
{
    const Shape &shape = this->shape;
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.addr_size);

        // Compute the index and message offsets
        RegXS indoffset = dt.xs_target;
        indoffset -= idx;
        auto Moffset = std::make_tuple(M, M, M);
        Moffset -= dt.scaled_value<T>();

        // Send them to the peer, and everything except the first
        // offset to the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() << Moffset;
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_server() << std::get<1>(Moffset) <<
            std::get<2>(Moffset);

        shape.yield();

        // Receive the above from the peer
        RegXS peerindoffset;
        std::tuple<T,T,T> peerMoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() >> peerMoffset;

        // Reconstruct the total offsets
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        auto Mshift = combine(Moffset, peerMoffset);

        // Evaluate the DPFs and add them to the database
        StreamEval ev(dt, 0, indshift, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the three DPFs
            auto [V0, V1, V2] = dt.scaled<T>(L) + dt.unit<T>(L) * Mshift;
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            DB += V0;
            if (player == 0) {
                BL -= V1;
                PBD += V2-V0;
            } else {
                BL -= V2;
                PBD += V1-V0;
            }
        }
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.addr_size);
        RegXS p0indoffset, p1indoffset;
        std::tuple<T,T> p0Moffset, p1Moffset;

        // Receive the index and message offsets from the computational
        // players and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p0() >> p0Moffset;
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p1() >> p1Moffset;
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        auto Mshift = combine(p0Moffset, p1Moffset);

        // Evaluate the DPFs and subtract them from the blinds
        StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto V = dp.scaled<T>(L) + dp.unit<T>(L) * Mshift;
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases, so we
            // can subtract the pair directly.
            shape.get_server(i) -= V;
        }
    }
    return *this;
}

// Explicit read from a given index of Duoram memory
template <typename T>
Duoram<T>::Shape::MemRefExpl::operator T()
{
    const Shape &shape = this->shape;
    T res;
    int player = shape.tio.player();
    if (player < 2) {
        res = std::get<0>(shape.get_comp(idx));
    }
    return res;  // The server will always get 0
}

// Explicit update to a given index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefExpl
    &Duoram<T>::Shape::MemRefExpl::operator+=(const T& M)
{
    const Shape &shape = this->shape;
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this

        // Pick a blinding factor
        T blind;
        blind.randomize();

        // Send the blind to the server, and the blinded value to the
        // peer
        shape.tio.iostream_server() << blind;
        shape.tio.iostream_peer() << (M + blind);

        shape.yield();

        // Receive the peer's blinded value
        T peerblinded;
        shape.tio.iostream_peer() >> peerblinded;

        // Our database, our blind, the peer's blinded database
        auto [ DB, BL, PBD ] = shape.get_comp(idx);
        DB += M;
        BL += blind;
        PBD += peerblinded;
    } else if (player == 2) {
        // The server does this

        // Receive the updates to the blinds
        T p0blind, p1blind;
        shape.tio.iostream_p0() >> p0blind;
        shape.tio.iostream_p1() >> p1blind;

        // The two computational parties' blinds
        auto [ BL0, BL1 ] = shape.get_server(idx);
        BL0 += p0blind;
        BL1 += p1blind;
    }
    return *this;
}
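
// Note how the explicit update preserves the data layout described at
// the top of this file: each player adds its share M to its own
// database entry and a fresh blind to its blind entry; the peer adds
// the received M+blind to its copy of this player's blinded database;
// and the server adds just the blind to the blind state it holds for
// this player.  So peer_blinded_db remains the peer's database share
// plus the peer's blind.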