// Templated method implementations for duoram.hpp

#include <stdio.h>

#include "cdpf.hpp"

// Pass the player number and desired size
template <typename T>
Duoram<T>::Duoram(int player, size_t size) : player(player),
        oram_size(size), p0_blind(blind), p1_blind(peer_blinded_db) {
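    // Note: for the server (player 2), p0_blind and p1_blind are
    // references bound to the otherwise-unused blind and
    // peer_blinded_db members, so the same storage backs both pairs
    // of names.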
    if (player < 2) {
        database.resize(size);
        blind.resize(size);
        peer_blinded_db.resize(size);
    } else {
        p0_blind.resize(size);
        p1_blind.resize(size);
    }
}

// For debugging; print the contents of the Duoram to stdout
template <typename T>
void Duoram<T>::dump() const
{
    for (size_t i=0; i<oram_size; ++i) {
        if (player < 2) {
            printf("%04lx %016lx %016lx %016lx\n",
                i, database[i].share(), blind[i].share(),
                peer_blinded_db[i].share());
        } else {
            printf("%04lx %016lx %016lx\n",
                i, p0_blind[i].share(), p1_blind[i].share());
        }
    }
    printf("\n");
}

// Enable or disable explicit-only mode. Only accesses through []
// with explicit (address_t) indices are allowed in this mode. Using
// [] with RegAS or RegXS indices will automatically turn off this
// mode, or you can turn it off explicitly. In explicit-only mode,
// updates to the memory in the Shape will not induce communication
// to the server or peer, but when it turns off, a message the size
// of the entire Shape will be sent to each of the server and the
// peer. This is useful if you're going to be doing multiple
// explicit writes to every element of the Shape before you do your
// next oblivious read or write. Bitonic sort is a prime example.
template <typename T>
void Duoram<T>::Shape::explicitonly(bool enable)
{
    if (enable) {
        explicitmode = true;
    } else if (explicitmode) {
        explicitmode = false;
        // Reblind the whole Shape
        int player = tio.player();
        if (player < 2) {
            for (size_t i=0; i<shape_size; ++i) {
                auto [ DB, BL, PBD ] = get_comp(i);
                BL.randomize();
                tio.iostream_server() << BL;
                tio.iostream_peer() << (DB + BL);
            }
            yield();
            for (size_t i=0; i<shape_size; ++i) {
                auto [ DB, BL, PBD ] = get_comp(i);
                tio.iostream_peer() >> PBD;
            }
        } else {
            for (size_t i=0; i<shape_size; ++i) {
                auto [BL0, BL1] = get_server(i);
                tio.iostream_p0() >> BL0;
                tio.iostream_p1() >> BL1;
            }
        }
    }
}
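
// Example (sketch; A is a Flat over this Duoram, n its size, and
// deltas a hypothetical array of updates): batch a pass of explicit
// writes, then restore oblivious mode, triggering a single
// reblinding of the whole Shape:
//
//     A.explicitonly(true);
//     for (address_t i=0; i<n; ++i) {
//         A[i] += deltas[i];
//     }
//     A.explicitonly(false);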

// For debugging or checking your answers (using this in general is
// of course insecure)

// This one reconstructs the whole database
template <typename T>
std::vector<T> Duoram<T>::Shape::reconstruct() const
{
    int player = tio.player();
    std::vector<T> res;
    res.resize(duoram.size());
    // Player 1 sends their share of the database to player 0
    if (player == 1) {
        tio.queue_peer(duoram.database.data(), duoram.size()*sizeof(T));
    } else if (player == 0) {
        tio.recv_peer(res.data(), duoram.size()*sizeof(T));
        for (size_t i=0; i<duoram.size(); ++i) {
            res[i] += duoram.database[i];
        }
    }
    // The server (player 2) does nothing

    // Players 1 and 2 will get a vector of zero (default-constructed)
    // values here
    return res;
}

// This one reconstructs a single database value
template <typename T>
T Duoram<T>::Shape::reconstruct(const T& share) const
{
    int player = tio.player();
    T res;
    // Player 1 sends their share of the value to player 0
    if (player == 1) {
        tio.queue_peer(&share, sizeof(T));
    } else if (player == 0) {
        tio.recv_peer(&res, sizeof(T));
        res += share;
    }
    // The server (player 2) does nothing
    // Players 1 and 2 will get 0 here
    return res;
}

// Function to set the shape_size of a shape and compute the number of
// bits you need to address a shape of that size (which is the number
// of bits in sz-1). This is typically called by subclass constructors.
template <typename T>
void Duoram<T>::Shape::set_shape_size(size_t sz)
{
    shape_size = sz;
    // Compute the number of bits in (sz-1), but use 0 if sz is 0 or 1
    // (sz=0 should never happen, and a size-1 shape needs no address
    // bits)
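    // For example, sz=5 gives addr_size=3 (the number of bits in
    // 4 = 0b100) and addr_mask=0x7.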
    if (sz > 1) {
        addr_size = 64-__builtin_clzll(sz-1);
        addr_mask = address_t((size_t(1)<<addr_size)-1);
    } else {
        addr_size = 0;
        addr_mask = 0;
    }
}

// Constructor for the Flat shape. len=0 means the maximum size (the
// parent's size minus start).
template <typename T>
Duoram<T>::Flat::Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield,
    size_t start, size_t len) : Shape(*this, duoram, tio, yield)
{
    size_t parentsize = duoram.size();
    if (start > parentsize) {
        start = parentsize;
    }
    this->start = start;
    size_t maxshapesize = parentsize - start;
    if (len > maxshapesize || len == 0) {
        len = maxshapesize;
    }
    this->len = len;
    this->set_shape_size(len);
}
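
// Example (sketch): create a Flat view covering elements 2 through
// duoram.size()-1 of the underlying Duoram:
//
//     Duoram<RegAS>::Flat F(duoram, tio, yield, 2, 0);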

// Bitonic sort the elements from start to start+(1<<depth)-1, in
// increasing order if dir=0 or decreasing order if dir=1. Note that
// the elements must be at most 63 bits long each for the notion of
// ">" to make consistent sense.
template <typename T>
void Duoram<T>::Flat::bitonic_sort(address_t start, nbits_t depth, bool dir)
{
    if (depth == 0) return;
    if (depth == 1) {
        osort(start, start+1, dir);
        return;
    }
    // Recurse on the first half (increasing order) and the second half
    // (decreasing order) in parallel
    run_coroutines(this->yield,
        [&](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.bitonic_sort(start, depth-1, 0);
        },
        [&](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.bitonic_sort(start+(1<<(depth-1)), depth-1, 1);
        });
    // Merge the two into the desired order
    butterfly(start, depth, dir);
}
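
// Example (sketch): obliviously sort the first 8 elements of a Flat
// A into increasing order:
//
//     A.bitonic_sort(0, 3, 0);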

// Internal function to aid bitonic_sort
template <typename T>
void Duoram<T>::Flat::butterfly(address_t start, nbits_t depth, bool dir)
{
    if (depth == 0) return;
    if (depth == 1) {
        osort(start, start+1, dir);
        return;
    }
    // Sort pairs of elements half the width apart in parallel
    address_t halfwidth = address_t(1)<<(depth-1);
    std::vector<coro_t> coroutines;
    for (address_t i=0; i<halfwidth; ++i) {
        // Capture i by value: the coroutines only run when
        // run_coroutines is called below, after the loop has ended
        coroutines.emplace_back([&, i](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.osort(start+i, start+i+halfwidth, dir);
        });
    }
    run_coroutines(this->yield, coroutines);
    // Recurse on each half in parallel
    run_coroutines(this->yield,
        [&](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.butterfly(start, depth-1, dir);
        },
        [&](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.butterfly(start+halfwidth, depth-1, dir);
        });
}

// Assuming the memory is already sorted, do an oblivious binary
// search for the largest index containing the value at most the
// given one. (The answer will be 0 if all of the memory elements
// are greater than the target.) This Flat must have a power-of-2
// size. Only available for additively shared databases for now.
template <>
RegAS Duoram<RegAS>::Flat::obliv_binary_search(RegAS &target)
{
    nbits_t depth = this->addr_size;
    // Start in the middle
    RegAS index;
    index.set(this->tio.player() ? 0 : address_t(1)<<(depth-1));
    // Invariant: index points to the first element of the right half
    // of the remaining possible range
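    // Example trace (in the clear, for intuition): with 8 elements
    // (depth=3), index starts at 4. Round 1 subtracts 2 and
    // conditionally adds 4, moving index to 2 or 6; round 2 subtracts
    // 1 and conditionally adds 2, moving it to 1, 3, 5, or 7; round 3
    // conditionally subtracts 1, landing on the answer.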
    while (depth > 0) {
        // Obliviously read the value there
        RegAS val = operator[](index);
        // Compare it to the target
        CDPF cdpf = tio.cdpf();
        auto [lt, eq, gt] = cdpf.compare(this->tio, this->yield,
            val-target, tio.aes_ops());
        if (depth > 1) {
            // If val > target, the answer is strictly to the left
            // and we should subtract 2^{depth-2} from index
            // If val <= target, the answer is here or to the right
            // and we should add 2^{depth-2} to index
            // So we unconditionally subtract 2^{depth-2} from index,
            // and add (lt+eq)*2^{depth-1}.
            RegAS uncond;
            uncond.set(tio.player() ? 0 : address_t(1)<<(depth-2));
            RegAS cond;
            cond.set(tio.player() ? 0 : address_t(1)<<(depth-1));
            RegAS condprod;
            RegBS le = lt ^ eq;
            mpc_flagmult(this->tio, this->yield, condprod, le, cond);
            index -= uncond;
            index += condprod;
        } else {
            // If val > target, the answer is strictly to the left
            // If val <= target, the answer is here or to the right
            // so subtract gt from index
            RegAS cond;
            cond.set(tio.player() ? 0 : 1);
            RegAS condprod;
            mpc_flagmult(this->tio, this->yield, condprod, gt, cond);
            index -= condprod;
        }
        --depth;
    }
    return index;
}

// Oblivious read from an additively shared index of Duoram memory
template <typename T>
Duoram<T>::Shape::MemRefAS::operator T()
{
    T res;
    Shape &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.yield, shape.addr_size);
        // Compute the index offset
        RegAS indoffset = dt.as_target;
        indoffset -= idx;
        // We only need two of the DPFs for reading
        RDPFPair dp(std::move(dt), 0, player == 0 ? 2 : 1);
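        // (The last two constructor arguments appear to select which
        // two of the triple's DPFs to keep: DPFs 0 and 2 for player 0,
        // DPFs 0 and 1 for player 1.)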
        // The RDPFTriple dt is now broken, since we've moved things
        // out of it.

        // Send the index offset to the peer and the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.yield();
        // Receive the above from the peer
        RegAS peerindoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        // Reconstruct the total offset
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        // Evaluate the DPFs and compute the dotproducts
        StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(L);
            // References to the appropriate cells in our database,
            // our blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            res += (DB + PBD) * V0.share() - BL * (V1-V0).share();
        }
        // Receive the cancellation term from the server
        T gamma;
        shape.tio.iostream_server() >> gamma;
        res += gamma;
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.yield, shape.addr_size);
        RegAS p0indoffset, p1indoffset;
        // Receive the index offsets from the computational players
        // and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        // Evaluate the DPFs to compute the cancellation terms
        T gamma0, gamma1;
        StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(L);
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases
            auto [BL0, BL1] = shape.get_server(i);
            gamma0 -= BL0 * V1.share();
            gamma1 -= BL1 * V0.share();
        }
        // Choose a random blinding factor
        T rho;
        rho.randomize();
        gamma0 += rho;
        gamma1 -= rho;
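        // (rho cancels in gamma0+gamma1, so the reconstructed result
        // is unchanged, while keeping each term individually random)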
        // Send the cancellation terms to the computational players
        shape.tio.iostream_p0() << gamma0;
        shape.tio.iostream_p1() << gamma1;
        shape.yield();
    }
    return res;  // The server will always get 0
}

// Oblivious update to an additively shared index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefAS
    &Duoram<T>::Shape::MemRefAS::operator+=(const T& M)
{
    Shape &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.yield, shape.addr_size);
        // Compute the index and message offsets
        RegAS indoffset = dt.as_target;
        indoffset -= idx;
        auto Moffset = std::make_tuple(M, M, M);
        Moffset -= dt.scaled_value<T>();
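        // (each component of Moffset is M blinded by the
        // corresponding DPF's scaled value, one per DPF in the
        // triple)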
        // Send them to the peer, and everything except the first
        // offset to the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() << Moffset;
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_server() << std::get<1>(Moffset) <<
            std::get<2>(Moffset);
        shape.yield();
        // Receive the above from the peer
        RegAS peerindoffset;
        std::tuple<T,T,T> peerMoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() >> peerMoffset;
        // Reconstruct the total offsets
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        auto Mshift = combine(Moffset, peerMoffset);
        // Evaluate the DPFs and add them to the database
        StreamEval ev(dt, indshift, 0, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the three DPFs
            auto [V0, V1, V2] = dt.scaled<T>(L) + dt.unit<T>(L) * Mshift;
            // References to the appropriate cells in our database,
            // our blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            DB += V0;
            if (player == 0) {
                BL -= V1;
                PBD += V2-V0;
            } else {
                BL -= V2;
                PBD += V1-V0;
            }
        }
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.yield, shape.addr_size);
        RegAS p0indoffset, p1indoffset;
        std::tuple<T,T> p0Moffset, p1Moffset;
        // Receive the index and message offsets from the computational
        // players and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p0() >> p0Moffset;
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p1() >> p1Moffset;
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        auto Mshift = combine(p0Moffset, p1Moffset);
        // Evaluate the DPFs and subtract them from the blinds
        StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto V = dp.scaled<T>(L) + dp.unit<T>(L) * Mshift;
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases, so we
            // can subtract the pair directly.
            shape.get_server(i) -= V;
        }
    }
    return *this;
}
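
// Example (sketch): with a Flat A over RegAS, an additively shared
// index i (a RegAS), and an additively shared message m, the
// operators above give oblivious reads and updates:
//
//     RegAS x = A[i];   // oblivious read via MemRefAS
//     A[i] += m;        // oblivious update via MemRefAS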

// Obliviously sort the elements at the two given indices. Without
// reconstructing the values, the element at idx1 will become a share
// of the smaller of the reconstructed values and the element at idx2
// a share of the larger if dir=0, and the reverse if dir=1.
//
// Note: this only works for additively shared databases
template <> template <typename U, typename V>
void Duoram<RegAS>::Flat::osort(const U &idx1, const V &idx2, bool dir)
{
    // Load the values in parallel
    RegAS val1, val2;
    run_coroutines(yield,
        [&](yield_t &yield) {
            Flat Acoro = context(yield);
            val1 = Acoro[idx1];
        },
        [&](yield_t &yield) {
            Flat Acoro = context(yield);
            val2 = Acoro[idx2];
        });
    // Get a CDPF
    CDPF cdpf = tio.cdpf();
    // Use it to compare the values
    RegAS diff = val1-val2;
    auto [lt, eq, gt] = cdpf.compare(tio, yield, diff, tio.aes_ops());
    RegBS cmp = dir ? lt : gt;
    // Get additive shares of cmp*diff
    RegAS cmp_diff;
    mpc_flagmult(tio, yield, cmp_diff, cmp, diff);
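    // In the updates below: if cmp=1, the value at idx1 becomes
    // val1-(val1-val2) = val2 and the value at idx2 becomes
    // val2+(val1-val2) = val1, i.e., a swap; if cmp=0, both are
    // unchanged.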
    // Update the two locations in parallel
    run_coroutines(yield,
        [&](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro[idx1] -= cmp_diff;
        },
        [&](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro[idx2] += cmp_diff;
        });
}

// The MemRefXS routines are almost identical to the MemRefAS
// routines, but I couldn't figure out how to get them to be two
// instances of a template. Sorry for the code duplication.

// Oblivious read from an XOR-shared index of Duoram memory
template <typename T>
Duoram<T>::Shape::MemRefXS::operator T()
{
    Shape &shape = this->shape;
    shape.explicitonly(false);
    T res;
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.yield, shape.addr_size);
        // Compute the index offset
        RegXS indoffset = dt.xs_target;
        indoffset -= idx;
        // We only need two of the DPFs for reading
        RDPFPair dp(std::move(dt), 0, player == 0 ? 2 : 1);
        // Send the index offset to the peer and the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.yield();
        // Receive the above from the peer
        RegXS peerindoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        // Reconstruct the total offset
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        // Evaluate the DPFs and compute the dotproducts
        StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
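        // (Note the difference from the additively shared index case:
        // indshift is passed as StreamEval's third argument rather
        // than its second, evidently an XOR offset rather than an
        // additive starting point.)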
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(L);
            // References to the appropriate cells in our database,
            // our blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            res += (DB + PBD) * V0.share() - BL * (V1-V0).share();
        }
        // Receive the cancellation term from the server
        T gamma;
        shape.tio.iostream_server() >> gamma;
        res += gamma;
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.yield, shape.addr_size);
        RegXS p0indoffset, p1indoffset;
        // Receive the index offsets from the computational players
        // and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        // Evaluate the DPFs to compute the cancellation terms
        T gamma0, gamma1;
        StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto [V0, V1] = dp.unit<T>(L);
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases
            auto [BL0, BL1] = shape.get_server(i);
            gamma0 -= BL0 * V1.share();
            gamma1 -= BL1 * V0.share();
        }
        // Choose a random blinding factor
        T rho;
        rho.randomize();
        gamma0 += rho;
        gamma1 -= rho;
        // Send the cancellation terms to the computational players
        shape.tio.iostream_p0() << gamma0;
        shape.tio.iostream_p1() << gamma1;
        shape.yield();
    }
    return res;  // The server will always get 0
}

// Oblivious update to an XOR-shared index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefXS
    &Duoram<T>::Shape::MemRefXS::operator+=(const T& M)
{
    Shape &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this
        RDPFTriple dt = shape.tio.rdpftriple(shape.yield, shape.addr_size);
        // Compute the index and message offsets
        RegXS indoffset = dt.xs_target;
        indoffset -= idx;
        auto Moffset = std::make_tuple(M, M, M);
        Moffset -= dt.scaled_value<T>();
        // Send them to the peer, and everything except the first
        // offset to the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() << Moffset;
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_server() << std::get<1>(Moffset) <<
            std::get<2>(Moffset);
        shape.yield();
        // Receive the above from the peer
        RegXS peerindoffset;
        std::tuple<T,T,T> peerMoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() >> peerMoffset;
        // Reconstruct the total offsets
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        auto Mshift = combine(Moffset, peerMoffset);
        // Evaluate the DPFs and add them to the database
        StreamEval ev(dt, 0, indshift, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the three DPFs
            auto [V0, V1, V2] = dt.scaled<T>(L) + dt.unit<T>(L) * Mshift;
            // References to the appropriate cells in our database,
            // our blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            DB += V0;
            if (player == 0) {
                BL -= V1;
                PBD += V2-V0;
            } else {
                BL -= V2;
                PBD += V1-V0;
            }
        }
    } else {
        // The server does this
        RDPFPair dp = shape.tio.rdpfpair(shape.yield, shape.addr_size);
        RegXS p0indoffset, p1indoffset;
        std::tuple<T,T> p0Moffset, p1Moffset;
        // Receive the index and message offsets from the computational
        // players and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p0() >> p0Moffset;
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p1() >> p1Moffset;
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        auto Mshift = combine(p0Moffset, p1Moffset);
        // Evaluate the DPFs and subtract them from the blinds
        StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
        for (size_t i=0; i<shape.shape_size; ++i) {
            auto L = ev.next();
            // The values from the two DPFs
            auto V = dp.scaled<T>(L) + dp.unit<T>(L) * Mshift;
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases, so we
            // can subtract the pair directly.
            shape.get_server(i) -= V;
        }
    }
    return *this;
}

// Explicit read from a given index of Duoram memory
template <typename T>
Duoram<T>::Shape::MemRefExpl::operator T()
{
    Shape &shape = this->shape;
    T res;
    int player = shape.tio.player();
    if (player < 2) {
        res = std::get<0>(shape.get_comp(idx));
    }
    return res;  // The server will always get 0
}

// Explicit update to a given index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefExpl
    &Duoram<T>::Shape::MemRefExpl::operator+=(const T& M)
{
    Shape &shape = this->shape;
    int player = shape.tio.player();
    // In explicit-only mode, just update the local DB; we'll sync the
    // blinds and the blinded DB when we leave explicit-only mode.
    if (shape.explicitmode) {
        if (player < 2) {
            auto [ DB, BL, PBD ] = shape.get_comp(idx);
            DB += M;
        }
        return *this;
    }
    if (player < 2) {
        // Computational players do this

        // Pick a blinding factor
        T blind;
        blind.randomize();
        // Send the blind to the server, and the blinded value to the
        // peer
        shape.tio.iostream_server() << blind;
        shape.tio.iostream_peer() << (M + blind);
        shape.yield();
        // Receive the peer's blinded value
        T peerblinded;
        shape.tio.iostream_peer() >> peerblinded;
        // Our database, our blind, the peer's blinded database
        auto [ DB, BL, PBD ] = shape.get_comp(idx);
        DB += M;
        BL += blind;
        PBD += peerblinded;
    } else if (player == 2) {
        // The server does this

        // Receive the updates to the blinds
        T p0blind, p1blind;
        shape.tio.iostream_p0() >> p0blind;
        shape.tio.iostream_p1() >> p1blind;
        // The two computational parties' blinds
        auto [ BL0, BL1 ] = shape.get_server(idx);
        BL0 += p0blind;
        BL1 += p1blind;
    }
    return *this;
}