duoram.tcc 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645
  1. // Templated method implementations for duoram.hpp
  2. #include <stdio.h>
  3. #include "cdpf.hpp"
// Pass the player number and desired size
template <typename T>
Duoram<T>::Duoram(int player, size_t size) : player(player),
oram_size(size), p0_blind(blind), p1_blind(peer_blinded_db) {
// NOTE(review): p0_blind and p1_blind are initialized as aliases of
// the blind and peer_blinded_db vectors, so the server can reuse that
// storage for the two computational players' blinds without separate
// allocations — confirm the member declarations in duoram.hpp are
// reference types.
if (player < 2) {
// Computational players (0 and 1) hold a share of the database,
// their own blind, and a copy of the peer's blinded database
database.resize(size);
blind.resize(size);
peer_blinded_db.resize(size);
} else {
// The server (player 2) holds only the two computational
// players' blinds (stored via the aliases above)
p0_blind.resize(size);
p1_blind.resize(size);
}
}
  17. // For debugging; print the contents of the Duoram to stdout
  18. template <typename T>
  19. void Duoram<T>::dump() const
  20. {
  21. for (size_t i=0; i<oram_size; ++i) {
  22. if (player < 2) {
  23. printf("%04lx %016lx %016lx %016lx\n",
  24. i, database[i].share(), blind[i].share(),
  25. peer_blinded_db[i].share());
  26. } else {
  27. printf("%04lx %016lx %016lx\n",
  28. i, p0_blind[i].share(), p1_blind[i].share());
  29. }
  30. }
  31. printf("\n");
  32. }
  33. // For debugging or checking your answers (using this in general is
  34. // of course insecure)
  35. // This one reconstructs the whole database
  36. template <typename T>
  37. std::vector<T> Duoram<T>::Shape::reconstruct() const
  38. {
  39. int player = tio.player();
  40. std::vector<T> res;
  41. res.resize(duoram.size());
  42. // Player 1 sends their share of the database to player 0
  43. if (player == 1) {
  44. tio.queue_peer(duoram.database.data(), duoram.size()*sizeof(T));
  45. } else if (player == 0) {
  46. tio.recv_peer(res.data(), duoram.size()*sizeof(T));
  47. for(size_t i=0;i<duoram.size();++i) {
  48. res[i] += duoram.database[i];
  49. }
  50. }
  51. // The server (player 2) does nothing
  52. // Players 1 and 2 will get an empty vector here
  53. return res;
  54. }
  55. // This one reconstructs a single database value
  56. template <typename T>
  57. T Duoram<T>::Shape::reconstruct(const T& share) const
  58. {
  59. int player = tio.player();
  60. T res;
  61. // Player 1 sends their share of the value to player 0
  62. if (player == 1) {
  63. tio.queue_peer(&share, sizeof(T));
  64. } else if (player == 0) {
  65. tio.recv_peer(&res, sizeof(T));
  66. res += share;
  67. }
  68. // The server (player 2) does nothing
  69. // Players 1 and 2 will get 0 here
  70. return res;
  71. }
  72. // Function to set the shape_size of a shape and compute the number of
  73. // bits you need to address a shape of that size (which is the number of
  74. // bits in sz-1). This is typically called by subclass constructors.
  75. template <typename T>
  76. void Duoram<T>::Shape::set_shape_size(size_t sz)
  77. {
  78. shape_size = sz;
  79. // Compute the number of bits in (sz-1)
  80. // But use 0 if sz=0 for some reason (though that should never
  81. // happen)
  82. if (sz > 1) {
  83. addr_size = 64-__builtin_clzll(sz-1);
  84. addr_mask = address_t((size_t(1)<<addr_size)-1);
  85. } else {
  86. addr_size = 0;
  87. addr_mask = 0;
  88. }
  89. }
  90. // Constructor for the Flat shape. len=0 means the maximum size (the
  91. // parent's size minus start).
  92. template <typename T>
  93. Duoram<T>::Flat::Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield,
  94. size_t start, size_t len) : Shape(*this, duoram, tio, yield)
  95. {
  96. size_t parentsize = duoram.size();
  97. if (start > parentsize) {
  98. start = parentsize;
  99. }
  100. this->start = start;
  101. size_t maxshapesize = parentsize - start;
  102. if (len > maxshapesize || len == 0) {
  103. len = maxshapesize;
  104. }
  105. this->len = len;
  106. this->set_shape_size(len);
  107. }
  108. // Bitonic sort the elements from start to start+(1<<depth)-1, in
  109. // increasing order if dir=0 or decreasing order if dir=1. Note that
  110. // the elements must be at most 63 bits long each for the notion of
  111. // ">" to make consistent sense.
  112. template <typename T>
  113. void Duoram<T>::Flat::bitonic_sort(address_t start, nbits_t depth, bool dir)
  114. {
  115. if (depth == 0) return;
  116. if (depth == 1) {
  117. osort(start, start+1, dir);
  118. return;
  119. }
  120. // Recurse on the first half (increasing order) and the second half
  121. // (decreasing order) in parallel
  122. std::vector<coro_t> coroutines;
  123. coroutines.emplace_back([&](yield_t &yield) {
  124. Flat Acoro = context(yield);
  125. Acoro.bitonic_sort(start, depth-1, 0);
  126. });
  127. coroutines.emplace_back([&](yield_t &yield) {
  128. Flat Acoro = context(yield);
  129. Acoro.bitonic_sort(start+(1<<(depth-1)), depth-1, 1);
  130. });
  131. run_coroutines(this->yield, coroutines);
  132. // Merge the two into the desired order
  133. butterfly(start, depth, dir);
  134. }
  135. // Internal function to aid bitonic_sort
  136. template <typename T>
  137. void Duoram<T>::Flat::butterfly(address_t start, nbits_t depth, bool dir)
  138. {
  139. if (depth == 0) return;
  140. if (depth == 1) {
  141. osort(start, start+1, dir);
  142. return;
  143. }
  144. // Sort pairs of elements half the width apart in parallel
  145. address_t halfwidth = address_t(1)<<(depth-1);
  146. std::vector<coro_t> coroutines;
  147. for (address_t i=0; i<halfwidth;++i) {
  148. coroutines.emplace_back([&](yield_t &yield) {
  149. Flat Acoro = context(yield);
  150. Acoro.osort(start+i, start+i+halfwidth, dir);
  151. });
  152. }
  153. run_coroutines(this->yield, coroutines);
  154. // Recurse on each half in parallel
  155. coroutines.clear();
  156. coroutines.emplace_back([&](yield_t &yield) {
  157. Flat Acoro = context(yield);
  158. Acoro.butterfly(start, depth-1, dir);
  159. });
  160. coroutines.emplace_back([&](yield_t &yield) {
  161. Flat Acoro = context(yield);
  162. Acoro.butterfly(start+halfwidth, depth-1, dir);
  163. });
  164. run_coroutines(this->yield, coroutines);
  165. }
// Oblivious read from an additively shared index of Duoram memory.
//
// The computational players use a random DPF triple to form a blinded
// offset from the DPF's target point to the secret index, exchange the
// offsets, and take dot products of the shifted DPF outputs with their
// (blinded) databases; the server evaluates its DPF pair against the
// two blind databases and supplies a random-masked cancellation term
// so the blinds drop out of the reconstructed sharing.
template <typename T>
Duoram<T>::Shape::MemRefAS::operator T()
{
T res;
const Shape &shape = this->shape;
int player = shape.tio.player();
if (player < 2) {
// Computational players do this
RDPFTriple dt = shape.tio.rdpftriple(shape.addr_size);
// Compute the index offset
RegAS indoffset = dt.as_target;
indoffset -= idx;
// We only need two of the DPFs for reading
RDPFPair dp(std::move(dt), 0, player == 0 ? 2 : 1);
// The RDPFTriple dt is now broken, since we've moved things out
// of it.
// Send it to the peer and the server
shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
shape.yield();
// Receive the above from the peer
RegAS peerindoffset;
shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
// Reconstruct the total offset
auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
// Evaluate the DPFs and compute the dotproducts
StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
for (size_t i=0; i<shape.shape_size; ++i) {
auto L = ev.next();
// The values from the two DPFs
auto [V0, V1] = dp.unit<T>(L);
// References to the appropriate cells in our database, our
// blind, and our copy of the peer's blinded database
auto [DB, BL, PBD] = shape.get_comp(i);
res += (DB + PBD) * V0.share() - BL * (V1-V0).share();
}
// Receive the cancellation term from the server
T gamma;
shape.tio.iostream_server() >> gamma;
res += gamma;
} else {
// The server does this
RDPFPair dp = shape.tio.rdpfpair(shape.addr_size);
RegAS p0indoffset, p1indoffset;
// Receive the index offset from the computational players and
// combine them
shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
// Evaluate the DPFs to compute the cancellation terms
T gamma0, gamma1;
StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
for (size_t i=0; i<shape.shape_size; ++i) {
auto L = ev.next();
// The values from the two DPFs
auto [V0, V1] = dp.unit<T>(L);
// shape.get_server(i) returns a pair of references to the
// appropriate cells in the two blinded databases
auto [BL0, BL1] = shape.get_server(i);
gamma0 -= BL0 * V1.share();
gamma1 -= BL1 * V0.share();
}
// Choose a random blinding factor so that neither computational
// player learns its raw cancellation term
T rho;
rho.randomize();
gamma0 += rho;
gamma1 -= rho;
// Send the cancellation terms to the computational players
shape.tio.iostream_p0() << gamma0;
shape.tio.iostream_p1() << gamma1;
shape.yield();
}
return res; // The server will always get 0
}
// Oblivious update to an additively shared index of Duoram memory.
//
// The computational players blind both the target index and the
// message M against their DPF triple and exchange offsets; each
// player then adds the shifted, scaled DPF outputs into its database
// share, its blind, and its copy of the peer's blinded database, while
// the server applies the matching corrections to the two blinds it
// holds, keeping all three copies consistent.
template <typename T>
typename Duoram<T>::Shape::MemRefAS
&Duoram<T>::Shape::MemRefAS::operator+=(const T& M)
{
const Shape &shape = this->shape;
int player = shape.tio.player();
if (player < 2) {
// Computational players do this
RDPFTriple dt = shape.tio.rdpftriple(shape.addr_size);
// Compute the index and message offsets
RegAS indoffset = dt.as_target;
indoffset -= idx;
auto Moffset = std::make_tuple(M, M, M);
Moffset -= dt.scaled_value<T>();
// Send them to the peer, and everything except the first offset
// to the server
shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_peer() << Moffset;
shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_server() << std::get<1>(Moffset) <<
std::get<2>(Moffset);
shape.yield();
// Receive the above from the peer
RegAS peerindoffset;
std::tuple<T,T,T> peerMoffset;
shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_peer() >> peerMoffset;
// Reconstruct the total offsets
auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
auto Mshift = combine(Moffset, peerMoffset);
// Evaluate the DPFs and add them to the database
StreamEval ev(dt, indshift, 0, shape.tio.aes_ops());
for (size_t i=0; i<shape.shape_size; ++i) {
auto L = ev.next();
// The values from the three DPFs
auto [V0, V1, V2] = dt.scaled<T>(L) + dt.unit<T>(L) * Mshift;
// References to the appropriate cells in our database, our
// blind, and our copy of the peer's blinded database
auto [DB, BL, PBD] = shape.get_comp(i);
DB += V0;
// The two players apply V1 and V2 to opposite cells so that
// each player's blind tracks the other's view of it
if (player == 0) {
BL -= V1;
PBD += V2-V0;
} else {
BL -= V2;
PBD += V1-V0;
}
}
} else {
// The server does this
RDPFPair dp = shape.tio.rdpfpair(shape.addr_size);
RegAS p0indoffset, p1indoffset;
std::tuple<T,T> p0Moffset, p1Moffset;
// Receive the index and message offsets from the computational
// players and combine them
shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_p0() >> p0Moffset;
shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_p1() >> p1Moffset;
auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
auto Mshift = combine(p0Moffset, p1Moffset);
// Evaluate the DPFs and subtract them from the blinds
StreamEval ev(dp, indshift, 0, shape.tio.aes_ops());
for (size_t i=0; i<shape.shape_size; ++i) {
auto L = ev.next();
// The values from the two DPFs
auto V = dp.scaled<T>(L) + dp.unit<T>(L) * Mshift;
// shape.get_server(i) returns a pair of references to the
// appropriate cells in the two blinded databases, so we can
// subtract the pair directly.
shape.get_server(i) -= V;
}
}
return *this;
}
  317. // Oblivious sort with the provided other element. Without
  318. // reconstructing the values, *this will become a share of the
  319. // smaller of the reconstructed values, and other will become a
  320. // share of the larger.
  321. //
  322. // Note: this only works for additively shared databases
  323. template <> template <typename U,typename V>
  324. void Duoram<RegAS>::Flat::osort(const U &idx1, const V &idx2, bool dir)
  325. {
  326. printf("osort %u %u %d\n", idx1, idx2, dir);
  327. // Load the values in parallel
  328. std::vector<coro_t> coroutines;
  329. RegAS val1, val2;
  330. coroutines.emplace_back([&](yield_t &yield) {
  331. Flat Acoro = context(yield);
  332. val1 = Acoro[idx1];
  333. });
  334. coroutines.emplace_back([&](yield_t &yield) {
  335. Flat Acoro = context(yield);
  336. val2 = Acoro[idx2];
  337. });
  338. run_coroutines(yield, coroutines);
  339. // Get a CDPF
  340. CDPF cdpf = tio.cdpf();
  341. // Use it to compare the values
  342. RegAS diff = val1-val2;
  343. auto [lt, eq, gt] = cdpf.compare(tio, yield, diff, tio.aes_ops());
  344. RegBS cmp = dir ? lt : gt;
  345. // Get additive shares of cmp*diff
  346. RegAS cmp_diff;
  347. mpc_flagmult(tio, yield, cmp_diff, cmp, diff);
  348. // Update the two locations in parallel
  349. coroutines.clear();
  350. coroutines.emplace_back([&](yield_t &yield) {
  351. Flat Acoro = context(yield);
  352. Acoro[idx1] -= cmp_diff;
  353. });
  354. coroutines.emplace_back([&](yield_t &yield) {
  355. Flat Acoro = context(yield);
  356. Acoro[idx2] += cmp_diff;
  357. });
  358. run_coroutines(yield, coroutines);
  359. }
// The MemRefXS routines are almost identical to the MemRefAS routines,
// but I couldn't figure out how to get them to be two instances of a
// template. Sorry for the code duplication.
// Oblivious read from an XOR shared index of Duoram memory.
//
// Same protocol as the additively shared read above, but the index
// offset is a RegXS formed against the triple's xs_target.
// NOTE(review): the offset here is passed as StreamEval's third
// argument rather than the second as in the RegAS version —
// presumably the additive vs. XOR shift parameters; confirm against
// StreamEval's declaration.
template <typename T>
Duoram<T>::Shape::MemRefXS::operator T()
{
const Shape &shape = this->shape;
T res;
int player = shape.tio.player();
if (player < 2) {
// Computational players do this
RDPFTriple dt = shape.tio.rdpftriple(shape.addr_size);
// Compute the index offset
RegXS indoffset = dt.xs_target;
indoffset -= idx;
// We only need two of the DPFs for reading
RDPFPair dp(std::move(dt), 0, player == 0 ? 2 : 1);
// Send it to the peer and the server
shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
shape.yield();
// Receive the above from the peer
RegXS peerindoffset;
shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
// Reconstruct the total offset
auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
// Evaluate the DPFs and compute the dotproducts
StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
for (size_t i=0; i<shape.shape_size; ++i) {
auto L = ev.next();
// The values from the two DPFs
auto [V0, V1] = dp.unit<T>(L);
// References to the appropriate cells in our database, our
// blind, and our copy of the peer's blinded database
auto [DB, BL, PBD] = shape.get_comp(i);
res += (DB + PBD) * V0.share() - BL * (V1-V0).share();
}
// Receive the cancellation term from the server
T gamma;
shape.tio.iostream_server() >> gamma;
res += gamma;
} else {
// The server does this
RDPFPair dp = shape.tio.rdpfpair(shape.addr_size);
RegXS p0indoffset, p1indoffset;
// Receive the index offset from the computational players and
// combine them
shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
// Evaluate the DPFs to compute the cancellation terms
T gamma0, gamma1;
StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
for (size_t i=0; i<shape.shape_size; ++i) {
auto L = ev.next();
// The values from the two DPFs
auto [V0, V1] = dp.unit<T>(L);
// shape.get_server(i) returns a pair of references to the
// appropriate cells in the two blinded databases
auto [BL0, BL1] = shape.get_server(i);
gamma0 -= BL0 * V1.share();
gamma1 -= BL1 * V0.share();
}
// Choose a random blinding factor so that neither computational
// player learns its raw cancellation term
T rho;
rho.randomize();
gamma0 += rho;
gamma1 -= rho;
// Send the cancellation terms to the computational players
shape.tio.iostream_p0() << gamma0;
shape.tio.iostream_p1() << gamma1;
shape.yield();
}
return res; // The server will always get 0
}
// Oblivious update to an XOR shared index of Duoram memory.
//
// Same protocol as the additively shared update above, but the index
// offset is a RegXS formed against the triple's xs_target, and the
// offset is passed as StreamEval's third argument (see the note on the
// XOR-shared read above).
template <typename T>
typename Duoram<T>::Shape::MemRefXS
&Duoram<T>::Shape::MemRefXS::operator+=(const T& M)
{
const Shape &shape = this->shape;
int player = shape.tio.player();
if (player < 2) {
// Computational players do this
RDPFTriple dt = shape.tio.rdpftriple(shape.addr_size);
// Compute the index and message offsets
RegXS indoffset = dt.xs_target;
indoffset -= idx;
auto Moffset = std::make_tuple(M, M, M);
Moffset -= dt.scaled_value<T>();
// Send them to the peer, and everything except the first offset
// to the server
shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_peer() << Moffset;
shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_server() << std::get<1>(Moffset) <<
std::get<2>(Moffset);
shape.yield();
// Receive the above from the peer
RegXS peerindoffset;
std::tuple<T,T,T> peerMoffset;
shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_peer() >> peerMoffset;
// Reconstruct the total offsets
auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
auto Mshift = combine(Moffset, peerMoffset);
// Evaluate the DPFs and add them to the database
StreamEval ev(dt, 0, indshift, shape.tio.aes_ops());
for (size_t i=0; i<shape.shape_size; ++i) {
auto L = ev.next();
// The values from the three DPFs
auto [V0, V1, V2] = dt.scaled<T>(L) + dt.unit<T>(L) * Mshift;
// References to the appropriate cells in our database, our
// blind, and our copy of the peer's blinded database
auto [DB, BL, PBD] = shape.get_comp(i);
DB += V0;
// The two players apply V1 and V2 to opposite cells so that
// each player's blind tracks the other's view of it
if (player == 0) {
BL -= V1;
PBD += V2-V0;
} else {
BL -= V2;
PBD += V1-V0;
}
}
} else {
// The server does this
RDPFPair dp = shape.tio.rdpfpair(shape.addr_size);
RegXS p0indoffset, p1indoffset;
std::tuple<T,T> p0Moffset, p1Moffset;
// Receive the index and message offsets from the computational
// players and combine them
shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_p0() >> p0Moffset;
shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
shape.tio.iostream_p1() >> p1Moffset;
auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
auto Mshift = combine(p0Moffset, p1Moffset);
// Evaluate the DPFs and subtract them from the blinds
StreamEval ev(dp, 0, indshift, shape.tio.aes_ops());
for (size_t i=0; i<shape.shape_size; ++i) {
auto L = ev.next();
// The values from the two DPFs
auto V = dp.scaled<T>(L) + dp.unit<T>(L) * Mshift;
// shape.get_server(i) returns a pair of references to the
// appropriate cells in the two blinded databases, so we can
// subtract the pair directly.
shape.get_server(i) -= V;
}
}
return *this;
}
  512. // Explicit read from a given index of Duoram memory
  513. template <typename T>
  514. Duoram<T>::Shape::MemRefExpl::operator T()
  515. {
  516. const Shape &shape = this->shape;
  517. T res;
  518. int player = shape.tio.player();
  519. if (player < 2) {
  520. res = std::get<0>(shape.get_comp(idx));
  521. }
  522. return res; // The server will always get 0
  523. }
  524. // Explicit update to a given index of Duoram memory
  525. template <typename T>
  526. typename Duoram<T>::Shape::MemRefExpl
  527. &Duoram<T>::Shape::MemRefExpl::operator+=(const T& M)
  528. {
  529. const Shape &shape = this->shape;
  530. int player = shape.tio.player();
  531. if (player < 2) {
  532. // Computational players do this
  533. // Pick a blinding factor
  534. T blind;
  535. blind.randomize();
  536. // Send the blind to the server, and the blinded value to the
  537. // peer
  538. shape.tio.iostream_server() << blind;
  539. shape.tio.iostream_peer() << (M + blind);
  540. shape.yield();
  541. // Receive the peer's blinded value
  542. T peerblinded;
  543. shape.tio.iostream_peer() >> peerblinded;
  544. // Our database, our blind, the peer's blinded database
  545. auto [ DB, BL, PBD ] = shape.get_comp(idx);
  546. DB += M;
  547. BL += blind;
  548. PBD += peerblinded;
  549. } else if (player == 2) {
  550. // The server does this
  551. // Receive the updates to the blinds
  552. T p0blind, p1blind;
  553. shape.tio.iostream_p0() >> p0blind;
  554. shape.tio.iostream_p1() >> p1blind;
  555. // The two computational parties' blinds
  556. auto [ BL0, BL1 ] = shape.get_server(idx);
  557. BL0 += p0blind;
  558. BL1 += p1blind;
  559. }
  560. return *this;
  561. }