// Templated method implementations for duoram.hpp

#include <stdio.h>
#include <cassert>      // for the assert()s in the MemRefInd operators

#include "mpcops.hpp"
#include "cdpf.hpp"
#include "rdpf.hpp"

// Pass the player number and desired size
template <typename T>
Duoram<T>::Duoram(int player, size_t size) : player(player),
        oram_size(size), p0_blind(blind), p1_blind(peer_blinded_db) {
    if (player < 2) {
        database.resize(size);
        blind.resize(size);
        peer_blinded_db.resize(size);
    } else {
        p0_blind.resize(size);
        p1_blind.resize(size);
    }
}

// For debugging; print the contents of the Duoram to stdout
template <typename T>
void Duoram<T>::dump() const
{
    for (size_t i=0; i<oram_size; ++i) {
        if (player < 2) {
            printf("%04lx ", i);
            database[i].dump();
            printf(" ");
            blind[i].dump();
            printf(" ");
            peer_blinded_db[i].dump();
            printf("\n");
        } else {
            printf("%04lx ", i);
            p0_blind[i].dump();
            printf(" ");
            p1_blind[i].dump();
            printf("\n");
        }
    }
    printf("\n");
}

// Enable or disable explicit-only mode.  Only accesses using [] with
// explicit (address_t) indices are allowed in this mode.  Using []
// with RegAS or RegXS indices will automatically turn off this
// mode, or you can turn it off explicitly.  In explicit-only mode,
// updates to the memory in the Shape will not induce communication
// to the server or peer, but when it turns off, a message of the
// size of the entire Shape will be sent to each of the server and
// the peer.  This is useful if you're going to be doing multiple
// explicit writes to every element of the Shape before you do your
// next oblivious read or write.  Bitonic sort is a prime example.
template <typename T>
void Duoram<T>::Shape::explicitonly(bool enable)
{
    if (enable) {
        explicitmode = true;
    } else if (explicitmode) {
        explicitmode = false;
        // Reblind the whole Shape
        int player = tio.player();
        if (player < 2) {
            for (size_t i=0; i<shape_size; ++i) {
                auto [ DB, BL, PBD ] = get_comp(i);
                BL.randomize();
                tio.iostream_server() << BL;
                tio.iostream_peer() << (DB + BL);
            }
            yield();
            for (size_t i=0; i<shape_size; ++i) {
                auto [ DB, BL, PBD ] = get_comp(i);
                tio.iostream_peer() >> PBD;
            }
        } else {
            yield();
            for (size_t i=0; i<shape_size; ++i) {
                auto [BL0, BL1] = get_server(i);
                tio.iostream_p0() >> BL0;
                tio.iostream_p1() >> BL1;
            }
        }
    }
}
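
// Usage sketch (hypothetical, not part of this file): batch-initialize a
// Flat with explicit writes, paying the resynchronization cost only once
// at the end.  The names oram, tio, yield, and vals are assumed to come
// from the surrounding program.
//
//     Duoram<RegAS> oram(tio.player(), 16);
//     Duoram<RegAS>::Flat A(oram, tio, yield, 0, 0);
//     A.explicitonly(true);            // local-only updates from here on
//     for (address_t i=0; i<16; ++i) {
//         A[i] += vals[i];             // no communication per write
//     }
//     A.explicitonly(false);           // one Shape-sized reblinding message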

// For debugging or checking your answers (using this in general is
// of course insecure)

// This one reconstructs the whole database
template <typename T>
std::vector<T> Duoram<T>::Shape::reconstruct() const
{
    int player = tio.player();
    std::vector<T> res;
    res.resize(duoram.size());
    // Player 1 sends their share of the database to player 0
    if (player == 1) {
        tio.queue_peer(duoram.database.data(), duoram.size()*sizeof(T));
        yield();
    } else if (player == 0) {
        yield();
        tio.recv_peer(res.data(), duoram.size()*sizeof(T));
        for (size_t i=0; i<duoram.size(); ++i) {
            res[i] += duoram.database[i];
        }
    } else if (player == 2) {
        // The server (player 2) only syncs with the yield
        yield();
    }
    // Players 1 and 2 will get an empty vector here
    return res;
}

// This one reconstructs a single database value
template <typename T>
T Duoram<T>::Shape::reconstruct(const T& share) const
{
    int player = tio.player();
    T res;
    // Player 1 sends their share of the value to player 0
    if (player == 1) {
        tio.queue_peer(&share, sizeof(T));
        yield();
    } else if (player == 0) {
        yield();
        tio.recv_peer(&res, sizeof(T));
        res += share;
    } else if (player == 2) {
        // The server (player 2) only syncs with the yield
        yield();
    }
    // Players 1 and 2 will get 0 here
    return res;
}
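
// Checking sketch (hypothetical): after a test run, player 0 can
// reconstruct and inspect the database; players 1 and 2 get an empty
// vector, so guard on the player number.
//
//     auto db = A.reconstruct();
//     if (tio.player() == 0) {
//         for (size_t i=0; i<db.size(); ++i) {
//             printf("%04lx ", i); db[i].dump(); printf("\n");
//         }
//     }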

// Function to set the shape_size of a shape and compute the number of
// bits you need to address a shape of that size (which is the number of
// bits in sz-1).  This is typically called by subclass constructors.
template <typename T>
void Duoram<T>::Shape::set_shape_size(size_t sz)
{
    shape_size = sz;
    // Compute the number of bits in (sz-1)
    // But use 0 if sz=0 for some reason (though that should never
    // happen)
    if (sz > 1) {
        addr_size = 64-__builtin_clzll(sz-1);
        addr_mask = address_t((size_t(1)<<addr_size)-1);
    } else {
        addr_size = 0;
        addr_mask = 0;
    }
}
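
// Worked example: sz = 1000 gives sz-1 = 999 = 0b1111100111 (10 bits), so
// addr_size = 64 - __builtin_clzll(999) = 64 - 54 = 10 and
// addr_mask = (1<<10)-1 = 0x3ff.  An exact power of two behaves the same
// way: sz = 1024 gives sz-1 = 1023, which is also 10 bits.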

// Constructor for the Flat shape.  len=0 means the maximum size (the
// parent's size minus start).
template <typename T>
Duoram<T>::Flat::Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield,
    size_t start, size_t len) : Shape(*this, duoram, tio, yield)
{
    size_t parentsize = duoram.size();
    if (start > parentsize) {
        start = parentsize;
    }
    this->start = start;
    size_t maxshapesize = parentsize - start;
    if (len > maxshapesize || len == 0) {
        len = maxshapesize;
    }
    this->len = len;
    this->set_shape_size(len);
}

// Bitonic sort the elements from start to start+(1<<depth)-1, in
// increasing order if dir=0 or decreasing order if dir=1.  Note that
// the elements must be at most 63 bits long each for the notion of
// ">" to make consistent sense.
template <typename T>
void Duoram<T>::Flat::bitonic_sort(address_t start, nbits_t depth, bool dir)
{
    if (depth == 0) return;
    if (depth == 1) {
        osort(start, start+1, dir);
        return;
    }
    // Recurse on the first half (increasing order) and the second half
    // (decreasing order) in parallel
    run_coroutines(this->yield,
        [this, start, depth](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.bitonic_sort(start, depth-1, 0);
        },
        [this, start, depth](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.bitonic_sort(start+(1<<(depth-1)), depth-1, 1);
        });
    // Merge the two into the desired order
    butterfly(start, depth, dir);
}
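
// Usage sketch (hypothetical): sort the first 2^4 = 16 elements of a Flat
// into increasing order.  The osort() specialization below requires the
// values to be additively shared (RegAS).
//
//     Duoram<RegAS> oram(tio.player(), 16);
//     Duoram<RegAS>::Flat A(oram, tio, yield, 0, 0);
//     // ... fill A, for example in explicit-only mode ...
//     A.bitonic_sort(0, 4, 0);   // start=0, depth=4, dir=0 (increasing)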

// Internal function to aid bitonic_sort
template <typename T>
void Duoram<T>::Flat::butterfly(address_t start, nbits_t depth, bool dir)
{
    if (depth == 0) return;
    if (depth == 1) {
        osort(start, start+1, dir);
        return;
    }
    // Sort pairs of elements half the width apart in parallel
    address_t halfwidth = address_t(1)<<(depth-1);
    std::vector<coro_t> coroutines;
    for (address_t i=0; i<halfwidth; ++i) {
        coroutines.emplace_back(
            [this, start, halfwidth, dir, i](yield_t &yield) {
                Flat Acoro = context(yield);
                Acoro.osort(start+i, start+i+halfwidth, dir);
            });
    }
    run_coroutines(this->yield, coroutines);
    // Recurse on each half in parallel
    run_coroutines(this->yield,
        [this, start, depth, dir](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.butterfly(start, depth-1, dir);
        },
        [this, start, halfwidth, depth, dir](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro.butterfly(start+halfwidth, depth-1, dir);
        });
}

// Helper functions to specialize the read and update operations for
// RegAS and RegXS shared indices
template <typename U>
inline address_t IfRegAS(address_t val);
template <typename U>
inline address_t IfRegXS(address_t val);

template <>
inline address_t IfRegAS<RegAS>(address_t val) { return val; }
template <>
inline address_t IfRegAS<RegXS>(address_t val) { return 0; }
template <>
inline address_t IfRegXS<RegAS>(address_t val) { return 0; }
template <>
inline address_t IfRegXS<RegXS>(address_t val) { return val; }
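
// For example, the calls below of the form
//     ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift), ...);
// pass (indshift, 0) when the index is additively shared (U = RegAS) and
// (0, indshift) when it is XOR shared (U = RegXS), so the same call site
// selects the right kind of index shift for either sharing type.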

// Oblivious read from an additively or XOR shared index of Duoram memory
// T is the sharing type of the _values_ in the database; U is the
// sharing type of the _indices_ in the database.
template <typename T> template <typename U>
Duoram<T>::Shape::MemRefS<U>::operator T()
{
    T res;
    Shape &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this

        RDPFTriple dt = shape.tio.rdpftriple(shape.yield, shape.addr_size);

        // Compute the index offset
        U indoffset = dt.target<U>();
        indoffset -= idx;

        // We only need two of the DPFs for reading
        RDPFPair dp(std::move(dt), 0, player == 0 ? 2 : 1);
        // The RDPFTriple dt is now broken, since we've moved things out
        // of it.

        // Send it to the peer and the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));

        shape.yield();

        // Receive the above from the peer
        U peerindoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));

        // Reconstruct the total offset
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);

        // Evaluate the DPFs and compute the dotproducts
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        T init;
        res = pe.reduce(init, [&dp, &shape] (int thread_num, address_t i,
                const RDPFPair::node &leaf) {
            // The values from the two DPFs, which will each be of type T
            auto [V0, V1] = dp.unit<T>(leaf);
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            return (DB + PBD).mulshare(V0) - BL.mulshare(V1-V0);
        });

        shape.yield();

        // Receive the cancellation term from the server
        T gamma;
        shape.tio.iostream_server() >> gamma;
        res += gamma;
    } else {
        // The server does this

        RDPFPair dp = shape.tio.rdpfpair(shape.yield, shape.addr_size);
        U p0indoffset, p1indoffset;

        shape.yield();

        // Receive the index offset from the computational players and
        // combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);

        // Evaluate the DPFs to compute the cancellation terms
        std::tuple<T,T> init, gamma;
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        gamma = pe.reduce(init, [&dp, &shape] (int thread_num, address_t i,
                const RDPFPair::node &leaf) {
            // The values from the two DPFs, each of type T
            auto [V0, V1] = dp.unit<T>(leaf);
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases
            auto [BL0, BL1] = shape.get_server(i);
            return std::make_tuple(-BL0.mulshare(V1), -BL1.mulshare(V0));
        });

        // Choose a random blinding factor
        T rho;
        rho.randomize();

        std::get<0>(gamma) += rho;
        std::get<1>(gamma) -= rho;

        // Send the cancellation terms to the computational players
        shape.tio.iostream_p0() << std::get<0>(gamma);
        shape.tio.iostream_p1() << std::get<1>(gamma);

        shape.yield();
    }
    return res;  // The server will always get 0
}
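
// Usage sketch (hypothetical): an oblivious read at a shared index.  Each
// computational player passes its share idx of the secret index; the
// conversion operator above runs the protocol and each player gets back a
// share of the value stored at the reconstructed index.
//
//     RegAS idx;             // this player's share of the secret index
//     RegAS val = A[idx];    // val is this player's share of A[index]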

// Oblivious update to a shared index of Duoram memory, only for
// T = RegAS or RegXS
template <typename T> template <typename U>
typename Duoram<T>::Shape::template MemRefS<U>
    &Duoram<T>::Shape::MemRefS<U>::oram_update(const T& M,
    const prac_template_true &)
{
    Shape &shape = this->shape;
    shape.explicitonly(false);
    int player = shape.tio.player();
    if (player < 2) {
        // Computational players do this

        RDPFTriple dt = shape.tio.rdpftriple(shape.yield, shape.addr_size);

        // Compute the index and message offsets
        U indoffset = dt.target<U>();
        indoffset -= idx;
        auto Moffset = std::make_tuple(M, M, M);
        Moffset -= dt.scaled_value<T>();

        // Send them to the peer, and everything except the first offset
        // to the server
        shape.tio.queue_peer(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() << Moffset;
        shape.tio.queue_server(&indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_server() << std::get<1>(Moffset) <<
            std::get<2>(Moffset);

        shape.yield();

        // Receive the above from the peer
        U peerindoffset;
        std::tuple<T,T,T> peerMoffset;
        shape.tio.recv_peer(&peerindoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_peer() >> peerMoffset;

        // Reconstruct the total offsets
        auto indshift = combine(indoffset, peerindoffset, shape.addr_size);
        auto Mshift = combine(Moffset, peerMoffset);

        // Evaluate the DPFs and add them to the database
        ParallelEval pe(dt, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        int init = 0;
        pe.reduce(init, [&dt, &shape, &Mshift, player] (int thread_num,
                address_t i, const RDPFTriple::node &leaf) {
            // The values from the three DPFs
            auto [V0, V1, V2] = dt.scaled<T>(leaf) + dt.unit<T>(leaf) * Mshift;
            // References to the appropriate cells in our database, our
            // blind, and our copy of the peer's blinded database
            auto [DB, BL, PBD] = shape.get_comp(i);
            DB += V0;
            if (player == 0) {
                BL -= V1;
                PBD += V2-V0;
            } else {
                BL -= V2;
                PBD += V1-V0;
            }
            return 0;
        });
    } else {
        // The server does this

        RDPFPair dp = shape.tio.rdpfpair(shape.yield, shape.addr_size);
        U p0indoffset, p1indoffset;
        std::tuple<T,T> p0Moffset, p1Moffset;

        shape.yield();

        // Receive the index and message offsets from the computational
        // players and combine them
        shape.tio.recv_p0(&p0indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p0() >> p0Moffset;
        shape.tio.recv_p1(&p1indoffset, BITBYTES(shape.addr_size));
        shape.tio.iostream_p1() >> p1Moffset;
        auto indshift = combine(p0indoffset, p1indoffset, shape.addr_size);
        auto Mshift = combine(p0Moffset, p1Moffset);

        // Evaluate the DPFs and subtract them from the blinds
        ParallelEval pe(dp, IfRegAS<U>(indshift), IfRegXS<U>(indshift),
            shape.shape_size, shape.tio.cpu_nthreads(),
            shape.tio.aes_ops());
        int init = 0;
        pe.reduce(init, [&dp, &shape, &Mshift] (int thread_num,
                address_t i, const RDPFPair::node &leaf) {
            // The values from the two DPFs
            auto V = dp.scaled<T>(leaf) + dp.unit<T>(leaf) * Mshift;
            // shape.get_server(i) returns a pair of references to the
            // appropriate cells in the two blinded databases, so we can
            // subtract the pair directly.
            shape.get_server(i) -= V;
            return 0;
        });
    }
    return *this;
}

// Oblivious update to a shared index of Duoram memory, only for
// T not equal to RegAS or RegXS
template <typename T> template <typename U>
typename Duoram<T>::Shape::template MemRefS<U>
    &Duoram<T>::Shape::MemRefS<U>::oram_update(const T& M,
    const prac_template_false &)
{
    return *this;
}

// Oblivious update to an additively or XOR shared index of Duoram memory
template <typename T> template <typename U>
typename Duoram<T>::Shape::template MemRefS<U>
    &Duoram<T>::Shape::MemRefS<U>::operator+=(const T& M)
{
    return oram_update(M, prac_basic_Reg_S<T>());
}
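
// Usage sketch (hypothetical): oblivious updates and writes at a shared
// index.  operator+= adds M at the secret location in one update round;
// operator= (below) first reads the old value obliviously and then adds
// the difference, so a write costs one read plus one update.
//
//     RegAS idx, delta, newval;   // shares of the index and the values
//     A[idx] += delta;            // oblivious in-place addition
//     A[idx] = newval;            // oblivious overwrite (read + update)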

// Oblivious write to an additively or XOR shared index of Duoram memory
template <typename T> template <typename U>
typename Duoram<T>::Shape::template MemRefS<U>
    &Duoram<T>::Shape::MemRefS<U>::operator=(const T& M)
{
    T oldval = *this;
    T update = M - oldval;
    *this += update;
    return *this;
}

// Oblivious sort of the elements at the two provided indices.  Without
// reconstructing the values, the element at idx1 will become a share
// of the smaller of the reconstructed values and the element at idx2
// a share of the larger (or the other way around if dir=1).
//
// Note: this only works for additively shared databases
template <> template <typename U,typename V>
void Duoram<RegAS>::Flat::osort(const U &idx1, const V &idx2, bool dir)
{
    // Load the values in parallel
    RegAS val1, val2;
    run_coroutines(yield,
        [this, &idx1, &val1](yield_t &yield) {
            Flat Acoro = context(yield);
            val1 = Acoro[idx1];
        },
        [this, &idx2, &val2](yield_t &yield) {
            Flat Acoro = context(yield);
            val2 = Acoro[idx2];
        });
    // Get a CDPF
    CDPF cdpf = tio.cdpf(yield);
    // Use it to compare the values
    RegAS diff = val1-val2;
    auto [lt, eq, gt] = cdpf.compare(tio, yield, diff, tio.aes_ops());
    RegBS cmp = dir ? lt : gt;
    // Get additive shares of cmp*diff
    RegAS cmp_diff;
    mpc_flagmult(tio, yield, cmp_diff, cmp, diff);
    // Update the two locations in parallel
    run_coroutines(yield,
        [this, &idx1, &cmp_diff](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro[idx1] -= cmp_diff;
        },
        [this, &idx2, &cmp_diff](yield_t &yield) {
            Flat Acoro = context(yield);
            Acoro[idx2] += cmp_diff;
        });
}
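
// Semantics sketch: with dir=0, if A[2] and A[5] reconstruct to 9 and 4,
// then diff=5, gt=1, and cmp_diff=5, so after
//
//     A.osort(2, 5, 0);
//
// A[2] reconstructs to 9-5 = 4 (the minimum) and A[5] to 4+5 = 9 (the
// maximum).  With dir=1 the larger value would end up at the first index.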

// Explicit read from a given index of Duoram memory
template <typename T>
Duoram<T>::Shape::MemRefExpl::operator T()
{
    Shape &shape = this->shape;
    T res;
    int player = shape.tio.player();
    if (player < 2) {
        res = std::get<0>(shape.get_comp(idx));
    }
    return res;  // The server will always get 0
}

// Explicit update to a given index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefExpl
    &Duoram<T>::Shape::MemRefExpl::operator+=(const T& M)
{
    Shape &shape = this->shape;
    int player = shape.tio.player();
    // In explicit-only mode, just update the local DB; we'll sync the
    // blinds and the blinded DB when we leave explicit-only mode.
    if (shape.explicitmode) {
        if (player < 2) {
            auto [ DB, BL, PBD ] = shape.get_comp(idx);
            DB += M;
        }
        return *this;
    }
    if (player < 2) {
        // Computational players do this

        // Pick a blinding factor
        T blind;
        blind.randomize();

        // Send the blind to the server, and the blinded value to the
        // peer
        shape.tio.iostream_server() << blind;
        shape.tio.iostream_peer() << (M + blind);

        shape.yield();

        // Receive the peer's blinded value
        T peerblinded;
        shape.tio.iostream_peer() >> peerblinded;

        // Our database, our blind, the peer's blinded database
        auto [ DB, BL, PBD ] = shape.get_comp(idx);
        DB += M;
        BL += blind;
        PBD += peerblinded;
    } else if (player == 2) {
        // The server does this

        shape.yield();

        // Receive the updates to the blinds
        T p0blind, p1blind;
        shape.tio.iostream_p0() >> p0blind;
        shape.tio.iostream_p1() >> p1blind;

        // The two computational parties' blinds
        auto [ BL0, BL1 ] = shape.get_server(idx);
        BL0 += p0blind;
        BL1 += p1blind;
    }
    return *this;
}

// Explicit write to a given index of Duoram memory
template <typename T>
typename Duoram<T>::Shape::MemRefExpl
    &Duoram<T>::Shape::MemRefExpl::operator=(const T& M)
{
    T oldval = *this;
    T update = M - oldval;
    *this += update;
    return *this;
}

// Independent U-shared reads into a Shape of subtype Sh on a Duoram
// with values of sharing type T
template <typename T> template <typename U, typename Sh>
Duoram<T>::Shape::MemRefInd<U,Sh>::operator std::vector<T>()
{
    std::vector<T> res;
    size_t size = indcs.size();
    res.resize(size);
    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &res, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            res[i] = Sh_coro[indcs[i]];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return res;
}
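
// Usage sketch (hypothetical, assuming the Shape's operator[] accepts a
// vector of index shares and returns a MemRefInd): read several
// independent shared indices at once; the coroutines above overlap the
// communication rounds of the individual oblivious reads.
//
//     std::vector<RegAS> idxs = { idx0, idx1, idx2 };  // index shares
//     std::vector<RegAS> vals = A[idxs];  // one value share per index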

// Independent U-shared updates into a Shape of subtype Sh on a Duoram
// with values of sharing type T (vector version)
template <typename T> template <typename U, typename Sh>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator+=(const std::vector<T>& M)
{
    size_t size = indcs.size();
    assert(M.size() == size);

    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] += M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared updates into a Shape of subtype Sh on a Duoram
// with values of sharing type T (array version)
template <typename T> template <typename U, typename Sh> template <size_t N>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator+=(const std::array<T,N>& M)
{
    size_t size = indcs.size();
    assert(N == size);

    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] += M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared writes into a Shape of subtype Sh on a Duoram
// with values of sharing type T (vector version)
template <typename T> template <typename U, typename Sh>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator=(const std::vector<T>& M)
{
    size_t size = indcs.size();
    assert(M.size() == size);

    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] = M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}

// Independent U-shared writes into a Shape of subtype Sh on a Duoram
// with values of sharing type T (array version)
template <typename T> template <typename U, typename Sh> template <size_t N>
typename Duoram<T>::Shape::template MemRefInd<U,Sh>
    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator=(const std::array<T,N>& M)
{
    size_t size = indcs.size();
    assert(N == size);

    std::vector<coro_t> coroutines;
    for (size_t i=0; i<size; ++i) {
        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
            Sh Sh_coro = shape.context(yield);
            Sh_coro[indcs[i]] = M[i];
        });
    }
    run_coroutines(shape.yield, coroutines);
    return *this;
}