duoram.hpp
#ifndef __DUORAM_HPP__
#define __DUORAM_HPP__

#include <optional>
#include <functional>

#include "types.hpp"
#include "mpcio.hpp"
#include "coroutine.hpp"

// Implementation of the 3-party protocols described in:
// Adithya Vadapalli, Ryan Henry, Ian Goldberg, "Duoram: A
// Bandwidth-Efficient Distributed ORAM for 2- and 3-Party Computation".

// A Duoram object is like physical memory: it's just a flat address
// space, and you can't access it directly. Instead, you need to access
// it through a "Shape", such as Flat, Tree, Path, etc. Shapes can be
// nested, so you can have a Path of a Subtree of a Tree sitting on the
// base Duoram. Each Shape's parent must remain in scope (references to
// it must remain valid) for the lifetime of the child Shape. Each
// Shape is bound to a context, which is a thread-specific MPCTIO and a
// coroutine-specific yield_t. If you launch new threads and/or
// coroutines, you'll need to make a copy of the current Shape for your
// new context, and call context() on it. Be sure not to call context()
// on a Shape shared with other threads or coroutines.

// This is templated, because you can have a Duoram of additively shared
// (RegAS) or XOR shared (RegXS) elements, or more complex cell types
// (see cell.hpp for example).
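
// A minimal usage sketch, assuming an MPCTIO tio and a yield_t yield
// have already been set up for this party as elsewhere in this
// codebase; the size and the index/value registers are illustrative:
//
//     Duoram<RegAS> oram(tio.player(), 1024);
//     auto A = oram.flat(tio, yield);
//     A.init([] (size_t i) { return i * i; });  // local, no communication
//     RegAS idx;             // additive shares of an index
//     RegAS val = A[idx];    // oblivious read
//     A[idx] += val;         // oblivious update
//     A[3] = val;            // explicit write to address 3
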
template <typename T>
class Duoram {
    // The computational parties have three vectors: the share of the
    // database itself, the party's own blinding factors for its
    // database share, and the _other_ computational party's blinded
    // database share (its database share plus its blind).

    // The player number (0 and 1 for the computational parties and 2
    // for the server) and the size of the Duoram
    int player;
    size_t oram_size;

    // The server has two vectors: a copy of each computational party's
    // blind. The database vector will remain empty.

    std::vector<T> database;         // computational parties only

    std::vector<T> blind;            // computational parties use this name
    std::vector<T> &p0_blind;        // server uses this name

    std::vector<T> peer_blinded_db;  // computational parties
    std::vector<T> &p1_blind;        // server

public:
    // The type of this Duoram
    using type = T;

    // The different Shapes are subclasses of this inner class
    class Shape;
    // These are the different Shapes that exist
    class Flat;
    class Pad;
    class Stride;

    // Oblivious indices for use in related-index ORAM accesses
    template <typename U, nbits_t WIDTH>
    class OblivIndex;

    // Pass the player number and desired size
    Duoram(int player, size_t size);

    // Get the size
    inline size_t size() { return oram_size; }

    // Get the basic Flat shape for this Duoram
    Flat flat(MPCTIO &tio, yield_t &yield, size_t start = 0,
            size_t len = 0) {
        return Flat(*this, tio, yield, start, len);
    }

    // For debugging; print the contents of the Duoram to stdout
    void dump() const;
};
// The parent class of all Shapes. This is an abstract class that
// cannot itself be instantiated.
template <typename T>
class Duoram<T>::Shape {
    // Subclasses should be able to access _other_ Shapes'
    // get_{comp,server} functions
    friend class Flat;
    friend class Pad;
    friend class Stride;

    template <typename U, nbits_t WIDTH>
    friend class OblivIndex;

    // When you index into a shape (A[x]), you get one of these types,
    // depending on the type of x (the index), _not_ on the type T (the
    // underlying type of the Duoram). That is, you can have an
    // additive-shared index (x) into an XOR-shared database (T), for
    // example.

    // When x is additively or XOR shared
    // U is the sharing type of the indices, while T is the sharing type
    // of the data in the database. If we are referencing an entire
    // entry of type T, then the field type FT will equal T, and the
    // field selector type FST will be nullopt_t. If we are referencing
    // a particular field of T, then FT will be the type of the field
    // (RegAS or RegXS) and FST will be a pointer-to-member T::* type
    // pointing to that field. Sh is the specific Shape subtype used to
    // create the MemRefS. WIDTH is the RDPF width to use.
    template <typename U, typename FT, typename FST, typename Sh, nbits_t WIDTH>
    class MemRefS;
    // When x is an unshared explicit value. FT and FST are as above.
    template <typename FT, typename FST>
    class MemRefExpl;
    // When x is a vector or array of values of type U, used to denote a
    // collection of independent memory operations that can be performed
    // simultaneously. Sh is the specific Shape subtype used to create
    // the MemRefInd.
    template <typename U, typename Sh>
    class MemRefInd;

protected:
    // A reference to the parent shape. As with ".." in the root
    // directory of a filesystem, the topmost shape is indicated by
    // having parent = *this.
    const Shape &parent;

    // A reference to the backing physical storage
    Duoram &duoram;

    // The size of this shape
    size_t shape_size;

    // The number of bits needed to address this shape (the number of
    // bits in shape_size-1)
    nbits_t addr_size;

    // And a mask with the low addr_size bits set
    address_t addr_mask;

    // The Shape's context (MPCTIO and yield_t)
    MPCTIO &tio;
    yield_t &yield;

    // If you enable explicit-only mode, sending updates of your blind
    // to the server and of your blinded database to your peer will be
    // temporarily disabled. When you disable it (which will happen
    // automatically at the next ORAM read or write, or you can do it
    // explicitly), new random blinds will be chosen for the whole
    // Shape, and the blinds sent to the server, and the blinded
    // database sent to the peer.
    bool explicitmode;

    // A function to set the shape_size and compute addr_size and
    // addr_mask
    void set_shape_size(size_t sz);

    // We need a constructor because we hold non-static references; this
    // constructor is called by the subclass constructors
    Shape(const Shape &parent, Duoram &duoram, MPCTIO &tio,
        yield_t &yield) : parent(parent), duoram(duoram), shape_size(0),
        tio(tio), yield(yield), explicitmode(false) {}

    // Copy the given Shape except for the tio and yield
    Shape(const Shape &copy_from, MPCTIO &tio, yield_t &yield) :
        parent(copy_from.parent), duoram(copy_from.duoram),
        shape_size(copy_from.shape_size),
        addr_size(copy_from.addr_size), addr_mask(copy_from.addr_mask),
        tio(tio), yield(yield),
        explicitmode(copy_from.explicitmode) {}

    // The index-mapping function. Input the index relative to this
    // shape, and output the corresponding index relative to the parent
    // shape.
    //
    // This is a pure virtual function; all subclasses of Shape must
    // implement it, and of course Shape itself therefore cannot be
    // instantiated.
    virtual size_t indexmap(size_t idx) const = 0;

    // Get a pair (for the server) of references to the underlying
    // Duoram entries at share virtual index idx.
    virtual inline std::tuple<T&,T&> get_server(size_t idx,
            std::nullopt_t null = std::nullopt) const {
        size_t parindex = indexmap(idx);
        if (&(this->parent) == this) {
            return std::tie(
                duoram.p0_blind[parindex],
                duoram.p1_blind[parindex]);
        } else {
            return this->parent.get_server(parindex, null);
        }
    }

    // Get a triple (for the computational players) of references to the
    // underlying Duoram entries at share virtual index idx.
    virtual inline std::tuple<T&,T&,T&> get_comp(size_t idx,
            std::nullopt_t null = std::nullopt) const {
        size_t parindex = indexmap(idx);
        if (&(this->parent) == this) {
            return std::tie(
                duoram.database[parindex],
                duoram.blind[parindex],
                duoram.peer_blinded_db[parindex]);
        } else {
            return this->parent.get_comp(parindex, null);
        }
    }

    // Get a pair (for the server) of references to a particular field
    // of the underlying Duoram entries at share virtual index idx.
    template <typename FT>
    inline std::tuple<FT&,FT&> get_server(size_t idx, FT T::*field) const {
        size_t parindex = indexmap(idx);
        if (&(this->parent) == this) {
            return std::tie(
                duoram.p0_blind[parindex].*field,
                duoram.p1_blind[parindex].*field);
        } else {
            return this->parent.get_server(parindex, field);
        }
    }
    // Get a triple (for the computational players) of references to a
    // particular field of the underlying Duoram entries at share
    // virtual index idx.
    template <typename FT>
    inline std::tuple<FT&,FT&,FT&> get_comp(size_t idx, FT T::*field) const {
        size_t parindex = indexmap(idx);
        if (&(this->parent) == this) {
            return std::tie(
                duoram.database[parindex].*field,
                duoram.blind[parindex].*field,
                duoram.peer_blinded_db[parindex].*field);
        } else {
            return this->parent.get_comp(parindex, field);
        }
    }
public:
    // Get the size
    inline size_t size() const { return shape_size; }

    // Initialize the contents of the Shape to a constant. This method
    // does no communication; all the operations are local.
    void init(size_t value) {
        init([value] (size_t i) { return value; });
    }

    // Pass a function f: size_t -> size_t, and initialize element i of
    // the Shape to f(i) for each i. This method does no communication;
    // all the operations are local. This function _must_ be
    // deterministic.
    void init(std::function<size_t(size_t)> f) {
        int player = tio.player();
        if (player < 2) {
            for (size_t i=0; i<shape_size; ++i) {
                auto [DB, BL, PBD] = get_comp(i);
                BL.set(0);
                if (player) {
                    DB.set(f(i));
                    PBD.set(0);
                } else {
                    DB.set(0);
                    PBD.set(f(i));
                }
            }
        } else {
            for (size_t i=0; i<shape_size; ++i) {
                auto [BL0, BL1] = get_server(i);
                BL0.set(0);
                BL1.set(0);
            }
        }
    }
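
    // A small sketch of what init produces (the function is
    // illustrative). After
    //
    //     A.init([] (size_t i) { return 2*i; });
    //
    // the reconstructed value at index i is 2*i: player 1's database
    // share holds f(i), player 0's holds 0, all blinds are 0, and each
    // party's copy of the peer's blinded share matches (f(i)+0 or 0+0),
    // so the state is consistent without any communication.
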
    // Enable or disable explicit-only mode. Only [] with explicit
    // (address_t) indices is allowed in this mode. Using [] with RegAS
    // or RegXS indices will automatically turn off this mode, or you
    // can turn it off explicitly. In explicit-only mode, updates to
    // the memory in the Shape will not induce communication to the
    // server or peer, but when it turns off, a message of the size of
    // the entire Shape will be sent to each of the server and the
    // peer. This is useful if you're going to be doing multiple
    // explicit writes to every element of the Shape before you do your
    // next oblivious read or write. Bitonic sort is a prime example.
    void explicitonly(bool enable);
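
    // A usage sketch; the loop bound and the vals array are
    // illustrative, not part of this API:
    //
    //     A.explicitonly(true);
    //     for (address_t j = 0; j < 100; ++j) {
    //         A[j] = vals[j];        // local only; no messages sent
    //     }
    //     A.explicitonly(false);     // reblind and resend the Shape
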
    // Create an OblivIndex, non-incrementally (supply the shares of the
    // index directly) or incrementally (the bits of the index will be
    // supplied later, one at a time)

    // Non-incremental, RegXS index
    OblivIndex<RegXS,1> oblivindex(const RegXS &idx, nbits_t depth=0) {
        if (depth == 0) {
            depth = this->addr_size;
        }
        typename Duoram<T>::OblivIndex<RegXS,1>
            res(this->tio, this->yield, idx, depth);
        return res;
    }

    // Non-incremental, RegAS index
    OblivIndex<RegAS,1> oblivindex(const RegAS &idx, nbits_t depth=0) {
        if (depth == 0) {
            depth = this->addr_size;
        }
        typename Duoram<T>::OblivIndex<RegAS,1>
            res(this->tio, this->yield, idx, depth);
        return res;
    }

    // Incremental (requires RegXS index, supplied bit-by-bit later)
    OblivIndex<RegXS,1> oblivindex(nbits_t depth=0) {
        if (depth == 0) {
            depth = this->addr_size;
        }
        typename Duoram<T>::OblivIndex<RegXS,1>
            res(this->tio, this->yield, depth);
        return res;
    }
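
    // A sketch of the incremental flavour, for a Flat A over a
    // Duoram<RegAS>; where each RegBS bit comes from is up to the
    // surrounding MPC computation, and depth is illustrative:
    //
    //     auto oidx = A.oblivindex(depth);
    //     for (nbits_t b = 0; b < depth; ++b) {
    //         RegBS bit = /* next shared bit of the index */;
    //         oidx.incr(bit);
    //         RegAS elt = A[oidx];   // read at the current partial index
    //     }
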
    // For debugging or checking your answers (using this in general is
    // of course insecure)

    // This one reconstructs the whole database
    std::vector<T> reconstruct() const;

    // This one reconstructs a single database value
    T reconstruct(const T& share) const;
};

// The most basic shape is Flat. It is almost always the topmost shape,
// and serves to provide MPCTIO and yield_t context to a Duoram without
// changing the indices or size (but can specify a subrange if desired).
template <typename T>
class Duoram<T>::Flat : public Duoram<T>::Shape {
    // If this is a subrange, start may be non-0, but it's usually 0
    size_t start;
    size_t len;

    inline size_t indexmap(size_t idx) const {
        size_t paridx = idx + start;
        return paridx;
    }

    // Internal function to aid bitonic_sort
    void butterfly(address_t start, address_t len, bool dir);

public:
    // Constructor. len=0 means the maximum size (the parent's size
    // minus start).
    Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield, size_t start = 0,
        size_t len = 0);

    // Constructor. len=0 means the maximum size (the parent's size
    // minus start).
    Flat(const Shape &parent, MPCTIO &tio, yield_t &yield, size_t start = 0,
        size_t len = 0);

    // Copy the given Flat except for the tio and yield
    Flat(const Flat &copy_from, MPCTIO &tio, yield_t &yield) :
        Shape(copy_from, tio, yield), start(copy_from.start),
        len(copy_from.len) {}

    // Update the context (MPCTIO and yield if you've started a new
    // thread, or just yield if you've started a new coroutine in the
    // same thread). Returns a new Shape with an updated context.
    Flat context(MPCTIO &new_tio, yield_t &new_yield) const {
        return Flat(*this, new_tio, new_yield);
    }
    Flat context(yield_t &new_yield) const {
        return Flat(*this, this->tio, new_yield);
    }
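
    // A sketch of giving each coroutine its own context, assuming a
    // run_coroutines helper along the lines of the one in
    // coroutine.hpp; the indices and values are illustrative:
    //
    //     run_coroutines(tio,
    //         [&] (yield_t &y) { auto Ay = A.context(y); Ay[idx1] += M1; },
    //         [&] (yield_t &y) { auto Ay = A.context(y); Ay[idx2] += M2; });
    //
    // Each lambda copies A with context() before touching it, so the
    // original A is never used from two coroutines at once.
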
    // Index into this Flat in various ways
    typename Duoram::Shape::template MemRefS<RegAS,T,std::nullopt_t,Flat,1>
            operator[](const RegAS &idx) {
        typename Duoram<T>::Shape::
            template MemRefS<RegAS,T,std::nullopt_t,Flat,1>
            res(*this, idx, std::nullopt);
        return res;
    }
    typename Duoram::Shape::template MemRefS<RegXS,T,std::nullopt_t,Flat,1>
            operator[](const RegXS &idx) {
        typename Duoram<T>::Shape::
            template MemRefS<RegXS,T,std::nullopt_t,Flat,1>
            res(*this, idx, std::nullopt);
        return res;
    }
    template <typename U, nbits_t WIDTH>
    typename Duoram::Shape::template MemRefS<U,T,std::nullopt_t,Flat,WIDTH>
            operator[](OblivIndex<U,WIDTH> &obidx) {
        typename Duoram<T>::Shape::
            template MemRefS<U,T,std::nullopt_t,Flat,WIDTH>
            res(*this, obidx, std::nullopt);
        return res;
    }
    typename Duoram::Shape::template MemRefExpl<T,std::nullopt_t>
            operator[](address_t idx) {
        typename Duoram<T>::Shape::
            template MemRefExpl<T,std::nullopt_t>
            res(*this, idx, std::nullopt);
        return res;
    }
    template <typename U>
    Duoram::Shape::MemRefInd<U, Flat>
            operator[](const std::vector<U> &indcs) {
        typename Duoram<T>::Shape::
            template MemRefInd<U,Flat>
            res(*this, indcs);
        return res;
    }
    template <typename U, size_t N>
    Duoram::Shape::MemRefInd<U, Flat>
            operator[](const std::array<U,N> &indcs) {
        typename Duoram<T>::Shape::
            template MemRefInd<U,Flat>
            res(*this, indcs);
        return res;
    }
    // Obliviously sort the elements indexed by the two given indices.
    // Without reconstructing the values, if dir=0, this[idx1] will
    // become a share of the smaller of the reconstructed values, and
    // this[idx2] will become a share of the larger. If dir=1, it's the
    // other way around.
    //
    // Note: this only works for additively shared databases
    template<typename U,typename V>
    void osort(const U &idx1, const V &idx2, bool dir=0);

    // Bitonic sort the elements from start to start+len-1, in
    // increasing order if dir=0 or decreasing order if dir=1. Note that
    // the elements must be at most 63 bits long each for the notion of
    // ">" to make consistent sense.
    void bitonic_sort(address_t start, address_t len, bool dir=0);
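
    // A sketch of sorting a whole Flat of additive shares (the size 16
    // is arbitrary; explicit-only mode pairs naturally with the
    // explicit writes that fill the Flat, as noted above):
    //
    //     Duoram<RegAS> oram(tio.player(), 16);
    //     auto A = oram.flat(tio, yield);
    //     // ... fill A with explicit writes ...
    //     A.bitonic_sort(0, 16);     // ascending order
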
    // Assuming the memory is already sorted, do an oblivious binary
    // search for the smallest index whose value is at least the given
    // target. (The answer will be the length of the Flat if all
    // elements are smaller than the target.) Only available for
    // additively shared databases for now.

    // The basic version uses log(N) ORAM reads of size N, where N is
    // the smallest power of 2 strictly larger than the Flat size
    RegAS basic_binary_search(RegAS &target);
    // This version does 1 ORAM read of size 2, 1 of size 4, 1 of size
    // 8, ..., 1 of size N/2, where N is the smallest power of 2
    // strictly larger than the Flat size
    RegXS binary_search(RegAS &target);
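
    // A sketch; target holds additive shares of the value being
    // searched for, and the resulting index comes back additively
    // shared from the basic version, XOR shared from the other:
    //
    //     RegAS target;
    //     RegAS pos_as = A.basic_binary_search(target);
    //     RegXS pos_xs = A.binary_search(target);
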
};

// Oblivious indices for use in related-index ORAM accesses.
template <typename T>
template <typename U, nbits_t WIDTH>
class Duoram<T>::OblivIndex {
    template <typename Ux,typename FT,typename FST,typename Sh,nbits_t WIDTHx>
    friend class Shape::MemRefS;

    int player;
    std::optional<RDPFTriple<WIDTH>> dt;
    std::optional<RDPFPair<WIDTH>> dp;
    nbits_t curdepth, maxdepth;
    nbits_t next_windex;
    bool incremental;
    U idx;

public:
    // Non-incremental constructor
    OblivIndex(MPCTIO &tio, yield_t &yield, const U &idx, nbits_t depth) :
        player(tio.player()), curdepth(depth), maxdepth(depth),
        next_windex(0), incremental(false), idx(idx)
    {
        if (player < 2) {
            dt = tio.rdpftriple<WIDTH>(yield, depth);
        } else {
            dp = tio.rdpfpair<WIDTH>(yield, depth);
        }
    }
    // Incremental constructor: only for U=RegXS
    OblivIndex(MPCTIO &tio, yield_t &yield, nbits_t depth) :
        player(tio.player()), curdepth(0), maxdepth(depth),
        next_windex(0), incremental(true), idx(RegXS())
    {
        if (player < 2) {
            dt = tio.rdpftriple<WIDTH>(yield, depth, true);
        } else {
            dp = tio.rdpfpair<WIDTH>(yield, depth, true);
        }
    }
    // Incrementally append a (shared) bit to the oblivious index
    void incr(RegBS bit)
    {
        assert(incremental);
        idx.xshare = (idx.xshare << 1) | value_t(bit.bshare);
        ++curdepth;
        if (player < 2) {
            dt->depth(curdepth);
        } else {
            dp->depth(curdepth);
        }
    }

    // Get a copy of the index
    U index() { return idx; }

    // Get the next wide-RDPF index
    nbits_t windex() { assert(next_windex < WIDTH); return next_windex++; }
};
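
// A sketch of reusing one wide OblivIndex for several related-index
// accesses at the same depth; the width 3, the RegXS index xidx, and
// the Flats A, B, C over Duoram<RegXS> databases are illustrative:
//
//     Duoram<RegXS>::OblivIndex<RegXS,3> oidx(tio, yield, xidx, depth);
//     RegXS a = A[oidx];     // these three accesses share one
//     RegXS b = B[oidx];     // width-3 RDPF (each consuming a
//     RegXS c = C[oidx];     // windex()) instead of three RDPFs
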
// An additive or XOR shared memory reference. You get one of these
// from a Shape A and an additively shared RegAS index x, or an XOR
// shared RegXS index x, with A[x]. Then you perform operations on this
// object, which do the Duoram operations. As above, T is the sharing
// type of the data in the database, while U is the sharing type of the
// index used to create this memory reference. If we are referencing an
// entire entry of type T, then the field type FT will equal T, and the
// field selector type FST will be nullopt_t. If we are referencing a
// particular field of T, then FT will be the type of the field (RegAS
// or RegXS) and FST will be a pointer-to-member T::* type pointing to
// that field. Sh is the specific Shape subtype used to create the
// MemRefS. WIDTH is the RDPF width to use.
template <typename T>
template <typename U, typename FT, typename FST, typename Sh, nbits_t WIDTH>
class Duoram<T>::Shape::MemRefS {
    Sh &shape;

    // oblividx is a reference to the OblivIndex we're using. In the
    // common case, we own the actual OblivIndex, and it's stored in
    // our_oblividx, and oblividx is a pointer to that. Sometimes
    // (for example incremental ORAM accesses), the caller will own (and
    // modify between uses) the OblivIndex. In that case, oblividx will
    // be a pointer to the caller's OblivIndex object, and
    // our_oblividx will be nullopt.
    std::optional<Duoram<T>::OblivIndex<U,WIDTH>> our_oblividx;
    Duoram<T>::OblivIndex<U,WIDTH> *oblividx;

    FST fieldsel;

private:
    // Oblivious update to a shared index of Duoram memory, only for
    // FT = RegAS or RegXS
    MemRefS<U,FT,FST,Sh,WIDTH> &oram_update(const FT& M, const prac_template_true&);
    // Oblivious update to a shared index of Duoram memory, for
    // FT not RegAS or RegXS
    MemRefS<U,FT,FST,Sh,WIDTH> &oram_update(const FT& M, const prac_template_false&);

public:
    MemRefS<U,FT,FST,Sh,WIDTH>(Sh &shape, const U &idx, FST fieldsel) :
        shape(shape), fieldsel(fieldsel) {
        our_oblividx.emplace(shape.tio, shape.yield, idx,
            shape.addr_size);
        oblividx = &(*our_oblividx);
    }
    MemRefS<U,FT,FST,Sh,WIDTH>(Sh &shape, OblivIndex<U,WIDTH> &obidx, FST fieldsel) :
        shape(shape), fieldsel(fieldsel) {
        oblividx = &obidx;
    }
    // Create a MemRefS for accessing a particular field of T
    template <typename SFT>
    MemRefS<U,SFT,SFT T::*,Sh,WIDTH> field(SFT T::*subfieldsel) {
        auto res = MemRefS<U,SFT,SFT T::*,Sh,WIDTH>(this->shape,
            *oblividx, subfieldsel);
        return res;
    }
    // Oblivious read from a shared index of Duoram memory
    operator FT();

    // Oblivious update to a shared index of Duoram memory
    MemRefS<U,FT,FST,Sh,WIDTH> &operator+=(const FT& M);

    // Oblivious write to a shared index of Duoram memory
    MemRefS<U,FT,FST,Sh,WIDTH> &operator=(const FT& M);
};
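
// A sketch of field access; the Node cell type and its RegAS key
// member are hypothetical (see cell.hpp for real cell types):
//
//     Duoram<Node> oram(tio.player(), size);
//     auto A = oram.flat(tio, yield);
//     RegXS idx;
//     RegAS k = A[idx].field(&Node::key);    // read just one field
//     A[idx].field(&Node::key) += delta;     // update just one field
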
// An explicit memory reference. You get one of these from a Shape A
// and an address_t index x with A[x]. Then you perform operations on
// this object, which update the Duoram state directly, without
// performing oblivious Duoram operations. If we are referencing an
// entire entry of type T, then the field type FT will equal T, and the
// field selector type FST will be nullopt_t. If we are referencing a
// particular field of T, then FT will be the type of the field (RegAS
// or RegXS) and FST will be a pointer-to-member T::* type pointing to
// that field.
template <typename T> template <typename FT, typename FST>
class Duoram<T>::Shape::MemRefExpl {
    Shape &shape;
    address_t idx;
    FST fieldsel;

public:
    MemRefExpl(Shape &shape, address_t idx, FST fieldsel) :
        shape(shape), idx(idx), fieldsel(fieldsel) {}
    // Create a MemRefExpl for accessing a particular field of T
    template <typename SFT>
    MemRefExpl<SFT,SFT T::*> field(SFT T::*subfieldsel) {
        auto res = MemRefExpl<SFT,SFT T::*>(this->shape, idx, subfieldsel);
        return res;
    }
    // Explicit read from a given index of Duoram memory
    operator FT();

    // Explicit update to a given index of Duoram memory
    MemRefExpl &operator+=(const FT& M);

    // Explicit write to a given index of Duoram memory
    MemRefExpl &operator=(const FT& M);

    // Convenience function
    MemRefExpl &operator-=(const FT& M) { *this += (-M); return *this; }
};
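
// A sketch of explicit access to a Flat A over a Duoram<RegAS>; the
// address is arbitrary. Outside explicit-only mode, writes also keep
// the peer's and server's blinded copies up to date:
//
//     RegAS v = A[12];   // read the share at explicit address 12
//     A[12] += v;        // explicit update
//     A[12] -= v;        // convenience subtraction, as defined above
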
// A collection of independent memory references that can be processed
// simultaneously. You get one of these from a Shape A (of specific
// subclass Sh) and a vector or array of indices v with each element of
// type U.
template <typename T> template <typename U, typename Sh>
class Duoram<T>::Shape::MemRefInd {
    Sh &shape;
    std::vector<U> indcs;

public:
    MemRefInd(Sh &shape, std::vector<U> indcs) :
        shape(shape), indcs(indcs) {}
    template <size_t N>
    MemRefInd(Sh &shape, std::array<U,N> aindcs) :
        shape(shape) { for ( auto &i : aindcs ) { indcs.push_back(i); } }

    // Independent reads from shared or explicit indices of Duoram memory
    operator std::vector<T>();

    // Independent updates to shared or explicit indices of Duoram memory
    MemRefInd &operator+=(const std::vector<T>& M);
    template <size_t N>
    MemRefInd &operator+=(const std::array<T,N>& M);

    // Independent writes to shared or explicit indices of Duoram memory
    MemRefInd &operator=(const std::vector<T>& M);
    template <size_t N>
    MemRefInd &operator=(const std::array<T,N>& M);

    // Convenience function
    MemRefInd &operator-=(const std::vector<T>& M) { *this += (-M); return *this; }
    template <size_t N>
    MemRefInd &operator-=(const std::array<T,N>& M) { *this += (-M); return *this; }
};
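
// A sketch of independent accesses to a Flat A over a Duoram<RegAS>;
// the count of three indices is arbitrary. The point is that the three
// operations proceed simultaneously rather than one after another:
//
//     std::vector<RegAS> idxs(3);
//     std::vector<RegAS> vals = A[idxs];    // three independent reads
//     A[idxs] += vals;                      // three independent updates
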
#include "duoram.tcc"

#endif