// duoram.hpp
  1. #ifndef __DUORAM_HPP__
  2. #define __DUORAM_HPP__
#include <array>
#include <cassert>
#include <cstddef>
#include <functional>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

#include "types.hpp"
#include "mpcio.hpp"
#include "coroutine.hpp"
#include "rdpf.hpp"
  9. // Implementation of the 3-party protocols described in:
  10. // Adithya Vadapalli, Ryan Henry, Ian Goldberg, "Duoram: A
  11. // Bandwidth-Efficient Distributed ORAM for 2- and 3-Party Computation".
  12. // A Duoram object is like physical memory: it's just a flat address
  13. // space, and you can't access it directly. Instead, you need to access
  14. // it through a "Shape", such as Flat, Tree, Path, etc. Shapes can be
  15. // nested, so you can have a Path of a Subtree of a Tree sitting on the
  16. // base Duoram. Each Shape's parent must remain in scope (references to
  17. // it must remain valid) for the lifetime of the child Shape. Each
  18. // shape is bound to a context, which is a thread-specific MPCTIO and a
  19. // coroutine-specific yield_t. If you launch new threads and/or
  20. // coroutines, you'll need to make a copy of the current Shape for your
  21. // new context, and call context() on it. Be sure not to call context()
  22. // on a Shape shared with other threads or coroutines.
  23. // This is templated, because you can have a Duoram of additively shared
  24. // (RegAS) or XOR shared (RegXS) elements, or more complex cell types
  25. // (see cell.hpp for example).
  26. template <typename T>
  27. class Duoram {
  28. // The computational parties have three vectors: the share of the
  29. // database itself, the party's own blinding factors for its
  30. // database share, and the _other_ computational party's blinded
  31. // database share (its database share plus its blind).
  32. // The player number (0 and 1 for the computational parties and 2
  33. // for the server) and the size of the Duoram
  34. int player;
  35. size_t oram_size;
  36. // The server has two vectors: a copy of each computational party's
  37. // blind. The database vector will remain empty.
  38. std::vector<T> database; // computational parties only
  39. std::vector<T> blind; // computational parties use this name
  40. std::vector<T> &p0_blind; // server uses this name
  41. std::vector<T> peer_blinded_db; // computational parties
  42. std::vector<T> &p1_blind; // server
  43. public:
  44. // The type of this Duoram
  45. using type = T;
  46. // The different Shapes are subclasses of this inner class
  47. class Shape;
  48. // These are the different Shapes that exist
  49. class Flat;
  50. class Pad;
  51. class Stride;
  52. class Path;
  53. // Oblivious indices for use in related-index ORAM accesses
  54. template <typename U, nbits_t WIDTH>
  55. class OblivIndex;
  56. // Pass the player number and desired size
  57. Duoram(int player, size_t size);
  58. // Get the size
  59. inline size_t size() { return oram_size; }
  60. // Get the basic Flat shape for this Duoram
  61. Flat flat(MPCTIO &tio, yield_t &yield, size_t start = 0,
  62. size_t len = 0) {
  63. return Flat(*this, tio, yield, start, len);
  64. }
  65. // For debugging; print the contents of the Duoram to stdout
  66. void dump() const;
  67. };
// The parent class of all Shapes.  This is an abstract class that
// cannot itself be instantiated.  A Shape is a (possibly re-indexed)
// view onto a Duoram, bound to a thread-specific MPCTIO and a
// coroutine-specific yield_t.
template <typename T>
class Duoram<T>::Shape {
    // Subclasses should be able to access _other_ Shapes'
    // get_{comp,server} functions
    friend class Flat;
    friend class Pad;
    friend class Stride;
    friend class Path;

    template <typename U, nbits_t WIDTH>
    friend class OblivIndex;

    // When you index into a shape (A[x]), you get one of these types,
    // depending on the type of x (the index), _not_ on the type T (the
    // underlying type of the Duoram).  That is, you can have an
    // additive-shared index (x) into an XOR-shared database (T), for
    // example.

    // When x is additively or XOR shared
    // U is the sharing type of the indices, while T is the sharing type
    // of the data in the database.  If we are referencing an entire
    // entry of type T, then the field type FT will equal T, and the
    // field selector type FST will be nullopt_t.  If we are referencing
    // a particular field of T, then FT will be the type of the field
    // (RegAS or RegXS) and FST will be a pointer-to-member T::* type
    // pointing to that field.  Sh is the specific Shape subtype used to
    // create the MemRefS.  WIDTH is the RDPF width to use.
    template <typename U, typename FT, typename FST, typename Sh, nbits_t WIDTH>
    class MemRefS;
    // When x is an unshared explicit value.  FT and FST are as above.
    template <typename FT, typename FST>
    class MemRefExpl;
    // When x is a vector or array of values of type U, used to denote a
    // collection of independent memory operations that can be performed
    // simultaneously.  Sh is the specific Shape subtype used to create
    // the MemRefInd.
    template <typename U, typename Sh>
    class MemRefInd;

protected:
    // A reference to the parent shape.  As with ".." in the root
    // directory of a filesystem, the topmost shape is indicated by
    // having parent = *this.
    const Shape &parent;
    // A reference to the backing physical storage
    Duoram &duoram;
    // The size of this shape
    size_t shape_size;
    // The number of bits needed to address this shape (the number of
    // bits in shape_size-1)
    nbits_t addr_size;
    // And a mask with the low addr_size bits set
    address_t addr_mask;
    // The Shape's context (MPCTIO and yield_t)
    MPCTIO &tio;
    yield_t &yield;
    // If you enable explicit-only mode, sending updates of your blind
    // to the server and of your blinded database to your peer will be
    // temporarily disabled.  When you disable it (which will happen
    // automatically at the next ORAM read or write, or you can do it
    // explicitly), new random blinds will be chosen for the whole
    // Shape, and the blinds sent to the server, and the blinded
    // database sent to the peer.
    bool explicitmode;

    // A function to set the shape_size and compute addr_size and
    // addr_mask
    void set_shape_size(size_t sz);

    // We need a constructor because we hold non-static references; this
    // constructor is called by the subclass constructors.
    // NOTE(review): addr_size and addr_mask are left uninitialized here;
    // presumably every subclass calls set_shape_size() — confirm.
    Shape(const Shape &parent, Duoram &duoram, MPCTIO &tio,
        yield_t &yield) : parent(parent), duoram(duoram), shape_size(0),
        tio(tio), yield(yield), explicitmode(false) {}

    // Copy the given Shape except for the tio and yield
    Shape(const Shape &copy_from, MPCTIO &tio, yield_t &yield) :
        parent(copy_from.parent), duoram(copy_from.duoram),
        shape_size(copy_from.shape_size),
        addr_size(copy_from.addr_size), addr_mask(copy_from.addr_mask),
        tio(tio), yield(yield),
        explicitmode(copy_from.explicitmode) {}

    // The index-mapping function.  Input the index relative to this
    // shape, and output the corresponding index relative to the parent
    // shape.
    //
    // This is a pure virtual function; all subclasses of Shape must
    // implement it, and of course Shape itself therefore cannot be
    // instantiated.
    virtual size_t indexmap(size_t idx) const = 0;

    // Get a pair (for the server) of references to the underlying
    // Duoram entries at share virtual index idx.  The index is mapped
    // through each ancestor Shape in turn until the topmost Shape
    // (parent == *this) resolves it against the Duoram's vectors.
    virtual inline std::tuple<T&,T&> get_server(size_t idx,
        std::nullopt_t null = std::nullopt) const {
        size_t parindex = indexmap(idx);
        if (&(this->parent) == this) {
            return std::tie(
                duoram.p0_blind[parindex],
                duoram.p1_blind[parindex]);
        } else {
            return this->parent.get_server(parindex, null);
        }
    }

    // Get a triple (for the computational players) of references to the
    // underlying Duoram entries at share virtual index idx.
    virtual inline std::tuple<T&,T&,T&> get_comp(size_t idx,
        std::nullopt_t null = std::nullopt) const {
        size_t parindex = indexmap(idx);
        if (&(this->parent) == this) {
            return std::tie(
                duoram.database[parindex],
                duoram.blind[parindex],
                duoram.peer_blinded_db[parindex]);
        } else {
            return this->parent.get_comp(parindex, null);
        }
    }

    // Get a pair (for the server) of references to a particular field
    // of the underlying Duoram entries at share virtual index idx.
    template <typename FT>
    inline std::tuple<FT&,FT&> get_server(size_t idx, FT T::*field) const {
        size_t parindex = indexmap(idx);
        if (&(this->parent) == this) {
            return std::tie(
                duoram.p0_blind[parindex].*field,
                duoram.p1_blind[parindex].*field);
        } else {
            return this->parent.get_server(parindex, field);
        }
    }

    // Get a triple (for the computational players) of references to a
    // particular field of the underlying Duoram entries at share
    // virtual index idx.
    template <typename FT>
    inline std::tuple<FT&,FT&,FT&> get_comp(size_t idx, FT T::*field) const {
        size_t parindex = indexmap(idx);
        if (&(this->parent) == this) {
            return std::tie(
                duoram.database[parindex].*field,
                duoram.blind[parindex].*field,
                duoram.peer_blinded_db[parindex].*field);
        } else {
            return this->parent.get_comp(parindex, field);
        }
    }

public:
    // Get the size
    inline size_t size() const { return shape_size; }

    // Initialize the contents of the Shape to a constant.  This method
    // does no communication; all the operations are local.  This only
    // works for T=RegXS or RegAS (types with a .set() method).
    void init(size_t value) {
        T v;
        v.set(value);
        init([v] (size_t i) { return v; });
    }

    // As above, but for general T
    void init(const T &value) {
        init([value] (size_t i) { return value; });
    }

    // As above, but use the default initializer for T (probably sets
    // everything to 0).
    void init() {
        T deflt;
        init(deflt);
    }

    // Pass a function f: size_t -> size_t, and initialize element i of the
    // Shape to f(i) for each i.  This method does no communication; all
    // the operations are local.  This function must be deterministic
    // and public.  Only works for T=RegAS or RegXS.
    //
    // The blinds are all set to 0, so the peer's blinded database is
    // just the peer's database share: player 1 holds the share f(i)
    // (so player 0's view of the peer's blinded entry is f(i)), and
    // player 0 holds the share 0.
    void init(std::function<size_t(size_t)> f) {
        int player = tio.player();
        if (player < 2) {
            for (size_t i=0; i<shape_size; ++i) {
                auto [DB, BL, PBD] = get_comp(i);
                BL.set(0);
                if (player) {
                    DB.set(f(i));
                    PBD.set(0);
                } else {
                    DB.set(0);
                    PBD.set(f(i));
                }
            }
        } else {
            // The server just records both parties' (zero) blinds
            for (size_t i=0; i<shape_size; ++i) {
                auto [BL0, BL1] = get_server(i);
                BL0.set(0);
                BL1.set(0);
            }
        }
    }

    // Pass a function f: size_t -> T, and initialize element i of the
    // Shape to f(i) for each i.  This method does no communication; all
    // the operations are local.  This function must be deterministic
    // and public.
    void init(std::function<T(size_t)> f) {
        int player = tio.player();
        if (player < 2) {
            for (size_t i=0; i<shape_size; ++i) {
                auto [DB, BL, PBD] = get_comp(i);
                BL = T();
                if (player) {
                    DB = f(i);
                    PBD = T();
                } else {
                    DB = T();
                    PBD = f(i);
                }
            }
        } else {
            for (size_t i=0; i<shape_size; ++i) {
                auto [BL0, BL1] = get_server(i);
                BL0 = T();
                BL1 = T();
            }
        }
    }

    // Assuming the Shape is already sorted, do an oblivious binary
    // search for the smallest index containing the value at least the
    // given one.  (The answer will be the length of the Shape if all
    // elements are smaller than the target.)  Only available for additive
    // shared databases for now.

    // The basic version uses log(N) ORAM reads of size N, where N is
    // the smallest power of 2 strictly larger than the Shape size
    RegAS basic_binary_search(RegAS &target);
    // This version does 1 ORAM read of size 2, 1 of size 4, 1 of size
    // 8, ..., 1 of size N/2, where N is the smallest power of 2
    // strictly larger than the Shape size
    RegXS binary_search(RegAS &target);

    // Enable or disable explicit-only mode.  Only using [] with
    // explicit (address_t) indices are allowed in this mode.  Using []
    // with RegAS or RegXS indices will automatically turn off this
    // mode, or you can turn it off explicitly.  In explicit-only mode,
    // updates to the memory in the Shape will not induce communication
    // to the server or peer, but when it turns off, a message of the
    // size of the entire Shape will be sent to each of the server and
    // the peer.  This is useful if you're going to be doing multiple
    // explicit writes to every element of the Shape before you do your
    // next oblivious read or write.  Bitonic sort is a prime example.
    void explicitonly(bool enable);

    // Create an OblivIndex, non-incrementally (supply the shares of the
    // index directly) or incrementally (the bits of the index will be
    // supplied later, one at a time).

    // Non-incremental, RegXS index.  depth=0 means use this Shape's
    // full address size.
    OblivIndex<RegXS,1> oblivindex(const RegXS &idx, nbits_t depth=0) {
        if (depth == 0) {
            depth = this->addr_size;
        }
        typename Duoram<T>::template OblivIndex<RegXS,1>
            res(this->tio, this->yield, idx, depth);
        return res;
    }

    // Non-incremental, RegAS index
    OblivIndex<RegAS,1> oblivindex(const RegAS &idx, nbits_t depth=0) {
        if (depth == 0) {
            depth = this->addr_size;
        }
        typename Duoram<T>::template OblivIndex<RegAS,1>
            res(this->tio, this->yield, idx, depth);
        return res;
    }

    // Incremental (requires RegXS index, supplied bit-by-bit later)
    OblivIndex<RegXS,1> oblivindex(nbits_t depth=0) {
        if (depth == 0) {
            depth = this->addr_size;
        }
        typename Duoram<T>::template OblivIndex<RegXS,1>
            res(this->tio, this->yield, depth);
        return res;
    }

    // For debugging or checking your answers (using this in general is
    // of course insecure)

    // This one reconstructs the whole database
    std::vector<T> reconstruct() const;

    // This one reconstructs a single database value
    T reconstruct(const T& share) const;
};
  341. // The most basic shape is Flat. It is almost always the topmost shape,
  342. // and serves to provide MPCTIO and yield_t context to a Duoram without
  343. // changing the indices or size (but can specify a subrange if desired).
  344. template <typename T>
  345. class Duoram<T>::Flat : public Duoram<T>::Shape {
  346. // If this is a subrange, start may be non-0, but it's usually 0
  347. size_t start;
  348. size_t len;
  349. inline size_t indexmap(size_t idx) const {
  350. size_t paridx = idx + start;
  351. return paridx;
  352. }
  353. // Internal function to aid bitonic_sort
  354. void butterfly(address_t start, address_t len, bool dir);
  355. public:
  356. // Constructor. len=0 means the maximum size (the parent's size
  357. // minus start).
  358. Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield, size_t start = 0,
  359. size_t len = 0);
  360. // Constructor. len=0 means the maximum size (the parent's size
  361. // minus start).
  362. Flat(const Shape &parent, MPCTIO &tio, yield_t &yield, size_t start = 0,
  363. size_t len = 0);
  364. // Copy the given Flat except for the tio and yield
  365. Flat(const Flat &copy_from, MPCTIO &tio, yield_t &yield) :
  366. Shape(copy_from, tio, yield), start(copy_from.start),
  367. len(copy_from.len) {}
  368. // Update the context (MPCTIO and yield if you've started a new
  369. // thread, or just yield if you've started a new coroutine in the
  370. // same thread). Returns a new Shape with an updated context.
  371. Flat context(MPCTIO &new_tio, yield_t &new_yield) const {
  372. return Flat(*this, new_tio, new_yield);
  373. }
  374. Flat context(yield_t &new_yield) const {
  375. return Flat(*this, this->tio, new_yield);
  376. }
  377. // Index into this Flat in various ways
  378. typename Duoram::Shape::template MemRefS<RegAS,T,std::nullopt_t,Flat,1>
  379. operator[](const RegAS &idx) {
  380. typename Duoram<T>::Shape::
  381. template MemRefS<RegAS,T,std::nullopt_t,Flat,1>
  382. res(*this, idx, std::nullopt);
  383. return res;
  384. }
  385. typename Duoram::Shape::template MemRefS<RegXS,T,std::nullopt_t,Flat,1>
  386. operator[](const RegXS &idx) {
  387. typename Duoram<T>::Shape::
  388. template MemRefS<RegXS,T,std::nullopt_t,Flat,1>
  389. res(*this, idx, std::nullopt);
  390. return res;
  391. }
  392. template <typename U, nbits_t WIDTH>
  393. typename Duoram::Shape::template MemRefS<U,T,std::nullopt_t,Flat,WIDTH>
  394. operator[](OblivIndex<U,WIDTH> &obidx) {
  395. typename Duoram<T>::Shape::
  396. template MemRefS<RegXS,T,std::nullopt_t,Flat,WIDTH>
  397. res(*this, obidx, std::nullopt);
  398. return res;
  399. }
  400. typename Duoram::Shape::template MemRefExpl<T,std::nullopt_t>
  401. operator[](address_t idx) {
  402. typename Duoram<T>::Shape::
  403. template MemRefExpl<T,std::nullopt_t>
  404. res(*this, idx, std::nullopt);
  405. return res;
  406. }
  407. template <typename U>
  408. Duoram::Shape::MemRefInd<U, Flat>
  409. operator[](const std::vector<U> &indcs) {
  410. typename Duoram<T>::Shape::
  411. template MemRefInd<U,Flat>
  412. res(*this, indcs);
  413. return res;
  414. }
  415. template <typename U, size_t N>
  416. Duoram::Shape::MemRefInd<U, Flat>
  417. operator[](const std::array<U,N> &indcs) {
  418. typename Duoram<T>::Shape::
  419. template MemRefInd<U,Flat>
  420. res(*this, indcs);
  421. return res;
  422. }
  423. // Oblivious sort the elements indexed by the two given indices.
  424. // Without reconstructing the values, if dir=0, this[idx1] will
  425. // become a share of the smaller of the reconstructed values, and
  426. // this[idx2] will become a share of the larger. If dir=1, it's the
  427. // other way around.
  428. //
  429. // Note: this only works for additively shared databases
  430. template<typename U,typename V>
  431. void osort(const U &idx1, const V &idx2, bool dir=0);
  432. // Bitonic sort the elements from start to start+len-1, in
  433. // increasing order if dir=0 or decreasing order if dir=1. Note that
  434. // the elements must be at most 63 bits long each for the notion of
  435. // ">" to make consistent sense.
  436. void bitonic_sort(address_t start, address_t len, bool dir=0);
  437. };
// Oblivious indices for use in related-index ORAM accesses.
template <typename T>
template <typename U, nbits_t WIDTH>
class Duoram<T>::OblivIndex {
    template <typename Ux,typename FT,typename FST,typename Sh,nbits_t WIDTHx>
    friend class Shape::MemRefS;

    int player;
    // Computational parties hold an RDPF triple; the server holds an
    // RDPF pair.  Only one of these two optionals is ever engaged,
    // depending on the player number.
    std::optional<RDPFTriple<WIDTH>> dt;
    std::optional<RDPFPair<WIDTH>> dp;
    // curdepth is how many index bits are fixed so far; maxdepth is the
    // total depth of the underlying RDPFs
    nbits_t curdepth, maxdepth;
    // The next unused wide-RDPF component index (see windex() below)
    nbits_t next_windex;
    // Whether this index is being built incrementally, bit by bit
    bool incremental;
    // The (shared) index value itself
    U idx;

public:
    // Non-incremental constructor: the full shared index is supplied up
    // front, and RDPFs of the given depth are obtained from the MPCTIO.
    OblivIndex(MPCTIO &tio, yield_t &yield, const U &idx, nbits_t depth) :
        player(tio.player()), curdepth(depth), maxdepth(depth),
        next_windex(0), incremental(false), idx(idx)
    {
        if (player < 2) {
            dt = tio.rdpftriple<WIDTH>(yield, depth);
        } else {
            dp = tio.rdpfpair<WIDTH>(yield, depth);
        }
    }

    // Incremental constructor: only for U=RegXS.  The index starts
    // empty (depth 0) and bits are appended later with incr().
    OblivIndex(MPCTIO &tio, yield_t &yield, nbits_t depth) :
        player(tio.player()), curdepth(0), maxdepth(depth),
        next_windex(0), incremental(true), idx(RegXS())
    {
        if (player < 2) {
            dt = tio.rdpftriple<WIDTH>(yield, depth, true);
        } else {
            dp = tio.rdpfpair<WIDTH>(yield, depth, true);
        }
    }

    // The function unit_vector takes in an XOR-share of an index foundidx and a size
    // The function outputs _boolean shares_ of a standard-basis vector of size (with the non-zero index at foundidx)
    // For example suppose nitems = 6; and suppose P0 and P1 take parameters foundindx0 and foundindx1 such that, foundindx0 \oplus foundindx1 = 3
    // P0 and P1 output vectors r0 and r1 such that r0 \oplus r1 = [000100]
    std::vector<RegBS> unit_vector(MPCTIO &tio, yield_t &yield, size_t nitems, RegXS foundidx)
    {
        std::vector<RegBS> standard_basis(nitems);
        if (player < 2) {
            // Compute the shift between the RDPF's hidden target index
            // and foundidx, and reconstruct it with the peer.  The
            // queue/yield/recv ordering matters: yield() lets the
            // coroutine scheduler actually deliver the queued message.
            U indoffset;
            dt->get_target(indoffset);
            indoffset -= foundidx;
            U peerindoffset;
            tio.queue_peer(&indoffset, BITBYTES(curdepth));
            yield();
            tio.recv_peer(&peerindoffset, BITBYTES(curdepth));
            auto indshift = combine(indoffset, peerindoffset, curdepth);
            // Pick one of the DPF triples, we can also pick dpf[0] or dpf[2]
            auto se = StreamEval(dt->dpf[1], 0, indshift, tio.aes_ops(), true);
            for (size_t j = 0; j < nitems; ++j) {
                typename RDPF<WIDTH>::LeafNode leaf = se.next();
                standard_basis[j] = dt->dpf[1].unit_bs(leaf);
            }
        } else {
            // The server takes no part, but must still yield so the
            // communication rounds stay in sync with the parties
            yield();
        }
        return standard_basis;
    }

    // Incrementally append a (shared) bit to the oblivious index
    void incr(RegBS bit)
    {
        assert(incremental);
        // Shift the new low-order bit share into the XOR-shared index
        idx.xshare = (idx.xshare << 1) | value_t(bit.bshare);
        ++curdepth;
        if (player < 2) {
            dt->depth(curdepth);
        } else {
            dp->depth(curdepth);
        }
    }

    // Get a copy of the index
    U index() { return idx; }

    // Get the current depth (number of index bits fixed so far)
    nbits_t depth() {return curdepth;}

    // Get the next wide-RDPF index; each call consumes one of the
    // WIDTH available components
    nbits_t windex() { assert(next_windex < WIDTH); return next_windex++; }
};
// An additive or XOR shared memory reference.  You get one of these
// from a Shape A and an additively shared RegAS index x, or an XOR
// shared RegXS index x, with A[x].  Then you perform operations on this
// object, which do the Duoram operations.  As above, T is the sharing
// type of the data in the database, while U is the sharing type of the
// index used to create this memory reference.  If we are referencing an
// entire entry of type T, then the field type FT will equal T, and the
// field selector type FST will be nullopt_t.  If we are referencing a
// particular field of T, then FT will be the type of the field (RegAS
// or RegXS) and FST will be a pointer-to-member T::* type pointing to
// that field.  Sh is the specific Shape subtype used to create the
// MemRefS.  WIDTH is the RDPF width to use.
template <typename T>
template <typename U, typename FT, typename FST, typename Sh, nbits_t WIDTH>
class Duoram<T>::Shape::MemRefS {
    Sh &shape;
    // oblividx is a reference to the OblivIndex we're using.  In the
    // common case, we own the actual OblivIndex, and it's stored in
    // our_oblividx, and oblividx is a pointer to that.  Sometimes
    // (for example incremental ORAM accesses), the caller will own (and
    // modify between uses) the OblivIndex.  In that case, oblividx will
    // be a pointer to the caller's OblivIndex object, and
    // our_oblividx will be nullopt.
    std::optional<Duoram<T>::OblivIndex<U,WIDTH>> our_oblividx;
    Duoram<T>::OblivIndex<U,WIDTH> *oblividx;
    // The field selector: std::nullopt for a whole entry, or a
    // pointer-to-member for a single field of T
    FST fieldsel;

private:
    // Oblivious update to a shared index of Duoram memory, only for
    // FT = RegAS or RegXS (tag-dispatched on the prac_template_* types)
    MemRefS<U,FT,FST,Sh,WIDTH> &oram_update(const FT& M, const prac_template_true&);
    // Oblivious update to a shared index of Duoram memory, for
    // FT not RegAS or RegXS
    MemRefS<U,FT,FST,Sh,WIDTH> &oram_update(const FT& M, const prac_template_false&);

public:
    // Construct from a shared index: we create and own the OblivIndex
    MemRefS<U,FT,FST,Sh,WIDTH>(Sh &shape, const U &idx, FST fieldsel) :
        shape(shape), fieldsel(fieldsel) {
        our_oblividx.emplace(shape.tio, shape.yield, idx,
            shape.addr_size);
        oblividx = &(*our_oblividx);
    }

    // Construct from a caller-owned OblivIndex (no copy is made; the
    // caller's object must outlive this MemRefS)
    MemRefS<U,FT,FST,Sh,WIDTH>(Sh &shape, OblivIndex<U,WIDTH> &obidx, FST fieldsel) :
        shape(shape), fieldsel(fieldsel) {
        oblividx = &obidx;
    }

    // Create a MemRefS for accessing a particular field of T
    template <typename SFT>
    MemRefS<U,SFT,SFT T::*,Sh,WIDTH> field(SFT T::*subfieldsel) {
        auto res = MemRefS<U,SFT,SFT T::*,Sh,WIDTH>(this->shape,
            *oblividx, subfieldsel);
        return res;
    }

    // Oblivious read from a shared index of Duoram memory
    operator FT();
    // Oblivious update to a shared index of Duoram memory
    MemRefS<U,FT,FST,Sh,WIDTH> &operator+=(const FT& M);
    // Oblivious write to a shared index of Duoram memory
    MemRefS<U,FT,FST,Sh,WIDTH> &operator=(const FT& M);
};
// An explicit memory reference.  You get one of these from a Shape A
// and an address_t index x with A[x].  Then you perform operations on
// this object, which update the Duoram state without performing Duoram
// operations.  If we are referencing an entire entry of type T, then
// the field type FT will equal T, and the field selector type FST will
// be nullopt_t.  If we are referencing a particular field of T, then FT
// will be the type of the field (RegAS or RegXS) and FST will be a
// pointer-to-member T::* type pointing to that field.
template <typename T> template <typename FT, typename FST>
class Duoram<T>::Shape::MemRefExpl {
    Shape &shape;
    // The explicit (public) index into the Shape
    address_t idx;
    // The field selector: std::nullopt for a whole entry, or a
    // pointer-to-member for a single field of T
    FST fieldsel;

public:
    MemRefExpl(Shape &shape, address_t idx, FST fieldsel) :
        shape(shape), idx(idx), fieldsel(fieldsel) {}

    // Create a MemRefExpl for accessing a particular field of T
    template <typename SFT>
    MemRefExpl<SFT,SFT T::*> field(SFT T::*subfieldsel) {
        auto res = MemRefExpl<SFT,SFT T::*>(this->shape, idx, subfieldsel);
        return res;
    }

    // Explicit read from a given index of Duoram memory
    operator FT();
    // Explicit update to a given index of Duoram memory
    MemRefExpl &operator+=(const FT& M);
    // Explicit write to a given index of Duoram memory
    MemRefExpl &operator=(const FT& M);
    // Convenience function, implemented as adding the negation
    MemRefExpl &operator-=(const FT& M) { *this += (-M); return *this; }
};
  608. // A collection of independent memory references that can be processed
  609. // simultaneously. You get one of these from a Shape A (of specific
  610. // subclass Sh) and a vector or array of indices v with each element of
  611. // type U.
  612. template <typename T> template <typename U, typename Sh>
  613. class Duoram<T>::Shape::MemRefInd {
  614. Sh &shape;
  615. std::vector<U> indcs;
  616. public:
  617. MemRefInd(Sh &shape, std::vector<U> indcs) :
  618. shape(shape), indcs(indcs) {}
  619. template <size_t N>
  620. MemRefInd(Sh &shape, std::array<U,N> aindcs) :
  621. shape(shape) { for ( auto &i : aindcs ) { indcs.push_back(i); } }
  622. // Independent reads from shared or explicit indices of Duoram memory
  623. operator std::vector<T>();
  624. // Independent updates to shared or explicit indices of Duoram memory
  625. MemRefInd &operator+=(const std::vector<T>& M);
  626. template <size_t N>
  627. MemRefInd &operator+=(const std::array<T,N>& M);
  628. // Independent writes to shared or explicit indices of Duoram memory
  629. MemRefInd &operator=(const std::vector<T>& M);
  630. template <size_t N>
  631. MemRefInd &operator=(const std::array<T,N>& M);
  632. // Convenience function
  633. MemRefInd &operator-=(const std::vector<T>& M) { *this += (-M); return *this; }
  634. template <size_t N>
  635. MemRefInd &operator-=(const std::array<T,N>& M) { *this += (-M); return *this; }
  636. };
  637. #include "duoram.tcc"
  638. #endif