// duoram.hpp
#ifndef __DUORAM_HPP__
#define __DUORAM_HPP__

#include "types.hpp"

// Implementation of the 3-party protocols described in:
// Adithya Vadapalli, Ryan Henry, Ian Goldberg, "Duoram: A
// Bandwidth-Efficient Distributed ORAM for 2- and 3-Party Computation".

// A Duoram object is like physical memory: it's just a flat address
// space, and you can't access it directly.  Instead, you need to access
// it through a "Shape", such as Flat, Tree, Path, etc.  Shapes can be
// nested, so you can have a Path of a Subtree of a Tree sitting on the
// base Duoram.  Each Shape's parent must remain in scope (references to
// it must remain valid) for the lifetime of the child Shape.  Each
// shape is bound to a context, which is a thread-specific MPCTIO and a
// coroutine-specific yield_t.  If you launch new threads and/or
// coroutines, you'll need to make a copy of the current Shape for your
// new context, and call context() on it.  Be sure not to call context()
// on a Shape shared with other threads or coroutines.

// This is templated, because you can have a Duoram of additively shared
// (RegAS) or XOR shared (RegXS) elements, or std::arrays of those to
// get "wide" memory cells.

// The initial implementation is focused on additive shares.
  22. template <typename T>
  23. class Duoram {
  24. // The computational parties have three vectors: the share of the
  25. // database itself, the party's own blinding factors for its
  26. // database share, and the _other_ computational party's blinded
  27. // database share (its database share plus its blind).
  28. // The player number (0 and 1 for the computational parties and 2
  29. // for the server) and the size of the Duoram
  30. int player;
  31. size_t oram_size;
  32. // The server has two vectors: a copy of each computational party's
  33. // blind. The database vector will remain empty.
  34. std::vector<T> database; // computational parties only
  35. std::vector<T> blind; // computational parties use this name
  36. std::vector<T> &p0_blind; // server uses this name
  37. std::vector<T> peer_blinded_db; // computational parties
  38. std::vector<T> &p1_blind; // server
  39. public:
  40. // The type of this Duoram
  41. using type = T;
  42. // The different Shapes are subclasses of this inner class
  43. class Shape;
  44. // These are the different Shapes that exist
  45. class Flat;
  46. // Pass the player number and desired size
  47. Duoram(int player, size_t size);
  48. // Get the size
  49. inline size_t size() { return oram_size; }
  50. // Get the basic Flat shape for this Duoram
  51. Flat flat(MPCTIO &tio, yield_t &yield, size_t start = 0,
  52. size_t len = 0) {
  53. return Flat(*this, tio, yield, start, len);
  54. }
  55. // For debugging; print the contents of the Duoram to stdout
  56. void dump() const;
  57. };
// The parent class of all Shapes.  This is an abstract class that
// cannot itself be instantiated.
template <typename T>
class Duoram<T>::Shape {
    // Subclasses should be able to access _other_ Shapes' indexmap
    friend class Flat;

    // When you index into a shape (A[x]), you get one of these types,
    // depending on the type of x (the index), _not_ on the type T (the
    // underlying type of the Duoram).  That is, you can have an
    // additive-shared index (x) into an XOR-shared database (T), for
    // example.

    // When x is an unshared explicit value
    class MemRefExpl;
    // When x is additively shared
    class MemRefAS;
    // When x is XOR shared
    class MemRefXS;

protected:
    // A reference to the parent shape.  As with ".." in the root
    // directory of a filesystem, the topmost shape is indicated by
    // having parent = *this.
    const Shape &parent;
    // A reference to the backing physical storage
    Duoram &duoram;
    // The size of this shape
    size_t shape_size;
    // The number of bits needed to address this shape (the number of
    // bits in shape_size-1)
    nbits_t addr_size;
    // And a mask with the low addr_size bits set
    address_t addr_mask;
    // The Shape's context (MPCTIO and yield_t)
    MPCTIO &tio;
    yield_t &yield;

    // A function to set the shape_size and compute addr_size and
    // addr_mask.  (Note: the constructor below leaves addr_size and
    // addr_mask uninitialized; subclasses are expected to call this.)
    void set_shape_size(size_t sz);

    // We need a constructor because we hold non-static references; this
    // constructor is called by the subclass constructors
    Shape(const Shape &parent, Duoram &duoram, MPCTIO &tio,
        yield_t &yield) : parent(parent), duoram(duoram), shape_size(0),
        tio(tio), yield(yield) {}

    // The index-mapping function.  Input the index relative to this
    // shape, and output the corresponding physical address.  The
    // strategy is to map the index relative to this shape to the index
    // relative to the parent shape, call the parent's indexmap function
    // on that (unless this is the topmost shape), and return what it
    // returns.  If this is the topmost shape, just return what you
    // would have passed to the parent's indexmap.
    //
    // This is a pure virtual function; all subclasses of Shape must
    // implement it, and of course Shape itself therefore cannot be
    // instantiated.
    virtual size_t indexmap(size_t idx) const = 0;

    // Get a pair (for the server) of references to the underlying
    // Duoram entries at share virtual index idx.  (That is, it gets
    // duoram.p0_blind[indexmap(idx)], etc.)
    inline std::tuple<T&,T&> get_server(size_t idx) const {
        size_t physaddr = indexmap(idx);
        return std::tie(
            duoram.p0_blind[physaddr],
            duoram.p1_blind[physaddr]);
    }

    // Get a triple (for the computational players) of references to the
    // underlying Duoram entries at share virtual index idx.  (That is,
    // it gets duoram.database[indexmap(idx)], etc.)
    inline std::tuple<T&,T&,T&> get_comp(size_t idx) const {
        size_t physaddr = indexmap(idx);
        return std::tie(
            duoram.database[physaddr],
            duoram.blind[physaddr],
            duoram.peer_blinded_db[physaddr]);
    }

public:
    // Get the size
    inline size_t size() { return shape_size; }

    // Update the context (MPCTIO and yield if you've started a new
    // thread, or just yield if you've started a new coroutine in the
    // same thread).
    //
    // NOTE(review): tio and yield are reference members, so these
    // assignments write *through* the reference into the original
    // MPCTIO/yield_t object — they do not rebind the reference to
    // new_tio/new_yield.  Confirm that MPCTIO and yield_t assignment
    // semantics make this the intended behavior, given the class
    // comment's instruction to copy the Shape before calling context().
    void context(MPCTIO &new_tio, yield_t &new_yield) {
        tio = new_tio;
        yield = new_yield;
    }
    void context(yield_t &new_yield) { yield = new_yield; }

    // Index into this Shape in various ways; the MemRef* proxy objects
    // perform the actual (oblivious or explicit) reads and updates
    MemRefAS operator[](const RegAS &idx) { return MemRefAS(*this, idx); }
    MemRefXS operator[](const RegXS &idx) { return MemRefXS(*this, idx); }
    MemRefExpl operator[](address_t idx) { return MemRefExpl(*this, idx); }

    // For debugging or checking your answers (using this in general is
    // of course insecure)

    // This one reconstructs the whole database
    std::vector<T> reconstruct() const;

    // This one reconstructs a single database value
    T reconstruct(const T& share) const;
};
  153. // The most basic shape is Flat. It is almost always the topmost shape,
  154. // and serves to provide MPCTIO and yield_t context to a Duoram without
  155. // changing the indices or size (but can specify a subrange if desired).
  156. template <typename T>
  157. class Duoram<T>::Flat : public Duoram<T>::Shape {
  158. // If this is a subrange, start may be non-0, but it's usually 0
  159. size_t start;
  160. inline size_t indexmap(size_t idx) const {
  161. size_t paridx = idx + start;
  162. if (&(this->parent) == this) {
  163. return paridx;
  164. } else {
  165. return this->parent.indexmap(paridx);
  166. }
  167. }
  168. public:
  169. // Constructor. len=0 means the maximum size (the parent's size
  170. // minus start).
  171. Flat(Duoram &duoram, MPCTIO &tio, yield_t &yield, size_t start = 0,
  172. size_t len = 0);
  173. };
// An additively shared memory reference.  You get one of these from a
// Shape A and an additively shared RegAS index x with A[x].  Then you
// perform operations on this object, which do the Duoram operations.
// (The operation implementations live in duoram.tcc.)
template <typename T>
class Duoram<T>::Shape::MemRefAS {
    // The Shape being indexed into
    const Shape &shape;
    // The additively shared index (stored by value, so the proxy stays
    // valid even if the caller's RegAS goes out of scope)
    RegAS idx;

public:
    MemRefAS(const Shape &shape, const RegAS &idx) :
        shape(shape), idx(idx) {}

    // Oblivious read from an additively shared index of Duoram memory
    operator T();

    // Oblivious update to an additively shared index of Duoram memory
    MemRefAS &operator+=(const T& M);
};
// An XOR shared memory reference.  You get one of these from a Shape A
// and an XOR shared RegXS index x with A[x].  Then you perform
// operations on this object, which do the Duoram operations.
// (The operation implementations live in duoram.tcc.)
template <typename T>
class Duoram<T>::Shape::MemRefXS {
    // The Shape being indexed into
    const Shape &shape;
    // The XOR shared index (stored by value, so the proxy stays valid
    // even if the caller's RegXS goes out of scope)
    RegXS idx;

public:
    MemRefXS(const Shape &shape, const RegXS &idx) :
        shape(shape), idx(idx) {}

    // Oblivious read from an XOR shared index of Duoram memory
    operator T();

    // Oblivious update to an XOR shared index of Duoram memory
    MemRefXS &operator+=(const T& M);
};
// An explicit memory reference.  You get one of these from a Shape A
// and an address_t index x with A[x].  Then you perform operations on
// this object, which update the Duoram state without performing Duoram
// operations (no obliviousness is needed for a public index).
// (The operation implementations live in duoram.tcc.)
template <typename T>
class Duoram<T>::Shape::MemRefExpl {
    // The Shape being indexed into
    const Shape &shape;
    // The public (unshared) index
    address_t idx;

public:
    MemRefExpl(const Shape &shape, address_t idx) :
        shape(shape), idx(idx) {}

    // Explicit read from a given index of Duoram memory
    operator T();

    // Explicit update to a given index of Duoram memory
    MemRefExpl &operator+=(const T& M);
};
#include "duoram.tcc"

#endif