Browse Source

The Pad Shape and the start of the Stride Shape

Ian Goldberg 2 years ago
parent
commit
99bb7cf962
5 changed files with 333 additions and 3 deletions
  1. 1 1
      Makefile
  2. 6 2
      duoram.hpp
  3. 68 0
      online.cpp
  4. 195 0
      shapes.hpp
  5. 63 0
      shapes.tcc

+ 1 - 1
Makefile

@@ -42,7 +42,7 @@ preproc.o: rdpf.tcc mpcops.hpp mpcops.tcc cdpf.hpp cdpf.tcc
 online.o: online.hpp mpcio.hpp types.hpp bitutils.hpp corotypes.hpp mpcio.tcc
 online.o: options.hpp mpcops.hpp coroutine.hpp mpcops.tcc rdpf.hpp dpf.hpp
 online.o: prg.hpp aes.hpp rdpf.tcc duoram.hpp duoram.tcc cdpf.hpp cdpf.tcc
-online.o: cell.hpp
+online.o: cell.hpp shapes.hpp shapes.tcc
 mpcops.o: mpcops.hpp types.hpp bitutils.hpp mpcio.hpp corotypes.hpp mpcio.tcc
 mpcops.o: coroutine.hpp mpcops.tcc
 rdpf.o: rdpf.hpp mpcio.hpp types.hpp bitutils.hpp corotypes.hpp mpcio.tcc

+ 6 - 2
duoram.hpp

@@ -58,6 +58,8 @@ public:
     class Shape;
     // These are the different Shapes that exist
     class Flat;
+    class Pad;
+    class Stride;
 
     // Pass the player number and desired size
     Duoram(int player, size_t size);
@@ -82,6 +84,8 @@ template <typename T>
 class Duoram<T>::Shape {
     // Subclasses should be able to access _other_ Shapes' indexmap
     friend class Flat;
+    friend class Pad;
+    friend class Stride;
 
     // When you index into a shape (A[x]), you get one of these types,
     // depending on the type of x (the index), _not_ on the type T (the
@@ -176,7 +180,7 @@ protected:
     // Get a pair (for the server) of references to the underlying
     // Duoram entries at share virtual index idx.  (That is, it gets
     // duoram.p0_blind[indexmap(idx)], etc.)
-    inline std::tuple<T&,T&> get_server(size_t idx,
+    virtual inline std::tuple<T&,T&> get_server(size_t idx,
         std::nullopt_t null = std::nullopt) const {
         size_t physaddr = indexmap(idx);
         return std::tie(
@@ -187,7 +191,7 @@ protected:
     // Get a triple (for the computational players) of references to the
     // underlying Duoram entries at share virtual index idx.  (That is,
     // it gets duoram.database[indexmap(idx)], etc.)
-    inline std::tuple<T&,T&,T&> get_comp(size_t idx,
+    virtual inline std::tuple<T&,T&,T&> get_comp(size_t idx,
         std::nullopt_t null = std::nullopt) const {
         size_t physaddr = indexmap(idx);
         return std::tie(

+ 68 - 0
online.cpp

@@ -6,6 +6,7 @@
 #include "duoram.hpp"
 #include "cdpf.hpp"
 #include "cell.hpp"
+#include "shapes.hpp"
 
 
 static void online_test(MPCIO &mpcio,
@@ -1127,6 +1128,70 @@ static void sort_test(MPCIO &mpcio,
     pool.join();
 }
 
+static void pad_test(MPCIO &mpcio,
+    const PRACOptions &opts, char **args)
+{
+    nbits_t depth=6;
+
+    if (*args) {
+        depth = atoi(*args);
+        ++args;
+    }
+    address_t len = (1<<depth);
+    if (*args) {
+        len = atoi(*args);
+        ++args;
+    }
+
+    MPCTIO tio(mpcio, 0, opts.num_threads);
+    run_coroutines(tio, [&mpcio, &tio, depth, len] (yield_t &yield) {
+        int player = tio.player();
+        Duoram<RegAS> oram(player, len);
+        auto A = oram.flat(tio, yield);
+        // Initialize the ORAM in explicit mode
+        A.explicitonly(true);
+        for (address_t i=0; i<len; ++i) {
+            RegAS v;
+            v.set((player*0xffff+1)*i);
+            A[i] = v;
+        }
+        A.explicitonly(false);
+        // Add 0 to A[0], which reblinds the whole database
+        RegAS z;
+        A[z] += z;
+        auto check = A.reconstruct();
+        if (player == 0) {
+            for (address_t i=0;i<len;++i) {
+                if (depth <= 10) {
+                    printf("%04x %016lx\n", i, check[i].share());
+                }
+            }
+            printf("\n");
+        }
+        address_t maxsize = address_t(1)<<depth;
+        Duoram<RegAS>::Pad P(A, tio, yield, maxsize);
+        for (address_t i=0; i<maxsize; ++i) {
+            RegAS v = P[i];
+            if (depth <= 10) {
+                value_t vval = mpc_reconstruct(tio, yield, v);
+                printf("%04x %016lx %016lx\n", i, v.share(), vval);
+            }
+        }
+        printf("\n");
+        for (address_t i=0; i<maxsize; ++i) {
+            RegAS ind;
+            ind.set(player*i);
+            RegAS v = P[ind];
+            if (depth <= 10) {
+                value_t vval = mpc_reconstruct(tio, yield, v);
+                printf("%04x %016lx %016lx\n", i, v.share(), vval);
+            }
+        }
+        printf("\n");
+    });
+}
+
+
 static void bsearch_test(MPCIO &mpcio,
     const PRACOptions &opts, char **args)
 {
@@ -1292,6 +1357,9 @@ void online_main(MPCIO &mpcio, const PRACOptions &opts, char **args)
     } else if (!strcmp(*args, "sorttest")) {
         ++args;
         sort_test(mpcio, opts, args);
+    } else if (!strcmp(*args, "padtest")) {
+        ++args;
+        pad_test(mpcio, opts, args);
     } else if (!strcmp(*args, "bsearch")) {
         ++args;
         bsearch_test(mpcio, opts, args);

+ 195 - 0
shapes.hpp

@@ -0,0 +1,195 @@
+#ifndef __SHAPES_HPP__
+#define __SHAPES_HPP__
+
+// Various Shapes beyond the standard Flat (in duoram.hpp)
+
+#include "duoram.hpp"
+
+
+// A Pad is a Shape that pads an underlying Shape so that read accesses
+// past the end return a fixed constant value.  Do _not_ write into a
+// Pad!
+
+template <typename T>
+class Duoram<T>::Pad : public Duoram<T>::Shape {
+    T *padvalp;
+    T *peerpadvalp;
+    T *zerop;
+    address_t padded_size;
+
+    inline size_t indexmap(size_t idx) const override {
+        return this->parent.indexmap(idx);
+    }
+
+    Pad &operator=(const Pad &) = delete;
+
+public:
+    // Constructor
+    Pad(Shape &parent, MPCTIO &tio, yield_t &yield,
+        address_t padded_size, value_t padval = 0x7fffffffffffffff);
+
+    // Copy the given Pad except for the tio and yield
+    Pad(const Pad &copy_from, MPCTIO &tio, yield_t &yield);
+
+    // Destructor
+    ~Pad();
+
+    // Update the context (MPCTIO and yield if you've started a new
+    // thread, or just yield if you've started a new coroutine in the
+    // same thread).  Returns a new Shape with an updated context.
+    Pad context(MPCTIO &new_tio, yield_t &new_yield) const {
+        return Pad(*this, new_tio, new_yield);
+    }
+    Pad context(yield_t &new_yield) const {
+        return Pad(*this, this->tio, new_yield);
+    }
+
+    // Get a pair (for the server) of references to the underlying
+    // Duoram entries at share virtual index idx.  (That is, it gets
+    // duoram.p0_blind[indexmap(idx)], etc.)
+    inline std::tuple<T&,T&> get_server(size_t idx,
+        std::nullopt_t null = std::nullopt) const override {
+        if (idx < this->parent.shape_size) {
+            size_t physaddr = indexmap(idx);
+            return std::tie(
+                this->duoram.p0_blind[physaddr],
+                this->duoram.p1_blind[physaddr]);
+        } else {
+            return std::tie(*zerop, *zerop);
+        }
+    }
+
+    // Get a triple (for the computational players) of references to the
+    // underlying Duoram entries at share virtual index idx.  (That is,
+    // it gets duoram.database[indexmap(idx)], etc.)
+    inline std::tuple<T&,T&,T&> get_comp(size_t idx,
+        std::nullopt_t null = std::nullopt) const override {
+        if (idx < this->parent.shape_size) {
+            size_t physaddr = indexmap(idx);
+            return std::tie(
+                this->duoram.database[physaddr],
+                this->duoram.blind[physaddr],
+                this->duoram.peer_blinded_db[physaddr]);
+        } else {
+            return std::tie(*padvalp, *zerop, *peerpadvalp);
+        }
+    }
+
+    // Index into this Pad in various ways
+    typename Duoram::Shape::template MemRefS<RegAS,T,std::nullopt_t,Pad>
+            operator[](const RegAS &idx) {
+        typename Duoram<T>::Shape::
+            template MemRefS<RegAS,T,std::nullopt_t,Pad>
+            res(*this, idx, std::nullopt);
+        return res;
+    }
+    typename Duoram::Shape::template MemRefS<RegXS,T,std::nullopt_t,Pad>
+            operator[](const RegXS &idx) {
+        typename Duoram<T>::Shape::
+            template MemRefS<RegXS,T,std::nullopt_t,Pad>
+            res(*this, idx, std::nullopt);
+        return res;
+    }
+    typename Duoram::Shape::template MemRefExpl<T,std::nullopt_t>
+            operator[](address_t idx) {
+        typename Duoram<T>::Shape::
+            template MemRefExpl<T,std::nullopt_t>
+            res(*this, idx, std::nullopt);
+        return res;
+    }
+    template <typename U>
+    Duoram::Shape::MemRefInd<U, Pad>
+            operator[](const std::vector<U> &indcs) {
+        typename Duoram<T>::Shape::
+            template MemRefInd<U,Pad>
+            res(*this, indcs);
+        return res;
+    }
+    template <typename U, size_t N>
+    Duoram::Shape::MemRefInd<U, Pad>
+            operator[](const std::array<U,N> &indcs) {
+        typename Duoram<T>::Shape::
+            template MemRefInd<U,Pad>
+            res(*this, indcs);
+        return res;
+    }
+};
+
+
+// A Stride is a Shape that represents evenly spaced elements of its
+// parent Shape, starting with some offset, and then every stride
+// elements.
+
+template <typename T>
+class Duoram<T>::Stride : public Duoram<T>::Shape {
+    size_t offset;
+    size_t stride;
+
+    inline size_t indexmap(size_t idx) const override {
+        size_t paridx = offset + idx*stride;
+        return this->parent.indexmap(paridx);
+    }
+
+public:
+    // Constructor
+    Stride(Shape &parent, MPCTIO &tio, yield_t &yield, size_t offset,
+        size_t stride);
+
+    // Copy the given Stride except for the tio and yield
+    Stride(const Stride &copy_from, MPCTIO &tio, yield_t &yield) :
+        Shape(copy_from, tio, yield), offset(copy_from.offset),
+        stride(copy_from.stride) {}
+
+    // Update the context (MPCTIO and yield if you've started a new
+    // thread, or just yield if you've started a new coroutine in the
+    // same thread).  Returns a new Shape with an updated context.
+    Stride context(MPCTIO &new_tio, yield_t &new_yield) const {
+        return Stride(*this, new_tio, new_yield);
+    }
+    Stride context(yield_t &new_yield) const {
+        return Stride(*this, this->tio, new_yield);
+    }
+
+    // Index into this Stride in various ways
+    typename Duoram::Shape::template MemRefS<RegAS,T,std::nullopt_t,Stride>
+            operator[](const RegAS &idx) {
+        typename Duoram<T>::Shape::
+            template MemRefS<RegAS,T,std::nullopt_t,Stride>
+            res(*this, idx, std::nullopt);
+        return res;
+    }
+    typename Duoram::Shape::template MemRefS<RegXS,T,std::nullopt_t,Stride>
+            operator[](const RegXS &idx) {
+        typename Duoram<T>::Shape::
+            template MemRefS<RegXS,T,std::nullopt_t,Stride>
+            res(*this, idx, std::nullopt);
+        return res;
+    }
+    typename Duoram::Shape::template MemRefExpl<T,std::nullopt_t>
+            operator[](address_t idx) {
+        typename Duoram<T>::Shape::
+            template MemRefExpl<T,std::nullopt_t>
+            res(*this, idx, std::nullopt);
+        return res;
+    }
+    template <typename U>
+    Duoram::Shape::MemRefInd<U, Stride>
+            operator[](const std::vector<U> &indcs) {
+        typename Duoram<T>::Shape::
+            template MemRefInd<U,Stride>
+            res(*this, indcs);
+        return res;
+    }
+    template <typename U, size_t N>
+    Duoram::Shape::MemRefInd<U, Stride>
+            operator[](const std::array<U,N> &indcs) {
+        typename Duoram<T>::Shape::
+            template MemRefInd<U,Stride>
+            res(*this, indcs);
+        return res;
+    }
+};
+
+#include "shapes.tcc"
+
+#endif

+ 63 - 0
shapes.tcc

@@ -0,0 +1,63 @@
+#ifndef __SHAPES_TCC__
+#define __SHAPES_TCC__
+
+// Constructor for the Pad shape.
+template <typename T>
+Duoram<T>::Pad::Pad(Shape &parent, MPCTIO &tio, yield_t &yield,
+    address_t padded_size, size_t padval) :
+    Shape(parent, parent.duoram, tio, yield)
+{
+    int player = tio.player();
+    padvalp = new T;
+    padvalp->set(player*padval);
+    zerop = new T;
+    peerpadvalp = new T;
+    peerpadvalp->set((1-player)*padval);
+    this->set_shape_size(padded_size);
+}
+
+// Copy the given Pad except for the tio and yield
+template <typename T>
+Duoram<T>::Pad::Pad(const Pad &copy_from, MPCTIO &tio, yield_t &yield) :
+    Shape(copy_from, tio, yield)
+{
+    padvalp = new T;
+    padvalp->set(copy_from.padvalp->share());
+    zerop = new T;
+    peerpadvalp = new T;
+    peerpadvalp->set(copy_from.peerpadvalp->share());
+}
+
+// Destructor
+template <typename T>
+Duoram<T>::Pad::~Pad()
+{
+    delete padvalp;
+    delete zerop;
+    delete peerpadvalp;
+}
+
+// Constructor for the Stride shape.
+template <typename T>
+Duoram<T>::Stride::Stride(Shape &parent, MPCTIO &tio, yield_t &yield,
+    size_t offset, size_t stride) :
+    Shape(parent, parent.duoram, tio, yield)
+{
+    size_t parentsize = parent.size();
+    if (offset > parentsize) {
+        offset = parentsize;
+    }
+    this->offset = offset;
+    this->stride = stride;
+    // How many items are there if you take every stride'th item,
+    // starting at offset?  strideregionsize corrects for the offset, so
+    // we're asking how many multiples of stride are there strictly less
+    // than strideregionsize.  That's just ceil(strideregionsize/stride)
+    // which is the same as (strideregionsize + stride - 1)/stride with
+    // integer truncated division.  A zero stride yields an empty Shape.
+    size_t strideregionsize = parentsize - offset;
+    size_t numelements = stride ? (strideregionsize + stride - 1) / stride : 0;
+    this->set_shape_size(numelements);
+}
+
+#endif