
Simultaneous independent reads and updates

Ian Goldberg 1 year ago
parent
commit
e9fa1a0289
3 changed files with 120 additions and 16 deletions
  1. duoram.hpp  +47 −0
  2. duoram.tcc  +62 −0
  3. online.cpp  +11 −16

+ 47 - 0
duoram.hpp

@@ -98,6 +98,12 @@ class Duoram<T>::Shape {
     class MemRefS;
     // When x is unshared explicit value
     class MemRefExpl;
+    // When x is a vector or array of values of type U, used to denote a
+    // collection of independent memory operations that can be performed
+    // simultaneously.  Sh is the specific Shape subtype used to create
+    // the MemRefInd.
+    template <typename U, typename Sh>
+    class MemRefInd;
 
 protected:
     // A reference to the parent shape.  As with ".." in the root
@@ -257,6 +263,16 @@ public:
         return Flat(*this, this->tio, new_yield);
     }
 
+    // Generate independent memory references for this Flat
+    template <typename U>
+    Duoram::Shape::MemRefInd<U, Flat> indep(const std::vector<U> &indcs) {
+        return typename Duoram::Shape::MemRefInd<U,Flat>(*this, indcs);
+    }
+    template <typename U, size_t N>
+    Duoram::Shape::MemRefInd<U, Flat> indep(const std::array<U,N> &indcs) {
+        return typename Duoram::Shape::MemRefInd<U,Flat>(*this, indcs);
+    }
+
     // Oblivious sort the elements indexed by the two given indices.
     // Without reconstructing the values, if dir=0, this[idx1] will
     // become a share of the smaller of the reconstructed values, and
@@ -347,6 +363,37 @@ public:
     MemRefExpl &operator-=(const T& M) { *this += (-M); return *this; }
 };
 
+// A collection of independent memory references that can be processed
+// simultaneously.  You get one of these from a Shape A (of specific
+// subclass Sh) and a vector or array of indices v with each element of
+// type U.
+
+template <typename T> template <typename U, typename Sh>
+class Duoram<T>::Shape::MemRefInd {
+    Sh &shape;
+    std::vector<U> indcs;
+
+public:
+    MemRefInd(Sh &shape, std::vector<U> indcs) :
+        shape(shape), indcs(indcs) {}
+    template <size_t N>
+    MemRefInd(Sh &shape, std::array<U,N> aindcs) :
+        shape(shape) { for ( auto &i : aindcs ) { indcs.push_back(i); } }
+
+    // Explicit read from a given index of Duoram memory
+    operator std::vector<T>();
+
+    // Explicit update to a given index of Duoram memory
+    MemRefInd &operator+=(const std::vector<T>& M);
+    template <size_t N>
+    MemRefInd &operator+=(const std::array<T,N>& M);
+
+    // Convenience function
+    MemRefInd &operator-=(const std::vector<T>& M) { *this += (-M); return *this; }
+    template <size_t N>
+    MemRefInd &operator-=(const std::array<T,N>& M) { *this += (-M); return *this; }
+};
+
 #include "duoram.tcc"
 
 #endif
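
The indep() calls added above are the user-facing entry point. A minimal usage sketch, assuming a Flat named A over a Duoram<RegAS> has already been set up (as in duoram_test in online.cpp below); the index and delta names are placeholders:

    // Three additively shared indices (placeholder names)
    std::array<RegAS, 3> idxs = { idx1, idx2, idx3 };

    // Three independent reads of A, performed simultaneously
    std::vector<RegAS> vals = A.indep(idxs);

    // Three independent additive updates to A, performed simultaneously
    A.indep(idxs) += std::array<RegAS, 3> { d1, d2, d3 };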

+ 62 - 0
duoram.tcc

@@ -596,3 +596,65 @@ typename Duoram<T>::Shape::MemRefExpl
     }
     return *this;
 }
+
+// Independent U-shared reads into a Shape of subtype Sh on a Duoram
+// with values of sharing type T
+template <typename T> template <typename U, typename Sh>
+Duoram<T>::Shape::MemRefInd<U,Sh>::operator std::vector<T>()
+{
+    std::vector<T> res;
+    size_t size = indcs.size();
+    res.resize(size);
+    std::vector<coro_t> coroutines;
+    for (size_t i=0;i<size;++i) {
+        coroutines.emplace_back([this, &res, i] (yield_t &yield) {
+            Sh Sh_coro = shape.context(yield);
+            res[i] = Sh_coro[indcs[i]];
+        });
+    }
+    run_coroutines(shape.yield, coroutines);
+
+    return res;
+}
+
+// Independent U-shared updates into a Shape of subtype Sh on a Duoram
+// with values of sharing type T (vector version)
+template <typename T> template <typename U, typename Sh>
+typename Duoram<T>::Shape::template MemRefInd<U,Sh>
+    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator+=(const std::vector<T>& M)
+{
+    size_t size = indcs.size();
+    assert(M.size() == size);
+
+    std::vector<coro_t> coroutines;
+    for (size_t i=0;i<size;++i) {
+        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
+            Sh Sh_coro = shape.context(yield);
+            Sh_coro[indcs[i]] += M[i];
+        });
+    }
+    run_coroutines(shape.yield, coroutines);
+
+    return *this;
+}
+
+// Independent U-shared updates into a Shape of subtype Sh on a Duoram
+// with values of sharing type T (array version)
+template <typename T> template <typename U, typename Sh> template <size_t N>
+typename Duoram<T>::Shape::template MemRefInd<U,Sh>
+    &Duoram<T>::Shape::MemRefInd<U,Sh>::operator+=(const std::array<T,N>& M)
+{
+    size_t size = indcs.size();
+    assert(N == size);
+
+    std::vector<coro_t> coroutines;
+    for (size_t i=0;i<size;++i) {
+        coroutines.emplace_back([this, &M, i] (yield_t &yield) {
+            Sh Sh_coro = shape.context(yield);
+            Sh_coro[indcs[i]] += M[i];
+        });
+    }
+    run_coroutines(shape.yield, coroutines);
+
+    return *this;
+}

+ 11 - 16
online.cpp

@@ -612,22 +612,17 @@ static void duoram_test(MPCIO &mpcio,
         }
 
         // Simultaneous independent reads
-        std::vector<T> Av;
-        Av.resize(3);
-        std::vector<coro_t> coroutines;
-        run_coroutines(yield,
-            [&A, &Av, &aidx] (yield_t &yield) {
-                auto Acoro = A.context(yield);
-                Av[0] = Acoro[aidx];
-            },
-            [&A, &Av, &aidx2] (yield_t &yield) {
-                auto Acoro = A.context(yield);
-                Av[1] = Acoro[aidx2];
-            },
-            [&A, &Av, &aidx3] (yield_t &yield) {
-                auto Acoro = A.context(yield);
-                Av[2] = Acoro[aidx3];
-            });
+        std::vector<T> Av = A.indep(std::array {
+            aidx, aidx2, aidx3
+        });
+
+        // Simultaneous independent updates
+        T Aw1, Aw2, Aw3;
+        Aw1.set(0x101010101010101 * tio.player());
+        Aw2.set(0x202020202020202 * tio.player());
+        Aw3.set(0x303030303030303 * tio.player());
+        A.indep(std::array { aidx, aidx2, aidx3 }) +=
+            std::array { Aw1, Aw2, Aw3 };
 
         if (depth <= 10) {
             oram.dump();
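
Since the values are additively shared, the two computational players' shares of Aw1 sum to 0x101010101010101 * (0 + 1), so the reconstructed entry at aidx grows by exactly 0x101010101010101 after the update (and aidx2, aidx3 by 0x202020202020202 and 0x303030303030303). A hypothetical follow-up check, not part of this commit, could re-read the same locations with another independent read:

    // Hypothetical check: re-read the three updated locations simultaneously
    std::vector<T> Av2 = A.indep(std::array { aidx, aidx2, aidx3 });
    // Reconstructing Av2[i] - Av[i] across the players would show the
    // expected increments for each of the three entries.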