Browse Source

Merge branch 'avadapal/heaps'

Ian Goldberg 5 months ago
parent
commit
62d835388b

+ 14 - 10
Makefile

@@ -9,7 +9,7 @@ LDLIBS=-lbsd -lboost_system -lboost_context -lboost_chrono -lboost_thread -lpthr
 
 BIN=prac
 SRCS=prac.cpp mpcio.cpp preproc.cpp online.cpp mpcops.cpp rdpf.cpp \
-    cdpf.cpp duoram.cpp cell.cpp bst.cpp avl.cpp
+    cdpf.cpp duoram.cpp cell.cpp bst.cpp avl.cpp heap.cpp
 OBJS=$(SRCS:.cpp=.o)
 ASMS=$(SRCS:.cpp=.s)
 
@@ -42,7 +42,7 @@ preproc.o: rdpf.tcc mpcops.hpp mpcops.tcc cdpf.hpp cdpf.tcc
 online.o: online.hpp mpcio.hpp types.hpp bitutils.hpp corotypes.hpp mpcio.tcc
 online.o: options.hpp mpcops.hpp coroutine.hpp mpcops.tcc rdpf.hpp dpf.hpp
 online.o: prg.hpp aes.hpp rdpf.tcc duoram.hpp duoram.tcc cdpf.hpp cdpf.tcc
-online.o: cell.hpp shapes.hpp shapes.tcc bst.hpp avl.hpp
+online.o: cell.hpp heap.hpp shapes.hpp shapes.tcc bst.hpp avl.hpp
 mpcops.o: mpcops.hpp types.hpp bitutils.hpp mpcio.hpp corotypes.hpp mpcio.tcc
 mpcops.o: coroutine.hpp mpcops.tcc
 rdpf.o: rdpf.hpp mpcio.hpp types.hpp bitutils.hpp corotypes.hpp mpcio.tcc
@@ -50,14 +50,18 @@ rdpf.o: coroutine.hpp dpf.hpp prg.hpp aes.hpp rdpf.tcc mpcops.hpp mpcops.tcc
 cdpf.o: bitutils.hpp cdpf.hpp mpcio.hpp types.hpp corotypes.hpp mpcio.tcc
 cdpf.o: coroutine.hpp dpf.hpp prg.hpp aes.hpp cdpf.tcc
 duoram.o: duoram.hpp types.hpp bitutils.hpp mpcio.hpp corotypes.hpp mpcio.tcc
-duoram.o: coroutine.hpp duoram.tcc mpcops.hpp mpcops.tcc cdpf.hpp dpf.hpp
-duoram.o: prg.hpp aes.hpp cdpf.tcc rdpf.hpp rdpf.tcc shapes.hpp shapes.tcc
+duoram.o: coroutine.hpp rdpf.hpp dpf.hpp prg.hpp aes.hpp rdpf.tcc mpcops.hpp
+duoram.o: mpcops.tcc duoram.tcc cdpf.hpp cdpf.tcc shapes.hpp shapes.tcc
 cell.o: types.hpp bitutils.hpp duoram.hpp mpcio.hpp corotypes.hpp mpcio.tcc
-cell.o: coroutine.hpp duoram.tcc mpcops.hpp mpcops.tcc cdpf.hpp dpf.hpp
-cell.o: prg.hpp aes.hpp cdpf.tcc rdpf.hpp rdpf.tcc cell.hpp options.hpp
+cell.o: coroutine.hpp rdpf.hpp dpf.hpp prg.hpp aes.hpp rdpf.tcc mpcops.hpp
+cell.o: mpcops.tcc duoram.tcc cdpf.hpp cdpf.tcc cell.hpp options.hpp
 bst.o: bst.hpp types.hpp bitutils.hpp duoram.hpp mpcio.hpp corotypes.hpp
-bst.o: mpcio.tcc coroutine.hpp duoram.tcc mpcops.hpp mpcops.tcc cdpf.hpp
-bst.o: dpf.hpp prg.hpp aes.hpp cdpf.tcc rdpf.hpp rdpf.tcc options.hpp
+bst.o: mpcio.tcc coroutine.hpp rdpf.hpp dpf.hpp prg.hpp aes.hpp rdpf.tcc
+bst.o: mpcops.hpp mpcops.tcc duoram.tcc cdpf.hpp cdpf.tcc options.hpp
 avl.o: avl.hpp types.hpp bitutils.hpp duoram.hpp mpcio.hpp corotypes.hpp
-avl.o: mpcio.tcc coroutine.hpp duoram.tcc mpcops.hpp mpcops.tcc cdpf.hpp
-avl.o: dpf.hpp prg.hpp aes.hpp cdpf.tcc rdpf.hpp rdpf.tcc options.hpp bst.hpp
+avl.o: mpcio.tcc coroutine.hpp rdpf.hpp dpf.hpp prg.hpp aes.hpp rdpf.tcc
+avl.o: mpcops.hpp mpcops.tcc duoram.tcc cdpf.hpp cdpf.tcc options.hpp bst.hpp
+heap.o: types.hpp bitutils.hpp duoram.hpp mpcio.hpp corotypes.hpp mpcio.tcc
+heap.o: coroutine.hpp rdpf.hpp dpf.hpp prg.hpp aes.hpp rdpf.tcc mpcops.hpp
+heap.o: mpcops.tcc duoram.tcc cdpf.hpp cdpf.tcc cell.hpp options.hpp
+heap.o: shapes.hpp shapes.tcc heap.hpp

+ 1 - 1
docker/Dockerfile

@@ -1,6 +1,6 @@
 FROM ubuntu:22.04
 ARG DEBIAN_FRONTEND=noninteractive
-RUN apt update && apt install -y wget git build-essential net-tools iproute2 iperf iputils-ping libbsd-dev libboost-all-dev numactl
+RUN apt update && apt install -y wget git build-essential net-tools iproute2 iperf iputils-ping libbsd-dev libboost-all-dev numactl time
 WORKDIR /root
 COPY . prac
 WORKDIR /root/prac

+ 5 - 0
docker/build-docker

@@ -1,4 +1,9 @@
 #!/bin/bash
 
+# cd into the directory containing this script (from the bash faq 028)
+if [[ $BASH_SOURCE = */* ]]; then
+  cd -- "${BASH_SOURCE%/*}/" || exit
+fi
+
 cd ..
 docker build $* -t prac -f docker/Dockerfile .

+ 34 - 3
docker/run-experiment

@@ -5,12 +5,43 @@ p0addr=$(docker inspect --format='{{ .NetworkSettings.IPAddress }}' prac_p0)
 p1addr=$(docker inspect --format='{{ .NetworkSettings.IPAddress }}' prac_p1)
 
 # Gather options and arguments
+preproc=0
 opts=""
 while getopts paot:ex arg; do
     opts+=" -${arg} ${OPTARG}"
+    if [ "$arg" = "p" -o "$arg" = "a" ]; then
+        preproc=1
+    fi
 done
 shift $((OPTIND-1))
 
+# Get the number of cores usable by each party
+ncores_p0=1
+ncores_p1=1
+ncores_p2=1
+if [ "$whichexps" != "none" ]; then
+    ncores_p0=`docker exec -i prac_p0 $PRAC_NUMA_P0 nproc 2>/dev/null`
+    ncores_p1=`docker exec -i prac_p1 $PRAC_NUMA_P1 nproc 2>/dev/null`
+    ncores_p2=`docker exec -i prac_p2 $PRAC_NUMA_P2 nproc 2>/dev/null`
+fi
+
+ptarg_p0=""
+ptarg_p1=""
+ptarg_p2=""
+targ_p0=""
+targ_p1=""
+targ_p2=""
+
+if [ "$preproc" = 1 ]; then
+    ptarg_p0="p:${ncores_p0}"
+    ptarg_p1="p:${ncores_p1}"
+    ptarg_p2="p:${ncores_p2}"
+else
+    targ_p0="-t ${ncores_p0}"
+    targ_p1="-t ${ncores_p1}"
+    targ_p2="-t ${ncores_p2}"
+fi
+
 echo ===== Running prac $opts -- $*
 date "+===== Start %s %F %T"
 
@@ -18,9 +49,9 @@ date "+===== Start %s %F %T"
 savefile0=$$.p0.out
 savefile1=$$.p1.out
 savefile2=$$.p2.out
-docker exec -w /root/prac prac_p0 bash -c "$PRAC_NUMA_P0 stdbuf -o 0 ./prac $opts 0 $* > $savefile0" &
-docker exec -w /root/prac prac_p1 bash -c "$PRAC_NUMA_P1 stdbuf -o 0 ./prac $opts 1 $p0addr $* > $savefile1" &
-docker exec -w /root/prac prac_p2 bash -c "$PRAC_NUMA_P2 stdbuf -o 0 ./prac $opts 2 $p0addr $p1addr $* > $savefile2" &
+docker exec -w /root/prac prac_p0 bash -c "$PRAC_NUMA_P0 stdbuf -o 0 ./prac $opts $targ_p0 0 $* $ptarg_p0 > $savefile0 2>&1" &
+docker exec -w /root/prac prac_p1 bash -c "$PRAC_NUMA_P1 stdbuf -o 0 ./prac $opts $targ_p1 1 $p0addr $* $ptarg_p1 > $savefile1 2>&1" &
+docker exec -w /root/prac prac_p2 bash -c "$PRAC_NUMA_P2 stdbuf -o 0 ./prac $opts $targ_p2 2 $p0addr $p1addr $* $ptarg_p2 > $savefile2 2>&1" &
 wait
 echo ===== P0 output
 docker exec -w /root/prac prac_p0 cat $savefile0

+ 5 - 0
docker/set-networking

@@ -1,5 +1,10 @@
 #!/bin/bash
 
+# cd into the directory containing this script (from the bash faq 028)
+if [[ $BASH_SOURCE = */* ]]; then
+  cd -- "${BASH_SOURCE%/*}/" || exit
+fi
+
 latency=30ms
 bw=100mbit
 

+ 3 - 3
docker/stop-docker

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-docker stop prac_p0 &
-docker stop prac_p1 &
-docker stop prac_p2 &
+docker stop -t 0 prac_p0 &
+docker stop -t 0 prac_p1 &
+docker stop -t 0 prac_p2 &
 wait

+ 40 - 3
duoram.hpp

@@ -7,6 +7,7 @@
 #include "types.hpp"
 #include "mpcio.hpp"
 #include "coroutine.hpp"
+#include "rdpf.hpp"
 
 // Implementation of the 3-party protocols described in:
 // Adithya Vadapalli, Ryan Henry, Ian Goldberg, "Duoram: A
@@ -535,12 +536,46 @@ public:
         next_windex(0), incremental(true), idx(RegXS())
     {
         if (player < 2) {
-            dt = tio.rdpftriple(yield, depth, true);
+            dt = tio.rdpftriple<WIDTH>(yield, depth, true);
         } else {
-            dp = tio.rdpfpair(yield, depth, true);
+            dp = tio.rdpfpair<WIDTH>(yield, depth, true);
         }
     }
 
+
+   // The function unit_vector takes in an XOR-share of an index foundidx and a size nitems
+   // The function outputs _boolean shares_ of a standard-basis vector of size nitems (with the non-zero entry at index foundidx)
+   // For example suppose nitems = 6; and suppose P0 and P1 take parameters foundidx0 and foundidx1 such that, foundidx0 \oplus foundidx1 = 3
+   // P0 and P1 output vectors r0 and r1 such that r0 \oplus r1 = [000100]
+   std::vector<RegBS> unit_vector(MPCTIO &tio, yield_t &yield, size_t nitems, RegXS foundidx)
+   {
+      std::vector<RegBS> standard_basis(nitems);
+
+      if (player < 2) {
+          U indoffset;
+          dt->get_target(indoffset);
+          indoffset -= foundidx;
+          U peerindoffset;
+          tio.queue_peer(&indoffset, BITBYTES(curdepth));
+          yield();
+          tio.recv_peer(&peerindoffset, BITBYTES(curdepth));
+          auto indshift = combine(indoffset, peerindoffset, curdepth);
+
+          // Pick one of the DPF triples, we can also pick dpf[0] or dpf[2]
+          auto se = StreamEval(dt->dpf[1], 0, indshift,  tio.aes_ops(), true);
+
+          for (size_t j = 0; j < nitems; ++j) {
+               typename RDPF<WIDTH>::LeafNode  leaf = se.next();
+               standard_basis[j] = dt->dpf[1].unit_bs(leaf);
+          }
+
+       } else {
+          yield();
+       }
+
+       return standard_basis;
+    }
+
     // Incrementally append a (shared) bit to the oblivious index
     void incr(RegBS bit)
     {
@@ -557,6 +592,8 @@ public:
     // Get a copy of the index
     U index() { return idx; }
 
+    nbits_t depth() {return curdepth;}
+
     // Get the next wide-RDPF index
     nbits_t windex() { assert(next_windex < WIDTH); return next_windex++; }
 };
@@ -611,7 +648,7 @@ public:
         oblividx = &obidx;
     }
 
-    // Create a MemRefExpl for accessing a partcular field of T
+    // Create a MemRefS for accessing a particular field of T
     template <typename SFT>
     MemRefS<U,SFT,SFT T::*,Sh,WIDTH> field(SFT T::*subfieldsel) {
         auto res = MemRefS<U,SFT,SFT T::*,Sh,WIDTH>(this->shape,

+ 10 - 6
duoram.tcc

@@ -89,22 +89,26 @@ void Duoram<T>::Shape::explicitonly(bool enable)
 
 // For debugging or checking your answers (using this in general is
 // of course insecure)
-// This one reconstructs the whole database
+// This one reconstructs the whole Shape
 template <typename T>
 std::vector<T> Duoram<T>::Shape::reconstruct() const
 {
     int player = tio.player();
     std::vector<T> res;
-    res.resize(duoram.size());
+    res.resize(shape_size);
     // Player 1 sends their share of the database to player 0
     if (player == 1) {
-        tio.queue_peer(duoram.database.data(), duoram.size()*sizeof(T));
+        for (size_t i=0; i < shape_size; ++i) {
+            T elt = std::get<0>(get_comp(i));
+            tio.queue_peer(&elt, sizeof(T));
+        }
         yield();
     } else if (player == 0) {
         yield();
-        tio.recv_peer(res.data(), duoram.size()*sizeof(T));
-        for(size_t i=0;i<duoram.size();++i) {
-            res[i] += duoram.database[i];
+        for(size_t i=0; i < shape_size; ++i) {
+            tio.recv_peer(&res[i], sizeof(T));
+            T myelt = std::get<0>(get_comp(i));
+            res[i] += myelt;
         }
     } else if (player == 2) {
         // The server (player 2) only syncs with the yield

+ 809 - 0
heap.cpp

@@ -0,0 +1,809 @@
+#include <functional>
+#include "types.hpp"
+#include "duoram.hpp"
+#include "cell.hpp"
+#include "rdpf.hpp"
+#include "shapes.hpp"
+#include "heap.hpp"
+
+/*
+The heap data structure is stored in an array with the starting index as 1 (and not 0)
+For nodes stored in index i of the array, the parent is stored at i/2 and
+The left and right children are stored at 2i and 2i + 1
+All the unused array indices have MAX_INT stored in them
+
+                                 x1
+                               /   \
+                              x2    x3
+                             /  \   / \
+                            x4  x5 x6  ()
+
+  A Heap like above is stored in array like below.
+
+  NULL| x1 | x2 | x3 | x4 | x5 | x6 | MAXINT |
+
+*/
+
+/*
+The Optimized Insert Protocol
+Takes in the additive share of the value to be inserted
+and adds the value into the heap while keeping the heap property intact
+
+ _Protocol 4_ from PRAC: Round-Efficient 3-Party MPC for Dynamic Data Structures
+  Consider the following insertion path with:  x0 < x1 < x2 < NewElement < x3 < x4
+
+        x0                      x0                               x0
+        / \                    /  \                             /  \
+          x1                      x1                                x1
+         /                        /                                 /
+        x2                       x2                                x2
+         \                        \                                 \
+          x3                      ( )                               NewElement
+           \                        \                                 \
+            x4                       x3                                x3
+           /                        /                                 /
+         ( )                       x4                                x4
+
+      (Path with new element)       (binary search to determine             (After insertion)
+                                     the point where New Element
+                                     should be and shift the elements
+                                     from that point down the path
+                                     from the point)
+
+ The insert protocol begins by adding an empty node at the end of the heap array
+ The key observation is that after the insert operation, the only entries that might change are the ones on the path from the root to the new node
+ The path from the root to the new node is determined based on the number of entries in the heap, which is publicly known
+ The observation is that this path starts off sorted and will end up with the new element (NewElement) inserted into the correct position, preserving the sorted property of the path
+ The length of the path is logarithmic with respect to the heap size (path length = log(heap size))
+ To find the appropriate insertion position, we use binary search with a single IDPF of height logarithmic with respect to the logarithm of the heap size (IDPF height = log(log(heap size)))
+ The advice bits of the IDPF correspond to the bit shares of a vector 'flag' with a single '1' indicating the position where the new value (insertval) must be inserted.
+ The shares of 'flag' are locally converted to shares of a vector 'u = [000011111]' using running XORs.
+ The bits of 'flag' and 'u' are then used in parallel Flag-Word multiplications, totaling 2 times the logarithm of the heap size, to shift the elements greater than 'insertval' down one position
+ And write 'insertval' into the resulting empty location in the path
+ This process requires a single message of communication
+ The protocol requires one binary search on a database of size log(heap size) (height of the tree)
+ Overall, the insert protocol achieves efficient insertion of a new element into the heap, with a complexity of log(log(heap size)) oblivious comparisons
+ and 2 x log(heap size) flag multiplications. The flag multiplications
+ are all done in a single round.
+*/
+void MinHeap::insert_optimized(MPCTIO tio, yield_t & yield, RegAS val) {
+    auto HeapArray = oram.flat(tio, yield);
+    num_items++;
+    typename Duoram<RegAS>::Path P(HeapArray, tio, yield, num_items);
+    const RegXS foundidx = P.binary_search(val);
+    size_t childindex = num_items;
+    // height is the number of nodes on the path from root to the leaf
+    uint64_t height = P.size();
+    RegAS zero;
+    HeapArray[childindex] = zero;
+
+
+    #ifdef HEAP_VERBOSE
+    uint64_t val_reconstruction = mpc_reconstruct(tio, yield, val);
+    std::cout << "val_reconstruction = " << val_reconstruction << std::endl;
+    #endif
+
+    uint64_t  logheight = std::floor(double(std::log2(height))) + 1;
+
+    std::vector<RegBS> flag;
+    std::vector<RegBS> u(height);
+    typename Duoram<RegAS>::template OblivIndex<RegXS,1> oidx(tio, yield, foundidx, logheight);
+    flag = oidx.unit_vector(tio, yield, height, foundidx);
+
+    #ifdef HEAP_VERBOSE
+    uint64_t foundidx_reconstruction = mpc_reconstruct(tio, yield, foundidx);
+    std::cout << "foundidx_reconstruction = " << foundidx_reconstruction << std::endl;
+    std::cout << std::endl << " =============== " << std::endl;
+    for (size_t j = 0; j < height; ++j) {
+        uint64_t reconstruction = mpc_reconstruct(tio, yield, flag[j]);
+        std::cout << " --->> flag[" << j << "] = " << reconstruction  <<  std::endl;
+    }
+    #endif
+
+    for (size_t j = 0; j < height; ++j) {
+        if(j > 0) {
+            u[j] = flag[j] ^ u[j-1];
+        } else {
+            u[j] = flag[j];
+        }
+    }
+
+    #ifdef HEAP_VERBOSE
+    for (size_t j = 0; j < height; ++j) {
+        uint64_t reconstruction = mpc_reconstruct(tio, yield, u[j]);
+        std::cout << " --->> [0000111111]][" << j << "] = " << reconstruction << std::endl;
+    }
+    #endif
+
+    std::vector<RegAS> path(height);
+    std::vector<RegAS> w(height);
+    std::vector<RegAS> v(height);
+
+    for (size_t j = 0; j < height; ++j) path[j] = P[j];
+
+    std::vector<coro_t> coroutines;
+    for (size_t j = 0; j < height; ++j) {
+        if (j > 0) {
+            coroutines.emplace_back(
+                    [&tio, &w, &u, &path, j](yield_t &yield) {
+                mpc_flagmult(tio, yield, w[j], u[j-1], path[j-1]-path[j]);
+            }
+            );
+        }
+        coroutines.emplace_back(
+                [&tio, &v, flag, val, &path, j](yield_t &yield) {
+            mpc_flagmult(tio, yield, v[j], flag[j], val - path[j]);
+        }
+        );
+    }
+
+    run_coroutines(tio, coroutines);
+
+    #ifdef HEAP_VERBOSE
+    std::cout << "\n\n=================Before===========\n\n";
+    auto path_rec_before = P.reconstruct();
+    for (size_t j = 0; j < height; ++j) {
+        std::cout << j << " --->: " << path_rec_before[j].share() << std::endl;
+    }
+    std::cout << "\n\n============================\n\n";
+    #endif
+
+    coroutines.clear();
+
+    for (size_t j = 0; j < height; ++j) {
+        coroutines.emplace_back( [&tio, &v, &w, &P, j](yield_t &yield) {
+            auto Pcoro = P.context(yield);
+            Pcoro[j] += (w[j] + v[j]);
+        });
+    }
+    run_coroutines(tio, coroutines);
+
+    #ifdef HEAP_VERBOSE
+    std::cout << "\n\n=================After===========\n\n";
+    auto path_rec_after = P.reconstruct();
+    for (size_t j = 0; j < height; ++j) {
+        std::cout << j << " --->: " << path_rec_after[j].share() << std::endl;
+    }
+    std::cout << "\n\n============================\n\n";
+    #endif
+}
+
+// The Basic Insert Protocol
+// Takes in the additive share of the value to be inserted
+// And adds the value into the heap while keeping the heap property intact
+// The insert protocol works as follows:
+// Step 1: Add a new element to the last entry of the array.
+// This new element becomes a leaf in the heap.
+// Step 2: Starting from the leaf (the newly added element), compare it with its parent.
+// Perform 1 oblivious comparison to determine if the parent is greater than the child.
+// Step 3: If the parent is greater than the child, swap them obliviously to maintain the heap property.
+// This swap ensures that the parent is always greater than both its children.
+// Step 4: Continue moving up the tree by repeating steps 2 and 3 until we reach the root.
+// This process ensures that the newly inserted element is correctly positioned in the heap.
+// The total cost of the insert protocol is log(num_items) oblivious comparisons and log(num_items) oblivious swaps.
+// This protocol follows the approach described as Protocol 3 in the paper "PRAC: Round-Efficient 3-Party MPC for Dynamic Data Structures."
+void MinHeap::insert(MPCTIO tio, yield_t & yield, RegAS val) {
+
+    auto HeapArray = oram.flat(tio, yield);
+    num_items++;
+    size_t childindex = num_items;
+    size_t parentindex = childindex / 2;
+
+    #ifdef HEAP_VERBOSE
+    std::cout << "childindex = " << childindex << std::endl;
+    std::cout << "parentindex = " << parentindex << std::endl;
+    #endif
+
+    HeapArray[num_items] = val;
+
+    while (parentindex > 0) {
+        RegAS sharechild = HeapArray[childindex];
+        RegAS shareparent = HeapArray[parentindex];
+        CDPF cdpf = tio.cdpf(yield);
+        RegAS diff = sharechild - shareparent;
+        auto[lt, eq, gt] = cdpf.compare(tio, yield, diff, tio.aes_ops());
+        mpc_oswap(tio, yield, sharechild, shareparent, lt);
+        HeapArray[childindex]  = sharechild;
+        HeapArray[parentindex] = shareparent;
+        childindex = parentindex;
+        parentindex = parentindex / 2;
+    }
+}
+
+
+
+// Note: This function is intended for testing purposes only.
+// The purpose of this function is to verify that the heap property is satisfied.
+// The function checks if the heap property holds for the given heap structure. It ensures that for each node in the heap, the value of the parent node is less than or equal to the values of its children.
+// By calling this function during debugging, you can validate the integrity of the heap structure and ensure that the heap property is maintained correctly.
+// It is important to note that this function is not meant for production use and should be used solely for testing purposes.
+void MinHeap::verify_heap_property(MPCTIO tio, yield_t & yield) {
+
+    #ifdef HEAP_VERBOSE
+    std::cout << std::endl << std::endl << "verify_heap_property is being called " << std::endl;
+    #endif
+
+    auto HeapArray = oram.flat(tio, yield);
+
+    auto heapreconstruction = HeapArray.reconstruct();
+
+    #ifdef HEAP_VERBOSE
+     for (size_t j = 1; j < num_items + 1; ++j) {
+            if(tio.player() < 2) std::cout << j << " -----> heapreconstruction[" << j << "] = " << heapreconstruction[j].share() << std::endl;
+        }
+    #endif
+
+    for (size_t j = 2; j <= num_items; ++j) {
+        if (heapreconstruction[j/2].share() > heapreconstruction[j].share()) {
+            std::cout << "heap property failure\n\n";
+            std::cout << "j = " << j << std::endl;
+            std::cout << heapreconstruction[j].share() << std::endl;
+            std::cout << "j/2 = " << j/2 << std::endl;
+            std::cout << heapreconstruction[j/2].share() << std::endl;
+        }
+
+        assert(heapreconstruction[j/2].share() <= heapreconstruction[j].share());
+    }
+
+}
+
+
+
+#ifdef HEAP_DEBUG
+// Note: This function is intended for debugging purposes only.
+// The purpose of this function is to assert the fact that the reconstruction values of both the left child and right child are greater than or equal to the reconstruction value of the parent.
+// The function performs an assertion check to validate this condition. If the condition is not satisfied, an assertion error will be triggered.
+// This function is useful for verifying the correctness of reconstruction values during debugging and ensuring the integrity of the heap structure.
+// It is important to note that this function is not meant for production use and should be used solely for debugging purposes.
+static void verify_parent_children_heaps(MPCTIO tio, yield_t & yield, RegAS parent, RegAS leftchild, RegAS rightchild) {
+    uint64_t parent_reconstruction = mpc_reconstruct(tio, yield, parent);
+    uint64_t leftchild_reconstruction = mpc_reconstruct(tio, yield, leftchild);
+    uint64_t rightchild_reconstruction = mpc_reconstruct(tio, yield, rightchild);
+    std::cout << "parent_reconstruction = " << parent_reconstruction << std::endl;
+    std::cout << "leftchild_reconstruction = " << leftchild_reconstruction << std::endl;
+    std::cout << "rightchild_reconstruction = " << rightchild_reconstruction << std::endl << std::endl << std::endl;
+    assert(parent_reconstruction <= leftchild_reconstruction);
+    assert(parent_reconstruction <= rightchild_reconstruction);
+}
+#endif
+
+/*
+Protocol 6 from PRAC: Round-Efficient 3-Party MPC for Dynamic Data Structures
+Takes in as an input the XOR shares of the index at which the heap property has to be restored
+Returns the XOR shares of the index of the smaller child
+
+Basic restore heap property has the following functionality:
+
+Before restoring heap property:                      z
+                                                    /  \
+                                                   y    x
+
+After restoring heap property:        if(y < x AND z < y)       if(y < x AND z > y)        if(y > x AND z < x)           if(y > x AND z > x)
+
+                                                z                         y                        z                              x
+                                               /  \                      / \                      / \                            / \
+                                              y    x                    z   x                    y    x                         y   z
+
+
+The function is relying on the "unused" entries in the heap being MAXINT
+
+The protocol works as follows:
+
+Step 1: Compare the left and right children.
+Step 2: Compare the smaller child with the parent.
+If the smaller child is smaller than the parent, swap the smaller child with the root.
+
+The protocol requires three DORAM (Distributed Oblivious RAM) reads performed in parallel:
+- Read the parent, left child, and right child.
+
+Two comparisons are performed:
+a) Comparison between the left and right child.
+b) Comparison between the smaller child and the parent.
+
+Two MPC-selects are performed in parallel:
+- Computing the smaller child and its index using MPC-select operations.
+
+Next, the offsets by which the parent and children need to be updated are computed.
+Offset computation involves:
+- One flag-flag multiplication.
+- Two flag-word multiplications performed in parallel.
+
+Three DORAM update operations are performed in parallel:
+- Update the parent, left child, and right child.
+
+The function returns the XOR-share of the smaller child's index.
+
+The total cost of the protocol includes:
+- 3 DORAM reads (performed in parallel).
+- 2 comparisons.
+- 2 MPC-selects (performed in parallel).
+- 1 flag-flag multiplication.
+- 2 flag-word multiplications (performed in parallel).
+- 3 DORAM updates (performed in parallel).
+*/
+RegXS MinHeap::restore_heap_property(MPCIO & mpcio, MPCTIO tio, yield_t & yield, RegXS index) {
+    RegAS smallest;
+    auto HeapArray = oram.flat(tio, yield);
+    RegXS leftchildindex = index;
+    leftchildindex = index << 1;
+    RegXS rightchildindex;
+    rightchildindex.xshare = leftchildindex.xshare ^ (!tio.player());
+
+    RegAS parent, leftchild, rightchild;
+
+    #ifdef HEAP_VERBOSE
+    auto index_reconstruction = mpc_reconstruct(tio, yield, index);
+    auto leftchildindex_reconstruction = mpc_reconstruct(tio, yield, leftchildindex);
+    auto rightchildindex_reconstruction = mpc_reconstruct(tio, yield, rightchildindex);
+    std::cout << "index_reconstruction               =  " << index_reconstruction << std::endl;
+    std::cout << "leftchildindex_reconstruction      =  " << leftchildindex_reconstruction << std::endl;
+    std::cout << "rightchildindex_reconstruction     =  " << rightchildindex_reconstruction << std::endl;
+    #endif
+
+   run_coroutines(tio, [&tio, &parent, &HeapArray, index](yield_t &yield) {
+                  auto Acoro = HeapArray.context(yield);
+                  parent = Acoro[index];},
+                  [&tio, &HeapArray, &leftchild, leftchildindex](yield_t &yield) {
+                  auto Acoro = HeapArray.context(yield);
+                  leftchild  = Acoro[leftchildindex];},
+                  [&tio, &rightchild, &HeapArray, rightchildindex](yield_t &yield) {
+                  auto Acoro = HeapArray.context(yield);
+                  rightchild = Acoro[rightchildindex];});
+
+    CDPF cdpf = tio.cdpf(yield);
+    auto[lt_c, eq_c, gt_c] = cdpf.compare(tio, yield, leftchild - rightchild, tio.aes_ops());
+
+    RegXS smallerindex;
+    RegAS smallerchild;
+
+    run_coroutines(tio, [&tio, &smallerindex, lt_c, rightchildindex, leftchildindex](yield_t &yield) {
+        mpc_select(tio, yield, smallerindex, lt_c, rightchildindex, leftchildindex);
+    },  [&tio, &smallerchild, lt_c, rightchild, leftchild](yield_t &yield) {
+        mpc_select(tio, yield, smallerchild, lt_c, rightchild, leftchild);
+    }
+    );
+
+    CDPF cdpf0 = tio.cdpf(yield);
+    auto[lt_p, eq_p, gt_p] = cdpf0.compare(tio, yield, smallerchild - parent, tio.aes_ops());
+
+    RegBS ltlt1;
+
+    mpc_and(tio, yield, ltlt1, lt_c, lt_p);
+
+    RegAS update_index_by, update_leftindex_by;
+
+    run_coroutines(tio, [&tio, &update_leftindex_by, ltlt1, parent, leftchild](yield_t &yield) {
+        mpc_flagmult(tio, yield, update_leftindex_by, ltlt1, parent - leftchild);
+    },  [&tio, &update_index_by, lt_p, parent, smallerchild](yield_t &yield) {
+        mpc_flagmult(tio, yield, update_index_by, lt_p, smallerchild - parent);
+    }
+    );
+
+
+    run_coroutines(tio, [&tio, &HeapArray, index, update_index_by](yield_t &yield) {
+                   auto Acoro = HeapArray.context(yield);
+                   Acoro[index] += update_index_by;},
+                   [&tio, &HeapArray, leftchildindex, update_leftindex_by](yield_t &yield) {
+                   auto Acoro = HeapArray.context(yield);
+                   Acoro[leftchildindex] += update_leftindex_by;},
+                   [&tio, &HeapArray, rightchildindex, update_index_by, update_leftindex_by](yield_t &yield) {
+                   auto Acoro = HeapArray.context(yield);
+                   Acoro[rightchildindex] += -(update_index_by + update_leftindex_by);});
+
+    #ifdef HEAP_DEBUG
+            verify_parent_children_heaps(tio, yield, HeapArray[index], HeapArray[leftchildindex] , HeapArray[rightchildindex]);
+    #endif
+
+    return smallerindex;
+}
+
+// This Protocol 7 is derived from PRAC: Round-Efficient 3-Party MPC for Dynamic Data Structures
+// Takes in as an input the XOR shares of the index at which
+// the heap property has to be restored
+// Returns the XOR shares of the index of the smaller child and
+// comparison between the left and right child
+// This protocol represents an optimized version of restoring the heap property
+// The key difference between the optimized and basic versions is that the optimized version utilizes a wide DPF (Distributed Point Function) for reads and writes
+// In addition to restoring the heap property, the function also returns
+// shares of the index of the smaller child, and the result of the
+// comparison (leftchild > rightchild)
+// The (leftchild > rightchild) comparison is utilized in the extract_min operation to increment the oblivindx by a certain value
+// The function restores the heap property at node index
+// The parameter layer is the height at which the node at index lies
+// The optimized version achieves improved efficiency by leveraging wide DPF operations for read and write operations
+// Returns (a) the XOR shares of the index of the smaller child and
+// (b) the shared bit (leftchild >= rightchild); the caller feeds (b)
+// into OblivIndex::incr to steer the next level's oblivious accesses.
+std::pair<RegXS, RegBS> MinHeap::restore_heap_property_optimized(MPCTIO tio, yield_t & yield, RegXS index, size_t layer, typename Duoram < RegAS > ::template OblivIndex < RegXS, 3 > oidx) {
+
+    auto HeapArray = oram.flat(tio, yield);
+
+    // Implicit binary-tree layout: the left child lives at 2 * index
+    RegXS leftchildindex = index;
+    leftchildindex = index << 1;
+
+    // Right child is at 2 * index + 1: exactly one party XORs a 1 into
+    // its share, so the reconstructed value is leftchildindex ^ 1
+    RegXS rightchildindex;
+    rightchildindex.xshare = leftchildindex.xshare ^ (!tio.player());
+
+    // P views the parent's layer (entries starting at 2^layer) and C
+    // the children's layer (starting at 2^(layer+1)); L and R stride
+    // through C picking the even (left) and odd (right) children
+    typename Duoram < RegAS > ::Flat P(HeapArray, tio, yield, 1 << layer, 1 << layer);
+    typename Duoram < RegAS > ::Flat C(HeapArray, tio, yield, 2 << layer, 2 << layer);
+    typename Duoram < RegAS > ::Stride L(C, tio, yield, 0, 2);
+    typename Duoram < RegAS > ::Stride R(C, tio, yield, 1, 2);
+
+    RegAS parent, leftchild, rightchild;
+
+    // Read parent, left child and right child in parallel, all driven
+    // by the same oblivious index (the wide-DPF optimization)
+    run_coroutines(tio, [&tio, &parent, &P, &oidx](yield_t &yield) {
+                    auto Pcoro = P.context(yield);
+                    parent = Pcoro[oidx]; },
+                    [&tio, &L, &leftchild, &oidx](yield_t &yield) {
+                    auto Lcoro = L.context(yield);
+                    leftchild  = Lcoro[oidx];},
+                    [&tio, &R, &rightchild, &oidx](yield_t &yield) {
+                    auto Rcoro = R.context(yield);
+                    rightchild = Rcoro[oidx];
+                  });
+
+    CDPF cdpf = tio.cdpf(yield);
+
+    // Step 1: compare the two children
+    auto[lt_c, eq_c, gt_c] = cdpf.compare(tio, yield, leftchild - rightchild, tio.aes_ops());
+
+    RegXS smallerindex;
+    RegAS smallerchild;
+
+    // Select (in parallel) the index and the value of the smaller child
+    run_coroutines(tio, [&tio, &smallerindex, lt_c, rightchildindex, leftchildindex](yield_t &yield) {
+        mpc_select(tio, yield, smallerindex, lt_c, rightchildindex, leftchildindex);
+    },  [&tio, &smallerchild, lt_c, rightchild, leftchild](yield_t &yield) {
+        mpc_select(tio, yield, smallerchild, lt_c, rightchild, leftchild);
+    }
+    );
+
+    // Step 2: compare the smaller child with the parent
+    CDPF cdpf0 = tio.cdpf(yield);
+    auto[lt_p, eq_p, gt_p] = cdpf0.compare(tio, yield, smallerchild - parent, tio.aes_ops());
+
+    RegBS ltlt1;
+
+    // ltlt1 = (leftchild < rightchild) AND (smallerchild < parent),
+    // i.e. the case where the LEFT child must swap with the parent
+    mpc_and(tio, yield, ltlt1, lt_c, lt_p);
+
+    RegAS update_index_by, update_leftindex_by;
+
+
+    // Additive offsets realizing the conditional swap; each offset is
+    // 0 when its flag is 0, thanks to the flag-word multiplications
+    run_coroutines(tio, [&tio, &update_leftindex_by, ltlt1, parent, leftchild](yield_t &yield) {
+    mpc_flagmult(tio, yield, update_leftindex_by, ltlt1, parent - leftchild);
+    },  [&tio, &update_index_by, lt_p, parent, smallerchild](yield_t &yield) {
+        mpc_flagmult(tio, yield, update_index_by, lt_p, smallerchild - parent);
+    }
+    );
+
+    // Apply the offsets in parallel: the parent and left child take
+    // their updates and the right child absorbs the negated sum, so
+    // the net effect is exactly a swap (or a no-op)
+    run_coroutines(tio, [&tio, &P, &oidx, update_index_by](yield_t &yield) {
+                    auto Pcoro = P.context(yield);
+                    Pcoro[oidx] += update_index_by;},
+                    [&tio, &L,  &oidx, update_leftindex_by](yield_t &yield) {
+                    auto Lcoro = L.context(yield);
+                    Lcoro[oidx] += update_leftindex_by;},
+                    [&tio, &R,  &oidx, update_leftindex_by, update_index_by](yield_t &yield) {
+                    auto Rcoro = R.context(yield);
+                    Rcoro[oidx] += -(update_leftindex_by + update_index_by);
+                    });
+
+    // gteq = (leftchild >= rightchild): returned so the caller can
+    // advance the oblivious index toward the smaller child's subtree
+    auto gteq = gt_c ^ eq_c;
+
+    return {smallerindex, gteq};
+}
+
+
+// Intializes the heap array with 0x7fffffffffffff
+void MinHeap::init(MPCTIO tio, yield_t & yield) {
+    auto HeapArray = oram.flat(tio, yield);
+    HeapArray.init(0x7fffffffffffff);
+}
+
+
+// This function simply inits a heap with values 100,200,...,100*n
+// We use this function only to set up our heap
+// to do timing experiments on insert and extractmins
+void MinHeap::init(MPCTIO tio, yield_t & yield, size_t n) {
+    auto HeapArray = oram.flat(tio, yield);
+
+    num_items = n;
+    HeapArray.explicitonly(true);
+    for (size_t j = 1; j <= n; ++j) {
+        RegAS v;
+        v.ashare = (j * tio.player()) * 100;
+        HeapArray[j] = v;
+    }
+    HeapArray.explicitonly(false);
+}
+
+
+// Note: This function is intended for debugging purposes only.
+// The purpose of this function is to reconstruct the heap and print its contents.
+// The function performs the necessary operations to reconstruct the heap, ensuring that the heap property is satisfied. It then prints the contents of the reconstructed heap.
+// This function is useful for debugging and inspecting the state of the heap at a particular point in the program execution.
+// It is important to note that this function is not meant for production use and should be used solely for debugging purposes.
+void MinHeap::print_heap(MPCTIO tio, yield_t & yield) {
+    auto HeapArray = oram.flat(tio, yield);
+    auto Pjreconstruction = HeapArray.reconstruct();
+    for (size_t j = 1; j <= num_items; ++j) {
+        if(2 * j <= num_items) {
+            std::cout << j << "-->> HeapArray[" << j << "] = " <<   Pjreconstruction[j].share() << ", children are: " << Pjreconstruction[2 * j].share() << " and " << Pjreconstruction[2 * j + 1].share() <<  std::endl;
+        } else {
+            std::cout << j << "-->> HeapArray[" << j << "] = " << std::dec << Pjreconstruction[j].share() << " is a LEAF " <<  std::endl;
+        }
+    }
+}
+
+
+/*
+Restore the heap property at an explicit index (typically the root).
+the only reason this function exists is because at the root level
+the indices to read (the root and its two children) are explicit and not shared
+Restore heap property at an index in clear
+Takes in as an input the index (in clear) at which
+the heap property has to be restored
+
+                root
+                /  \
+       leftchild    rightchild
+
+After restoring heap property:
+if(leftchild < rightchild AND root < leftchild)       if(leftchild < rightchild AND root > leftchild)     if(leftchild > rightchild AND root < rightchild)     if(leftchild > rightchild AND root > rightchild)
+
+
+                 root                                                        leftchild                                         root                                            rightchild
+               /     \                                                        /   \                                           /    \                                           /      \
+         leftchild    rightchild                                           root   rightchild                          leftchild    rightchild                            leftchild    root
+
+
+The restore_heap_property_at_explicit_index protocol works as follows:
+
+Step 1: Compare the left and right children.
+Step 2: Compare the smaller child with the root.
+If the smaller child is smaller than the root, swap the smaller child with the root.
+Unlike the restore_heap_property protocol, restore_heap_property_at_explicit_index begins with three explicit-index (non-DORAM) read operations:
+- Read the parent, left child, and right child.
+Two comparisons are performed:
+a) Comparison between the left and right child.
+b) Comparison between the smaller child and the parent.
+The above comparisons have to be sequential because we need to find the smallerindex and smallerchild,
+which is dependent on the first comparison
+Next, the offsets by which the parent and children need to be updated are computed.
+Offset computation involves:
+- One flag-flag multiplication.
+- Two flag-word multiplications.
+Three explicit-index (non-DORAM) update operations are required (performed in parallel) to update the parent, left child, and right child.
+In total, this protocol requires:
+- 2 comparisons.
+- 1 flag-flag multiplication.
+- 2 flag-word multiplications.
+- 3 explicit-index (non-DORAM) reads and updates.
+The function returns a pair of a) XOR-share of the index of the smaller child and b) the comparison between left and right children
+*/
+// NB: the default argument (index = 1, the root) is supplied here at
+// the definition rather than in the header declaration, so it is only
+// usable within this translation unit (extract_min relies on it)
+std::pair<RegXS, RegBS> MinHeap::restore_heap_property_at_explicit_index(MPCTIO tio, yield_t & yield, size_t index = 1) {
+    auto HeapArray = oram.flat(tio, yield);
+    // Explicit-index (non-DORAM) reads of the parent and its children
+    RegAS parent = HeapArray[index];
+    RegAS leftchild = HeapArray[2 * index];
+    RegAS rightchild = HeapArray[2 * index + 1];
+    // Step 1: compare the two children
+    CDPF cdpf = tio.cdpf(yield);
+    auto[lt, eq, gt] = cdpf.compare(tio, yield, leftchild - rightchild, tio.aes_ops());
+
+    // gteq = (leftchild >= rightchild)
+    auto gteq = gt ^ eq;
+    RegAS smallerchild;
+    mpc_select(tio, yield, smallerchild, lt, rightchild, leftchild);
+
+    // The children's indices are public here, so the XOR shares of the
+    // smaller child's index can be formed locally from the shared flags
+    uint64_t leftchildindex = (2 * index);
+    uint64_t rightchildindex = (2 * index) + 1;
+    RegXS smallerindex = (RegXS(lt) & leftchildindex) ^ (RegXS(gteq) & rightchildindex);
+    // Step 2: compare the smaller child with the parent
+    CDPF cdpf0 = tio.cdpf(yield);
+    auto[lt1, eq1, gt1] = cdpf0.compare(tio, yield, smallerchild - parent, tio.aes_ops());
+    RegBS ltlt1;
+
+    // ltlt1 = (leftchild < rightchild) AND (smallerchild < parent),
+    // i.e. the case where the LEFT child must swap with the parent
+    mpc_and(tio, yield, ltlt1, lt, lt1);
+    RegAS update_index_by, update_leftindex_by;
+
+    // Additive offsets realizing the conditional swap: one flag-flag
+    // multiplication (above) and two flag-word multiplications
+    run_coroutines(tio, [&tio, &update_leftindex_by, ltlt1, parent, leftchild](yield_t &yield) {
+        mpc_flagmult(tio, yield, update_leftindex_by, ltlt1, parent - leftchild);
+    }, [&tio, &update_index_by, lt1, parent, smallerchild](yield_t &yield) {
+        mpc_flagmult(tio, yield, update_index_by, lt1, smallerchild - parent);});
+
+    // Apply the three explicit-index updates in parallel; the right
+    // child absorbs the negated sum of the other two offsets, so the
+    // net effect is exactly a swap (or a no-op)
+    run_coroutines(tio,
+        [&tio, &HeapArray, &update_index_by, index](yield_t &yield) {
+            auto HeapArraycoro = HeapArray.context(yield);
+            HeapArraycoro[index] += update_index_by;
+        },
+        [&tio, &HeapArray, &update_leftindex_by, leftchildindex](yield_t &yield) {
+            auto HeapArraycoro = HeapArray.context(yield);
+            HeapArraycoro[leftchildindex] += update_leftindex_by;
+        },
+        [&tio, &HeapArray, &update_index_by, &update_leftindex_by, rightchildindex](yield_t &yield) {
+            auto HeapArraycoro = HeapArray.context(yield);
+            HeapArraycoro[rightchildindex] += -(update_index_by + update_leftindex_by);
+        });
+
+    #ifdef HEAP_VERBOSE
+    RegAS new_parent = HeapArray[index];
+    RegAS new_left   = HeapArray[leftchildindex];
+    RegAS new_right  = HeapArray[rightchildindex];
+    uint64_t parent_R  = mpc_reconstruct(tio, yield, new_parent);
+    uint64_t left_R    = mpc_reconstruct(tio, yield, new_left);
+    uint64_t right_R   = mpc_reconstruct(tio, yield, new_right);
+    std::cout << "parent_R = " << parent_R << std::endl;
+    std::cout << "left_R = " << left_R << std::endl;
+    std::cout << "right_R = " << right_R << std::endl;
+    #endif
+
+    #ifdef HEAP_DEBUG
+    verify_parent_children_heaps(tio, yield, HeapArray[index], HeapArray[leftchildindex] , HeapArray[rightchildindex]);
+    #endif
+
+    return {smallerindex, gteq};
+}
+
+
+// This is Protocol 5 from PRAC: Round-Efficient 3-Party MPC for Dynamic Data Structures
+// The extractmin protocol returns the minimum element (the root), removes it
+// and restores the heap property
+// The function extract_min cannot be called on an empty heap
+// Like in the paper, there is only one version of extract_min
+// and takes in a boolean parameter to decide if the basic or the optimized version needs to be run
+// the optimized version calls the optimized restore_heap_property with everything else remaining the same
+// The extractmin algorithm removes the root and replaces it with last leaf node
+// After extracting the minimum element from the heap, the heap property is temporarily violated.
+// To restore the heap property, we begin at the root layer.
+// Step 1: Swap the root with the smaller child if the smaller child is less than the root.
+// This step is performed by the function restore_heap_property_at_explicit_index.
+// Step 2: Proceed down the tree along the path of the smaller child.
+// Repeat the process of swapping the parent with the smaller child if the parent is greater than the smaller child.
+// After the swap, make the smaller child the new parent.
+// The choice of whether to use restore_heap_property or restore_heap_property_optimized
+// depends on whether it is a basic or optimized extraction of the minimum element.
+// These functions ensure that the heap property is maintained throughout the tree.
+RegAS MinHeap::extract_min(MPCIO & mpcio, MPCTIO tio, yield_t & yield, int is_optimized) {
+
+    size_t height = std::log2(num_items);
+    RegAS minval;
+    auto HeapArray = oram.flat(tio, yield);
+    // The minimum is the root (index 1); save it to return later
+    minval = HeapArray[1];
+    // Move the last leaf into the root, then overwrite the vacated
+    // slot with the "infinity" sentinel (only one party holds the
+    // sentinel in its share, so the shares reconstruct to it)
+    HeapArray[1] = RegAS(HeapArray[num_items]);
+    RegAS v;
+    v.ashare = 0x7fffffffffffff * !tio.player();
+    HeapArray[num_items] = v;
+    num_items--;
+    // Fix the root level first: its indices are public, so the cheaper
+    // explicit-index variant can be used there
+    auto outroot = restore_heap_property_at_explicit_index(tio, yield);
+    RegXS smaller = outroot.first;
+
+    if(is_optimized > 0) {
+        // Optimized variant: walk down along the smaller child's path,
+        // maintaining an oblivious index advanced by each level's
+        // left-vs-right comparison bit
+        typename Duoram < RegAS > ::template OblivIndex < RegXS, 3 > oidx(tio, yield, height);
+        oidx.incr(outroot.second);
+
+        for (size_t i = 0; i < height-1; ++i) {
+            auto out = restore_heap_property_optimized(tio, yield, smaller, i + 1,  oidx);
+            smaller = out.first;
+            oidx.incr(out.second);
+        }
+    }
+
+    if(is_optimized == 0) {
+        // Basic variant: one secret-shared-index restore per level
+        for (size_t i = 0; i < height - 1; ++i) {
+            smaller = restore_heap_property(mpcio, tio, yield, smaller);
+        }
+    }
+
+    return minval;
+}
+
+
+
+void Heap(MPCIO & mpcio,  const PRACOptions & opts, char ** args) {
+
+
+    MPCTIO tio(mpcio, 0, opts.num_threads);
+
+    int nargs = 0;
+
+    while (args[nargs] != nullptr) {
+        ++nargs;
+    }
+
+    int maxdepth = 0;
+    int heapdepth = 0;
+    size_t n_inserts = 0;
+    size_t n_extracts = 0;
+    int is_optimized = 0;
+    int run_sanity = 0;
+
+    for (int i = 0; i < nargs; i += 2) {
+        std::string option = args[i];
+        if (option == "-m" && i + 1 < nargs) {
+            maxdepth = std::atoi(args[i + 1]);
+        } else if (option == "-d" && i + 1 < nargs) {
+            heapdepth = std::atoi(args[i + 1]);
+        } else if (option == "-i" && i + 1 < nargs) {
+            n_inserts = std::atoi(args[i + 1]);
+        } else if (option == "-e" && i + 1 < nargs) {
+            n_extracts = std::atoi(args[i + 1]);
+        } else if (option == "-opt" && i + 1 < nargs) {
+            is_optimized = std::atoi(args[i + 1]);
+        } else if (option == "-s" && i + 1 < nargs) {
+            run_sanity = std::atoi(args[i + 1]);
+        }
+    }
+
+    run_coroutines(tio, [ & tio, maxdepth, heapdepth, n_inserts, n_extracts, is_optimized, run_sanity, &mpcio](yield_t & yield) {
+        size_t size = size_t(1) << maxdepth;
+        MinHeap tree(tio.player(), size);
+        tree.init(tio, yield);
+        // This form of init with a third parameter of n sets the heap
+        // to contain 100, 200, 300, ..., 100*n.
+        tree.init(tio, yield, (size_t(1) << heapdepth) - 1);
+        std::cout << "\n===== Init Stats =====\n";
+        tio.sync_lamport();
+        mpcio.dump_stats(std::cout);
+        mpcio.reset_stats();
+        tio.reset_lamport();
+        for (size_t j = 0; j < n_inserts; ++j) {
+
+            RegAS inserted_val;
+            inserted_val.randomize(8);
+
+            #ifdef HEAP_VERBOSE
+            inserted_val.ashare = inserted_val.ashare;
+            uint64_t inserted_val_rec = mpc_reconstruct(tio, yield, inserted_val);
+            std::cout << "inserted_val_rec = " << inserted_val_rec << std::endl << std::endl;
+            #endif
+
+            if(is_optimized > 0)  tree.insert_optimized(tio, yield, inserted_val);
+            if(is_optimized == 0) tree.insert(tio, yield, inserted_val);
+        }
+
+        std::cout << "\n===== Insert Stats =====\n";
+        tio.sync_lamport();
+        mpcio.dump_stats(std::cout);
+
+
+        if(run_sanity == 1 && n_inserts != 0) tree.verify_heap_property(tio, yield);
+
+
+        mpcio.reset_stats();
+        tio.reset_lamport();
+
+        #ifdef HEAP_VERBOSE
+        tree.print_heap(tio, yield);
+        #endif
+
+        bool have_lastextract = false;
+        uint64_t lastextract = 0;
+
+        for (size_t j = 0; j < n_extracts; ++j) {
+
+            if(run_sanity == 1) {
+                RegAS minval = tree.extract_min(mpcio, tio, yield, is_optimized);
+                uint64_t minval_reconstruction = mpc_reconstruct(tio, yield, minval);
+                std::cout << "minval_reconstruction = " << minval_reconstruction << std::endl;
+                if (have_lastextract) {
+                    assert(minval_reconstruction >= lastextract);
+                }
+                lastextract = minval_reconstruction;
+                have_lastextract = true;
+            } else {
+                tree.extract_min(mpcio, tio, yield, is_optimized);
+            }
+
+            if (run_sanity == 1) {
+                tree.verify_heap_property(tio, yield);
+            }
+
+            #ifdef HEAP_VERBOSE
+            tree.print_heap(tio, yield);
+            #endif
+       }
+
+       std::cout << "\n===== Extract Min Stats =====\n";
+       tio.sync_lamport();
+       mpcio.dump_stats(std::cout);
+
+       #ifdef HEAP_VERBOSE
+       tree.print_heap(tio, yield);
+       #endif
+
+
+       if(run_sanity == 1 && n_extracts != 0) tree.verify_heap_property(tio, yield);
+
+    }
+    );
+}

+ 74 - 0
heap.hpp

@@ -0,0 +1,74 @@
+#ifndef __HEAP_HPP__
+#define __HEAP_HPP__
+
+#include "types.hpp"
+#include "mpcio.hpp"
+#include "coroutine.hpp"
+#include "options.hpp"
+#include "mpcops.hpp"
+
+// A secret-shared min-heap of additively shared values, laid out as an
+// implicit binary tree (root at index 1; node i has children 2i and
+// 2i+1) inside a Duoram.
+class MinHeap {
+private:
+    // Backing oblivious RAM holding the heap array
+    Duoram < RegAS > oram;
+    // NOTE(review): MAX_SIZE is never assigned in this header -- confirm
+    // it is set (or remove it) in the implementation
+    size_t MAX_SIZE;
+    // Number of items currently stored in the heap
+    size_t num_items;
+
+    // Basic restore heap property at a secret shared index
+    // Takes in as an input the XOR shares of the index at which
+    // the heap property has to be restored
+    // Returns the XOR shares of the index of the smaller child
+    RegXS restore_heap_property(MPCIO &mpcio, MPCTIO tio, yield_t & yield, RegXS index);
+
+    // Optimized restore heap property at a secret shared index
+    // Takes in as an input the XOR shares of the index at which
+    // the heap property has to be restored
+    // Returns the XOR shares of the index of the smaller child and
+    // comparison between the left and right child
+    std::pair<RegXS, RegBS> restore_heap_property_optimized(MPCTIO tio, yield_t & yield, RegXS index, size_t layer, typename Duoram<RegAS>::template OblivIndex<RegXS,3> oidx);
+
+    // Restore heap property at an index in clear
+    // Takes in as an input the index (in clear) at which
+    // the heap property has to be restored
+    // Returns the XOR shares of the index of the smaller child and
+    // comparison between the left and right child
+    std::pair<RegXS, RegBS> restore_heap_property_at_explicit_index(MPCTIO tio, yield_t & yield,  size_t index);
+
+public:
+    MinHeap(int player_num, size_t size) : oram(player_num, size) {};
+
+    // The extractmin protocol returns the minimum element (the root), removes it
+    // and restores the heap property
+    // and takes in a boolean parameter to decide if the basic or the optimized version needs to be run
+    // return value is the share of the minimum value (the root)
+    RegAS extract_min(MPCIO &mpcio, MPCTIO tio, yield_t & yield, int is_optimized);
+
+    // Initializes the heap array with 0x7fffffffffffff
+    void init(MPCTIO tio, yield_t & yield);
+
+    // This function simply inits a heap with values 100,200,...,100*n
+    // We use this function only to set up our heap
+    // to do timing experiments on insert and extractmins
+    void init(MPCTIO tio, yield_t & yield, size_t n);
+
+    // The Basic Insert Protocol
+    // Takes in the additive share of the value to be inserted
+    // And adds the value into the heap while keeping the heap property intact
+    void insert(MPCTIO tio, yield_t & yield, RegAS val);
+
+    // The Optimized Insert Protocol
+    // Takes in the additive share of the value to be inserted
+    // And adds the value into the heap while keeping the heap property intact
+    void insert_optimized(MPCTIO tio, yield_t & yield, RegAS val);
+
+    // Note: This function is intended for testing purposes only.
+    // The purpose of this function is to verify that the heap property is satisfied.
+    void verify_heap_property(MPCTIO tio, yield_t & yield);
+
+
+    // Prints the current (reconstructed) heap; for debugging only
+    void print_heap(MPCTIO tio, yield_t & yield);
+};
+
+void Heap(MPCIO &mpcio, const PRACOptions &opts, char **args);
+
+#endif

+ 76 - 15
online.cpp

@@ -6,6 +6,7 @@
 #include "duoram.hpp"
 #include "cdpf.hpp"
 #include "cell.hpp"
+#include "heap.hpp"
 #include "shapes.hpp"
 #include "bst.hpp"
 #include "avl.hpp"
@@ -892,6 +893,42 @@ static void duoram(MPCIO &mpcio,
     });
 }
 
+// This measures just sequential (dependent) reads
+// T is RegAS or RegXS for additive or XOR shared database respectively
+template <typename T>
+static void read_test(MPCIO &mpcio,
+    const PRACOptions &opts, char **args)
+{
+    nbits_t depth = 6;
+    int items = 4;
+
+    if (*args) {
+        depth = atoi(*args);
+        ++args;
+    }
+    if (*args) {
+        items = atoi(*args);
+        ++args;
+    }
+
+    MPCTIO tio(mpcio, 0, opts.num_threads);
+    run_coroutines(tio, [&mpcio, &tio, depth, items] (yield_t &yield) {
+        size_t size = size_t(1)<<depth;
+        Duoram<T> oram(tio.player(), size);
+        auto A = oram.flat(tio, yield);
+
+        std::cout << "\n===== SEQUENTIAL READS =====\n";
+        T totval;
+        for (int i=0;i<items;++i) {
+            RegXS idx;
+            idx.randomize(depth);
+            T val = A[idx];
+            totval += val;
+        }
+        printf("Total value read: %016lx\n", totval.share());
+    });
+}
+
 static void cdpf_test(MPCIO &mpcio,
     const PRACOptions &opts, char **args)
 {
@@ -1216,6 +1253,14 @@ static void bsearch_test(MPCIO &mpcio,
     arc4random_buf(&target, sizeof(target));
     target >>= 1;
     nbits_t depth=6;
+    bool is_presorted = true;
+
+    // Use a random array (which we explicitly sort) instead of a
+    // presorted array
+    if (*args && !strcmp(args[0], "-r")) {
+        is_presorted = false;
+        ++args;
+    }
 
     if (*args) {
         depth = atoi(*args);
@@ -1226,13 +1271,16 @@ static void bsearch_test(MPCIO &mpcio,
         len = atoi(*args);
         ++args;
     }
+    if (is_presorted) {
+        target %= (len << 16);
+    }
     if (*args) {
         target = strtoull(*args, NULL, 16);
         ++args;
     }
 
     MPCTIO tio(mpcio, 0, opts.num_threads);
-    run_coroutines(tio, [&tio, &mpcio, depth, len, target, basic] (yield_t &yield) {
+    run_coroutines(tio, [&tio, &mpcio, depth, len, target, basic, is_presorted] (yield_t &yield) {
         RegAS tshare;
         std::cout << "\n===== SETUP =====\n";
 
@@ -1254,28 +1302,31 @@ static void bsearch_test(MPCIO &mpcio,
         tio.sync_lamport();
         mpcio.dump_stats(std::cout);
 
-        std::cout << "\n===== SORT RANDOM DATABASE =====\n";
+        std::cout << "\n===== " << (is_presorted ? "CREATE" : "SORT RANDOM")
+            << " DATABASE =====\n";
         mpcio.reset_stats();
         tio.reset_lamport();
-        // Create a random database and sort it
-        // size_t &aes_ops = tio.aes_ops();
+        // If is_presorted is true, create a database of presorted
+        // values.  If is_presorted is false, create a database of
+        // random values and explicitly sort it.
         Duoram<RegAS> oram(tio.player(), len);
         auto A = oram.flat(tio, yield);
         A.explicitonly(true);
-        // Initialize the memory to random values in parallel
-        std::vector<coro_t> coroutines;
+        // Initialize the memory to sorted or random values, depending
+        // on the is_presorted flag
         for (address_t i=0; i<len; ++i) {
-            coroutines.emplace_back(
-                [&A, i](yield_t &yield) {
-                    auto Acoro = A.context(yield);
-                    RegAS v;
-                    v.randomize(62);
-                    Acoro[i] += v;
-                });
+            RegAS v;
+            if (!is_presorted) {
+                v.randomize(62);
+            } else {
+                v.ashare = (tio.player() * i) << 16;
+            }
+            A[i] = v;
         }
-        run_coroutines(yield, coroutines);
-        A.bitonic_sort(0, len);
         A.explicitonly(false);
+        if (!is_presorted) {
+            A.bitonic_sort(0, len);
+        }
 
         tio.sync_lamport();
         mpcio.dump_stats(std::cout);
@@ -1579,6 +1630,13 @@ void online_main(MPCIO &mpcio, const PRACOptions &opts, char **args)
         } else {
             duoram_test<RegAS>(mpcio, opts, args);
         }
+    } else if (!strcmp(*args, "read")) {
+        ++args;
+        if (opts.use_xor_db) {
+            read_test<RegXS>(mpcio, opts, args);
+        } else {
+            read_test<RegAS>(mpcio, opts, args);
+        }
     } else if (!strcmp(*args, "cdpftest")) {
         ++args;
         cdpf_test(mpcio, opts, args);
@@ -1626,6 +1684,9 @@ void online_main(MPCIO &mpcio, const PRACOptions &opts, char **args)
     } else if (!strcmp(*args, "avl_tests")) {
         ++args;
         avl_tests(mpcio, opts, args);
+    } else if (!strcmp(*args, "heap")) {
+        ++args;
+        Heap(mpcio, opts, args);
     } else {
         std::cerr << "Unknown mode " << *args << "\n";
     }

+ 82 - 0
repro/append-experiment-results.py

@@ -0,0 +1,82 @@
+import sys
+
# Parse a results data file into a list of row dicts.
#
# The file's first line is a header and is skipped; each following line
# has seven whitespace-separated columns:
#   DS OP is_optimized heapsize y pm y-err
# Returns one dict per row with 'y' and 'y_err' converted to floats
# (they get averaged later) and the other columns kept as strings.
# Blank or malformed lines are skipped rather than raising ValueError.
def extract_variables_from_file(file_path):
    variables_list = []

    with open(file_path, 'r') as file:
        file.readline()  # skip the header line

        for line in file:
            columns = line.strip().split()
            # Robustness fix: the original unconditionally unpacked the
            # columns and crashed on blank/short lines
            if len(columns) != 7:
                continue
            ds, op, is_optimized, heapsize, y, pm, y_err = columns

            variables_list.append({
                'DS': ds,
                'OP': op,
                'is_optimized': is_optimized,
                'heapsize': heapsize,
                'y': float(y),
                'pm': pm,
                'y_err': float(y_err)
            })

    return variables_list
+
# Merge one new measurement (y0) into the results file.
#
# If a row matching (DS, OP, is_optimized, heapsize) already exists,
# its 'y' becomes the average of the stored and new values and 'y_err'
# the distance from that average to y0; otherwise a new row is appended
# with y_err = 0.  The whole file is rewritten afterwards.
def update_data(file_path, ds, op, is_optimized, heapsize, y0):
    rows = extract_variables_from_file(file_path)

    # Locate an existing row for this exact experiment, if any
    match = next(
        (r for r in rows
         if r['DS'] == ds
         and r['OP'] == op
         and r['is_optimized'] == is_optimized
         and r['heapsize'] == heapsize),
        None)

    if match is not None:
        # Fold in the new measurement and record the spread around it
        match['y'] = (match['y'] + y0) / 2
        match['y_err'] = abs(match['y'] - y0)
    else:
        rows.append({
            'DS': ds,
            'OP': op,
            'is_optimized': is_optimized,
            'heapsize': heapsize,
            'y': y0,
            'pm': '±',
            'y_err': 0  # first observation for this row, so no spread yet
        })

    # Rewrite the file: header first, then one line per row
    with open(file_path, 'w') as file:
        file.write("DS OP is_optimized heapsize y pm y-err\n")

        for r in rows:
            file.write(f"{r['DS']} {r['OP']} {r['is_optimized']} {r['heapsize']} {r['y']} {r['pm']} {r['y_err']}\n")
+
# Validate the command-line arguments before unpacking them.  (The
# original check was commented out and also off by one: sys.argv[0] is
# the script name, so six real arguments means len(sys.argv) == 7.)
if len(sys.argv) != 7:
    print("Usage: python3 script.py <file_path> <DS> <OP> <is_optimized> <heapsize> <y0>")
    sys.exit(1)

# Get the input values from the command-line arguments
file_path = sys.argv[1]
ds = sys.argv[2]
op = sys.argv[3]
is_optimized = sys.argv[4]
heapsize = sys.argv[5]
y0 = float(sys.argv[6])  # convert y0 to a float

# Call the function to update the data based on input
update_data(file_path, ds, op, is_optimized, heapsize, y0)

+ 11 - 0
repro/experimental_data_bs_total

@@ -0,0 +1,11 @@
+DS OP is_optimized heapsize y pm y-err
+BS Search 0 16 6247.0 ± 16.8
+BS Search 1 16 6162.0 ± 2.8
+BS Search 0 18 7269.0 ± 27.0
+BS Search 1 18 6967.5 ± 1.0
+BS Search 0 20 9571.5 ± 70.5
+BS Search 1 20 8463.0 ± 151.5
+BS Search 0 22 15766.5 ± 236.0
+BS Search 1 22 12566.0 ± 291.5
+BS Search 0 24 36912.5 ± 611.8
+BS Search 1 24 22167.0 ± 25.6

+ 70 - 0
repro/extract_data.py

@@ -0,0 +1,70 @@
+import re
+import sys
+
# Pull heap size, optimization flag, and the insert / extract-min
# statistics out of a benchmark-run transcript.
#
# Returns a dict with:
#   heapsize      - string, from "heapsize: N"
#   is_optimized  - string, from "is_optimized: D"
#   insert_stats  - list of 5-tuples of strings:
#                   (messages, bytes, lamport, aes_ops, wallclock_ms)
#   extract_stats - same shape; empty list if the section is absent
def parse_stats(input_string):
    # Regular expressions to extract relevant stats
    heapsize_pattern = r'heapsize: (\d+)'
    insert_stats_pattern = r'===== Insert Stats =====\n([\s\S]*?)\n\n'
    extract_stats_pattern = r'===== Extract Min Stats =====\n([\s\S]*?)(?:\n\n|\Z)'
    optimized_pattern = r'is_optimized: (\d)'

    heapsize = re.search(heapsize_pattern, input_string).group(1)
    optimized = re.search(optimized_pattern, input_string).group(1)
    insert_stats = re.search(insert_stats_pattern, input_string).group(1)

    # The "Extract Min Stats" section may be absent (a run with no
    # extractions); fall back to an empty section in that case
    extract_match = re.search(extract_stats_pattern, input_string)
    extract_stats = extract_match.group(1) if extract_match else ""

    # One 5-tuple per stats paragraph within each section
    counters = r'(\d+) messages sent\n(\d+) message bytes sent\n(\d+) Lamport clock \(latencies\)\n(\d+) local AES operations\n(\d+) milliseconds wall clock time'

    return {
        "heapsize": heapsize,
        "is_optimized": optimized,
        "insert_stats": re.findall(counters, insert_stats),
        "extract_stats": re.findall(counters, extract_stats)
    }
+
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python extract_data.py <filename>")
        sys.exit(1)

    filename = sys.argv[1]

    try:
        with open(filename, "r") as file:
            input_string = file.read()
    except FileNotFoundError:
        # Bug fix: the message previously printed the literal text
        # '(unknown)' instead of interpolating the missing filename
        print(f"Error: File '{filename}' not found.")
        sys.exit(1)

    parsed_data = parse_stats(input_string)

    # Default every counter to 0 when a section had no stats paragraphs
    insert_stats_wallclock = 0
    insert_stats_message_bytes = 0
    if parsed_data["insert_stats"]:
        stat = parsed_data["insert_stats"][0]
        insert_stats_wallclock = stat[4]
        insert_stats_message_bytes = stat[1]

    extract_stats_wallclock = 0
    extract_stats_message_bytes = 0

    if parsed_data["extract_stats"]:
        stat = parsed_data["extract_stats"][0]
        extract_stats_wallclock = stat[4]
        extract_stats_message_bytes = stat[1]

    # One value per line, consumed by the repro shell scripts
    print(parsed_data["heapsize"])
    print(parsed_data["is_optimized"])
    print(insert_stats_wallclock)
    print(insert_stats_message_bytes)
    print(extract_stats_wallclock)
    print(extract_stats_message_bytes)

+ 37 - 0
repro/extract_data_bs.py

@@ -0,0 +1,37 @@
+import sys
+import re
+
# Scan a transcript for every "BINARY SEARCH" stats block and return
# two parallel lists of ints: message bytes sent and wall clock
# milliseconds, one entry per block found.
def extract_binary_search_data(output):
    bytes_sent_pattern = r"BINARY SEARCH =====\n\d+ messages sent\n(\d+) message bytes sent"
    wall_clock_pattern = r"BINARY SEARCH =====\n\d+ messages sent\n\d+ message bytes sent\n\d+ Lamport clock \(latencies\)\n\d+ local AES operations\n(\d+) milliseconds wall clock time"

    bytes_sent = [int(m) for m in re.findall(bytes_sent_pattern, output)]
    wall_clock_time = [int(m) for m in re.findall(wall_clock_pattern, output)]

    return bytes_sent, wall_clock_time
+
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python script.py <input_file>")
        sys.exit(1)

    input_file = sys.argv[1]

    try:
        with open(input_file, 'r') as f:
            output = f.read()
    except FileNotFoundError:
        print(f"File not found: {input_file}")
        sys.exit(1)

    bytes_sent, wall_clock_time = extract_binary_search_data(output)

    # Bug fix: max() on an empty list raises an unhelpful ValueError;
    # fail with a clear message when the transcript has no search stats
    if not bytes_sent or not wall_clock_time:
        print(f"No binary search stats found in: {input_file}")
        sys.exit(1)

    max_bytes_sent = max(bytes_sent)
    max_wall_clock_time = max(wall_clock_time)

    print(f"{max_bytes_sent}")
    print(f"{max_wall_clock_time}")
+

+ 72 - 0
repro/extract_data_from_preproc.py

@@ -0,0 +1,72 @@
+import argparse
+
+def parse_file(file_path):
+    data = {'heapsize': None, 'is_optimized': None, 'P0': {}, 'P1': {}, 'P2': {}}
+    current_party = None
+    with open(file_path, 'r') as file:
+        lines = file.readlines()
+        for line in lines:
+            if line.startswith("heapsize:"):
+                data['heapsize'] = int(line.split(":")[1].strip())
+            elif line.startswith("is_optimized:"):
+                data['is_optimized'] = int(line.split(":")[1].strip())
+            elif line.startswith("===== P"):
+                current_party = line.split()[1]
+            elif current_party and "milliseconds wall clock time" in line:
+                time_parts = line.split()
+                data[current_party]['wall_clock_time'] = int(time_parts[0])
+            elif current_party and "messages sent" in line:
+                data[current_party]['messages_sent'] = int(line.split()[0])
+            elif current_party and "message bytes sent" in line:
+                data[current_party]['message_bytes_sent'] = int(line.split()[0])
+    return data
+
+def main():
+    parser = argparse.ArgumentParser(description="Parse file and extract heapsize, is_optimized, wall clock time, messages sent, and message bytes sent for all three parties.")
+    parser.add_argument("file_path", type=str, help="Path to the input file")
+    args = parser.parse_args()
+    
+    file_path = args.file_path
+    parsed_data = parse_file(file_path)
+    
+    print(f"{parsed_data['heapsize']}")
+    print(f"{parsed_data['is_optimized']}")
+    #for party, party_data in parsed_data.items():
+     #   if party != 'heapsize' and party != 'is_optimized':
+      #      print(f"Party: {party}")
+      #      if 'wall_clock_time' in party_data:
+       #         print(f"Wall Clock Time: {party_data['wall_clock_time']} ms")
+       #     if 'messages_sent' in party_data:
+        #        print(f"Messages Sent: {party_data['messages_sent']}")
+        #    if 'message_bytes_sent' in party_data:
+         #       print(f"Message Bytes Sent: {party_data['message_bytes_sent']}")
+         #   print()
+   # max_wall_clock_time = 0
+   # max_message_bytes_sent = 0
+
+    max_wall_clock_time = 0
+    max_message_bytes_sent = 0
+
+    for party, party_data in parsed_data.items():
+        if party != 'heapsize' and party != 'is_optimized':
+            #print(f"Party: {party}")
+            if 'wall_clock_time' in party_data:
+                wall_clock_time = party_data['wall_clock_time']
+               # print(f"Wall Clock Time: {wall_clock_time} ms")
+                max_wall_clock_time = max(max_wall_clock_time, wall_clock_time)
+            if 'messages_sent' in party_data:
+                messages_sent = party_data['messages_sent']
+                #print(f"Messages Sent: {messages_sent}")
+            if 'message_bytes_sent' in party_data:
+                message_bytes_sent = party_data['message_bytes_sent']
+                #print(f"Message Bytes Sent: {message_bytes_sent}")
+                max_message_bytes_sent = max(max_message_bytes_sent, message_bytes_sent)
+            #print()
+
+    print(f"{max_wall_clock_time}")
+    print(f"{max_message_bytes_sent}")
+
+
+if __name__ == "__main__":
+    main()
+

+ 19 - 0
repro/extract_preproc_string.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+
+file_path="$1"
+
+extract_values() {
+    while IFS= read -r line; do
+        if [[ $line == "Precomputed values used:"* ]]; then
+            echo "${line#Precomputed values used: }"
+            break
+        fi
+    done < "$file_path"
+}
+
+if [ -z "$file_path" ]; then
+    echo "Usage: ./extract_values.sh <file_path>"
+else
+    extract_values
+fi
+

+ 203 - 0
repro/generate_raw_data.sh

@@ -0,0 +1,203 @@
+  
+# This generates the raw data for all the small (n <= 2^26) heap experiments
+
+
+echo "Running the Heap Extract Experiments (Basic)"
+cd ..
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+   
+    echo "heapsize: 16\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_16_itr_$itr"	
+    ./docker/run-experiment -p m:63 a:16 s:15 r16:90 c:32 p:128 >> "repro/data/log_basic_heap_extract_preproc_16_itr_$itr"
+    echo "(basic preproc heap size 16), iteration $itr done"
+    echo "heapsize: 16\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_16_itr_$itr"	
+    ./docker/run-experiment heap -m 16 -d 16 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_16_itr_$itr"
+    echo "(basic online heap size 16), iteration $itr done"
+done    
+ 
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+    echo "heapsize: 18\nis_optimized: 0\n" > 		            "repro/data/log_basic_heap_extract_preproc_18_itr_$itr"	
+    ./docker/run-experiment -p m:71 a:18 s:17 r18:102 c:36 p:128 >> "repro/data/log_basic_heap_extract_preproc_18_itr_$itr"
+    echo "(basic preproc heapsize 18), iteration $itr done"
+    echo "heapsize: 18\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_18_itr_$itr"
+    ./docker/run-experiment heap -m 18 -d 18 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_18_itr_$itr"
+    echo "(basic online heapsize 18), iteration $itr done"	
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+    echo "heapsize: 20\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_20_itr_$itr"	
+    ./docker/run-experiment -p m:80 a:20 s:19 r20:114 c:40 p:128 >> "repro/data/log_basic_heap_extract_preproc_20_itr_$itr"
+    echo "(basic preproc heapsize 20), iteration $itr done"
+    echo "heapsize: 20\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_20_itr_$itr"
+    ./docker/run-experiment -t 8 heap -m 20 -d 20 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_20_itr_$itr"
+    echo "(basic online heapsize 20), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+    echo "heapsize: 22\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_22_itr_$itr"	
+   ./docker/run-experiment -p m:87 a:22 s:21 r22:126 c:44 p:128 >> "repro/data/log_basic_heap_extract_preproc_22_itr_$itr"
+   echo "(basic preproc heapsize 22), iteration $itr done"
+   echo "heapsize: 22\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_22_itr_$itr"
+   ./docker/run-experiment -t 8 heap -m 22 -d 22 -i 0 -e 1 -opt 0 -s 0 -itr $itr >>  "repro/data/log_basic_heap_extract_online_22_itr_$itr"
+   echo "(basic online heapsize 22), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+  echo "heapsize: 24\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_24_itr_$itr"
+  ./docker/run-experiment -p m:69 a:23 s:22 r24:132 c:46 >>  "repro/data/log_basic_heap_extract_preproc_24_itr_$itr"
+  echo "(basic preproc heapsize 24), iteration $itr done"
+  echo "heapsize: 24\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_24_itr_$itr"
+  ./docker/run-experiment -t 8 heap -m 24 -d 24 -i 0 -e 1 -opt 0 -s 0 -itr $itr >>   "repro/data/log_basic_heap_extract_online_24_itr_$itr"
+  echo "(basic online heapsize 24), iteration $itr done"
+done
+
+nitrs=0
+for itr in $(seq 1 $nitrs); do
+  echo "heapsize: 26\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_26_itr_$itr"
+  ./docker/run-experiment -p m:75 a:25 s:24 r26:100 c:50 p:8 >>  "repro/data/log_basic_heap_extract_preproc_26_itr_$itr"
+  echo "heapsize: 26\nis_optimized: 0\n" > "repro/data/logb_basic_heap_extract_preproc_26_itr_$itr"
+  ./docker/run-experiment -p -a r26:44  p:8 >>  "repro/data/logb_basic_heap_extract_preproc_26_itr_$itr"
+  echo "preprocessing_heap_26 (basic preproc)"
+  echo "heapsize: 26\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_26_itr_$itr"
+  ./docker/run-experiment -t 8 heap -m 26 -d 26 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_26_itr_$itr"
+  echo "preprocessing_heap_26 (basic online)"
+done
+
+
+echo "Running the Heap Extract Experiments (Optimized)"
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+     echo "heapsize: 16\nis_optimized: 1\n" 				  >  "repro/data/log_opt_heap_extract_preproc_16_itr_$itr"	 
+     ./docker/run-experiment -p  m:63 a:16 s:15  i15.3:1 c:32 p:128 	  >> "repro/data/log_opt_heap_extract_preproc_16_itr_$itr"
+     echo "(optimized preproc heapsize 16), iteration $itr done"
+     echo "heapsize: 16\nis_optimized: 1\n" 				     > "repro/data/log_opt_heap_extract_online_16_itr_$itr"
+     ./docker/run-experiment heap  -m 16 -d 16 -i 0 -e 1 -opt 1 -s 0 -itr $itr  >> "repro/data/log_opt_heap_extract_online_16_itr_$itr"
+     echo "(optimized online heapsize 16), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+     echo "heapsize: 18\nis_optimized: 1\n" 		     	              > "repro/data/log_opt_heap_extract_preproc_18_itr_$itr"	
+     ./docker/run-experiment -p m:71 a:18 s:17 i17.3:1 c:36 p:128               >> "repro/data/log_opt_heap_extract_preproc_18_itr_$itr"
+     echo "(optimized preproc heapsize 18), iteration $itr done"
+     echo "heapsize: 18\nis_optimized: 1\n"                                    > "repro/data/log_opt_heap_extract_online_18_itr_$itr"
+     ./docker/run-experiment heap  -m 18 -d 18 -i 0 -e 1 -opt 1 -s 0 -itr $itr  >> "repro/data/log_opt_heap_extract_online_18_itr_$itr"
+     echo "(optimized online heapsize 18), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+     echo "heapsize: 20\nis_optimized: 1\n"                         > "repro/data/log_opt_heap_extract_preproc_20_itr_$itr"
+     ./docker/run-experiment -p m:80 a:20 s:19 i19.3:1 c:40 p:128 >> "repro/data/log_opt_heap_extract_preproc_20_itr_$itr"
+     echo "(optimized preproc heapsize 20), iteration $itr done"
+     echo "heapsize: 20\nis_optimized: 1\n"                                   > "repro/data/log_opt_heap_extract_online_20_itr_$itr"
+     ./docker/run-experiment heap  -m 20 -d 20 -i 0 -e 1 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_extract_online_20_itr_$itr"
+     echo "(optimized online heapsize 20), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+     echo "heapsize: 22\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_preproc_22_itr_$itr"
+     ./docker/run-experiment -p m:87 a:22 s:21 i21.3:1 c:44 p:128  >> "repro/data/log_opt_heap_extract_preproc_22_itr_$itr"
+     echo "(optimized preproc heapsize 22), iteration $itr done"
+     echo "heapsize: 22\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_online_22_itr_$itr"
+     ./docker/run-experiment -t 16 heap  -m 22 -d 22 -i 0 -e 1 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_extract_online_22_itr_$itr"
+     echo "(optimized online heapsize 22), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+     echo "heapsize: 24\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_preproc_24_itr_$itr"
+     ./docker/run-experiment -p m:95 a:24 s:23 i23.3:1 c:48 p:128 >> "repro/data/log_opt_heap_extract_preproc_24_itr_$itr"
+     echo "(optimized preproc heapsize 24), iteration $itr done"
+     echo "heapsize: 24\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_online_24_itr_$itr"
+     ./docker/run-experiment -t 32 heap  -m 24 -d 24 -i 0 -e 1 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_extract_online_24_itr_$itr"
+     echo "(optimized online heapsize 24), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+     echo "heapsize: 26\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_preproc_26_itr_$itr"
+     ./docker/run-experiment -p m:103 a:26 s:25 i25.3:1 c:52 p:128 >> "repro/data/log_opt_heap_extract_preproc_26_itr_$itr"
+     echo "(optimized preproc heapsize 26), iteration $itr done"
+     echo "heapsize: 26\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_online_26_itr_$itr"
+     ./docker/run-experiment -t 64 heap  -m 26 -d 26 -i 0 -e 1 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_extract_online_26_itr_$itr"
+     echo "(optimized online heapsize 26), iteration $itr done"
+done
+
+echo "\n\n\n ============================================ [Heap Extract Experiments Complete] ===================================================== \n\n\n"
+
+
+nitrs=0
+echo "Basic Insert Experiments\n"
+
+for itr in $(seq 1 $nitrs); do
+  echo "heapsize: 16\nis_optimized: 0\n" > "repro/data/log_basic_heap_insert_preproc_16_itr_$itr"
+  ./docker/run-experiment -p m:16 c:16 >>  "repro/data/log_basic_heap_insert_preproc_16_itr_$itr"
+  echo "heapsize: 16\nis_optimized: 0\n" >                                         "repro/data/log_basic_heap_insert_online_16_itr_$itr"
+  ./docker/run-experiment -t 64 heap -m 17 -d 16 -i 1 -e 0 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_insert_online_16_itr_$itr"
+ 
+  echo "heapsize: 18\nis_optimized: 0\n" > "repro/data/log_basic_heap_insert_preproc_18_itr_$itr"
+  ./docker/run-experiment -p m:18 c:18 >> "repro/data/log_basic_heap_insert_preproc_18_itr_$itr"
+   echo "heapsize: 18\nis_optimized: 0\n" >                                           "repro/data/log_basic_heap_insert_online_18_itr_$itr"
+  ./docker/run-experiment -t 64 heap -m 19 -d 18 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >> "repro/data/log_basic_heap_insert_online_18_itr_$itr"
+
+  echo "heapsize: 20\nis_optimized: 0\n" >       "repro/data/log_basic_heap_insert_preproc_20_itr_$itr"
+  ./docker/run-experiment -p  m:60 a:20 c:40 >> "repro/data/log_basic_heap_insert_preproc_20_itr_$itr"
+  echo "heapsize: 20\nis_optimized: 0\n" >                           "repro/data/log_basic_heap_insert_online_20_itr_$itr"
+  ./docker/run-experiment heap -m 21 -d 20 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >> "repro/data/log_basic_heap_insert_online_20_itr_$itr"
+
+  echo "heapsize: 22\nis_optimized: 0\n" >                              "repro/data/log_basic_heap_insert_preproc_22_itr_$itr"
+  ./docker/run-experiment -p m:22 c:22 >>                              "repro/data/log_basic_heap_insert_preproc_22_itr_$itr"
+  echo "heapsize: 22\nis_optimized: 0\n" >                             "repro/data/log_basic_heap_insert_online_22_itr_$itr"
+  ./docker/run-experiment  heap -m 23 -d 22 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >>  "repro/data/log_basic_heap_insert_online_22_itr_$itr"
+
+  echo "heapsize: 24\nis_optimized: 0\n" > "repro/data/log_basic_heap_insert_preproc_24_itr_$itr"
+  ./docker/run-experiment -p m:24 c:24 >> "repro/data/log_basic_heap_insert_preproc_24_itr_$itr"
+  echo "heapsize: 24\nis_optimized: 0\n" >                          "repro/data/log_basic_heap_insert_online_24_itr_$itr"
+  ./docker/run-experiment heap -m 25 -d 24 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >> "repro/data/log_basic_heap_insert_online_24_itr_$itr"
+
+  echo "heapsize: 26\nis_optimized: 0\n" > "repro/data/log_basic_heap_insert_preproc_26_itr_$itr"
+  ./docker/run-experiment -p m:26 c:26 >>  "repro/data/log_basic_heap_insert_preproc_26_itr_$itr"
+  echo "heapsize: 26\nis_optimized: 0\n" >                           "repro/data/log_basic_heap_insert_online_26_itr_$itr"
+  ./docker/run-experiment heap -m 27 -d 26 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >> "repro/data/log_basic_heap_insert_online_26_itr_$itr"
+done
+
+
+echo "Optimized Insert Experiments\n"
+for itr in $(seq 1 $nitrs); do
+  echo "heapsize: 16\nis_optimized: 1\n" >                                "repro/data/log_opt_heap_insert_preproc_16_itr_$itr"
+  ./docker/run-experiment -p m:36 r6:1 i4:1 c:5 >>                        "repro/data/log_opt_heap_insert_preproc_16_itr_$itr"
+  echo "heapsize: 16\nis_optimized: 1\n" >                                "repro/data/log_opt_heap_insert_online_16_itr_$itr"
+  ./docker/run-experiment -t 64 heap -m 17 -d 16 -i 1 -e 0 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_insert_online_16_itr_$itr"
+  echo "done"	
+  echo "heapsize: 18\nis_optimized: 1\n" >                                "repro/data/log_opt_heap_insert_preproc_18_itr_$itr"
+  ./docker/run-experiment -p m:40 r6:1 i4:1 c:5  >>                       "repro/data/log_opt_heap_insert_preproc_18_itr_$itr"
+  echo "heapsize: 18\nis_optimized: 1\n" >                                "repro/data/log_opt_heap_insert_online_18_itr_$itr"
+  ./docker/run-experiment -t 64 heap -m 19 -d 18 -i 1 -e 0 -opt 1 -s 0  -itr $itr >> "repro/data/log_opt_heap_insert_online_18_itr_$itr"
+  echo "done"
+  echo "heapsize: 20\nis_optimized: 1\n" >                          "repro/data/log_opt_heap_insert_preproc_20_itr_$itr"
+  ./docker/run-experiment -p T0 m:44 r6:1 i4:1 c:5               >> "repro/data/log_opt_heap_insert_preproc_20_itr_$itr"
+  echo "heapsize: 20\nis_optimized: 1\n" >                          "repro/data/log_opt_heap_insert_online_20_itr_$itr"
+  ./docker/run-experiment heap -m 21 -d 20 -i 1 -e 0 -opt 1 -s 0  -itr $itr >> "repro/data/log_opt_heap_insert_online_20_itr_$itr"
+  echo "done"
+  echo "heapsize: 22\nis_optimized: 1\n" >             "repro/data/log_opt_heap_insert_preproc_22_itr_$itr"
+  ./docker/run-experiment -p T0 m:48 r6:1 i4:1 c:5  >> "repro/data/log_opt_heap_insert_preproc_22_itr_$itr"
+  echo "heapsize: 22\nis_optimized: 1\n" >                           "repro/data/log_opt_heap_insert_online_22_itr_$itr"
+  ./docker/run-experiment  heap -m 23 -d 22 -i 1 -e 0 -opt 1 -s 0  -itr $itr >> "repro/data/log_opt_heap_insert_online_22_itr_$itr"
+  echo "done"
+  echo "heapsize: 24\nis_optimized: 1\n" >             "repro/data/log_opt_heap_insert_preproc_24_itr_$itr"
+  ./docker/run-experiment -p T0 m:52 r6:1 i4:1 c:5  >> "repro/data/log_opt_heap_insert_preproc_24_itr_$itr"
+  echo "heapsize: 24\nis_optimized: 1\n" >                          "repro/data/log_opt_heap_insert_online_24_itr_$itr"
+  ./docker/run-experiment heap -m 25 -d 24 -i 1 -e 0 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_insert_online_24_itr_$itr"
+  echo "done"
+  echo "heapsize: 26\nis_optimized: 1\n" >             "repro/data/log_opt_heap_insert_preproc_26_itr_$itr"
+  ./docker/run-experiment -p T0 m:56 r6:1 i4:1 c:5  >> "repro/data/log_opt_heap_insert_preproc_26_itr_$itr"
+  echo "heapsize: 26\nis_optimized: 1\n" >                          "repro/data/log_opt_heap_insert_online_26_itr_$itr"
+  ./docker/run-experiment heap -m 27 -d 26 -i 1 -e 0 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_insert_online_26_itr_$itr"
+  echo "done"  	
+done

+ 117 - 0
repro/generate_raw_data_bs.sh

@@ -0,0 +1,117 @@
+
+echo "Running the Binary Experiments (Basic)"
+cd ..
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+    echo $itr
+    echo "bssize: 16\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_16_itr_$itr"	
+    ./docker/run-experiment -p m:17 r17:17 c:17 p:64 >> "repro/data/log_basic_bs_preproc_16_itr_$itr"
+    echo "(basic preproc BS size 16), iteration $itr done"
+    echo "bssize: 16\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_16_itr_$itr"	
+    ./docker/run-experiment  -t 64 bbsearch 16 >> "repro/data/log_basic_bs_online_16_itr_$itr"
+    echo "(basic online BS size 16), iteration $itr done"
+done    
+  
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+    echo "bssize: 18\nis_optimized: 0\n" > 		            "repro/data/log_basic_bs_preproc_18_itr_$itr"	
+    ./docker/run-experiment -p m:19 r19:19 c:19 p:64 >> "repro/data/log_basic_bs_preproc_18_itr_$itr"
+    echo "(basic preproc BS size 18), iteration $itr done"
+    echo "bssize: 18\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_18_itr_$itr"
+    ./docker/run-experiment  -t 64 bbsearch 18 >> "repro/data/log_basic_bs_online_18_itr_$itr"
+    echo "(basic online BS size 18), iteration $itr done"	
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+    echo "bssize: 20\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_20_itr_$itr"	
+    ./docker/run-experiment -p m:21 r21:21 c:21 p:64 >> "repro/data/log_basic_bs_preproc_20_itr_$itr"
+    echo "(basic preproc BS size 20), iteration $itr done"
+    echo "bssize: 20\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_20_itr_$itr"
+    ./docker/run-experiment  -t 64 bbsearch 20 >> "repro/data/log_basic_bs_online_20_itr_$itr"
+    echo "(basic online BS size 20), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+    echo "bssize: 22\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_22_itr_$itr"	
+   ./docker/run-experiment -p m:23 r23:23 c:23 p:64 >> "repro/data/log_basic_bs_preproc_22_itr_$itr"
+   echo "(basic preproc BS size 22), iteration $itr done"
+   echo "bssize: 22\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_22_itr_$itr"
+   ./docker/run-experiment  -t 64 bbsearch 22 >>  "repro/data/log_basic_bs_online_22_itr_$itr"
+   echo "(basic online BS size 22), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+  echo "bssize: 24\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_24_itr_$itr"
+  ./docker/run-experiment -p m:25 r25:25 c:25 p:64 >>  "repro/data/log_basic_bs_preproc_24_itr_$itr"
+  echo "(basic preproc BS size 24), iteration $itr done"
+  echo "bssize: 24\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_24_itr_$itr"
+  ./docker/run-experiment  -t 64 bbsearch 24 >>   "repro/data/log_basic_bs_online_24_itr_$itr"
+  echo "(basic online BS size 24), iteration $itr done"
+done
+
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+  echo "bssize: 26\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_26_itr_$itr"
+  ./docker/run-experiment -p m:27 r27:27 c:27 p:64 >>  "repro/data/log_basic_bs_preproc_26_itr_$itr"
+  echo "(basic preproc BS size 26), iteration $itr done"	
+  echo "bssize: 26\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_26_itr_$itr"
+  ./docker/run-experiment  -t 64 bbsearch 26 >> "repro/data/log_basic_bs_online_26_itr_$itr"
+  echo "(basic online BS size 26), iteration $itr done"
+done
+
+
+echo "Running the Binary Search Experiments (Optimized)"
+nitrs=1
+for itr in $(seq 1 $nitrs); do
+   echo "bssize: 16\nis_optimized: 1\n" 			  >  "repro/data/log_opt_bs_preproc_16_itr_$itr"	 
+  ./docker/run-experiment -p  i16:1 c:17 p:64 	  >> "repro/data/log_opt_bs_preproc_16_itr_$itr"
+  echo "(optimized preproc BS size 16), iteration $itr done"
+   echo "bssize: 16\nis_optimized: 1\n" 				     > "repro/data/log_opt_bs_online_16_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 16   >> "repro/data/log_opt_bs_online_16_itr_$itr"
+   echo "(optimized online BS size 16), iteration $itr done"
+
+   echo "bssize: 18\nis_optimized: 1\n" 		     	              > "repro/data/log_opt_bs_preproc_18_itr_$itr"	
+  ./docker/run-experiment -p  i18:1 c:19 p:64                        >> "repro/data/log_opt_bs_preproc_18_itr_$itr"
+    echo "(optimized preproc BS size 18), iteration $itr done"
+   echo "bssize: 18\nis_optimized: 1\n"                                    > "repro/data/log_opt_bs_online_18_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 18   >> "repro/data/log_opt_bs_online_18_itr_$itr"
+   echo "(optimized online BS size 18), iteration $itr done"
+
+  echo "bssize: 20\nis_optimized: 1\n"                         > "repro/data/log_opt_bs_preproc_20_itr_$itr"
+  ./docker/run-experiment -p  i20:1 c:21 p:64    >> "repro/data/log_opt_bs_preproc_20_itr_$itr"
+  echo "(optimized preproc BS size 20), iteration $itr done"
+   echo "bssize: 20\nis_optimized: 1\n"                                   > "repro/data/log_opt_bs_online_20_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 20  >> "repro/data/log_opt_bs_online_20_itr_$itr"
+  echo "(optimized online BS size 20), iteration $itr done"
+
+   echo "bssize: 22\nis_optimized: 1\n"        > "repro/data/log_opt_bs_preproc_22_itr_$itr"
+ ./docker/run-experiment -p  i22:1 c:23 p:64    >> "repro/data/log_opt_bs_preproc_22_itr_$itr"
+  echo "(optimized preproc BS size 22), iteration $itr done"
+   echo "bssize: 22\nis_optimized: 1\n" > "repro/data/log_opt_bs_online_22_itr_$itr"
+ ./docker/run-experiment -t 64 bsearch 22  >> "repro/data/log_opt_bs_online_22_itr_$itr"
+  echo "(optimized online BS size 22), iteration $itr done"
+
+ echo "bssize: 24\nis_optimized: 1\n" > "repro/data/log_opt_bs_preproc_24_itr_$itr"
+ ./docker/run-experiment -p  i24:1 c:25 p:64 >> "repro/data/log_opt_bs_preproc_24_itr_$itr"
+  echo "(optimized preproc BS size 24), iteration $itr done"
+   echo "bssize: 24\nis_optimized: 1\n" > "repro/data/log_opt_bs_online_24_itr_$itr"
+ ./docker/run-experiment -t 64 bsearch 24  >> "repro/data/log_opt_bs_online_24_itr_$itr"
+  echo "(optimized online BS size 24), iteration $itr done"
+
+  echo "bssize: 26\nis_optimized: 1\n" > "repro/data/log_opt_bs_preproc_26_itr_$itr"
+ ./docker/run-experiment -p  i26:1 c:27 p:64 >> "repro/data/log_opt_bs_preproc_26_itr_$itr"
+  echo "(optimized preproc BS size 26), iteration $itr done"
+  echo "bssize: 26\nis_optimized: 1\n" > "repro/data/log_opt_bs_online_26_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 26  >> "repro/data/log_opt_bs_online_26_itr_$itr"
+  echo "(optimized online BS size 26), iteration $itr done"
+
+    echo "bssize: 28\nis_optimized: 1\n" > "repro/data/log_opt_bs_preproc_28_itr_$itr"
+ ./docker/run-experiment -p  i28:1 c:29 p:64 >> "repro/data/log_opt_bs_preproc_28_itr_$itr"
+  echo "(optimized preproc BS size 28), iteration $itr done"
+  echo "bssize: 28\nis_optimized: 1\n" > "repro/data/log_opt_bs_online_28_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 28  >> "repro/data/log_opt_bs_online_28_itr_$itr"
+  echo "(optimized online BS size 28), iteration $itr done"
+done

+ 109 - 0
repro/generate_raw_data_bs_const_db.sh

@@ -0,0 +1,109 @@
+
+echo "Running the Binary Experiments (Basic)"
+cd ..
+nitrs=20
+for itr in $(seq 1 $nitrs); do
+    echo $itr
+    echo "bssize: 16\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_16_itr_$itr"	
+    ./docker/run-experiment -p m:17 r17:17 c:17 p:64 >> "repro/data/log_basic_bs_preproc_16_itr_$itr"
+    echo "preprocessing_bs_16 (basic preproc) - iteration $itr"
+    echo "bssize: 16\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_16_itr_$itr"	
+    ./docker/run-experiment  -t 64 bbsearch 16 >> "repro/data/log_basic_bs_online_16_itr_$itr"
+    echo "preprocessing_bs_16 (basic online) - iteration $itr"
+done    
+  
+nitrs=20
+for itr in $(seq 1 $nitrs); do
+    echo "bssize: 18\nis_optimized: 0\n" > 		            "repro/data/log_basic_bs_preproc_18_itr_$itr"	
+    ./docker/run-experiment -p m:19 r19:19 c:19 p:64 >> "repro/data/log_basic_bs_preproc_18_itr_$itr"
+    echo "preprocessing_bs_18 (basic preproc)"
+    echo "bssize: 18\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_18_itr_$itr"
+    ./docker/run-experiment  -t 64 bbsearch 18 >> "repro/data/log_basic_bs_online_18_itr_$itr"
+    echo "preprocessing_bs_18 (basic online)"	
+done
+
+nitrs=20
+for itr in $(seq 1 $nitrs); do
+    echo "bssize: 20\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_20_itr_$itr"	
+    ./docker/run-experiment -p m:21 r21:21 c:21 p:64 >> "repro/data/log_basic_bs_preproc_20_itr_$itr"
+    echo "preprocessing_bs_20 (basic preproc)"
+    echo "bssize: 20\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_20_itr_$itr"
+    ./docker/run-experiment  -t 64 bbsearch 20 >> "repro/data/log_basic_bs_online_20_itr_$itr"
+    echo "preprocessing_bs_20 (basic online)"
+done
+
+nitrs=10
+for itr in $(seq 1 $nitrs); do
+    echo "bssize: 22\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_22_itr_$itr"	
+   ./docker/run-experiment -p m:23 r23:23 c:23 p:64 >> "repro/data/log_basic_bs_preproc_22_itr_$itr"
+   echo "preprocessing_bs_22 (basic preproc)"
+   echo "bssize: 22\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_22_itr_$itr"
+   ./docker/run-experiment  -t 64 bbsearch 22 >>  "repro/data/log_basic_bs_online_22_itr_$itr"
+   echo "preprocessing_bs_22 (basic online)"
+done
+
+nitrs=10
+for itr in $(seq 1 $nitrs); do
+  echo "bssize: 24\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_24_itr_$itr"
+  ./docker/run-experiment -p m:25 r25:25 c:25 p:64 >>  "repro/data/log_basic_bs_preproc_24_itr_$itr"
+  echo "preprocessing_bs_24 (basic preproc)"
+  echo "bssize: 24\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_24_itr_$itr"
+  ./docker/run-experiment  -t 64 bbsearch 24 >>   "repro/data/log_basic_bs_online_24_itr_$itr"
+  echo "preprocessing_bs_24 (basic online)"
+done
+
+nitrs=5
+for itr in $(seq 1 $nitrs); do
+  echo "bssize: 26\nis_optimized: 0\n" > "repro/data/log_basic_bs_preproc_26_itr_$itr"
+  ./docker/run-experiment -p m:27 r27:27 c:27 p:64 >>  "repro/data/log_basic_bs_preproc_26_itr_$itr"
+  echo "bssize: 26\nis_optimized: 0\n" > "repro/data/log_basic_bs_online_26_itr_$itr"
+  ./docker/run-experiment  -t 64 bbsearch 26 >> "repro/data/log_basic_bs_online_26_itr_$itr"
+  echo "preprocessing_bs_26 (basic online)"
+done
+
+
+echo "Running the Binary Search Experiments (Optimized)"
+nitrs=10
+for itr in $(seq 1 $nitrs); do
+   echo "bssize: 16\nis_optimized: 1\n" 			  >  "repro/data/log_opt_bs_preproc_16_itr_$itr"	 
+  ./docker/run-experiment -p  i16:1 c:17 p:64 	  >> "repro/data/log_opt_bs_preproc_16_itr_$itr"
+  echo "preprocessing_bs_16 (opt preproc)"
+   echo "bssize: 16\nis_optimized: 1\n" 				     > "repro/data/log_opt_bs_online_16_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 16   >> "repro/data/log_opt_bs_online_16_itr_$itr"
+  echo "preprocessing_bs_16 (opt online)"
+
+   echo "bssize: 18\nis_optimized: 1\n" 		     	              > "repro/data/log_opt_bs_preproc_18_itr_$itr"	
+  ./docker/run-experiment -p  i18:1 c:19 p:64                        >> "repro/data/log_opt_bs_preproc_18_itr_$itr"
+   echo "preprocessing_bs_18 (opt preproc)"
+   echo "bssize: 18\nis_optimized: 1\n"                                    > "repro/data/log_opt_bs_online_18_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 18   >> "repro/data/log_opt_bs_online_18_itr_$itr"
+  echo "preprocessing_bs_18 (opt online)"
+
+  echo "bssize: 20\nis_optimized: 1\n"                         > "repro/data/log_opt_bs_preproc_20_itr_$itr"
+  ./docker/run-experiment -p  i20:1 c:21 p:64    >> "repro/data/log_opt_bs_preproc_20_itr_$itr"
+  echo "preprocessing_bs_20 (opt preproc)"
+   echo "bssize: 20\nis_optimized: 1\n"                                   > "repro/data/log_opt_bs_online_20_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 20  >> "repro/data/log_opt_bs_online_20_itr_$itr"
+  echo "preprocessing_bs_20 (opt online)"
+
+   echo "bssize: 22\nis_optimized: 1\n"        > "repro/data/log_opt_bs_preproc_22_itr_$itr"
+ ./docker/run-experiment -p  i22:1 c:23 p:64    >> "repro/data/log_opt_bs_preproc_22_itr_$itr"
+  echo "preprocessing_bs_22 (opt preproc)"
+   echo "bssize: 22\nis_optimized: 1\n" > "repro/data/log_opt_bs_online_22_itr_$itr"
+ ./docker/run-experiment -t 64 bsearch 22  >> "repro/data/log_opt_bs_online_22_itr_$itr"
+  echo "preprocessing_bs_22 (opt online)"
+
+ echo "bssize: 24\nis_optimized: 1\n" > "repro/data/log_opt_bs_preproc_24_itr_$itr"
+ ./docker/run-experiment -p  i24:1 c:25 p:64 >> "repro/data/log_opt_bs_preproc_24_itr_$itr"
+  echo "preprocessing_bs_24 (opt preproc)"
+   echo "bssize: 24\nis_optimized: 1\n" > "repro/data/log_opt_bs_online_24_itr_$itr"
+ ./docker/run-experiment -t 64 bsearch 24  >> "repro/data/log_opt_bs_online_24_itr_$itr"
+  echo "preprocessing_bs_24 (opt online)"
+
+  echo "bssize: 26\nis_optimized: 1\n" > "repro/data/log_opt_bs_preproc_26_itr_$itr"
+ ./docker/run-experiment -p  i26:1 c:27 p:64 >> "repro/data/log_opt_bs_preproc_26_itr_$itr"
+  echo "preprocessing_bs_26 (opt preproc)"
+  echo "bssize: 26\nis_optimized: 1\n" > "repro/data/log_opt_bs_online_26_itr_$itr"
+  ./docker/run-experiment -t 64 bsearch 26  >> "repro/data/log_opt_bs_online_26_itr_$itr"
+  echo "preprocessing_bs_26 (opt online)"
+done

+ 234 - 0
repro/generate_raw_data_const_db.sh

@@ -0,0 +1,234 @@
+
+cd ..
+nitrs=10
+for itr in $(seq 1 $nitrs); do
+echo "heapsize: 4\nis_optimized: 0\n" > "repro/data/log_heap_extract_basic_pre_20_4_itr_$itr"
+./run-experiment -p  m:316 a:80 s:76 r20:456 c:160 p:128 >> "repro/data/log_heap_extract_basic_pre_20_4_itr_$itr"
+echo "preprocessing_heap_16 (basic preproc)"
+echo "heapsize: 4\nis_optimized: 0\n" > "repro/data/log_heap_extract_basic_online_20_4_itr_$itr"
+./run-experiment -t 16 heap 20 20 0 4 0 >> "repro/data/log_heap_extract_basic_online_20_4_itr_$itr"
+echo "preprocessing_heap_16 (basic online)"
+echo "heapsize: 4\nis_optimized: 1\n" > "repro/data/log_heap_extract_opt_pre_20_4_itr_$itr"
+./run-experiment -p  m:316 a:80 s:76 i19.3:4 c:160 p:128 >> "repro/data/log_heap_extract_opt_pre_20_4_itr_$itr"
+ echo "preprocessing_heap_16 (opt preproc)"
+ echo "heapsize: 4\nis_optimized: 1\n" > "repro/data/log_heap_extract_opt_online_20_4_itr_$itr"
+ ./run-experiment -t 16 heap 20 20 0 4 1 >> "repro/data/log_heap_extract_opt_online_20_4_itr_$itr"
+ echo "preprocessing_heap_16 (opt online)"
+
+
+./run-experiment -p m:632 a:160 s:152 r20:912 c:320 p:128 >> "repro/data/log_heap_extract_basic_pre_20_8_itr_$itr"
+echo "preprocessing_heap_16 (basic preproc)"
+./run-experiment -t 16 heap 20 20 0 8 0 >> "repro/data/log_heap_extract_basic_online_20_8_itr_$itr"
+echo "preprocessing_heap_16 (basic online)"
+
+./run-experiment -p  m:632 a:160 s:152 i19.3:8 c:320 p:128 >> "repro/data/log_heap_extract_opt_pre_20_8_itr_$itr"
+ echo "preprocessing_heap_16 (opt preproc)"
+./run-experiment -t 16 heap 20 20 0 8 1 >> "repro/data/log_heap_extract_opt_online_20_8_itr_$itr"
+ echo "preprocessing_heap_16 (opt online)"
+
+
+
+./run-experiment -p  m:1264 a:320 s:304 r20:1824 c:640 p:128 >> "repro/data/log_heap_extract_basic_pre_20_16_itr_$itr"
+ echo "preprocessing_heap_16 (basic preproc)"
+./run-experiment -t 16 heap 20 20 0 16 0 >> "repro/data/log_heap_extract_basic_online_20_16_itr_$itr"
+ echo "preprocessing_heap_16 (basic online)"
+
+./run-experiment -p  m:1264 a:320 s:304 i19.3:16 c:640 p:128 >> "repro/data/log_heap_extract_opt_pre_20_16_itr_$itr"
+ echo "preprocessing_heap_16 (opt preproc)"
+./run-experiment -t 16 heap 20 20 0 16 1 >> "repro/data/log_heap_extract_opt_online_20_16_itr_$itr"
+ echo "preprocessing_heap_16 (opt online)"
+
+
+
+ ./run-experiment -p m:2528 a:640 s:608 r20:3648 c:1280 p:128 >> "repro/data/log_heap_extract_basic_pre_20_32_itr_$itr"
+  echo "preprocessing_heap_32 (basic preproc)"
+ ./run-experiment -t 16 heap 20 20 0 32 0 >> "repro/data/log_heap_extract_basic_online_20_32_itr_$itr"
+ echo "preprocessing_heap_32 (basic online)"
+
+ ./run-experiment -p  m:2528 a:640 s:608 i19.3:32 c:1280 p:128 >> "repro/data/log_heap_extract_opt_pre_20_32_itr_$itr"
+ echo "preprocessing_heap_32 (opt preproc)"
+ ./run-experiment -t 16 heap 20 20 0 32 1 >> "repro/data/log_heap_extract_opt_online_20_32_itr_$itr"
+ echo "preprocessing_heap_32 (opt online)"
+done
+
+echo "Running the Heap Extract Experiments (Basic)"
+
+
+for itr in $(seq 1 $nitrs); do
+    echo $itr
+    echo "heapsize: 16\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_16_itr_$itr"	
+    ./docker/run-experiment -p m:63 a:16 s:15 r16:90 c:32 p:128 >> "repro/data/log_basic_heap_extract_preproc_16_itr_$itr"
+    echo "preprocessing_heap_16 (basic preproc) - iteration $itr"
+    echo "heapsize: 16\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_16_itr_$itr"	
+    ./docker/run-experiment heap -m 16 -d 16 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_16_itr_$itr"
+    echo "preprocessing_heap_16 (basic online) - iteration $itr"
+done    
+  
+nitrs=10
+for itr in $(seq 1 $nitrs); do
+    echo "heapsize: 18\nis_optimized: 0\n" > 		            "repro/data/log_basic_heap_extract_preproc_18_itr_$itr"	
+    ./docker/run-experiment -p m:71 a:18 s:17 r18:102 c:36 p:128 >> "repro/data/log_basic_heap_extract_preproc_18_itr_$itr"
+    echo "preprocessing_heap_18 (basic online)"
+    echo "heapsize: 18\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_18_itr_$itr"
+    ./docker/run-experiment heap -m 18 -d 18 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_18_itr_$itr"
+    echo "preprocessing_heap_18 (basic online)"	
+done
+
+nitrs=10
+for itr in $(seq 1 $nitrs); do
+    echo "heapsize: 20\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_20_itr_$itr"	
+    ./docker/run-experiment -p m:80 a:20 s:19 r20:114 c:40 p:128 >> "repro/data/log_basic_heap_extract_preproc_20_itr_$itr"
+    echo "preprocessing_heap_20 (basic preproc)"
+    echo "heapsize: 20\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_20_itr_$itr"
+    ./docker/run-experiment -t 8 heap -m 20 -d 20 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_20_itr_$itr"
+    echo "preprocessing_heap_20 (basic online)"
+done
+
+nitrs=10
+for itr in $(seq 1 $nitrs); do
+    echo "heapsize: 22\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_22_itr_$itr"	
+   ./docker/run-experiment -p m:87 a:22 s:21 r22:126 c:44 p:128 >> "repro/data/log_basic_heap_extract_preproc_22_itr_$itr"
+   echo "preprocessing_heap_22 (basic preproc)"
+   echo "heapsize: 22\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_22_itr_$itr"
+   ./docker/run-experiment -t 8 heap -m 22 -d 22 -i 0 -e 1 -opt 0 -s 0 -itr $itr >>  "repro/data/log_basic_heap_extract_online_22_itr_$itr"
+   echo "preprocessing_heap_22 (basic online)"
+done
+
+nitrs=10
+for itr in $(seq 1 $nitrs); do
+  echo "heapsize: 24\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_24_itr_$itr"
+  ./docker/run-experiment -p m:69 a:23 s:22 r24:132 c:46 >>  "repro/data/log_basic_heap_extract_preproc_24_itr_$itr"
+  echo "preprocessing_heap_24 (basic preproc)"
+  echo "heapsize: 24\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_24_itr_$itr"
+  ./docker/run-experiment -t 8 heap -m 24 -d 24 -i 0 -e 1 -opt 0 -s 0 -itr $itr >>   "repro/data/log_basic_heap_extract_online_24_itr_$itr"
+  echo "preprocessing_heap_24 (basic online)"
+done
+
+nitrs=5
+for itr in $(seq 1 $nitrs); do
+  echo "heapsize: 26\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_26_itr_$itr"
+  ./docker/run-experiment -p m:75 a:25 s:24 r26:100 c:50 p:8 >>  "repro/data/log_basic_heap_extract_preproc_26_itr_$itr"
+  echo "heapsize: 26\nis_optimized: 0\n" > "repro/data/logb_basic_heap_extract_preproc_26_itr_$itr"
+  ./docker/run-experiment -p -a r26:44  p:8 >>  "repro/data/logb_basic_heap_extract_preproc_26_itr_$itr"
+  echo "preprocessing_heap_26 (basic preproc)"
+  echo "heapsize: 26\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_26_itr_$itr"
+  ./docker/run-experiment -t 8 heap -m 26 -d 26 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_26_itr_$itr"
+  echo "preprocessing_heap_26 (basic online)"
+done
+
+
+echo "Running the Heap Extract Experiments (Optimized)"
+
+for itr in $(seq 1 $nitrs); do
+     echo "heapsize: 16\nis_optimized: 1\n" 				  >  "repro/data/log_opt_heap_extract_preproc_16_itr_$itr"	 
+     ./docker/run-experiment -p  m:63 a:16 s:15  i15.3:1 c:32 p:128 	  >> "repro/data/log_opt_heap_extract_preproc_16_itr_$itr"
+     echo "preprocessing_heap_16 (opt preproc)"
+     echo "heapsize: 16\nis_optimized: 1\n" 				     > "repro/data/log_opt_heap_extract_online_16_itr_$itr"
+     ./docker/run-experiment heap  -m 16 -d 16 -i 0 -e 1 -opt 1 -s 0 -itr $itr  >> "repro/data/log_opt_heap_extract_online_16_itr_$itr"
+     echo "preprocessing_heap_16 (opt online)"
+
+     echo "heapsize: 18\nis_optimized: 1\n" 		     	              > "repro/data/log_opt_heap_extract_preproc_18_itr_$itr"	
+     ./docker/run-experiment -p m:71 a:18 s:17 i17.3:1 c:36 p:128               >> "repro/data/log_opt_heap_extract_preproc_18_itr_$itr"
+     echo "preprocessing_heap_18 (opt preproc)"
+     echo "heapsize: 18\nis_optimized: 1\n"                                    > "repro/data/log_opt_heap_extract_online_18_itr_$itr"
+     ./docker/run-experiment heap  -m 18 -d 18 -i 0 -e 1 -opt 1 -s 0 -itr $itr  >> "repro/data/log_opt_heap_extract_online_18_itr_$itr"
+     echo "preprocessing_heap_18 (opt online)"
+
+     echo "heapsize: 20\nis_optimized: 1\n"                         > "repro/data/log_opt_heap_extract_preproc_20_itr_$itr"
+     ./docker/run-experiment -p m:80 a:20 s:19 i19.3:1 c:40 p:128 >> "repro/data/log_opt_heap_extract_preproc_20_itr_$itr"
+     echo "preprocessing_heap_20 (opt preproc)"
+     echo "heapsize: 20\nis_optimized: 1\n"                                   > "repro/data/log_opt_heap_extract_online_20_itr_$itr"
+     ./docker/run-experiment heap  -m 20 -d 20 -i 0 -e 1 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_extract_online_20_itr_$itr"
+     echo "preprocessing_heap_20 (opt online)"
+
+     echo "heapsize: 22\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_preproc_22_itr_$itr"
+     ./docker/run-experiment -p m:87 a:22 s:21 i21.3:1 c:44 p:128  >> "repro/data/log_opt_heap_extract_preproc_22_itr_$itr"
+     echo "preprocessing_heap_22 (opt preproc)"
+     echo "heapsize: 22\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_online_22_itr_$itr"
+     ./docker/run-experiment -t 16 heap  -m 22 -d 22 -i 0 -e 1 -opt 1 -s 1 -itr $itr >> "repro/data/log_opt_heap_extract_online_22_itr_$itr"
+     echo "preprocessing_heap_22 (opt online)"
+
+     echo "heapsize: 24\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_preproc_24_itr_$itr"
+     ./docker/run-experiment -p m:95 a:24 s:23 i23.3:1 c:48 p:128 >> "repro/data/log_opt_heap_extract_preproc_24_itr_$itr"
+     echo "preprocessing_heap_24 (opt preproc)"
+     echo "heapsize: 24\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_online_24_itr_$itr"
+     ./docker/run-experiment -t 32 heap  -m 24 -d 24 -i 0 -e 1 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_extract_online_24_itr_$itr"
+     echo "preprocessing_heap_24 (opt online)"
+
+     echo "heapsize: 26\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_preproc_26_itr_$itr"
+     ./docker/run-experiment -p m:103 a:26 s:25 i25.3:1 c:52 p:128 >> "repro/data/log_opt_heap_extract_preproc_26_itr_$itr"
+     echo "preprocessing_heap_26 (opt preproc)"
+     echo "heapsize: 26\nis_optimized: 1\n" > "repro/data/log_opt_heap_extract_online_26_itr_$itr"
+     ./docker/run-experiment -t 64 heap  -m 26 -d 26 -i 0 -e 1 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_extract_online_26_itr_$itr"
+     echo "preprocessing_heap_26 (opt online)"
+done
+
+
+echo "Basic Insert Experiments\n"
+
+for itr in $(seq 1 $nitrs); do
+  echo "heapsize: 16\nis_optimized: 0\n" > "repro/data/log_basic_heap_insert_preproc_16_itr_$itr"
+  ./docker/run-experiment -p m:16 c:16 >>  "repro/data/log_basic_heap_insert_preproc_16_itr_$itr"
+  echo "heapsize: 16\nis_optimized: 0\n" >                                         "repro/data/log_basic_heap_insert_online_16_itr_$itr"
+  ./docker/run-experiment -t 64 heap -m 17 -d 16 -i 1 -e 0 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_insert_online_16_itr_$itr"
+ 
+  echo "heapsize: 18\nis_optimized: 0\n" > "repro/data/log_basic_heap_insert_preproc_18_itr_$itr"
+  ./docker/run-experiment -p m:18 c:18 >> "repro/data/log_basic_heap_insert_preproc_18_itr_$itr"
+   echo "heapsize: 18\nis_optimized: 0\n" >                                           "repro/data/log_basic_heap_insert_online_18_itr_$itr"
+  ./docker/run-experiment -t 64 heap -m 19 -d 18 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >> "repro/data/log_basic_heap_insert_online_18_itr_$itr"
+
+  echo "heapsize: 20\nis_optimized: 0\n" >       "repro/data/log_basic_heap_insert_preproc_20_itr_$itr"
+  ./docker/run-experiment -p  m:60 a:20 c:40 >> "repro/data/log_basic_heap_insert_preproc_20_itr_$itr"
+  echo "heapsize: 20\nis_optimized: 0\n" >                           "repro/data/log_basic_heap_insert_online_20_itr_$itr"
+  ./docker/run-experiment heap -m 21 -d 20 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >> "repro/data/log_basic_heap_insert_online_20_itr_$itr"
+
+  echo "heapsize: 22\nis_optimized: 0\n" >                              "repro/data/log_basic_heap_insert_preproc_22_itr_$itr"
+  ./docker/run-experiment -p m:22 c:22 >>                              "repro/data/log_basic_heap_insert_preproc_22_itr_$itr"
+  echo "heapsize: 22\nis_optimized: 0\n" >                             "repro/data/log_basic_heap_insert_online_22_itr_$itr"
+  ./docker/run-experiment  heap -m 23 -d 22 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >>  "repro/data/log_basic_heap_insert_online_22_itr_$itr"
+
+  echo "heapsize: 24\nis_optimized: 0\n" > "repro/data/log_basic_heap_insert_preproc_24_itr_$itr"
+  ./docker/run-experiment -p m:24 c:24 >> "repro/data/log_basic_heap_insert_preproc_24_itr_$itr"
+  echo "heapsize: 24\nis_optimized: 0\n" >                          "repro/data/log_basic_heap_insert_online_24_itr_$itr"
+  ./docker/run-experiment heap -m 25 -d 24 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >> "repro/data/log_basic_heap_insert_online_24_itr_$itr"
+
+  echo "heapsize: 26\nis_optimized: 0\n" > "repro/data/log_basic_heap_insert_preproc_26_itr_$itr"
+  ./docker/run-experiment -p m:26 c:26 >>  "repro/data/log_basic_heap_insert_preproc_26_itr_$itr"
+  echo "heapsize: 26\nis_optimized: 0\n" >                           "repro/data/log_basic_heap_insert_online_26_itr_$itr"
+  ./docker/run-experiment heap -m 27 -d 26 -i 1 -e 0 -opt 0 -s 0  -itr $itr  >> "repro/data/log_basic_heap_insert_online_26_itr_$itr"
+done
+
+
+echo "Optimized Insert Experiments\n"
+for itr in $(seq 1 $nitrs); do
+  echo "heapsize: 16\nis_optimized: 1\n" >                                "repro/data/log_opt_heap_insert_preproc_16_itr_$itr"
+  ./docker/run-experiment -p m:36 r6:1 i4:1 c:5 >>                        "repro/data/log_opt_heap_insert_preproc_16_itr_$itr"
+  echo "heapsize: 16\nis_optimized: 1\n" >                                "repro/data/log_opt_heap_insert_online_16_itr_$itr"
+  ./docker/run-experiment -t 64 heap -m 17 -d 16 -i 1 -e 0 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_insert_online_16_itr_$itr"
+  echo "done"	
+  echo "heapsize: 18\nis_optimized: 1\n" >                                "repro/data/log_opt_heap_insert_preproc_18_itr_$itr"
+  ./docker/run-experiment -p m:40 r6:1 i4:1 c:5  >>                       "repro/data/log_opt_heap_insert_preproc_18_itr_$itr"
+  echo "heapsize: 18\nis_optimized: 1\n" >                                "repro/data/log_opt_heap_insert_online_18_itr_$itr"
+  ./docker/run-experiment -t 64 heap -m 19 -d 18 -i 1 -e 0 -opt 1 -s 0  -itr $itr >> "repro/data/log_opt_heap_insert_online_18_itr_$itr"
+  echo "done"
+  echo "heapsize: 20\nis_optimized: 1\n" >                          "repro/data/log_opt_heap_insert_preproc_20_itr_$itr"
+  ./docker/run-experiment -p T0 m:44 r6:1 i4:1 c:5               >> "repro/data/log_opt_heap_insert_preproc_20_itr_$itr"
+  echo "heapsize: 20\nis_optimized: 1\n" >                          "repro/data/log_opt_heap_insert_online_20_itr_$itr"
+  ./docker/run-experiment heap -m 21 -d 20 -i 1 -e 0 -opt 1 -s 0  -itr $itr >> "repro/data/log_opt_heap_insert_online_20_itr_$itr"
+  echo "done"
+  echo "heapsize: 22\nis_optimized: 1\n" >             "repro/data/log_opt_heap_insert_preproc_22_itr_$itr"
+  ./docker/run-experiment -p T0 m:48 r6:1 i4:1 c:5  >> "repro/data/log_opt_heap_insert_preproc_22_itr_$itr"
+  echo "heapsize: 22\nis_optimized: 1\n" >                           "repro/data/log_opt_heap_insert_online_22_itr_$itr"
+  ./docker/run-experiment  heap -m 23 -d 22 -i 1 -e 0 -opt 1 -s 0  -itr $itr >> "repro/data/log_opt_heap_insert_online_22_itr_$itr"
+  echo "done"
+  echo "heapsize: 24\nis_optimized: 1\n" >             "repro/data/log_opt_heap_insert_preproc_24_itr_$itr"
+  ./docker/run-experiment -p T0 m:52 r6:1 i4:1 c:5  >> "repro/data/log_opt_heap_insert_preproc_24_itr_$itr"
+  echo "heapsize: 24\nis_optimized: 1\n" >                          "repro/data/log_opt_heap_insert_online_24_itr_$itr"
+  ./docker/run-experiment heap -m 25 -d 24 -i 1 -e 0 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_insert_online_24_itr_$itr"
+  echo "done"
+  echo "heapsize: 26\nis_optimized: 1\n" >             "repro/data/log_opt_heap_insert_preproc_26_itr_$itr"
+  ./docker/run-experiment -p T0 m:56 r6:1 i4:1 c:5  >> "repro/data/log_opt_heap_insert_preproc_26_itr_$itr"
+  echo "heapsize: 26\nis_optimized: 1\n" >                          "repro/data/log_opt_heap_insert_online_26_itr_$itr"
+  ./docker/run-experiment heap -m 27 -d 26 -i 1 -e 0 -opt 1 -s 0 -itr $itr >> "repro/data/log_opt_heap_insert_online_26_itr_$itr"
+  echo "done"  	
+done

+ 52 - 0
repro/generate_raw_data_large.sh

@@ -0,0 +1,52 @@
+
+echo "Running the Heap Extract Experiments (Basic)"
+cd ..
+nitrs=2
+for itr in $(seq 1 $nitrs); do
+    echo $itr
+    echo "heapsize: 28\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_preproc_28_itr_$itr"	
+    ./docker/run-experiment -p m:111 a:28 s:27 r28:12 c:56 p:128 >> "repro/data/log_basic_heap_extract_preproc_28_itr_$itr"
+    
+    for npre in $(seq 1 15); do
+	 echo "heapsize: 28\nis_optimized: 0\n" > "repro/data/log_"$npre"_basic_heap_extract_preproc_28_itr_$itr"
+    	 ./docker/run-experiment -a -p r28:10 >> "repro/data/log_"$npre"_basic_heap_extract_preproc_28_itr_$itr"
+     done 
+    echo "preprocessing_heap_28 (basic preproc) - iteration $itr"
+    echo "heapsize: 28\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_28_itr_$itr"	
+    ./docker/run-experiment heap -m 28 -d 28 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_28_itr_$itr"
+    echo "preprocessing_heap_28 (basic online) - iteration $itr"
+done    
+ 
+nitrs=2
+for itr in $(seq 1 $nitrs); do
+    echo "heapsize: 30\nis_optimized: 0\n" > 		            "repro/data/log_basic_heap_extract_preproc_30_itr_$itr"	
+    ./docker/run-experiment -p m:119 a:30 s:29 r30:17 c:60 p:128 >> "repro/data/log_basic_heap_extract_preproc_30_itr_$itr"
+    echo "preprocessing_heap_30 (basic online)"
+    for npre in $(seq 1 15); do
+    echo "heapsize: 30\nis_optimized: 0\n" >                        "repro/data/log_"$npre"_basic_heap_extract_preproc_30_itr_$itr"
+    ./docker/run-experiment -a -p r30:10  >> "repro/data/log_"$npre"_basic_heap_extract_preproc_30_itr_$itr"
+     echo "preprocessing_heap_30 (basic online)"
+	done
+    echo "heapsize: 30\nis_optimized: 0\n" > "repro/data/log_basic_heap_extract_online_30_itr_$itr"
+    ./docker/run-experiment heap -m 30 -d 30 -i 0 -e 1 -opt 0 -s 0 -itr $itr >> "repro/data/log_basic_heap_extract_online_30_itr_$itr"
+    echo "preprocessing_heap_30 (basic online)"	
+done
+
+
+echo "Running the Heap Extract Experiments (Optimized)"
+nitrs=2
+for itr in $(seq 1 $nitrs); do
+     echo "heapsize: 28\nis_optimized: 1\n" 				  >  "repro/data/log_opt_heap_extract_preproc_28_itr_$itr"	 
+     ./docker/run-experiment -p  m:111 a:28 s:27 i27.3:1 c:56 p:128 	  >> "repro/data/log_opt_heap_extract_preproc_28_itr_$itr"
+     echo "preprocessing_heap_16 (opt preproc)"
+     echo "heapsize: 28\nis_optimized: 1\n" 				     > "repro/data/log_opt_heap_extract_online_28_itr_$itr"
+     ./docker/run-experiment heap  -m 28 -d 28 -i 0 -e 1 -opt 1 -s 0 -itr $itr  >> "repro/data/log_opt_heap_extract_online_28_itr_$itr"
+     echo "preprocessing_heap_16 (opt online)"
+
+     echo "heapsize: 30\nis_optimized: 1\n" 		     	              > "repro/data/log_opt_heap_extract_preproc_30_itr_$itr"	
+     ./docker/run-experiment -p m:119 a:30 s:29 i29.3:1 c:60 p:128               >> "repro/data/log_opt_heap_extract_preproc_30_itr_$itr"
+     echo "preprocessing_heap_30 (opt preproc)"
+     echo "heapsize: 30\nis_optimized: 1\n"                                    > "repro/data/log_opt_heap_extract_online_30_itr_$itr"
+     ./docker/run-experiment heap  -m 30 -d 30 -i 0 -e 1 -opt 1 -s 0 -itr $itr  >> "repro/data/log_opt_heap_extract_online_30_itr_$itr"
+     echo "preprocessing_heap_30 (opt online)"
+  done

+ 40 - 0
repro/prac_parser.py

@@ -0,0 +1,40 @@
+import re
+import sys
+
+# Regular expressions to match the relevant log lines
+insert_stats_regex = r"===== Insert Stats =====\n(\d+) messages sent\n(\d+) message bytes sent\n(\d+) Lamport clock \(latencies\)\n(\d+) local AES operations\n(\d+) milliseconds wall clock time\n\{(\d+);(\d+);(\d+)\} nanoseconds \{real;user;system\}\nMem: (\d+) KiB"
+extract_stats_regex = r"===== Extract Min Stats =====\n(\d+) messages sent\n(\d+) message bytes sent\n(\d+) Lamport clock \(latencies\)\n(\d+) local AES operations\n(\d+) milliseconds wall clock time\n\{(\d+);(\d+);(\d+)\} nanoseconds \{real;user;system\}\nMem: (\d+) KiB"
+
+# Function to parse insert and extract stats
+def parse_logs(log_file):
+    with open(log_file, "r") as file:
+        log_data = file.read()
+
+    insert_stats = re.findall(insert_stats_regex, log_data)
+    extract_stats = re.findall(extract_stats_regex, log_data)
+
+    return insert_stats, extract_stats
+
+# Function to print stats table
+def print_stats_table(stats):
+    print("Messages Sent | Message Bytes Sent | Lamport Clock | Local AES Operations | Wall Clock Time (ms) | Real Time | User Time | System Time | Memory (KiB)")
+    print("-" * 117)
+    for stat in stats:
+        print("{:14} | {:19} | {:13} | {:21} | {:21} | {:10} | {:9} | {:12} | {:12}".format(*stat))
+    print()
+
+if len(sys.argv) != 2:
+    print("Usage: python prac_parser.py <log_file>")
+    sys.exit(1)
+
+log_file = sys.argv[1]
+
+# Parse logs and print stats table
+insert_stats, extract_stats = parse_logs(log_file)
+
+print("Insert Stats:")
+print_stats_table(insert_stats)
+
+print("Extract Min Stats:")
+print_stats_table(extract_stats)
+

+ 28 - 0
repro/repro-bs-online-mode.sh

@@ -0,0 +1,28 @@
+./run-experiment -o -t 64 bsearch 16 > basic_bs_complete_16
+echo "[Basic Binary Search for 2^16 done]"
+./run-experiment -o -t 64 bsearch 18 > basic_bs_complete_18
+echo "[Basic Binary Search for 2^18 done]"
+./run-experiment -o -t 64 bsearch 20 > basic_bs_complete_20
+echo "[Basic Binary Search for 2^20 done]"
+./run-experiment -o -t 64 bsearch 22 > basic_bs_complete_22
+echo "[Basic Binary Search for 2^22 done]"
+./run-experiment -o -t 64 bsearch 24 > basic_bs_complete_24
+echo "[Basic Binary Search for 2^24 done]"
+./run-experiment -o -t 64 bsearch 26 > basic_bs_complete_26
+echo "[Basic Binary Search for 2^26 done]"
+
+echo "basic binary search online complete"
+
+./run-experiment -o -t 64 bbsearch 16 > opt_bs_complete_16
+echo "[Opt Binary Search for 2^16 done]"
+./run-experiment -o -t 64 bbsearch 18 > opt_bs_complete_18
+echo "[Opt Binary Search for 2^18 done]"
+./run-experiment -o -t 64 bbsearch 20 > opt_bs_complete_20
+echo "[Opt Binary Search for 2^20 done]"
+./run-experiment -o -t 64 bbsearch 22 > opt_bs_complete_22
+echo "[Opt Binary Search for 2^22 done]"
+./run-experiment -o -t 64 bbsearch 24 > opt_bs_complete_24
+echo "[Opt Binary Search for 2^24 done]"
+./run-experiment -o -t 64 bbsearch 26 > opt_bs_complete_26
+echo "[Opt Binary Search for 2^26 done]"
+

+ 50 - 0
repro/repro-bs.sh

@@ -0,0 +1,50 @@
+./run-experiment -p m:17 r17:17 c:17 p:64 > basic_bs_preproc_16
+./run-experiment -t 64 bsearch 16 > basic_bs_online_16
+echo "[Basic Binary Search for 2^16 done]"
+
+./run-experiment -p m:19 r19:19 c:19 p:64 > basic_bs_preproc_18
+./run-experiment -t 64 bsearch 18 > basic_bs_online_18
+echo "[Basic Binary Search for 2^18 done]"
+
+./run-experiment -p m:21 r21:21 c:21 p:64 > basic_bs_preproc_20
+./run-experiment -t 64 bsearch 20 > basic_bs_online_20
+echo "[Basic Binary Search for 2^20 done]"
+
+./run-experiment -p m:23 r23:23 c:23 p:64 > basic_bs_preproc_22
+./run-experiment -t 64 bsearch 22 > basic_bs_online_22
+echo "[Basic Binary Search for 2^22 done]"
+
+./run-experiment -p m:25 r25:25 c:25 p:64 > basic_bs_preproc_24
+./run-experiment -t 64 bsearch 24 > basic_bs_online_24
+echo "[Basic Binary Search for 2^24 done]"
+
+./run-experiment -p m:27 r27:27 c:27 p:64 > basic_bs_preproc_26
+./run-experiment -t 64 bsearch 26 > basic_bs_online_26
+echo "[Basic Binary Search for 2^26 done]"
+
+echo "basic binary search online complete"
+
+./run-experiment -p  i16:1 c:17 p:64 > opt_bs_preproc_16
+./run-experiment -t 64 bbsearch 16 > opt_bs_online_16
+echo "[Opt Binary Search for 2^16 done]"
+
+./run-experiment -p  i18:1 c:19 p:64 > opt_bs_preproc_18
+./run-experiment -t 64 bbsearch 18 > opt_bs_online_18
+echo "[Opt Binary Search for 2^18 done]"
+
+./run-experiment -p  i20:1 c:21 p:64 > opt_bs_preproc_20
+./run-experiment -t 64 bbsearch 20 > opt_bs_online_20
+echo "[Opt Binary Search for 2^20 done]"
+
+./run-experiment -p  i22:1 c:23 p:64 > opt_bs_preproc_22
+./run-experiment -t 64 bbsearch 22 > opt_bs_online_22
+echo "[Opt Binary Search for 2^22 done]"
+
+./run-experiment -p  i24:1 c:25 p:64 > opt_bs_preproc_24
+./run-experiment -t 64 bbsearch 24 > opt_bs_online_24
+echo "[Opt Binary Search for 2^24 done]"
+
+./run-experiment -p  i26:1 c:27 p:64 > opt_bs_preproc_26
+./run-experiment -t 64 bbsearch 26 > opt_bs_online_26
+echo "[Opt Binary Search for 2^26 done]"
+

+ 79 - 0
repro/repro-heap-extract.sh

@@ -0,0 +1,79 @@
+   ./run-experiment -p m:63 a:16 s:15 r16:90 c:32 p:128 > basic_heap_extract_preproc_16
+   echo "preprocessing_heap_16 (basic preproc)"
+   
+   ./run-experiment heap -m 16 -d 16 -i 0 -e 1 -opt 0 -s 0 > basic_heap_extract_online_16
+   echo "preprocessing_heap_16 (basic online)"
+ 
+
+   echo "preprocessing_heap_16 (opt online)"
+
+
+   ./run-experiment -p m:71 a:18 s:17 r18:102 c:36 p:128 > basic_heap_extract_preproc_18
+   echo "preprocessing_heap_18 (basic preproc)"
+   
+   ./run-experiment heap -m 18 -d 18 -i 0 -e 1 -opt 0 -s 0 > basic_heap_extract_online_18
+   echo "preprocessing_heap_18 (basic online)"
+
+   ./run-experiment -p m:80 a:20 s:19 r20:114 c:40 p:128 > basic_heap_extract_preproc_20
+   echo "preprocessing_heap_20 (basic preproc)"
+   
+   ./run-experiment -t 16 heap -m 20 -d 20 -i 0 -e 1 -opt 0 -s 0 > basic_heap_extract_online_20
+   echo "preprocessing_heap_20 (basic online)"
+
+
+   echo "preprocessing_heap_20 (opt online)"
+
+   ./run-experiment -p m:87 a:22 s:21 r22:126 c:44 p:128 > basic_heap_extract_preproc_22
+   echo "preprocessing_heap_22 (basic preproc)"
+ 
+   ./run-experiment -t 16 heap -m 22 -d 22 -i 0 -e 1 -opt 0 -s 0> basic_heap_extract_online_22
+   echo "preprocessing_heap_22 (basic online)"
+ 
+
+   echo "preprocessing_heap_22 (opt online)"
+
+  ./run-experiment -p m:95 a:24 s:23 r24:138 c:48 p:128 > basic_heap_extract_preproc_24
+  echo "preprocessing_heap_24 (basic preproc)"
+ 
+  ./run-experiment -t 64 heap -m 24 -d 24 -i 0 -e 1 -opt 0 -s 0> basic_heap_extract_online_24
+  echo "preprocessing_heap_24 (basic online)"
+ 
+
+  echo "preprocessing_heap_24 (opt online)"
+
+  ./run-experiment -p m:103 a:26 s:25 r26:150 c:52 p:128 > basic_heap_extract_preproc_26
+  echo "preprocessing_heap_26 (basic preproc)"
+ ./run-experiment -t 64 heap -m 26 -d 26 -i 0 -e 1 -opt 0 -s 0 > basic_heap_extract_online_26
+  echo "preprocessing_heap_26 (basic online)"
+ 
+  ./run-experiment -p m:103 a:26 s:25 i25.3:1 c:52 p:128 > opt_heap_extract_preproc_26
+
+
+
+   ./run-experiment -p  m:63 a:16 s:15  i15.3:1 c:32 p:128 > opt_heap_extract_preproc_16
+   echo "preprocessing_heap_16 (opt preproc)"
+   
+   ./run-experiment heap -m 16 -d 16 -i 0 -e 1 -opt 1 -s 0 > opt_heap_extract_online_16
+
+
+      ./run-experiment -p m:71 a:18 s:17 i17.3:1 c:36 p:128 > opt_heap_extract_preproc_18
+   echo "preprocessing_heap_18 (opt preproc)"
+   
+   ./run-experiment heap -m 18 -d 18 -i 0 -e 1 -opt 1 -s 0 > opt_heap_extract_online_18
+   echo "preprocessing_heap_18 (opt online)"
+
+      ./run-experiment -p m:80 a:20 s:19 i19.3:1 c:40 p:128 > opt_heap_extract_preproc_20
+   echo "preprocessing_heap_20 (opt preproc)"
+ 
+   ./run-experiment heap -m 20 -d 20 -i 0 -e 1 -opt 1 -s 0 > opt_heap_extract_online_20
+   ./run-experiment -p m:87 a:22 s:21 i21.3:1 c:44 p:128  > opt_heap_extract_preproc_22
+   echo "preprocessing_heap_22 (opt preproc)"
+ 
+   ./run-experiment -t 16 heap -m 22 -d 22 -i 0 -e 1 -opt 1 -s 0 > opt_heap_extract_online_22
+  ./run-experiment -p m:95 a:24 s:23 i23.3:1 c:48 p:128 > opt_heap_extract_preproc_24
+  echo "preprocessing_heap_24 (opt preproc)"
+ 
+  ./run-experiment -t 32 heap -m 24 -d 24 -i 0 -e 1 -opt 1 -s 0 > opt_heap_extract_online_24
+  echo "preprocessing_heap_26 (opt preproc)"
+ ./run-experiment -t 64 heap -m 26 -d 26 -i 0 -e 1 -opt 1 -s 0 > opt_heap_extract_online_26
+  echo "preprocessing_heap_26 (opt online)"

+ 51 - 0
repro/repro-heap-insert.sh

@@ -0,0 +1,51 @@
+  ./run-experiment -p m:16 c:16 > basic_heap_insert_preproc_16
+  ./run-experiment -t 64 heap -m 17 -d 16 -i 1 -e 0 -opt 0 -s 0 > basic_heap_insert_online_16
+  echo "basic heap insert for 2^16"
+ 
+  ./run-experiment -p m:18 c:18 > basic_heap_insert_preproc_18
+  ./run-experiment -t 64 heap -m 19 -d 18 -i 1 -e 0 -opt 0 -s 0 > basic_heap_insert_online_18
+  echo "basic heap insert for 2^18"
+ 
+  ./run-experiment -p  m:20 c:20 > basic_heap_insert_preproc_20
+  ./run-experiment heap -m 21 -d 20 -i 1 -e 0 -opt 0 -s 0 > basic_heap_insert_online_20
+  echo "basic heap insert for 2^20"
+
+ 
+  ./run-experiment -p m:22 c:22 > basic_heap_insert_basic_preproc_22
+  ./run-experiment  heap -m 23 -d 22 -i 1 -e 0 -opt 0 -s 0 > basic_heap_insert_online_22
+  echo "basic heap insert for 2^22"
+
+  ./run-experiment -p m:24 c:24 > basic_heap_insert_preproc_24
+  ./run-experiment heap -m 25 -d 24 -i 1 -e 0 -opt 0 -s 0 > basic_heap_insert_online_24
+  echo "basic heap insert for 2^24"
+
+ 
+  ./run-experiment -p m:26 c:26 > basic_heap_insert_preproc_26
+  ./run-experiment heap -m 27 -d 26 -i 1 -e 0 -opt 0 -s 0> basic_heap_insert_online_26
+  echo "basic heap insert for 2^26"
+
+
+
+  ./run-experiment -p T0 m:32 r6:1 i4:1 c:5  > opt_heap_insert_preproc_16
+  ./run-experiment heap -m 17 -d 16 -i 1 -e 0 -opt 1 -s 0 > opt_heap_insert_online_16
+  echo "basic heap insert for 2^16"
+
+  ./run-experiment -p T0 m:36 r6:1 i4:1 c:5  > opt_heap_insert_preproc_18
+  ./run-experiment heap -m 19 -d 18 -i 1 -e 0 -opt 1 -s 0 > opt_heap_insert_online_18
+  echo "basic heap insert for 2^18"
+
+  ./run-experiment -p T0 m:40 r6:1 i4:1 c:5  > opt_heap_insert_preproc_20
+  ./run-experiment heap -m 21 -d 20 -i 1 -e 0 -opt 1 -s 0 > opt_heap_insert_online_20
+  echo "basic heap insert for 2^20"
+
+  ./run-experiment -p T0 m:44 r6:1 i4:1 c:5  > opt_heap_insert_preproc_22
+  ./run-experiment heap -m 23 -d 22 -i 1 -e 0 -opt 1 -s 0 > opt_heap_insert_online_22
+   echo "basic heap insert for 2^22"	
+
+  ./run-experiment -p T0 m:48 r6:1 i4:1 c:5  > opt_heap_insert_preproc_24
+  ./run-experiment heap -m 25 -d 24 -i 1 -e 0 -opt 1 -s 0 > opt_heap_insert_online_24
+  echo "basic heap insert for 2^24"
+
+  ./run-experiment -p T0 m:52 r6:1 i4:1 c:5  > opt_heap_insert_preproc_26
+  ./run-experiment heap -m 27 -d 26 -i 1 -e 0 -opt 1 -s 0 > opt_heap_insert_online_26
+  echo "basic heap insert for 2^26"

+ 150 - 0
repro/reproduce_plots

@@ -0,0 +1,150 @@
+#!/bin/bash
+
+# Replace "python3" with your Python interpreter if needed
+DS="Heap" 
+Operation="Extract" 
+cd ..
+
+rm -f "repro/experimentaldata_preproc_wallclock"  "repro/experimentaldata_wallclock"
+touch "repro/experimentaldata_preproc_wallclock"  "repro/experimentaldata_wallclock"
+nitrs=10
+minsize=16
+maxsize=30
+stepsize=2
+for itr in $(seq 1 $nitrs); do
+ for heapsize in $(seq $minsize $stepsize $maxsize); do
+ 	input_file="repro/data/log_basic_heap_extract_online_${heapsize}_itr_${itr}"
+	echo $input_file
+ 	output=$(python3 repro/extract_data.py "$input_file")
+
+ 	# Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+	 # Store each value in different variables
+	 heapsize=${values[0]}  
+ 	 optimization_flag=${values[1]} 
+	 milliseconds=${values[4]}
+         #seconds=$((milliseconds / 1000))
+	 seconds=$(echo "scale=3; $milliseconds / 1000" | bc)  # Use bc for decimal division
+	 extract_wc=$seconds
+ 
+	 echo $optimization_flag 
+	 echo $heapsize 
+	 echo $milliseconds
+	 echo "$extract_wc"
+
+	 datafile="repro/experimentaldata_wallclock"
+ 	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $extract_wc
+
+ 	 input_file="repro/data/log_opt_heap_extract_online_${heapsize}_itr_${itr}"
+ 	 echo $input_file
+ 	 output=$(python3 repro/extract_data.py "$input_file")
+
+	 # Read the output into an array
+ 	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+ 	# Store each value in different variables
+	 milliseconds=${values[4]}
+	 seconds=$(echo "scale=3; $milliseconds / 1000" | bc)  # Use bc for decimal division
+	 heapsize=${values[0]}  optimization_flag=${values[1]} extract_wc=$seconds
+ 	 datafile="repro/experimentaldata_wallclock"
+ 	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $extract_wc
+ done	
+done
+echo "OnlineTable"
+cat $datafile
+echo -e "\n\n"
+
+nitrs=2
+for itr in $(seq 1 $nitrs); do
+  for heapsize in $(seq $minsize $stepsize $maxsize); do
+	input_file="repro/data/log_basic_heap_extract_preproc_${heapsize}_itr_$itr"
+  	output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+
+  	# Read the output into an array
+  	IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+  	# Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]} milliseconds=${values[2]} 
+	seconds=$(echo "scale=3; $milliseconds / 1000" | bc)  # Use bc for decimal division
+	preproc_wc=$seconds
+	datafile="repro/experimentaldata_preproc_wallclock"
+  	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+
+  	input_file="repro/data/log_opt_heap_extract_preproc_${heapsize}_itr_$itr"
+  	output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+
+ 	 # Read the output into an array
+  	IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+  	# Store each value in different variables
+  	heapsize=${values[0]}  optimization_flag=${values[1]} milliseconds=${values[2]}
+	seconds=$(echo "scale=3; $milliseconds / 1000" | bc)  # Use bc for decimal division
+        preproc_wc=$seconds
+  	datafile="repro/experimentaldata_preproc_wallclock"
+  	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+  done
+done
+echo "Preproc Table"
+cat $datafile
+
+
+
+echo -e "\n\n"
+nitrs=0
+# Replace "python3" with your Python interpreter if needed
+DS="Heap"
+Operation="Insert"
+
+for itr in $(seq 1 $nitrs); do
+  for heapsize in $(seq $minsize $stepsize $maxsize); do
+	 input_file="repro/data/log_basic_heap_insert_online_${heapsize}_itr_$itr"
+	 output=$(python3 repro/extract_data.py "$input_file")
+
+	 # Read the output into an array
+ 	IFS=$'\n' read -d '' -r -a values <<< "$output"
+	 # Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]}  insert_wc=${values[2]}
+	datafile="repro/experimentaldata_wallclock"
+ 	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $insert_wc
+ 	input_file="repro/data/log_opt_heap_insert_online_${heapsize}_itr_$itr"
+ 	output=$(python3 repro/extract_data.py "$input_file")
+
+	 # Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+ 	# Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]}  insert_wc=${values[2]}
+ 	datafile="repro/experimentaldata_wallclock"
+	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $insert_wc 
+  done
+done
+
+echo "Online Table"
+cat $datafile
+echo -e "\n\n"
+
+for itr in $(seq 1 $nitrs); do
+    for heapsize in $(seq $minsize $stepsize $maxsize); do	
+	 
+	input_file="repro/data/log_basic_heap_insert_preproc_${heapsize}_itr_$itr"
+ 	output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+ 	# Read the output into an array
+ 	IFS=$'\n' read -d '' -r -a values <<< "$output"
+ 	# Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]}
+ 	datafile="repro/experimentaldata_preproc_wallclock"
+ 	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+
+ 	input_file="repro/data/log_opt_heap_insert_preproc_${heapsize}_itr_$itr"
+	 output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+	 # Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+	 # Store each value in different variables
+	 heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]}
+	 datafile="repro/experimentaldata_preproc_wallclock"
+	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+   done
+done
+echo "Preproc Table"
+cat $datafile
+

+ 128 - 0
repro/reproduce_plots2

@@ -0,0 +1,128 @@
+#!/bin/bash
+
+# Replace "python3" with your Python interpreter if needed
+DS="Heap" 
+Operation="Extract" 
+cd ..
+
+
+
+
+for itr in $(seq 1 1); do
+ for heapsize in $(seq 16 2 26); do
+ echo $heapsize
+ input_file="repro/data/log_basic_heap_extract_online_${heapsize}_itr_${itr}"
+ output=$(python3 repro/extract_data.py "$input_file")
+
+ # Read the output into an array
+ IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+ # Store each value in different variables
+ heapsize=${values[0]}  
+ optimization_flag=${values[1]} 
+ extract_wc=${values[4]} 
+ 
+ datafile="repro/experimentaldata_wallclock"
+ python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $extract_wc
+
+ input_file="repro/data/log_opt_heap_extract_online_${heapsize}_itr_${itr}"
+ echo $input_file
+ output=$(python3 repro/extract_data.py "$input_file")
+
+ # Read the output into an array
+ IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+ # Store each value in different variables
+ heapsize=${values[0]}  optimization_flag=${values[1]} extract_wc=${values[4]}
+ datafile="repro/experimentaldata_wallclock"
+ python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $extract_wc
+ done	
+done
+
+cat $datafile
+echo -e "\n\n"
+
+for itr in $(seq 1 1); do
+  for heapsize in $(seq 16 2 26); do
+  input_file="repro/data/log_basic_heap_extract_preproc_${heapsize}_itr_$itr"
+  output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+
+  # Read the output into an array
+  IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+  # Store each value in different variables
+  heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]} 
+  datafile="repro/experimentaldata_preproc_wallclock"
+  python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+
+  input_file="repro/data/log_opt_heap_extract_preproc_${heapsize}_itr_$itr"
+  output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+
+  # Read the output into an array
+  IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+  # Store each value in different variables
+  heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]}
+  datafile="repro/experimentaldata_preproc_wallclock"
+  python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+ done
+done
+
+
+cat $datafile
+echo -e "\n\n"
+
+# Replace "python3" with your Python interpreter if needed
+DS="Heap"
+Operation="Insert"
+
+for itr in $(seq 1 1); do
+  for heapsize in $(seq 16 2 26); do
+	 input_file="repro/data/log_basic_heap_insert_online_${heapsize}_itr_$itr"
+	 output=$(python3 repro/extract_data.py "$input_file")
+
+	 # Read the output into an array
+ 	IFS=$'\n' read -d '' -r -a values <<< "$output"
+	 # Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]}  insert_wc=${values[2]}
+	datafile="repro/experimentaldata_wallclock"
+ 	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $insert_wc
+ 	input_file="repro/data/log_opt_heap_insert_online_${heapsize}_itr_$itr"
+ 	output=$(python3 repro/extract_data.py "$input_file")
+
+	 # Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+ 	# Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]}  insert_wc=${values[2]}
+ 	datafile="repro/experimentaldata_wallclock"
+	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $insert_wc 
+  done
+done
+
+cat $datafile
+echo -e "\n---------\n"
+
+for itr in $(seq 1 1); do
+    for heapsize in $(seq 16 2 26); do	
+	echo ${heapsize} 
+	input_file="repro/data/log_basic_heap_insert_preproc_${heapsize}_itr_$itr"
+ 	output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+ 	# Read the output into an array
+ 	IFS=$'\n' read -d '' -r -a values <<< "$output"
+ 	# Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]}
+ 	datafile="repro/experimentaldata_preproc_wallclock"
+ 	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+
+ 	input_file="repro/data/log_opt_heap_insert_preproc_${heapsize}_itr_$itr"
+	 output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+	 # Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+	 # Store each value in different variables
+	 heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]}
+	 datafile="repro/experimentaldata_preproc_wallclock"
+	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+   done
+done
+cat $datafile
+

+ 77 - 0
repro/reproduce_plots_bs

@@ -0,0 +1,77 @@
+#!/bin/bash
+
+# Replace "python3" with your Python interpreter if needed
+DS="BS" 
+Operation="Search" 
+cd ..
+touch "repro/experimentaldata_bs_preproc_wallclock"  "repro/experimentaldata_bs_wallclock"
+rm "repro/experimentaldata_bs_preproc_wallclock"  "repro/experimentaldata_bs_wallclock"
+touch "repro/experimentaldata_bs_preproc_wallclock"  "repro/experimentaldata_bs_wallclock"
+nitrs=2
+minsize=16
+maxsize=24
+stepsize=2
+for itr in $(seq 1 $nitrs); do
+ for bssize in $(seq $minsize $stepsize $maxsize); do
+ 	input_file="repro/data/log_basic_bs_online_${bssize}_itr_${itr}"
+	echo $input_file
+ 	output=$(python3 repro/extract_data_bs.py "$input_file")
+
+ 	# Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+	 # Store each value in different variables
+	 bssize=$bssize  
+ 	 optimization_flag=0 
+	 extract_wc=${values[1]} 
+ 
+	 datafile="repro/experimentaldata_bs_wallclock"
+ 	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $bssize $extract_wc
+
+ 	 input_file="repro/data/log_opt_bs_online_${bssize}_itr_${itr}"
+ 	 echo $input_file
+ 	 output=$(python3 repro/extract_data_bs.py "$input_file")
+
+	 # Read the output into an array
+ 	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+ 	# Store each value in different variables
+	 bssize=$bssize  optimization_flag=1 extract_wc=${values[1]}
+ 	 datafile="repro/experimentaldata_bs_wallclock"
+ 	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $bssize $extract_wc
+ done	
+done
+echo "OnlineTable"
+cat $datafile
+echo -e "\n\n"
+
+for itr in $(seq 1 $nitrs); do
+  for bssize in $(seq $minsize $stepsize $maxsize); do
+	input_file="repro/data/log_basic_bs_preproc_${bssize}_itr_$itr"
+  	output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+
+  	# Read the output into an array
+  	IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+  	# Store each value in different variables
+ 	bssize=$bssize  optimization_flag=0 preproc_wc=${values[2]} 
+  	datafile="repro/experimentaldata_bs_preproc_wallclock"
+  	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $bssize $preproc_wc
+
+  	input_file="repro/data/log_opt_bs_preproc_${bssize}_itr_$itr"
+  	output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+
+ 	 # Read the output into an array
+  	IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+  	# Store each value in different variables
+  	bssize=$bssize  optimization_flag=1 preproc_wc=${values[2]}
+  	datafile="repro/experimentaldata_bs_preproc_wallclock"
+  	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $bssize $preproc_wc
+  done
+done
+
+echo "PrprocTable"
+cat $datafile
+echo -e "\n\n"
+

+ 159 - 0
repro/reproduce_plots_bw

@@ -0,0 +1,159 @@
+#!/bin/bash
+
+# Replace "python3" with your Python interpreter if needed
+DS="Heap" 
+Operation="Extract" 
+cd ..
+
+touch "repro/experimentaldata_preproc_bw"  "repro/experimentaldata_bw"
+rm "repro/experimentaldata_preproc_bw"  "repro/experimentaldata_bw"
+touch "repro/experimentaldata_preproc_bw"  "repro/experimentaldata_bw"
+nitrs=2
+minsize=16
+maxsize=26
+stepsize=2
+for itr in $(seq 1 $nitrs); do
+ for heapsize in $(seq $minsize $stepsize $maxsize); do
+ 	input_file="repro/data/log_basic_heap_extract_online_${heapsize}_itr_${itr}"
+	echo $input_file
+ 	output=$(python3 repro/extract_data.py "$input_file")
+
+ 	# Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+	 # Store each value in different variables
+	 echo ${values[0]}
+	 echo ${values[1]}
+	 echo ${values[2]}
+	 echo ${values[3]}
+	 echo ${values[4]}
+         echo ${values[5]}
+	 echo "------"	 
+	 heapsize=${values[0]}  
+ 	 optimization_flag=${values[1]} 
+	 bytes=${values[5]}
+	 #seconds=$((milliseconds / 1000))
+	 KiB=$(echo "scale=3; $bytes / 1024" | bc)  # Use bc for decimal division
+	 bw=$KiB
+
+	 datafile="repro/experimentaldata_bw"
+ 	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $bw
+
+ 	 input_file="repro/data/log_opt_heap_extract_online_${heapsize}_itr_${itr}"
+ 	 echo $input_file
+ 	 output=$(python3 repro/extract_data.py "$input_file")
+
+	 # Read the output into an array
+ 	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+ 	# Store each value in different variables
+	 bytes=${values[5]}
+	 KiB=$(echo "scale=3; $bytes / 1024" | bc)  # Use bc for decimal division
+	 heapsize=${values[0]}  optimization_flag=${values[1]} bw=$KiB
+ 	 datafile="repro/experimentaldata_bw"
+ 	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $bw
+ done	
+done
+echo "OnlineTable"
+cat $datafile
+echo -e "\n\n"
+
+nitrs=2
+for itr in $(seq 1 $nitrs); do
+  for heapsize in $(seq $minsize $stepsize $maxsize); do
+	input_file="repro/data/log_basic_heap_extract_preproc_${heapsize}_itr_$itr"
+  	output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+
+  	# Read the output into an array
+  	IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+  	# Store each value in different variables
+ 	 heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]} 
+  	 bytes=${values[3]}
+         KiB=$(echo "scale=3; $bytes / 1024" | bc)  # Use bc for decimal division
+	 bw=$KiB
+	 echo ${values[0]}
+         echo ${values[1]}
+         echo ${values[2]}
+         echo ${values[3]}
+         echo "-------" 	 
+	 datafile="repro/experimentaldata_preproc_bw"
+  	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $bw
+
+  	 input_file="repro/data/log_opt_heap_extract_preproc_${heapsize}_itr_$itr"
+  	 output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+
+ 	 # Read the output into an array
+  	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+
+  	 # Store each value in different variables
+  	 heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]}
+	 bytes=${values[3]}
+         KiB=$(echo "scale=3; $bytes / 1024" | bc)  # Use bc for decimal division
+         bw=$KiB
+
+  	 datafile="repro/experimentaldata_preproc_bw"
+  	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $bw
+  done
+done
+
+echo "Preproc Table"
+cat $datafile
+echo -e "\n\n"
+nitrs=0
+# Replace "python3" with your Python interpreter if needed
+DS="Heap"
+Operation="Insert"
+
+for itr in $(seq 1 $nitrs); do
+  for heapsize in $(seq $minsize $stepsize $maxsize); do
+	 input_file="repro/data/log_basic_heap_insert_online_${heapsize}_itr_$itr"
+	 output=$(python3 repro/extract_data.py "$input_file")
+
+	 # Read the output into an array
+ 	IFS=$'\n' read -d '' -r -a values <<< "$output"
+	 # Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]}  insert_wc=${values[2]}
+	datafile="repro/experimentaldata_bw"
+ 	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $insert_wc
+ 	input_file="repro/data/log_opt_heap_insert_online_${heapsize}_itr_$itr"
+ 	output=$(python3 repro/extract_data.py "$input_file")
+
+	 # Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+ 	# Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]}  insert_wc=${values[2]}
+ 	datafile="repro/experimentaldata_bw"
+	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $insert_wc 
+  done
+done
+
+echo "Online Table"
+cat $datafile
+echo -e "\n\n"
+
+for itr in $(seq 1 $nitrs); do
+    for heapsize in $(seq $minsize $stepsize $maxsize); do	
+	 
+	input_file="repro/data/log_basic_heap_insert_preproc_${heapsize}_itr_$itr"
+ 	output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+ 	# Read the output into an array
+ 	IFS=$'\n' read -d '' -r -a values <<< "$output"
+ 	# Store each value in different variables
+ 	heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]}
+ 	datafile="repro/experimentaldata_preproc_bw"
+ 	python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+
+ 	input_file="repro/data/log_opt_heap_insert_preproc_${heapsize}_itr_$itr"
+	 output=$(python3 repro/extract_data_from_preproc.py "$input_file")
+	 # Read the output into an array
+	 IFS=$'\n' read -d '' -r -a values <<< "$output"
+	 # Store each value in different variables
+	 heapsize=${values[0]}  optimization_flag=${values[1]} preproc_wc=${values[2]}
+	 datafile="repro/experimentaldata_preproc_bw"
+	 python3 repro/append-experiment-results.py $datafile $DS $Operation $optimization_flag $heapsize $preproc_wc
+   done
+done
+echo "Preproc Table"
+cat $datafile
+

+ 52 - 0
repro/sum_preproc_online

@@ -0,0 +1,52 @@
+#!/bin/bash
+
+if [ $# -ne 2 ]; then
+    echo "Usage: $0 <file1> <file2>"
+    exit 1
+fi
+
+file1="$1"
+file2="$2"
+
+if [ ! -f "$file1" ] || [ ! -f "$file2" ]; then
+    echo "Error: Both input files must exist."
+    exit 1
+fi
+
+data1=()
+data2=()
+
+while IFS= read -r line; do
+    if [[ "$line" == "BS Search"* ]]; then
+        parts=($line)
+        y="${parts[4]}"
+        y_err="${parts[6]#±}"
+        data1+=("$y,$y_err")
+    fi
+done < "$file1"
+
+while IFS= read -r line; do
+    if [[ "$line" == "BS Search"* ]]; then
+        parts=($line)
+        y="${parts[4]}"
+        y_err="${parts[6]#±}"
+        data2+=("$y,$y_err")
+    fi
+done < "$file2"
+
+if [ ${#data1[@]} -ne ${#data2[@]} ]; then
+    echo "Error: The two files do not have the same number of data points."
+    exit 1
+fi
+
+echo "Data point y_sum y_err_sum"
+for i in "${!data1[@]}"; do
+    IFS=',' read -ra data1_parts <<< "${data1[i]}"
+    IFS=',' read -ra data2_parts <<< "${data2[i]}"
+    
+    y_sum=$(echo "${data1_parts[0]} + ${data2_parts[0]}" | bc -l)
+    y_err_sum=$(echo "scale=4; sqrt(${data1_parts[1]}^2 + ${data2_parts[1]}^2)" | bc -l)
+    
+    echo "$((i+1)) $y_sum $y_err_sum"
+done
+