
Complete sparta implementation.

Kyle Fredrickson committed 1 year ago
parent commit 83fda8df10
5 changed files with 259 additions and 271 deletions
  1. otils (+1 -1)
  2. sparta/src/load_balancer.rs (+143 -141)
  3. sparta/src/main.rs (+10 -7)
  4. sparta/src/omap.rs (+30 -29)
  5. sparta/src/record.rs (+75 -93)

otils (+1 -1)

@@ -1 +1 @@
-Subproject commit 90685fc013a12244824499fabb13d8d2eba187e4
+Subproject commit 70bf6686dc29da3966727847499480d2986fa33e

sparta/src/load_balancer.rs (+143 -141)

@@ -1,41 +1,41 @@
-pub use crate::record::{Record, SubmapRequest};
-use crate::ObliviousMap;
+use crate::omap::ObliviousMap;
+pub use crate::record::{IndexRecord, Record, RecordType, SubmapRecord};
 use fastapprox::fast;
 use otils::{self, ObliviousOps};
 use std::{cmp, f64::consts::E};
 
 const LAMBDA: usize = 128;
 
-pub struct BalanceRecord(Record);
+// pub struct BalanceRecord(Record);
 
-impl BalanceRecord {}
+// impl BalanceRecord {}
 
-impl PartialEq for BalanceRecord {
-    fn eq(&self, other: &Self) -> bool {}
-}
+// impl PartialEq for BalanceRecord {
+//     fn eq(&self, other: &Self) -> bool {}
+// }
 
-impl PartialOrd for BalanceRecord {
-    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {}
-}
+// impl PartialOrd for BalanceRecord {
+//     fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {}
+// }
 
 pub struct LoadBalancer {
     num_users: i64,
     num_threads: usize,
     num_submaps: usize,
 
-    pub user_store: Vec<Record>,
-    pub omaps: Vec<ObliviousMap>,
+    pub user_store: Vec<IndexRecord>,
+    pub submaps: Vec<ObliviousMap>,
 }
 
 impl LoadBalancer {
     pub fn new(num_users: i64, num_threads: usize, num_submaps: usize) -> Self {
         let mut user_store = Vec::new();
         user_store.reserve(num_users as usize);
-        user_store.extend((0..num_users).map(|i| Record::new(i)));
+        user_store.extend((0..num_users).map(|i| IndexRecord::new(i, RecordType::User)));
 
-        let mut omaps = Vec::new();
-        omaps.reserve(num_submaps as usize);
-        omaps.extend(
+        let mut submaps = Vec::new();
+        submaps.reserve(num_submaps as usize);
+        submaps.extend(
             (0..num_submaps).map(|_| ObliviousMap::new(num_threads / num_submaps as usize)),
         );
 
@@ -44,7 +44,7 @@ impl LoadBalancer {
             num_threads,
             num_submaps,
             user_store,
-            omaps,
+            submaps,
         }
     }
 
@@ -58,86 +58,87 @@ impl LoadBalancer {
             .ceil() as usize
     }
 
-    fn update_with_sends(&mut self, sends: Vec<Record>) {
-        let mut size = (self.user_store.len() + sends.len()).next_power_of_two();
-        size -= self.user_store.len();
-        self.user_store.reserve(size);
-        size -= sends.len();
-
-        self.user_store.extend(sends);
+    pub fn pad_for_submap(
+        &self,
+        mut requests: Vec<SubmapRecord>,
+        submap_size: usize,
+        is_send: bool,
+    ) -> Vec<SubmapRecord> {
+        requests.reserve(self.num_submaps * submap_size);
 
-        self.user_store.extend((0..size).map(|_| Record::max()));
+        for submap in 0..self.num_submaps {
+            if is_send {
+                requests.extend(SubmapRecord::dummy_send(submap_size, submap as u8));
+            } else {
+                requests.extend(SubmapRecord::dummy_fetch(submap_size, submap as u8));
+            }
+        }
+        requests
     }
 
-    fn update_with_fetches(&mut self, fetches: Vec<Record>, num_fetches: usize) {
-        let mut size = (self.user_store.len() + num_fetches).next_power_of_two();
-        size -= self.user_store.len();
-        self.user_store.reserve(size);
+    pub fn get_submap_requests(
+        &self,
+        requests: Vec<IndexRecord>,
+        submap_size: usize,
+        is_send: bool,
+    ) -> Vec<SubmapRecord> {
+        let requests = requests.into_iter().map(|r| SubmapRecord(r.0)).collect();
+        let mut requests = self.pad_for_submap(requests, submap_size, is_send);
 
-        size -= num_fetches;
-        for fetch in fetches.into_iter() {
-            self.user_store.extend(fetch.dummies());
+        requests = otils::sort(requests, self.num_threads); // sort by omap, then by dummy
+
+        let mut prev_map = self.num_submaps;
+        let mut remaining_marks = submap_size as i32;
+        for request in requests.iter_mut() {
+            let submap = request.0.map as u32;
+            remaining_marks = i32::oselect(
+                submap != prev_map as u32,
+                submap_size as i32,
+                remaining_marks,
+            );
+            request.0.mark = u16::oselect(remaining_marks > 0, 1, 0);
+            remaining_marks += i32::oselect(remaining_marks > 0, -1, 0);
+            prev_map = submap as usize;
         }
 
-        self.user_store.extend((0..size).map(|_| Record::max()));
+        otils::compact(&mut requests[..], |r| r.0.mark == 1, self.num_threads);
+        requests
     }
 
-    fn construct_send_indices(&mut self) {
+    fn propagate_send_indices(&mut self) {
         let mut idx: u32 = 0;
         let mut is_same_u: bool;
 
         let mut user_store_iter = self.user_store.iter_mut().peekable();
         while let Some(record) = user_store_iter.next() {
-            let is_user_store = record.is_user_store();
+            let is_user_store = record.0.is_user_store();
 
             idx = u32::oselect(
                 is_user_store,
-                cmp::max(record.last_fetch, record.last_send),
+                cmp::max(record.0.last_fetch, record.0.last_send),
                 idx + 1,
             );
 
-            record.idx = u32::oselect(is_user_store, 0, record.get_idx(idx));
-            record.map = (record.idx % self.num_submaps) as u8;
-            record.last_send = idx;
+            record.0.idx = u32::oselect(is_user_store, 0, record.get_idx(idx));
+            record.0.map = (record.0.idx % (self.num_submaps as u32)) as u8;
+            record.0.last_send = idx;
 
             if let Some(next_record) = user_store_iter.peek() {
-                is_same_u = record.uid == next_record.uid;
+                is_same_u = record.0.uid == next_record.0.uid;
             } else {
                 is_same_u = false;
             }
-            record.mark = u16::oselect(is_same_u, 0, 1);
+            record.0.mark = u16::oselect(is_same_u, 0, 1);
         }
     }
 
-    fn construct_fetch_indices(&mut self) {
-        let mut idx: u32 = 0;
-        let mut is_same_u: bool;
-
-        let mut user_store_iter = self.user_store.iter_mut().peekable();
-        while let Some(record) = user_store_iter.next() {
-            let is_user_store = record.is_user_store();
-
-            idx = u32::oselect(is_user_store, record.last_fetch, idx + 1);
-
-            record.idx = u32::oselect(is_user_store, 0, record.get_idx(idx));
-            record.map = (record.idx % self.num_submaps) as u8;
-            record.last_fetch = idx;
-
-            if let Some(next_record) = user_store_iter.peek() {
-                is_same_u = record.uid == next_record.uid;
-            } else {
-                is_same_u = false;
-            }
-            record.mark = u16::oselect(is_same_u, 0, 1);
-        }
-    }
-
-    pub fn get_send_requests(&mut self, sends: Vec<Record>) -> Vec<Record> {
+    pub fn get_send_indices(&mut self, sends: Vec<IndexRecord>) -> Vec<IndexRecord> {
         let num_requests = sends.len();
-        self.update_with_sends(sends);
+        self.user_store.reserve(num_requests);
+        self.user_store.extend(sends);
 
-        otils::sort(&mut self.user_store[..], self.num_threads);
-        self.construct_send_indices();
+        self.user_store = otils::sort(std::mem::take(&mut self.user_store), self.num_threads);
+        self.propagate_send_indices();
 
         otils::compact(
             &mut self.user_store[..],
@@ -148,9 +149,10 @@ impl LoadBalancer {
 
         otils::compact(
             &mut self.user_store[..],
-            |r| r.is_new_user_store(),
+            |r| r.is_updated_user_store(),
             self.num_threads,
         );
+
         self.user_store.truncate(self.num_users as usize);
         self.user_store.iter_mut().for_each(|r| {
             r.set_user_store();
@@ -159,14 +161,61 @@ impl LoadBalancer {
         requests
     }
 
-    pub fn get_fetch_requests(&mut self, fetches: Vec<Record>) -> Vec<Record> {
-        let num_requests = fetches
-            .iter()
-            .fold(0, |acc, fetch| acc + fetch.data as usize);
+    pub fn batch_send(&mut self, sends: Vec<Record>) {
+        let sends = sends.into_iter().map(|r| IndexRecord(r)).collect();
+        let requests = self.get_send_indices(sends);
+        let submap_size = self.pad_size(requests.len() as f64);
+        let mut requests: Vec<Record> = self
+            .get_submap_requests(requests, submap_size, true)
+            .into_iter()
+            .map(|r| r.0)
+            .collect();
+
+        for idx in 0..self.num_submaps {
+            let batch: Vec<Record> = requests.drain(0..submap_size).collect();
+            self.submaps[idx].batch_send(batch);
+        }
+    }
+
+    fn update_with_fetches(&mut self, fetches: Vec<IndexRecord>, num_fetches: usize) {
+        self.user_store.reserve(num_fetches);
+        for fetch in fetches.into_iter() {
+            self.user_store.extend(fetch.dummy_fetches());
+        }
+    }
+
+    fn propagate_fetch_indices(&mut self) {
+        let mut idx: u32 = 0;
+        let mut is_same_u: bool;
+
+        let mut user_store_iter = self.user_store.iter_mut().peekable();
+        while let Some(record) = user_store_iter.next() {
+            let is_user_store = record.0.is_user_store();
+
+            idx = u32::oselect(is_user_store, record.0.last_fetch, idx + 1);
+
+            record.0.idx = u32::oselect(is_user_store, 0, record.get_idx(idx));
+            record.0.map = (record.0.idx % (self.num_submaps as u32)) as u8;
+            record.0.last_fetch = idx;
+
+            if let Some(next_record) = user_store_iter.peek() {
+                is_same_u = record.0.uid == next_record.0.uid;
+            } else {
+                is_same_u = false;
+            }
+            record.0.mark = u16::oselect(is_same_u, 0, 1);
+        }
+    }
+
+    pub fn get_fetch_indices(
+        &mut self,
+        fetches: Vec<IndexRecord>,
+        num_requests: usize,
+    ) -> Vec<IndexRecord> {
         self.update_with_fetches(fetches, num_requests);
 
-        otils::sort(&mut self.user_store[..], self.num_threads);
-        self.construct_fetch_indices();
+        self.user_store = otils::sort(std::mem::take(&mut self.user_store), self.num_threads);
+        self.propagate_fetch_indices();
 
         otils::compact(
             &mut self.user_store[..],
@@ -177,9 +226,10 @@ impl LoadBalancer {
 
         otils::compact(
             &mut self.user_store[..],
-            |r| r.is_new_user_store(),
+            |r| r.is_updated_user_store(),
             self.num_threads,
         );
+
         self.user_store.truncate(self.num_users as usize);
         self.user_store.iter_mut().for_each(|r| {
             r.set_user_store();
@@ -188,80 +238,32 @@ impl LoadBalancer {
         deliver
     }
 
-    pub fn pad_for_submap(&self, requests: Vec<Record>, submap_size: usize) -> Vec<SubmapRequest> {
-        let num_submaps = self.num_submaps as usize;
-        let mut remaining = (requests.len() + num_submaps * submap_size).next_power_of_two();
-        remaining -= requests.len();
-
-        let mut requests: Vec<SubmapRequest> = requests.into_iter().map(|r| r.into()).collect();
-        requests.reserve(remaining);
-
-        for idx in 0..num_submaps {
-            requests.extend(SubmapRequest::dummies(
-                submap_size,
-                idx as u32,
-                self.num_submaps,
-            ));
-        }
-        remaining -= num_submaps * submap_size;
-        requests.extend((0..remaining).map(|_| Record::max().into()));
-        requests
-    }
-
-    pub fn get_submap_requests(
-        &self,
-        requests: Vec<Record>,
-        submap_size: usize,
-    ) -> Vec<SubmapRequest> {
-        let mut requests = self.pad_for_submap(requests, submap_size);
-
-        otils::sort(&mut requests[..], self.num_threads); // sort by omap, then by dummy
-
-        let mut prev_map = self.num_submaps;
-        let mut remaining_marks = submap_size as i32;
-        for request in requests.iter_mut() {
-            let submap = request.value.map as u32;
-            remaining_marks = i32::oselect(submap != prev_map, submap_size as i32, remaining_marks);
-            request.value.mark = u16::oselect(remaining_marks > 0, 1, 0);
-            remaining_marks += i32::oselect(remaining_marks > 0, -1, 0);
-            prev_map = submap;
-        }
-
-        otils::compact(&mut requests[..], |r| r.value.mark == 1, self.num_threads);
-        requests
-    }
-
-    pub fn batch_send(&mut self, sends: Vec<Record>) {
-        let requests = self.get_send_requests(sends);
-        let submap_size = self.pad_size(requests.len() as f64);
-        let mut requests: Vec<Record> = self
-            .get_submap_requests(requests, submap_size)
-            .into_iter()
-            .map(|r| r.value)
-            .collect();
-
-        for idx in 0..self.num_submaps {
-            let batch = requests.drain(0..submap_size).collect();
-            self.omaps[idx].batch_send(batch);
-        }
-    }
-
     pub fn batch_fetch(&mut self, fetches: Vec<Record>) -> Vec<Record> {
-        let requests = self.get_fetch_requests(fetches);
+        let num_requests = fetches
+            .iter()
+            .fold(0, |acc, fetch| acc + fetch.data as usize);
+        let fetches = fetches.into_iter().map(|r| IndexRecord(r)).collect();
+        let requests = self.get_fetch_indices(fetches, num_requests);
+
         let submap_size = self.pad_size(requests.len() as f64);
         let mut requests: Vec<Record> = self
-            .get_submap_requests(requests, submap_size)
+            .get_submap_requests(requests, submap_size, false)
             .into_iter()
-            .map(|r| r.value)
+            .map(|r| r.0)
             .collect();
 
-        let mut responses: Vec<Record> = Vec::new();
+        let mut responses: Vec<IndexRecord> = Vec::new();
         responses.reserve(submap_size * self.num_submaps);
+
+        // parallelize
         for idx in 0..self.num_submaps {
-            let batch = requests.drain(0..submap_size).collect();
-            responses.extend(self.omaps[idx].batch_fetch(batch));
+            let batch: Vec<Record> = requests.drain(0..submap_size).collect();
+            responses.extend(self.submaps[idx].batch_fetch(batch));
         }
 
-        responses
+        // this only really needs to be a shuffle
+        responses = otils::sort(responses, self.num_threads);
+        otils::compact(&mut responses, |r| r.0.is_send(), self.num_threads);
+        responses.drain(0..num_requests).map(|r| r.0).collect()
     }
 }
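
For context on the new get_submap_requests path: the batch is padded with per-submap dummies, sorted by (map, idx), and a single linear pass uses constant-time selects to mark exactly submap_size records per submap before an oblivious compaction discards the rest. A minimal standalone sketch of that marking pass, assuming a simplified record type and a local branchless select in place of otils::ObliviousOps::oselect:

// Sketch only: stand-ins for the otils primitives used in the diff above.
fn oselect_i32(cond: bool, a: i32, b: i32) -> i32 {
    let mask = -(cond as i32); // all ones if cond, else zero
    (a & mask) | (b & !mask)
}

struct Req {
    map: u8,   // destination submap
    mark: u16, // 1 = keep for this batch, 0 = drop in compaction
}

// Mark the first `submap_size` records of each submap in a (map, idx)-sorted slice.
fn mark_per_submap(requests: &mut [Req], num_submaps: usize, submap_size: usize) {
    let mut prev_map = num_submaps as i32; // sentinel: no submap seen yet
    let mut remaining = submap_size as i32;
    for r in requests.iter_mut() {
        let map = r.map as i32;
        remaining = oselect_i32(map != prev_map, submap_size as i32, remaining);
        r.mark = oselect_i32(remaining > 0, 1, 0) as u16;
        remaining += oselect_i32(remaining > 0, -1, 0);
        prev_map = map;
    }
    // The real code follows this with otils::compact on mark == 1.
}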

sparta/src/main.rs (+10 -7)

@@ -1,22 +1,25 @@
 mod load_balancer;
 mod omap;
 mod record;
+
 use load_balancer::LoadBalancer;
-use omap::ObliviousMap;
+// use omap::ObliviousMap;
 use record::Record;
 
 fn main() {
     let mut l = LoadBalancer::new(5, 8, 4);
     let sends: Vec<Record> = (0..3)
-        .map(|x| Record::new_send(x, x.try_into().unwrap()))
+        .map(|x| Record::send(x, x.try_into().unwrap()))
         .collect();
 
-    // l.batch_send(sends);
+    println!("--- SEND ---\n");
+    l.batch_send(sends);
 
-    let fetches: Vec<Record> = vec![Record::new_fetch(0, 3)];
+    let fetches: Vec<Record> = vec![Record::fetch(0, 3)];
 
-    let indices = l.batch_fetch(fetches);
-    for i in indices.iter() {
-        // println!("{:?}", i);
+    println!("--- FETCH ---\n");
+    let responses = l.batch_fetch(fetches);
+    for response in responses.iter() {
+        println!("{:?}", response);
     }
 }

sparta/src/omap.rs (+30 -29)

@@ -1,18 +1,12 @@
-use crate::record::{Record, RecordType};
-use otils::ObliviousOps;
+use crate::record::{IndexRecord, Record, RecordType};
+use otils::{Max, ObliviousOps};
 use std::cmp::Ordering;
 
 struct MapRecord(Record);
 
 impl MapRecord {
-    fn dummies(len: usize) -> Vec<Self> {
-        (0..len).map(|_| MapRecord(Record::max())).collect()
-    }
-
-    fn fetch_pad(record: Record) -> Self {
-        let mut record = Self(record);
-        record.0.type_rec = RecordType::Send;
-        record
+    fn dummy_send(idx: u32) -> Self {
+        MapRecord(Record::new(0, RecordType::Dummy, 0, 0, idx))
     }
 
     fn should_deliver(&self) -> bool {
@@ -26,14 +20,14 @@ impl MapRecord {
 
 impl PartialEq for MapRecord {
     fn eq(&self, other: &Self) -> bool {
-        self.0.idx == other.0.idx && self.0.type_rec == other.0.type_rec
+        self.0.idx == other.0.idx && self.0.rec_type == other.0.rec_type
     }
 }
 
 impl PartialOrd for MapRecord {
     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
         let idx_ord = self.0.idx.partial_cmp(&other.0.idx);
-        let type_ord = self.0.type_rec.partial_cmp(&other.0.type_rec);
+        let type_ord = self.0.rec_type.partial_cmp(&other.0.rec_type);
         match idx_ord {
             Some(Ordering::Equal) => type_ord,
             x => x,
@@ -41,6 +35,12 @@ impl PartialOrd for MapRecord {
     }
 }
 
+impl Max for MapRecord {
+    fn maximum() -> Self {
+        MapRecord(Record::new(0, RecordType::Dummy, 0, 0, u32::MAX))
+    }
+}
+
 pub struct ObliviousMap {
     num_threads: usize,
     message_store: Vec<MapRecord>,
@@ -62,48 +62,48 @@ impl ObliviousMap {
     }
 
     fn update_with_fetches(&mut self, requests: Vec<Record>) {
-        let mut remaining = (self.message_store.len() + 2 * requests.len()).next_power_of_two();
-        remaining -= self.message_store.len() + 2 * requests.len();
-        self.message_store.reserve(remaining);
+        self.message_store.reserve(2 * requests.len());
 
         // add padding for fetches
         self.message_store.extend(
             requests
                 .iter()
-                .map(|record| MapRecord::fetch_pad(record.clone())),
+                .map(|record| MapRecord::dummy_send(record.idx)),
         );
 
         // add fetches
         self.message_store
             .extend(requests.into_iter().map(|r| MapRecord(r)));
-
-        // add padding to next power of two
-        self.message_store.extend(MapRecord::dummies(remaining));
     }
 
-    pub fn batch_fetch(&mut self, requests: Vec<Record>) -> Vec<Record> {
-        let original_size = self.message_store.len();
+    pub fn batch_fetch(&mut self, requests: Vec<Record>) -> Vec<IndexRecord> {
+        let final_size = self.message_store.len();
         let num_requests = requests.len();
 
         self.update_with_fetches(requests);
 
-        otils::sort(&mut self.message_store[..], self.num_threads);
+        self.message_store = otils::sort(std::mem::take(&mut self.message_store), self.num_threads);
 
-        let mut prev_fetch = 0;
+        let mut prev_idx = u32::MAX;
+        let mut remaining = 0;
         for record in self.message_store.iter_mut() {
-            record.0.mark = u16::oselect(prev_fetch == 1, 1, 0);
-            prev_fetch = i32::oselect(record.0.is_fetch(), 1, 0)
+            remaining = i32::oselect(prev_idx == record.0.idx, remaining, 0);
+            record.0.mark = u16::oselect(record.0.is_fetch(), 0, u16::oselect(remaining > 0, 1, 0));
+
+            prev_idx = record.0.idx;
+            remaining += i32::oselect(record.0.is_fetch(), 1, i32::oselect(remaining > 0, -1, 0));
         }
 
         otils::compact(
             &mut self.message_store[..],
-            |record| record.should_deliver(),
+            |r| r.should_deliver(),
             self.num_threads,
         );
-        let response: Vec<Record> = self
+
+        let response: Vec<IndexRecord> = self
             .message_store
             .drain(0..num_requests)
-            .map(|r| r.0)
+            .map(|r| IndexRecord(r.0))
             .collect();
 
         otils::compact(
@@ -111,7 +111,8 @@ impl ObliviousMap {
             |record| record.should_defer(),
             self.num_threads,
         );
-        self.message_store.truncate(original_size);
+        self.message_store.truncate(final_size);
+
         response
     }
 }
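
The reworked marking loop in batch_fetch above pairs stored sends with incoming fetches that share an idx: after sorting the combined store, a linear pass counts pending fetches per idx and marks one send for delivery per pending fetch, all through constant-time selects. A rough standalone sketch of that pass, assuming fetches sort before the sends they consume for equal idx and using a local branchless select instead of otils:

fn oselect_i32(cond: bool, a: i32, b: i32) -> i32 {
    let mask = -(cond as i32); // all ones if cond, else zero
    (a & mask) | (b & !mask)
}

#[derive(Clone, Copy, PartialEq)]
enum Kind {
    Fetch,
    Send,
}

struct Entry {
    idx: u32,
    kind: Kind,
    mark: u16, // 1 = deliver this send, 0 = keep deferred
}

// Assumes `store` is sorted by (idx, kind) with fetches ahead of sends for equal idx.
fn mark_deliveries(store: &mut [Entry]) {
    let mut prev_idx = u32::MAX;
    let mut remaining: i32 = 0; // pending fetches for the current idx
    for e in store.iter_mut() {
        let is_fetch = e.kind == Kind::Fetch;
        remaining = oselect_i32(prev_idx == e.idx, remaining, 0);
        // Fetch rows are never delivered; a send is delivered while fetches remain.
        e.mark = oselect_i32(!is_fetch && remaining > 0, 1, 0) as u16;
        prev_idx = e.idx;
        remaining += oselect_i32(is_fetch, 1, oselect_i32(remaining > 0, -1, 0));
    }
}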

sparta/src/record.rs (+75 -93)

@@ -1,4 +1,5 @@
 use blake3;
+use otils::Max;
 use std::cmp::Ordering;
 
 #[derive(Clone, Debug, PartialEq, PartialOrd)]
@@ -12,11 +13,11 @@ pub enum RecordType {
 #[derive(Debug, Clone)]
 pub struct Record {
     pub uid: i64,
-
+    pub idx: u32,
     pub map: u8,
-    pub type_rec: RecordType,
+
+    pub rec_type: RecordType,
     pub mark: u16,
-    pub idx: u32,
 
     pub last_fetch: u32,
     pub last_send: u32,
@@ -27,153 +28,134 @@ pub struct Record {
 }
 
 impl Record {
-    pub fn new(uid: i64) -> Self {
+    pub fn new(uid: i64, type_rec: RecordType, data: u64, map: u8, idx: u32) -> Self {
         Record {
             uid,
-            map: 0,
-            type_rec: RecordType::User,
+            idx,
+            map,
+            rec_type: type_rec,
             mark: 0,
-            idx: 0,
             last_fetch: 0,
             last_send: 0,
-            data: 0,
+            data,
             _dum: [0; 12],
         }
     }
 
-    pub fn new_send(uid: i64, message: u64) -> Self {
-        Record {
-            uid,
-            map: 0,
-            type_rec: RecordType::Send,
-            mark: 0,
-            idx: 0,
-            last_fetch: 0,
-            last_send: 0,
-            data: message,
-            _dum: [0; 12],
-        }
+    pub fn send(uid: i64, message: u64) -> Self {
+        Record::new(uid, RecordType::Send, message, 0, 0)
     }
 
-    pub fn new_fetch(uid: i64, volume: u64) -> Self {
-        Record {
-            uid,
-            map: 0,
-            type_rec: RecordType::Fetch,
-            mark: 0,
-            idx: 0,
-            last_fetch: 0,
-            last_send: 0,
-            data: volume,
-            _dum: [0; 12],
-        }
+    pub fn fetch(uid: i64, message: u64) -> Self {
+        Record::new(uid, RecordType::Fetch, message, 0, 0)
     }
 
-    pub fn dummies(&self) -> Vec<Self> {
-        (0..self.data)
-            .map(|_| Record::new_fetch(self.uid, 0))
-            .collect()
+    pub fn is_user_store(&self) -> bool {
+        self.rec_type == RecordType::User
     }
 
-    pub fn max() -> Self {
-        Record {
-            uid: Self::max_uid(),
-            map: 0,
-            type_rec: RecordType::User,
-            mark: 0,
-            idx: 0,
-            last_fetch: 0,
-            last_send: 0,
-            data: 0,
-            _dum: [0; 12],
-        }
+    pub fn is_fetch(&self) -> bool {
+        self.rec_type == RecordType::Fetch
     }
 
-    pub fn max_uid() -> i64 {
-        i64::MAX
+    pub fn is_send(&self) -> bool {
+        self.rec_type == RecordType::Send
     }
+}
 
-    pub fn is_request(&self) -> bool {
-        self.type_rec != RecordType::User
+#[derive(Clone)]
+pub struct IndexRecord(pub Record);
+
+impl IndexRecord {
+    pub fn new(uid: i64, rec_type: RecordType) -> Self {
+        IndexRecord(Record::new(uid, rec_type, 0, 0, 0))
     }
 
-    pub fn is_new_user_store(&self) -> bool {
-        self.mark == 1 && self.uid != i64::MAX
+    pub fn dummy_fetches(&self) -> Vec<Self> {
+        (0..self.0.data)
+            .map(|_| IndexRecord::new(self.0.uid, RecordType::Fetch))
+            .collect()
     }
 
-    pub fn is_user_store(&self) -> bool {
-        self.type_rec == RecordType::User
+    pub fn get_idx(&self, idx: u32) -> u32 {
+        let mut hasher = blake3::Hasher::new();
+        hasher.update(&self.0.uid.to_ne_bytes());
+        hasher.update(&idx.to_ne_bytes());
+        let hash = hasher.finalize();
+        u32::from_ne_bytes(<[u8; 4]>::try_from(&hash.as_bytes()[0..4]).unwrap())
     }
 
-    pub fn is_fetch(&self) -> bool {
-        self.type_rec == RecordType::Fetch
+    pub fn is_request(&self) -> bool {
+        self.0.rec_type != RecordType::User
     }
 
-    pub fn set_user_store(&mut self) {
-        self.type_rec = RecordType::User;
+    pub fn is_updated_user_store(&self) -> bool {
+        self.0.mark == 1 && self.0.uid != i64::MAX
     }
 
-    pub fn get_idx(&mut self, idx: u32) -> u32 {
-        let mut hasher = blake3::Hasher::new();
-        hasher.update(&self.uid.to_ne_bytes());
-        hasher.update(&idx.to_ne_bytes());
-        let hash = hasher.finalize();
-        u32::from_ne_bytes(<[u8; 4]>::try_from(&hash.as_bytes()[0..4]).unwrap())
+    pub fn set_user_store(&mut self) {
+        self.0.rec_type = RecordType::User;
     }
 }
 
-impl PartialEq for Record {
+impl PartialEq for IndexRecord {
     fn eq(&self, other: &Self) -> bool {
-        self.uid == other.uid && self.type_rec == other.type_rec
+        self.0.uid == other.0.uid && self.0.rec_type == other.0.rec_type
     }
 }
 
-impl PartialOrd for Record {
+impl PartialOrd for IndexRecord {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        let user_ord = self.uid.partial_cmp(&other.uid);
-        let type_ord = self.type_rec.partial_cmp(&other.type_rec);
-        match user_ord {
+        let uid_ord = self.0.uid.partial_cmp(&other.0.uid);
+        let type_ord = self.0.rec_type.partial_cmp(&other.0.rec_type);
+        match uid_ord {
             Some(Ordering::Equal) => type_ord,
             x => x,
         }
     }
 }
 
-#[derive(Debug)]
-pub struct SubmapRequest {
-    pub value: Record,
+impl Max for IndexRecord {
+    fn maximum() -> Self {
+        IndexRecord(Record::new(i64::MAX, RecordType::Dummy, 0, 0, 0))
+    }
 }
 
-impl SubmapRequest {
-    pub fn dummies(num: usize, idx: u32, num_submaps: u32) -> Vec<Self> {
-        (0..num)
-            .map(|_| {
-                let mut m = Record::max();
-                m.map = (idx % num_submaps) as u8;
-                m.into()
-            })
+pub struct SubmapRecord(pub Record);
+
+impl SubmapRecord {
+    pub fn dummy_send(num_requests: usize, map: u8) -> Vec<Self> {
+        (0..num_requests)
+            .map(|_| SubmapRecord(Record::new(0, RecordType::Dummy, 0, map, u32::MAX)))
             .collect()
     }
-}
 
-impl From<Record> for SubmapRequest {
-    fn from(value: Record) -> Self {
-        SubmapRequest { value }
+    pub fn dummy_fetch(num_requests: usize, map: u8) -> Vec<Self> {
+        (0..num_requests)
+            .map(|_| SubmapRecord(Record::new(0, RecordType::Fetch, 0, map, u32::MAX)))
+            .collect()
     }
 }
 
-impl PartialEq for SubmapRequest {
+impl PartialEq for SubmapRecord {
     fn eq(&self, other: &Self) -> bool {
-        self.value.idx == other.value.idx && self.value.uid == other.value.uid
+        self.0.map == other.0.map && self.0.rec_type == other.0.rec_type
     }
 }
-impl PartialOrd for SubmapRequest {
+
+impl PartialOrd for SubmapRecord {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        let map_ord = self.value.map.partial_cmp(&other.value.map);
-        let uid_ord = self.value.uid.partial_cmp(&other.value.uid);
+        let map_ord = self.0.map.partial_cmp(&other.0.map);
+        let idx_ord = self.0.idx.partial_cmp(&other.0.idx);
         match map_ord {
-            Some(Ordering::Equal) => uid_ord,
+            Some(Ordering::Equal) => idx_ord,
             x => x,
         }
     }
 }
+
+impl Max for SubmapRecord {
+    fn maximum() -> Self {
+        SubmapRecord(Record::new(0, RecordType::Dummy, 0, u8::MAX, 0))
+    }
+}
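
The rewritten IndexRecord::get_idx derives each request's index by hashing the uid together with a per-user counter, and the load balancer then reduces that index modulo the number of submaps to pick a destination. A small self-contained sketch mirroring the blake3 usage above; num_submaps is only an illustrative value here:

use blake3;

// Derive a pseudorandom 32-bit index from (uid, counter), as get_idx does in the diff.
fn derive_idx(uid: i64, counter: u32) -> u32 {
    let mut hasher = blake3::Hasher::new();
    hasher.update(&uid.to_ne_bytes());
    hasher.update(&counter.to_ne_bytes());
    let hash = hasher.finalize();
    u32::from_ne_bytes(<[u8; 4]>::try_from(&hash.as_bytes()[0..4]).unwrap())
}

fn main() {
    let num_submaps: u32 = 4; // illustrative only
    let uid: i64 = 7;
    for counter in 0..3u32 {
        let idx = derive_idx(uid, counter);
        println!("uid {uid}, counter {counter} -> idx {idx}, submap {}", idx % num_submaps);
    }
}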