
Distributed variant (sparta-d) and experiment scripts.

Kyle Fredrickson committed 1 year ago · commit 76eb157f44

baseline/src/omq.rs  (+9 -1)

@@ -4,6 +4,7 @@ use rayon::ThreadPool;
 
 #[derive(Debug)]
 pub struct ObliviousMultiQueue {
+    num_threads: usize,
     pool: ThreadPool,
     message_store: Vec<Request>,
 }
@@ -15,6 +16,7 @@ impl ObliviousMultiQueue {
             .build()
             .unwrap();
         ObliviousMultiQueue {
+            num_threads,
             pool,
             message_store: Vec::new(),
         }
@@ -41,7 +43,11 @@ impl ObliviousMultiQueue {
         let fetch_sum = fetches.iter().fold(0, |acc, f| acc + f.volume) as usize;
         self.update_store(fetches, fetch_sum);
 
-        self.message_store = otils::sort(std::mem::take(&mut self.message_store), &self.pool);
+        self.message_store = otils::sort(
+            std::mem::take(&mut self.message_store),
+            &self.pool,
+            self.num_threads,
+        );
 
         let mut user_sum: isize = 0;
         let mut prev_user: i32 = -1;
@@ -64,6 +70,7 @@ impl ObliviousMultiQueue {
             &mut self.message_store[..],
             |r| r.should_deliver(),
             &self.pool,
+            self.num_threads,
         );
         let deliver: Vec<Request> = self.message_store.drain(0..fetch_sum).collect();
         // for r in deliver.iter() {
@@ -74,6 +81,7 @@ impl ObliviousMultiQueue {
             &mut self.message_store[..],
             |r| r.should_defer(),
             &self.pool,
+            self.num_threads,
         );
         self.message_store.truncate(final_size);
         // for r in self.message_store.iter() {

experiments/d_scaling.py  (+35 -0)

@@ -0,0 +1,35 @@
+import os
+import subprocess
+
+SENDS = 2**22
+USERS = 2**17
+
+THREADS = 48
+D_MAPS = 15
+
+RUNS = 10
+WARMUP = 0
+
+DATA_DIR = os.path.join(os.getcwd(), "data", "d-scaling")
+os.makedirs(DATA_DIR, exist_ok=True)
+
+SPARTA_D_DIR = os.path.join(os.getcwd(), "sparta-d")
+SPARTA_D_FILE = os.path.join(
+    DATA_DIR, f"sparta-d-{SENDS}-{USERS}-{THREADS}.csv")
+
+
+def sparta_d_cmd(maps):
+    cmd = ["cargo", "run", "--release", "--",
+           str(SENDS), str(USERS), str(THREADS), str(USERS), str(maps), "-r", str(RUNS), "-w", str(WARMUP)]
+    result = subprocess.run(cmd, capture_output=True,
+                            text=True, cwd=SPARTA_D_DIR)
+    return result.stdout
+
+
+for maps in range(1, 16):
+    print(maps)
+
+    with open(SPARTA_D_FILE, "a") as sparta_d_file:
+        output = sparta_d_cmd(maps)
+        print("\tsparta_d:", output)
+        sparta_d_file.write(output)
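
Note on the output these scripts collect: per sparta-d/src/main.rs later in this commit, each run prints a single line consisting of the send count followed by tab-separated per-run latencies in seconds, and the scripts append those lines verbatim to the CSV files. A minimal parsing sketch under that assumption (parse_line is illustrative, not part of the repository):

    def parse_line(line: str):
        # "<sends>\t<t1>\t<t2>\t...\t<tN>\t" -> (sends, [t1, ..., tN])
        fields = line.strip().split("\t")
        return int(fields[0]), [float(f) for f in fields[1:]]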

experiments/message_scaling.py  (+20 -1)

@@ -1,12 +1,13 @@
 import os
 import subprocess
 
-SENDS = [2**i for i in range(18, 26)]
+SENDS = [2**i for i in range(25, 26)]
 FETCHES = 8192
 USERS = FETCHES
 
 THREADS = 48
 MAPS = 5
+D_MAPS = 15
 
 RUNS = 10
 WARMUP = 0
@@ -15,10 +16,13 @@ DATA_DIR = os.path.join(os.getcwd(), "data", "message-scaling")
 os.makedirs(DATA_DIR, exist_ok=True)
 
 SPARTA_DIR = os.path.join(os.getcwd(), "sparta")
+SPARTA_D_DIR = os.path.join(os.getcwd(), "sparta-d")
 BASELINE_DIR = os.path.join(os.getcwd(), "baseline")
 
 BASELINE_FILE = os.path.join(DATA_DIR, f"baseline-{FETCHES}-{THREADS}.csv")
 SPARTA_FILE = os.path.join(DATA_DIR, f"sparta-{FETCHES}-{THREADS}-{MAPS}.csv")
+SPARTA_D_FILE = os.path.join(
+    DATA_DIR, f"sparta-d-{FETCHES}-{THREADS}-{D_MAPS}.csv")
 
 
 def sparta_cmd(sends):
@@ -31,6 +35,16 @@ def sparta_cmd(sends):
     return result.stdout
 
 
+def sparta_d_cmd(sends):
+
+    cmd = ["cargo", "run", "--release", "--",
+           str(sends), str(FETCHES), str(THREADS), str(USERS), str(D_MAPS), "-r", str(RUNS), "-w", str(WARMUP)]
+    result = subprocess.run(cmd, capture_output=True,
+                            text=True, cwd=SPARTA_D_DIR)
+    print(result.stderr)
+    return result.stdout
+
+
 def baseline_cmd(sends):
     cmd = ["cargo", "run", "--release", "--",
            str(sends), str(FETCHES), str(THREADS),  "-r", str(RUNS), "-w", str(WARMUP)]
@@ -47,6 +61,11 @@ for send in SENDS:
         print("\tsparta:", output)
         sparta_file.write(output)
 
+    with open(SPARTA_D_FILE, "a") as sparta_d_file:
+        output = sparta_d_cmd(send)
+        print("\tsparta-d:", output)
+        sparta_d_file.write(output)
+
     with open(BASELINE_FILE, "a") as baseline_file:
         output = baseline_cmd(send)
         print("\tbaseline:", output)

experiments/submap_scaling.py  (+27 -9)

@@ -6,7 +6,8 @@ FETCHES = 8192
 USERS = FETCHES
 
 THREADS = 8
-MAP_THREADS = [(i, THREADS * i + THREADS) for i in range(1, int(48 / THREADS))]
+MAP_THREADS = [(i, 8 + 8 * i) for i in range(6, int(16))]
+# MAP_THREADS = [(i, 8 + 8 * i) for i in range(1, int(48 / THREADS))]
 
 RUNS = 10
 WARMUP = 0
@@ -15,10 +16,12 @@ DATA_DIR = os.path.join(os.getcwd(), "data", "submap-scaling")
 os.makedirs(DATA_DIR, exist_ok=True)
 
 SPARTA_DIR = os.path.join(os.getcwd(), "sparta")
+SPARTA_D_DIR = os.path.join(os.getcwd(), "sparta-d")
 BASELINE_DIR = os.path.join(os.getcwd(), "baseline")
 
 BASELINE_FILE = os.path.join(DATA_DIR, f"baseline-{FETCHES}-{THREADS}.csv")
 SPARTA_FILE = os.path.join(DATA_DIR, f"sparta-{FETCHES}-{THREADS}.csv")
+SPARTA_D_FILE = os.path.join(DATA_DIR, f"sparta-d-{FETCHES}-{THREADS}.csv")
 
 
 def sparta_cmd(mt):
@@ -31,6 +34,16 @@ def sparta_cmd(mt):
     return result.stdout
 
 
+def sparta_d_cmd(mt):
+    (num_maps, _num_threads) = mt
+    cmd = ["cargo", "run", "--release", "--",
+           str(SENDS), str(FETCHES), str(48),
+           str(USERS), str(num_maps), "-r", str(RUNS), "-w", str(WARMUP)]
+    result = subprocess.run(cmd, capture_output=True,
+                            text=True, cwd=SPARTA_D_DIR)
+    return result.stdout
+
+
 def baseline_cmd(mt):
     (_num_maps, num_threads) = mt
     cmd = ["cargo", "run", "--release", "--",
@@ -43,12 +56,17 @@ def baseline_cmd(mt):
 for mt in MAP_THREADS:
     print(mt)
 
-    with open(SPARTA_FILE, "a") as sparta_file:
-        output = sparta_cmd(mt)
-        print("\tsparta:", output)
-        sparta_file.write(output)
+    # with open(SPARTA_FILE, "a") as sparta_file:
+    #     output = sparta_cmd(mt)
+    #     print("\tsparta:", output)
+    #     sparta_file.write(output)
+
+    with open(SPARTA_D_FILE, "a") as sparta_d_file:
+        output = sparta_d_cmd(mt)
+        print("\tsparta_d:", output)
+        sparta_d_file.write(output)
 
-    with open(BASELINE_FILE, "a") as baseline_file:
-        output = baseline_cmd(mt)
-        print("\tbaseline:", output)
-        baseline_file.write(output)
+    # with open(BASELINE_FILE, "a") as baseline_file:
+    #     output = baseline_cmd(mt)
+    #     print("\tbaseline:", output)
+    #     baseline_file.write(output)

experiments/user_and_message_scaling.py  (+17 -1)

@@ -1,10 +1,11 @@
 import os
 import subprocess
 
-SENDS = [2**i for i in range(18, 25)]
+SENDS = [2**i for i in range(21, 25)]
 
 THREADS = 48
 MAPS = 5
+D_MAPS = 15
 
 RUNS = 10
 WARMUP = 0
@@ -13,10 +14,12 @@ DATA_DIR = os.path.join(os.getcwd(), "data", "user-message-scaling")
 os.makedirs(DATA_DIR, exist_ok=True)
 
 SPARTA_DIR = os.path.join(os.getcwd(), "sparta")
+SPARTA_D_DIR = os.path.join(os.getcwd(), "sparta-d")
 BASELINE_DIR = os.path.join(os.getcwd(), "baseline")
 
 BASELINE_FILE = os.path.join(DATA_DIR, f"baseline-{THREADS}.csv")
 SPARTA_FILE = os.path.join(DATA_DIR, f"sparta-{THREADS}.csv")
+SPARTA_D_FILE = os.path.join(DATA_DIR, f"sparta-d-{THREADS}.csv")
 
 
 def sparta_cmd(sends):
@@ -27,6 +30,14 @@ def sparta_cmd(sends):
     return result.stdout
 
 
+def sparta_d_cmd(sends):
+    cmd = ["cargo", "run", "--release", "--",
+           str(sends), str(sends), str(THREADS), str(sends), str(D_MAPS), "-r", str(RUNS), "-w", str(WARMUP)]
+    result = subprocess.run(cmd, capture_output=True,
+                            text=True, cwd=SPARTA_D_DIR)
+    return result.stdout
+
+
 def baseline_cmd(sends):
     cmd = ["cargo", "run", "--release", "--",
            str(sends), str(sends), str(THREADS), "-r", str(RUNS), "-w", str(WARMUP)]
@@ -43,6 +54,11 @@ for send in SENDS:
         print("\tsparta:", output)
         sparta_file.write(output)
 
+    with open(SPARTA_D_FILE, "a") as sparta_d_file:
+        output = sparta_d_cmd(send)
+        print("\tsparta_d:", output)
+        sparta_d_file.write(output)
+
     with open(BASELINE_FILE, "a") as baseline_file:
         output = baseline_cmd(send)
         print("\tbaseline:", output)

otils  (+1 -1)

@@ -1 +1 @@
-Subproject commit b372424e36f1ebc6f3d088e2b61062469d331482
+Subproject commit 1c3e4f931bed4bff1369538d8a066344fb31e4ec

sparta-d/.cargo/config.toml  (+5 -0)

@@ -0,0 +1,5 @@
+[target.x86_64-fortanix-unknown-sgx]
+runner = "ftxsgx-runner-cargo"
+
+[build]
+target = "x86_64-fortanix-unknown-sgx"

sparta-d/Cargo.lock  (+349 -0)

@@ -0,0 +1,349 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "anstream"
+version = "0.6.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5"
+dependencies = [
+ "windows-sys",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19"
+dependencies = [
+ "anstyle",
+ "windows-sys",
+]
+
+[[package]]
+name = "arrayref"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
+
+[[package]]
+name = "arrayvec"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
+
+[[package]]
+name = "blake3"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52"
+dependencies = [
+ "arrayref",
+ "arrayvec",
+ "cc",
+ "cfg-if",
+ "constant_time_eq",
+]
+
+[[package]]
+name = "cc"
+version = "1.0.98"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "clap"
+version = "4.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
+dependencies = [
+ "clap_builder",
+ "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
+ "strsim",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
+
+[[package]]
+name = "colorchoice"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422"
+
+[[package]]
+name = "constant_time_eq"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
+dependencies = [
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
+
+[[package]]
+name = "either"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b"
+
+[[package]]
+name = "fastapprox"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9dfa3c0fd35278e839805680f4c2f673ca71eb91068115b4a611e71429bc0c46"
+
+[[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800"
+
+[[package]]
+name = "otils"
+version = "0.1.0"
+dependencies = [
+ "cc",
+ "rayon",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rayon"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
+dependencies = [
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
+dependencies = [
+ "crossbeam-deque",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "sparta"
+version = "0.1.0"
+dependencies = [
+ "blake3",
+ "clap",
+ "fastapprox",
+ "otils",
+ "rayon",
+]
+
+[[package]]
+name = "strsim"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
+
+[[package]]
+name = "syn"
+version = "2.0.66"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "utf8parse"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"

sparta-d/Cargo.toml  (+19 -0)

@@ -0,0 +1,19 @@
+[package]
+name = "sparta"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+otils = { path = "../otils" }
+blake3 = "1.5.1"
+fastapprox = "0.3.1"
+clap = { version = "4.5.4", features = ["derive"] }
+rayon = "1.10.0"
+
+[package.metadata.fortanix-sgx]
+stack-size=0x400000
+heap-size=0x100000000
+threads=49
+
+[profile.release]
+debug = true

sparta-d/makefile  (+28 -0)

@@ -0,0 +1,28 @@
+NAME = sparta
+BUILD_DIR = target/x86_64-fortanix-unknown-sgx/release
+KEY = private.pem
+
+BINARY = $(BUILD_DIR)/$(NAME)
+TARGET = $(BUILD_DIR)/$(NAME).sgxs
+SIGNATURE = $(BUILD_DIR)/$(NAME).sig
+
+HEAP_SIZE = 0x100000000
+STACK_SIZE = 0x400000
+MAX_THREADS = 9
+
+.PHONY: build run clean
+
+$(TARGET): $(BINARY)
+	ftxsgx-elf2sgxs -o $(TARGET) $(BINARY) --heap-size $(HEAP_SIZE) --stack-size $(STACK_SIZE) --threads $(MAX_THREADS)
+	sgxs-sign --key $(KEY) $(TARGET) $(SIGNATURE)
+
+$(BINARY): build
+
+build:
+	cargo build --release
+
+run: $(TARGET)
+	ftxsgx-runner $(TARGET) --help
+
+clean:
+	rm -rf $(BUILD_DIR)

sparta-d/private.pem  (+39 -0)

@@ -0,0 +1,39 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIG4gIBAAKCAYEAuod+uoYcb9c2Xm4P7NxVwKBBavYbri4etwTs+V4g5zZ+RoZT
+AjlNYEDl33m+UE/3azOCvu5sHCMUjeRsPPAvcnA1o5+XTde1zR4oqK6zkPYC3kp4
+x5l2hVbxdACwn4/HlDM/DF8U2hxQ1c+vp3W0xSn1YJmOHYdHkRJBS14oZcVr7k4i
+kCfx9vSNqP6ALTV3OpQZgtS+lbCFiu5YX3A2COVKcxwS/bzrcFYsS5yI/s53CGDL
+IzAegcEVXftHGZHHR523CWo27KBtMQPsoLA0w2XLFQYiKYPfXV1yfrD2x4Wwx8TD
+NiOv21bV1lP2C6J7pBpj28Bre6+Db/faQ7IUqAQqqlqiYnc9KIkRAlfSYtL8KkzL
+O1Jd8Kxh3BCGIsa3ZJp+5MkKlkG11CmJA/YTreqHMR+DgdRUMYmmx4NC4z3KrR2c
+FkysHoatsNMI34q7WbZr/gzGnS2rkzHRnlWRejqyBIMPdFcSGBXEek4KYUkpK3FK
+NALuo906JGvzJ5iJAgEDAoIBgHxaVHxZaEqPeZRJX/M9joBq1kdOvR7Jac9YnfuU
+Fe95qYRZjKwmM5WAmT+mfuA1T5zNAdSe8r1suF6YSCigH6GgI8JqZN6Pzoi+xcXJ
+zQtOrJQxpdpmTwOPS6KrIGpf2mLM1LLqDea9izk1H8T5Iy4b+OsRCWkE2mC21jI+
+xZkuR/Q0FwrFS/n4XnCpqsjOT3xiu6yN1GPLA7H0OupKzrCY3Ey9Yf598krkHYe9
+sKne+gWV3MIgFFaAuOlSL2ZhLmG20HuC+yttQDUC7e+lGaXGEcLDQCj07SOQ3X0I
+Ws1YoOISqsndDEAfjA8Fevx/2+pOH4wqzg7tBBzxvJGI4KmD+rkl3BJWYWjCXQuC
+VQ4FXLxhQLQNU7TqAhZxa5eeO3geDNSEZhZfVH4p6Rim/vxbvIDH+pe1HSXqJvFu
+Zfy9sX9SMShgWeprf/RdULprF0iwBO+USjHuZY4lnaRTtrwa5tJ8QCknO6EUy35u
+xA+oQJeS+yuLIsyLF2Lx24knYwKBwQDbRjhHeQfAVZhW6yKOGmFMwkeAztsqRvbJ
+welT1jW6cBjzdbnaJv83GHTBNfEzUh06s3xZmXN4TKEWrZjLdnlhGZvvyJnxtj58
+gyI0iUErtN5s4Ma04ThQczxcm8lHmSzvoWi+cZakv7D3GRxVZ+FJQkmVVW3G3KMT
+KnPcCsAdv2M22Y42aWNA5PZ2vL/XYDvmIHpUer+wgVfllAumUqNmaYZbU2cy7Tmt
+tt8G+yILYDGTIktCpEmbq6epaY2IYjcCgcEA2cVGCKy2ayb0ipRlKx4s/fpo8BJm
+wc2E3kI870442RCmgTMJLTEeRA8O/pXMXZCUifG4L+bC7P9cLd+mne5KSKSWpNk+
+VT9sjuJjPIsRohYSotojeF0oENoiXcqbd8AxQMwC55Daz/MGH9GPCZOz6h6i2p4d
+12gFZX/4gVLpymHdN0jEI1LB/gB3S4pP8Incho3qQjOXoca7WGVZgYVb+ajRbXVI
+A67++L+0NazCPmyWtPKGMA3w1WDvZpecUXs/AoHBAJIu0C+mBSrjuuScwbQRljMs
+L6s0khwvTzEr8OKOznxKu0z5JpFvVM9loyt5S3eME3x3qDu7olAzFg8eZdz5pkC7
+vUqFu/Z5fv2swXhbgMfN6Z3rLyNA0DWiKD29MNpmHfUWRdRLucMqdfoQvY5FQNuB
+hmOOSS89wgzG9+gHKr5/l3nmXs7w7NXt+aR91TpAJ+7AUY2nKnWrj+5ism7hwkRG
+WZI3miHze8kklK9SFrJAIQzBh4HC270dGnDxCQWWzwKBwQCRLi6wcyRHb02xuENy
+FB3+pvCgDESBM63pgX303tCQtcRWIgYeIL7YCgn/Dog+YGMGoSV1RIHzVOgelRm+
+ntwwbbnDO37jf520luzTB2EWuWHB5sJQPhq15sGT3GelKsuAiAHvtec1TK6/4QoG
+YnfxacHnFBPk8AOY//sA4fExlpN6MILCNyv+qvoyXDVLBpMEXpwsImUWhHzlmOZW
+WOf7xeDzo4VXyf9Qf815HdbUSGR4oa7Ks/XjlfTvD72Lp38CgcBj4cZu9aeR6jQ/
+z6XPi6M/iiVxhrW5qL2pWegSYBB5TsLnAafNs7j/WVtKIErJWe+QzGk7fKqH2SEJ
+YkZAWv01d+UPojd2pfvQY/EwWe62oo4NpDCHMS6ql/PwIaxSZS7lR535AKiHGVJJ
+3AJi3vvfVRY1Re+p7LRkaDlVJQNDfZONKg1VG+3Y2MMgA1NyA7w54t8i1VlM5FjS
+r8FDDHKRGwNvLINus54JWj4StKec4MlHA8ZUww37zz5GzbsTrx0=
+-----END RSA PRIVATE KEY-----

sparta-d/src/load_balancer.rs  (+338 -0)

@@ -0,0 +1,338 @@
+use crate::omap::ObliviousMap;
+pub use crate::record::{IndexRecord, Record, RecordType, SubmapRecord};
+use fastapprox::fast;
+use otils::{self, Max, ObliviousOps};
+use rayon::ThreadPool;
+use std::{
+    cmp,
+    f64::consts::E,
+    sync::{Arc, Mutex},
+    // time::UNIX_EPOCH,
+};
+
+const LAMBDA: usize = 128;
+
+pub struct LoadBalancer {
+    num_users: i64,
+    num_submaps: usize,
+    num_threads: usize,
+    pool: ThreadPool,
+    pub user_store: Vec<IndexRecord>,
+    pub submaps: Vec<ObliviousMap>,
+}
+
+impl LoadBalancer {
+    pub fn new(num_users: i64, num_threads: usize, num_submaps: usize) -> Self {
+        let pool = rayon::ThreadPoolBuilder::new()
+            .num_threads(num_threads)
+            .build()
+            .unwrap();
+
+        let mut user_store = Vec::new();
+        user_store.reserve(num_users as usize);
+        user_store.extend((0..num_users).map(|i| IndexRecord::new(i, RecordType::User)));
+
+        let mut submaps = Vec::with_capacity(num_submaps as usize);
+        submaps.extend((0..num_submaps).map(|_| ObliviousMap::new()));
+
+        LoadBalancer {
+            num_users,
+            num_submaps,
+            num_threads,
+
+            pool,
+            user_store,
+            submaps,
+        }
+    }
+
+    fn pad_size(&self, num_requests: f64) -> usize {
+        let num_submaps = self.num_submaps as f64;
+        let mu = num_requests / num_submaps;
+        let gamma = (num_submaps + 2_f64.powf(LAMBDA as f64)).ln();
+        let rhs = (gamma / mu - 1_f64) / E;
+        num_requests
+            .min(mu * E.powf(fast::lambertw(rhs as f32) as f64 + 1_f64))
+            .ceil() as usize
+    }
+
+    pub fn pad_for_submap(
+        &self,
+        mut requests: Vec<SubmapRecord>,
+        submap_size: usize,
+        is_send: bool,
+    ) -> Vec<SubmapRecord> {
+        requests.reserve(self.num_submaps * submap_size);
+
+        for submap in 0..self.num_submaps {
+            if is_send {
+                requests.extend(SubmapRecord::dummy_send(submap_size, submap as u8));
+            } else {
+                requests.extend(SubmapRecord::dummy_fetch(submap_size, submap as u8));
+            }
+        }
+        requests
+    }
+
+    pub fn get_submap_requests(
+        &self,
+        requests: Vec<IndexRecord>,
+        submap_size: usize,
+        is_send: bool,
+    ) -> Vec<SubmapRecord> {
+        let requests: Vec<SubmapRecord> = requests.into_iter().map(|r| SubmapRecord(r.0)).collect();
+
+        let mut requests = self.pad_for_submap(requests, submap_size, is_send);
+
+        requests = otils::sort(requests, &self.pool, self.num_threads); // sort by omap, then by dummy
+
+        let mut prev_map = self.num_submaps;
+        let mut remaining_marks = submap_size as i32;
+        for request in requests.iter_mut() {
+            let submap = request.0.map as u32;
+            remaining_marks = i32::oselect(
+                submap != prev_map as u32,
+                submap_size as i32,
+                remaining_marks,
+            );
+            request.0.mark = u16::oselect(remaining_marks > 0, 1, 0);
+            remaining_marks += i32::oselect(remaining_marks > 0, -1, 0);
+            prev_map = submap as usize;
+        }
+
+        otils::compact(
+            &mut requests[..],
+            |r| r.0.mark == 1,
+            &self.pool,
+            self.num_threads,
+        );
+        requests.truncate(self.num_submaps * submap_size);
+        requests
+    }
+
+    fn propagate_send_indices(&mut self) {
+        let mut idx: u32 = 0;
+        let mut is_same_u: bool;
+
+        let mut user_store_iter = self.user_store.iter_mut().peekable();
+        while let Some(record) = user_store_iter.next() {
+            let is_user_store = record.0.is_user_store();
+
+            idx = u32::oselect(
+                is_user_store,
+                cmp::max(record.0.last_fetch, record.0.last_send),
+                idx + 1,
+            );
+
+            record.0.idx = u32::oselect(is_user_store, 0, record.get_idx(idx));
+            record.0.map = (record.0.idx % (self.num_submaps as u32)) as u8;
+            record.0.last_send = idx;
+
+            if let Some(next_record) = user_store_iter.peek() {
+                is_same_u = record.0.uid == next_record.0.uid;
+            } else {
+                is_same_u = false;
+            }
+            record.0.mark = u16::oselect(is_same_u, 0, 1);
+        }
+    }
+
+    pub fn get_send_indices(&mut self, sends: Vec<IndexRecord>) -> Vec<IndexRecord> {
+        let num_requests = sends.len();
+        self.user_store.reserve(num_requests);
+        self.user_store.extend(sends);
+
+        self.user_store = otils::sort(
+            std::mem::take(&mut self.user_store),
+            &self.pool,
+            self.num_threads,
+        );
+        self.propagate_send_indices();
+
+        otils::compact(
+            &mut self.user_store[..],
+            |r| r.is_request(),
+            &self.pool,
+            self.num_threads,
+        );
+        let requests = self.user_store.drain(0..num_requests).collect();
+
+        otils::compact(
+            &mut self.user_store[..],
+            |r| r.is_updated_user_store(),
+            &self.pool,
+            self.num_threads,
+        );
+
+        self.user_store.truncate(self.num_users as usize);
+        self.user_store.iter_mut().for_each(|r| {
+            r.set_user_store();
+        });
+
+        requests
+    }
+
+    pub fn batch_send(&mut self, sends: Vec<Record>) {
+        let sends = sends.into_iter().map(|r| IndexRecord(r)).collect();
+        let requests = self.get_send_indices(sends);
+        let submap_size = self.pad_size(requests.len() as f64);
+        let mut requests: Vec<Record> = self
+            .get_submap_requests(requests, submap_size, true)
+            .into_iter()
+            .map(|r| r.0)
+            .collect();
+
+        let mut remaining_submaps = &mut self.submaps[..];
+
+        self.pool.scope(|s| {
+            for _ in 0..self.num_submaps {
+                let (submap, rest_submaps) = remaining_submaps.split_at_mut(1);
+                remaining_submaps = rest_submaps;
+
+                let batch = requests.drain(0..submap_size).collect();
+                s.spawn(|_| submap[0].batch_send(batch));
+            }
+
+            // let (submap, rest_submaps) = remaining_submaps.split_at_mut(1);
+            // remaining_submaps = rest_submaps;
+
+            // let batch = requests.drain(0..submap_size).collect();
+            // submap[0].batch_send(batch);
+        });
+    }
+
+    fn update_with_fetches(&mut self, fetches: Vec<IndexRecord>, num_fetches: usize) {
+        self.user_store.reserve(num_fetches);
+        for fetch in fetches.into_iter() {
+            self.user_store.extend(fetch.dummy_fetches());
+        }
+    }
+
+    fn propagate_fetch_indices(&mut self) {
+        let mut idx: u32 = 0;
+        let mut is_same_u: bool;
+
+        let mut user_store_iter = self.user_store.iter_mut().peekable();
+        while let Some(record) = user_store_iter.next() {
+            let is_user_store = record.0.is_user_store();
+
+            idx = u32::oselect(is_user_store, record.0.last_fetch, idx + 1);
+
+            record.0.idx = u32::oselect(is_user_store, 0, record.get_idx(idx));
+            record.0.map = (record.0.idx % (self.num_submaps as u32)) as u8;
+            record.0.last_fetch = idx;
+
+            if let Some(next_record) = user_store_iter.peek() {
+                is_same_u = record.0.uid == next_record.0.uid;
+            } else {
+                is_same_u = false;
+            }
+            record.0.mark = u16::oselect(is_same_u, 0, 1);
+        }
+    }
+
+    pub fn get_fetch_indices(
+        &mut self,
+        fetches: Vec<IndexRecord>,
+        num_requests: usize,
+    ) -> Vec<IndexRecord> {
+        self.update_with_fetches(fetches, num_requests);
+
+        self.user_store = otils::sort(
+            std::mem::take(&mut self.user_store),
+            &self.pool,
+            self.num_threads,
+        );
+        self.propagate_fetch_indices();
+
+        otils::compact(
+            &mut self.user_store[..],
+            |r| r.is_request(),
+            &self.pool,
+            self.num_threads,
+        );
+        let deliver = self.user_store.drain(0..num_requests).collect();
+
+        otils::compact(
+            &mut self.user_store[..],
+            |r| r.is_updated_user_store(),
+            &self.pool,
+            self.num_threads,
+        );
+
+        self.user_store.truncate(self.num_users as usize);
+        self.user_store.iter_mut().for_each(|r| {
+            r.set_user_store();
+        });
+
+        deliver
+    }
+
+    pub fn batch_fetch(&mut self, fetches: Vec<Record>) -> (Vec<Record>, usize) {
+        let num_requests = fetches
+            .iter()
+            .fold(0, |acc, fetch| acc + fetch.data as usize);
+        let fetches = fetches.into_iter().map(|r| IndexRecord(r)).collect();
+
+        // let t1 = std::time::SystemTime::now()
+        //     .duration_since(UNIX_EPOCH)
+        //     .unwrap()
+        //     .as_secs_f64();
+
+        let requests = self.get_fetch_indices(fetches, num_requests);
+
+        let submap_size = self.pad_size(requests.len() as f64);
+
+        let mut requests: Vec<Record> = self
+            .get_submap_requests(requests, submap_size, false)
+            .into_iter()
+            .map(|r| r.0)
+            .collect();
+
+        let remaining_submaps = &mut self.submaps[..];
+        let responses: Arc<Mutex<Vec<IndexRecord>>> = Arc::new(Mutex::new(Vec::with_capacity(
+            submap_size * self.num_submaps,
+        )));
+
+        // let t2 = std::time::SystemTime::now()
+        //     .duration_since(UNIX_EPOCH)
+        //     .unwrap()
+        //     .as_secs_f64();
+
+        let (submap, _rest_submaps) = remaining_submaps.split_at_mut(1);
+        let batch = requests.drain(0..submap_size).collect();
+        {
+            let responses = Arc::clone(&responses);
+            let response = submap[0].batch_fetch(batch, &self.pool, self.num_threads);
+            let mut responses = responses.lock().unwrap();
+            responses.extend(response);
+            responses
+                .extend((0..submap_size * (self.num_submaps - 1)).map(|_| IndexRecord::maximum()));
+        }
+
+        // let t3 = std::time::SystemTime::now()
+        //     .duration_since(UNIX_EPOCH)
+        //     .unwrap()
+        //     .as_secs_f64();
+
+        let mutex = Arc::into_inner(responses).unwrap();
+        let mut responses: Vec<IndexRecord> = mutex.into_inner().unwrap();
+        responses = otils::sort(responses, &self.pool, self.num_threads);
+        otils::compact(
+            &mut responses,
+            |r| r.0.is_send(),
+            &self.pool,
+            self.num_threads,
+        );
+
+        // let t4 = std::time::SystemTime::now()
+        //     .duration_since(UNIX_EPOCH)
+        //     .unwrap()
+        //     .as_secs_f64();
+
+        (
+            responses.drain(0..num_requests).map(|r| r.0).collect(),
+            submap_size * self.num_submaps,
+        )
+    }
+}
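
For readability, the per-submap padding that pad_size computes above can be restated outside the diff: with n requests, k submaps, mu = n/k, lambda = 128, and gamma = ln(k + 2^lambda), each submap's batch is padded to min(n, ceil(mu * e^(W((gamma/mu - 1)/e) + 1))), where W is the Lambert W function (fast::lambertw in the Rust code). A Python transcription of that expression, assuming scipy's lambertw is an acceptable stand-in for fastapprox's approximation:

    import math
    from scipy.special import lambertw  # assumption: stand-in for fastapprox::fast::lambertw

    LAMBDA = 128

    def pad_size(num_requests: float, num_submaps: int) -> int:
        # Mirrors LoadBalancer::pad_size in sparta-d/src/load_balancer.rs.
        mu = num_requests / num_submaps
        gamma = math.log(num_submaps + 2.0 ** LAMBDA)
        rhs = (gamma / mu - 1.0) / math.e
        return math.ceil(min(num_requests, mu * math.exp(lambertw(rhs).real + 1.0)))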

sparta-d/src/main.rs  (+82 -0)

@@ -0,0 +1,82 @@
+mod load_balancer;
+mod omap;
+mod record;
+
+use clap::Parser;
+use load_balancer::LoadBalancer;
+use record::Record;
+use std::time::UNIX_EPOCH;
+
+const RTT: f64 = 0.160; // 160ms
+const BPS: f64 = 125000000.0; //bytes per second
+
+/// Baseline oblivious sort based multiqueue.
+#[derive(Parser, Debug)]
+#[command(version, about, long_about = None)]
+struct Args {
+    /// Number of send requests to store in the database.
+    sends: usize,
+
+    /// Number of messages to fetch from the database.
+    fetches: u64,
+
+    /// Total number of threads available.
+    threads: usize,
+
+    /// Number of users in the user store.
+    users: usize,
+
+    /// Number of submaps.
+    maps: usize,
+
+    /// Total number of runs.
+    #[arg(short, long, default_value = "1")]
+    runs: usize,
+
+    /// Number of runs before measurements are recorded.
+    #[arg(short, long, default_value = "0")]
+    warmup_runs: usize,
+}
+
+fn main() {
+    let args = Args::parse();
+
+    let mut l = LoadBalancer::new(args.users as i64, args.threads, args.maps);
+    let sends: Vec<Record> = (0..args.sends)
+        .map(|x| Record::send(0 as i64, x.try_into().unwrap()))
+        .collect();
+
+    l.batch_send(sends);
+    let mut net_size = 0;
+
+    let results: Vec<f64> = (0..(args.runs + args.warmup_runs))
+        .map(|_| {
+            let start = std::time::SystemTime::now()
+                .duration_since(UNIX_EPOCH)
+                .unwrap()
+                .as_secs_f64();
+            let (_responses, ns) = l.batch_fetch(vec![Record::fetch(0, args.fetches)]);
+            net_size = ns;
+            let end = std::time::SystemTime::now()
+                .duration_since(UNIX_EPOCH)
+                .unwrap()
+                .as_secs_f64();
+
+            // for response in responses.iter() {
+            //     println!("{:?}", response);
+            // }
+
+            end - start + RTT + ((net_size * std::mem::size_of::<Record>() * 2) as f64 / BPS)
+        })
+        .collect();
+
+    print!("{}\t", args.sends);
+    for result in results[..].iter() {
+        print!(
+            "{}\t",
+            *result,
+            // *result - RTT - ((net_size * std::mem::size_of::<Record>() * 2) as f64 / BPS)
+        );
+    }
+    println!();
+}
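
The per-run figure printed above is wall-clock compute time plus a modeled network cost: one RTT (0.160 s) plus the time to move net_size * size_of::<Record>() * 2 bytes at BPS = 125,000,000 bytes per second (1 Gbit/s). A small sketch of that model; the 128-byte record_size default is only an illustrative assumption, the Rust code uses std::mem::size_of::<Record>():

    RTT = 0.160          # seconds, as in sparta-d/src/main.rs
    BPS = 125_000_000.0  # bytes per second (1 Gbit/s)

    def reported_latency(compute_seconds: float, net_size: int, record_size: int = 128) -> float:
        # end - start + RTT + (net_size * size_of::<Record>() * 2) / BPS
        return compute_seconds + RTT + (net_size * record_size * 2) / BPS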

sparta-d/src/omap.rs  (+124 -0)

@@ -0,0 +1,124 @@
+use crate::record::{IndexRecord, Record, RecordType};
+use otils::{Max, ObliviousOps};
+use rayon::ThreadPool;
+use std::cmp::Ordering;
+
+struct MapRecord(Record);
+
+impl MapRecord {
+    fn dummy_send(idx: u32) -> Self {
+        MapRecord(Record::new(0, RecordType::Dummy, 0, 0, idx))
+    }
+
+    fn should_deliver(&self) -> bool {
+        !self.0.is_fetch() && self.0.mark == 1
+    }
+
+    fn should_defer(&self) -> bool {
+        !self.0.is_fetch() && self.0.mark == 0
+    }
+}
+
+impl PartialEq for MapRecord {
+    fn eq(&self, other: &Self) -> bool {
+        self.0.idx == other.0.idx && self.0.rec_type == other.0.rec_type
+    }
+}
+
+impl PartialOrd for MapRecord {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        let idx_ord = self.0.idx.partial_cmp(&other.0.idx);
+        let type_ord = self.0.rec_type.partial_cmp(&other.0.rec_type);
+        match idx_ord {
+            Some(Ordering::Equal) => type_ord,
+            x => x,
+        }
+    }
+}
+
+impl Max for MapRecord {
+    fn maximum() -> Self {
+        MapRecord(Record::new(0, RecordType::Dummy, 0, 0, u32::MAX))
+    }
+}
+
+pub struct ObliviousMap {
+    message_store: Vec<MapRecord>,
+}
+
+impl ObliviousMap {
+    pub fn new() -> Self {
+        ObliviousMap {
+            message_store: Vec::new(),
+        }
+    }
+
+    pub fn batch_send(&mut self, requests: Vec<Record>) {
+        self.message_store.reserve(requests.len());
+        self.message_store
+            .extend(requests.into_iter().map(|r| MapRecord(r)));
+    }
+
+    fn update_with_fetches(&mut self, requests: Vec<Record>) {
+        self.message_store.reserve(2 * requests.len());
+
+        // add padding for fetches
+        self.message_store.extend(
+            requests
+                .iter()
+                .map(|record| MapRecord::dummy_send(record.idx)),
+        );
+
+        // add fetches
+        self.message_store
+            .extend(requests.into_iter().map(|r| MapRecord(r)));
+    }
+
+    pub fn batch_fetch(
+        &mut self,
+        requests: Vec<Record>,
+        pool: &ThreadPool,
+        num_threads: usize,
+    ) -> Vec<IndexRecord> {
+        // println!("num fetches {}", requests.len());
+
+        let final_size = self.message_store.len();
+        let num_requests = requests.len();
+
+        self.update_with_fetches(requests);
+
+        self.message_store =
+            otils::sort(std::mem::take(&mut self.message_store), pool, num_threads);
+
+        let mut prev_idx = u32::MAX;
+        let mut remaining = 0;
+        for record in self.message_store.iter_mut() {
+            remaining = i32::oselect(prev_idx == record.0.idx, remaining, 0);
+            record.0.mark = u16::oselect(record.0.is_fetch(), 0, u16::oselect(remaining > 0, 1, 0));
+
+            prev_idx = record.0.idx;
+            remaining += i32::oselect(record.0.is_fetch(), 1, i32::oselect(remaining > 0, -1, 0));
+        }
+
+        otils::compact(
+            &mut self.message_store[..],
+            |record| record.should_deliver(),
+            pool,
+            num_threads,
+        );
+        let response = self
+            .message_store
+            .drain(0..num_requests)
+            .map(|r| IndexRecord(r.0))
+            .collect();
+
+        otils::compact(
+            &mut self.message_store[..],
+            |record| record.should_defer(),
+            pool,
+            num_threads,
+        );
+        self.message_store.truncate(final_size);
+        response
+    }
+}
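
The oselect-based loop in batch_fetch above is written to be data-oblivious; a plain restatement of the same marking rule may help when reading it. After the sort by (idx, record type), the fetches stored under an index precede the sends under it, and each fetch claims one record with the same idx (a real send if one remains, otherwise one of the dummy sends added as padding). A non-oblivious sketch of that rule, with illustrative field names:

    def mark_deliveries(records):
        # records: dicts with keys "idx", "is_fetch", "mark", already sorted by (idx, type).
        prev_idx, remaining = None, 0
        for r in records:
            if r["idx"] != prev_idx:
                remaining = 0
            if r["is_fetch"]:
                r["mark"] = 0
                remaining += 1
            else:
                r["mark"] = 1 if remaining > 0 else 0
                if remaining > 0:
                    remaining -= 1
            prev_idx = r["idx"]
        return records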

sparta-d/src/record.rs  (+159 -0)

@@ -0,0 +1,159 @@
+use blake3;
+use otils::Max;
+use std::cmp::Ordering;
+
+#[derive(Clone, Debug, PartialEq, PartialOrd)]
+pub enum RecordType {
+    User,
+    Fetch,
+    Send,
+    Dummy,
+}
+
+#[derive(Debug)]
+pub struct Record {
+    pub uid: i64,
+    pub idx: u32,
+    pub map: u8,
+
+    pub rec_type: RecordType,
+    pub mark: u16,
+
+    pub last_fetch: u32,
+    pub last_send: u32,
+
+    pub data: u64,
+    pub _dum: [u64; 12],
+}
+
+impl Record {
+    pub fn new(uid: i64, type_rec: RecordType, data: u64, map: u8, idx: u32) -> Self {
+        Record {
+            uid,
+            idx,
+            map,
+            rec_type: type_rec,
+            mark: 0,
+            last_fetch: 0,
+            last_send: 0,
+            data,
+            _dum: [0; 12],
+        }
+    }
+
+    pub fn send(uid: i64, message: u64) -> Self {
+        Record::new(uid, RecordType::Send, message, 0, 0)
+    }
+
+    pub fn fetch(uid: i64, volume: u64) -> Self {
+        Record::new(uid, RecordType::Fetch, volume, 0, 0)
+    }
+
+    pub fn is_user_store(&self) -> bool {
+        self.rec_type == RecordType::User
+    }
+
+    pub fn is_fetch(&self) -> bool {
+        self.rec_type == RecordType::Fetch
+    }
+
+    pub fn is_send(&self) -> bool {
+        self.rec_type == RecordType::Send
+    }
+}
+
+pub struct IndexRecord(pub Record);
+
+impl IndexRecord {
+    pub fn new(uid: i64, rec_type: RecordType) -> Self {
+        IndexRecord(Record::new(uid, rec_type, 0, 0, 0))
+    }
+
+    pub fn dummy_fetches(&self) -> Vec<Self> {
+        (0..self.0.data)
+            .map(|_| IndexRecord::new(self.0.uid, RecordType::Fetch))
+            .collect()
+    }
+
+    pub fn get_idx(&self, idx: u32) -> u32 {
+        let mut hasher = blake3::Hasher::new();
+        hasher.update(&self.0.uid.to_ne_bytes());
+        hasher.update(&idx.to_ne_bytes());
+        let hash = hasher.finalize();
+        u32::from_ne_bytes(<[u8; 4]>::try_from(&hash.as_bytes()[0..4]).unwrap())
+    }
+
+    pub fn is_request(&self) -> bool {
+        self.0.rec_type != RecordType::User
+    }
+
+    pub fn is_updated_user_store(&self) -> bool {
+        self.0.mark == 1 && self.0.uid != i64::MAX
+    }
+
+    pub fn set_user_store(&mut self) {
+        self.0.rec_type = RecordType::User;
+    }
+}
+
+impl PartialEq for IndexRecord {
+    fn eq(&self, other: &Self) -> bool {
+        self.0.uid == other.0.uid && self.0.rec_type == other.0.rec_type
+    }
+}
+
+impl PartialOrd for IndexRecord {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        let uid_ord = self.0.uid.partial_cmp(&other.0.uid);
+        let type_ord = self.0.rec_type.partial_cmp(&other.0.rec_type);
+        match uid_ord {
+            Some(Ordering::Equal) => type_ord,
+            x => x,
+        }
+    }
+}
+
+impl Max for IndexRecord {
+    fn maximum() -> Self {
+        IndexRecord(Record::new(i64::MAX, RecordType::Dummy, 0, 0, 0))
+    }
+}
+
+pub struct SubmapRecord(pub Record);
+
+impl SubmapRecord {
+    pub fn dummy_send(num_requests: usize, map: u8) -> Vec<Self> {
+        (0..num_requests)
+            .map(|_| SubmapRecord(Record::new(0, RecordType::Dummy, 0, map, u32::MAX)))
+            .collect()
+    }
+
+    pub fn dummy_fetch(num_requests: usize, map: u8) -> Vec<Self> {
+        (0..num_requests)
+            .map(|_| SubmapRecord(Record::new(0, RecordType::Fetch, 0, map, u32::MAX)))
+            .collect()
+    }
+}
+
+impl PartialEq for SubmapRecord {
+    fn eq(&self, other: &Self) -> bool {
+        self.0.map == other.0.map && self.0.rec_type == other.0.rec_type
+    }
+}
+
+impl PartialOrd for SubmapRecord {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        let map_ord = self.0.map.partial_cmp(&other.0.map);
+        let idx_ord = self.0.idx.partial_cmp(&other.0.idx);
+        match map_ord {
+            Some(Ordering::Equal) => idx_ord,
+            x => x,
+        }
+    }
+}
+
+impl Max for SubmapRecord {
+    fn maximum() -> Self {
+        SubmapRecord(Record::new(0, RecordType::Dummy, 0, u8::MAX, 0))
+    }
+}
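
For reference, get_idx above derives a 32-bit slot from a BLAKE3 hash of the user id and a request counter (the load balancer then takes idx % num_submaps to pick a submap). A Python sketch of the same computation; the PyPI blake3 package is assumed as a stand-in for the Rust blake3 crate, and little-endian byte order is assumed where the Rust code uses native-endian to_ne_bytes:

    import struct
    import blake3  # assumption: PyPI "blake3" package

    def get_idx(uid: int, idx: int) -> int:
        # Hash uid (i64) || idx (u32) and take the first 4 bytes as a u32,
        # mirroring IndexRecord::get_idx in sparta-d/src/record.rs.
        h = blake3.blake3()
        h.update(struct.pack("<q", uid))
        h.update(struct.pack("<I", idx))
        return struct.unpack("<I", h.digest()[:4])[0]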

sparta/Cargo.toml  (+1 -1)

@@ -12,7 +12,7 @@ rayon = "1.10.0"
 
 [package.metadata.fortanix-sgx]
 stack-size=0x400000
-heap-size=0x2000000000
+heap-size=0x800000000
 threads=49
 
 [profile.release]

sparta/src/load_balancer.rs  (+44 -13)

@@ -7,7 +7,7 @@ use std::{
     cmp,
     f64::consts::E,
     sync::{Arc, Mutex},
-    time::UNIX_EPOCH,
+    // time::UNIX_EPOCH,
 };
 
 const LAMBDA: usize = 128;
@@ -15,6 +15,7 @@ const LAMBDA: usize = 128;
 pub struct LoadBalancer {
     num_users: i64,
     num_submaps: usize,
+    num_threads: usize,
 
     pool: ThreadPool,
     pub user_store: Vec<IndexRecord>,
@@ -23,8 +24,9 @@ pub struct LoadBalancer {
 
 impl LoadBalancer {
     pub fn new(num_users: i64, num_threads: usize, num_submaps: usize) -> Self {
+        let component_threads = num_threads / (num_submaps + 1);
         let pool = rayon::ThreadPoolBuilder::new()
-            .num_threads(num_threads / (num_submaps + 1))
+            .num_threads(component_threads)
             .build()
             .unwrap();
 
@@ -33,13 +35,12 @@ impl LoadBalancer {
         user_store.extend((0..num_users).map(|i| IndexRecord::new(i, RecordType::User)));
 
         let mut submaps = Vec::with_capacity(num_submaps as usize);
-        submaps.extend(
-            (0..num_submaps).map(|_| ObliviousMap::new(num_threads / (num_submaps + 1) as usize)),
-        );
+        submaps.extend((0..num_submaps).map(|_| ObliviousMap::new(component_threads)));
 
         LoadBalancer {
             num_users,
             num_submaps,
+            num_threads: component_threads,
             pool,
             user_store,
             submaps,
@@ -84,7 +85,7 @@ impl LoadBalancer {
 
         let mut requests = self.pad_for_submap(requests, submap_size, is_send);
 
-        requests = otils::sort(requests, &self.pool); // sort by omap, then by dummy
+        requests = otils::sort(requests, &self.pool, self.num_threads); // sort by omap, then by dummy
 
         let mut prev_map = self.num_submaps;
         let mut remaining_marks = submap_size as i32;
@@ -100,7 +101,12 @@ impl LoadBalancer {
             prev_map = submap as usize;
         }
 
-        otils::compact(&mut requests[..], |r| r.0.mark == 1, &self.pool);
+        otils::compact(
+            &mut requests[..],
+            |r| r.0.mark == 1,
+            &self.pool,
+            self.num_threads,
+        );
         requests.truncate(self.num_submaps * submap_size);
         requests
     }
@@ -137,16 +143,26 @@ impl LoadBalancer {
         self.user_store.reserve(num_requests);
         self.user_store.extend(sends);
 
-        self.user_store = otils::sort(std::mem::take(&mut self.user_store), &self.pool);
+        self.user_store = otils::sort(
+            std::mem::take(&mut self.user_store),
+            &self.pool,
+            self.num_threads,
+        );
         self.propagate_send_indices();
 
-        otils::compact(&mut self.user_store[..], |r| r.is_request(), &self.pool);
+        otils::compact(
+            &mut self.user_store[..],
+            |r| r.is_request(),
+            &self.pool,
+            self.num_threads,
+        );
         let requests = self.user_store.drain(0..num_requests).collect();
 
         otils::compact(
             &mut self.user_store[..],
             |r| r.is_updated_user_store(),
             &self.pool,
+            self.num_threads,
         );
 
         self.user_store.truncate(self.num_users as usize);
@@ -223,16 +239,26 @@ impl LoadBalancer {
     ) -> Vec<IndexRecord> {
         self.update_with_fetches(fetches, num_requests);
 
-        self.user_store = otils::sort(std::mem::take(&mut self.user_store), &self.pool);
+        self.user_store = otils::sort(
+            std::mem::take(&mut self.user_store),
+            &self.pool,
+            self.num_threads,
+        );
         self.propagate_fetch_indices();
 
-        otils::compact(&mut self.user_store[..], |r| r.is_request(), &self.pool);
+        otils::compact(
+            &mut self.user_store[..],
+            |r| r.is_request(),
+            &self.pool,
+            self.num_threads,
+        );
         let deliver = self.user_store.drain(0..num_requests).collect();
 
         otils::compact(
             &mut self.user_store[..],
             |r| r.is_updated_user_store(),
             &self.pool,
+            self.num_threads,
         );
 
         self.user_store.truncate(self.num_users as usize);
@@ -314,8 +340,13 @@ impl LoadBalancer {
         //     .duration_since(UNIX_EPOCH)
         //     .unwrap()
         //     .as_nanos();
-        responses = otils::sort(responses, &self.pool);
-        otils::compact(&mut responses, |r| r.0.is_send(), &self.pool);
+        responses = otils::sort(responses, &self.pool, self.num_threads);
+        otils::compact(
+            &mut responses,
+            |r| r.0.is_send(),
+            &self.pool,
+            self.num_threads,
+        );
         // let end = std::time::SystemTime::now()
         //     .duration_since(UNIX_EPOCH)
         //     .unwrap()

sparta/src/main.rs  (+4 -4)

@@ -45,17 +45,17 @@ fn main() {
 
     l.batch_send(sends);
 
-    let results: Vec<u128> = (0..(args.runs + args.warmup_runs))
+    let results: Vec<f64> = (0..(args.runs + args.warmup_runs))
         .map(|_| {
             let start = std::time::SystemTime::now()
                 .duration_since(UNIX_EPOCH)
                 .unwrap()
-                .as_nanos();
+                .as_secs_f64();
             let _responses = l.batch_fetch(vec![Record::fetch(0, args.fetches)]);
             let end = std::time::SystemTime::now()
                 .duration_since(UNIX_EPOCH)
                 .unwrap()
-                .as_nanos();
+                .as_secs_f64();
 
             // for response in responses.iter() {
             //     println!("{:?}", response);
@@ -67,7 +67,7 @@ fn main() {
 
     print!("{}\t", args.sends);
     for result in results[..].iter() {
-        print!("{}\t", *result as f64 / 1000000000.0);
+        print!("{}\t", *result);
     }
     println!();
 }

sparta/src/omap.rs  (+9 -1)

@@ -43,6 +43,7 @@ impl Max for MapRecord {
 }
 
 pub struct ObliviousMap {
+    num_threads: usize,
     pool: ThreadPool,
     message_store: Vec<MapRecord>,
 }
@@ -56,6 +57,7 @@ impl ObliviousMap {
 
         let message_store = Vec::new();
         ObliviousMap {
+            num_threads,
             pool,
             message_store,
         }
@@ -91,7 +93,11 @@ impl ObliviousMap {
 
         self.update_with_fetches(requests);
 
-        self.message_store = otils::sort(std::mem::take(&mut self.message_store), &self.pool);
+        self.message_store = otils::sort(
+            std::mem::take(&mut self.message_store),
+            &self.pool,
+            self.num_threads,
+        );
 
         let mut prev_idx = u32::MAX;
         let mut remaining = 0;
@@ -107,6 +113,7 @@ impl ObliviousMap {
             &mut self.message_store[..],
             |r| r.should_deliver(),
             &self.pool,
+            self.num_threads,
         );
         let response = self
             .message_store
@@ -118,6 +125,7 @@ impl ObliviousMap {
             &mut self.message_store[..],
             |record| record.should_defer(),
             &self.pool,
+            self.num_threads,
         );
         self.message_store.truncate(final_size);
         response