
Tweaks to run_experiments

Sajin, 1 year ago
commit 16d87209d2
1 changed file with 32 additions and 9 deletions

run_experiments.py (+32 -9)

@@ -23,16 +23,22 @@ from gen_enclave_config import generate_config
 
 
 MANIFEST_FILE = "App/manifest.yaml"
 #MANIFESTS_FOLDER = "Manifests/"
-LOG_FOLDER = "Experiments_Plot/"
+LOG_FOLDER = "Experiments_Plot_170424_Final/"
 RESULT_FOLDER = "Experiment_results/"
 
-
+'''
 # N = number of clients
 N = [1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21]
 # M = number of servers
-M = [3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20, 24, 26, 28, 32, 36, 40, 48, 56, 64, 72]
+M = [1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20, 24, 26, 28, 32, 36, 40, 48, 56, 64, 72]
 # T = threads per server instance
 T = [2, 4, 8, 16]
+'''
+
+N = [1<<16, 1<<17, 1<<18, 1<<19, 1<<20]
+M = [72, 64, 48, 36, 24, 16, 8, 4, 2, 1]
+T = [16, 8, 4, 2, 1]
+
 # Max servers depending on number of threads
 M_MAX = {
     1:72, # 24 in M
@@ -51,11 +57,28 @@ def epoch_time(n, m, t, b):
     etime_base = 5
     clients_per_server = math.ceil(n/m)
     # Using 1 sec for ~50K clients in compute time
-    etime_compute = math.ceil(clients_per_server/50000)
-    etime_inc = math.ceil(clients_per_server/25000)
-    etime_client = ((math.ceil(2/t))**2) * etime_inc
-    etime = etime_base + etime_compute + etime_client
-    return etime
+    # Using 8 sec for 2^20 clients in route_compute time as the base for calculations below
+    # (About 1 sec actual route, 6.5 sec for storage generate_tokens
+    # and process_msgs)
+    etime_route_compute = 0.8 * math.ceil(clients_per_server/100000)
+    etime_precompute = 1.5 * math.ceil(clients_per_server/100000)
+
+    # If we have less than 3 threads the precompute happens sequentially
+    # so epoch_interval needs to account for all the precompute
+    #if(t < 3):
+    #    etime_precompute *=3
+
+    # Client time:
+    # Takes about 30 sec for handling 2^20 clients
+    etime_client = math.ceil(clients_per_server/100000) * 3
+    #if(t>3):
+    #    etime_client/=(t/4)
+
+    #etime_inc = math.ceil(clients_per_server/25000)
+    #etime_client = ((math.ceil(2/t))**2) * etime_inc
+    etime = etime_base + etime_precompute + etime_route_compute + etime_client
+    return int(etime)
+

 if __name__ == "__main__":

@@ -68,7 +91,7 @@ if __name__ == "__main__":
         # Set M_MAX depending on t
         m_end = M_MAX[t]
         for m in M:
-            if(m < m_end):
+            if(m <= m_end):
                 for n in N:
                     print("\n\n Running Experiment t = %d, m = %d, n = %d \n\n" % (t, m, n))
                     generate_manifest(n, m, t, b)
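
For reference, a minimal standalone sketch of the revised estimate in epoch_time() from the hunk above. The helper name estimated_epoch_time and the sample parameters are illustrative assumptions; in the code as committed, the thread-dependent adjustments are commented out, so t (and the unused b) do not change the result.

import math

def estimated_epoch_time(n, m):
    # Mirrors the revised epoch_time(): a 5 s base plus per-server costs
    # scaled in units of ~100K clients per server (illustrative sketch,
    # not the project's actual helper).
    etime_base = 5
    clients_per_server = math.ceil(n / m)
    scale = math.ceil(clients_per_server / 100000)
    etime_route_compute = 0.8 * scale   # ~8 s for 2^20 clients on one server
    etime_precompute = 1.5 * scale      # precompute phase
    etime_client = 3 * scale            # ~30 s for 2^20 clients on one server
    return int(etime_base + etime_precompute + etime_route_compute + etime_client)

# Example: 2^20 clients on a single server vs. spread over 16 servers
print(estimated_epoch_time(1 << 20, 1))   # 63
print(estimated_epoch_time(1 << 20, 16))  # 10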