
Preparing artifacts for external review

Commit 44de1f2746 by Stan Gurtler, 2 years ago
100 changed files with 3473 additions and 0 deletions
  1. README.md (+75 -0)
  2. cuckoo_simulation/.gitkeep (+0 -0)
  3. cuckoo_simulation/Cargo.lock (+120 -0)
  4. cuckoo_simulation/Cargo.toml (+14 -0)
  5. cuckoo_simulation/cuckoo_vars.py (+99 -0)
  6. cuckoo_simulation/src/bin/old-main.rs (+417 -0)
  7. cuckoo_simulation/src/main.rs (+102 -0)
  8. cuckoo_simulation/src/sim.rs (+214 -0)
  9. cuckoo_simulation/src/stats.rs (+127 -0)
  10. cuckoo_simulation/src/test.rs (+8 -0)
  11. cuckoo_simulation/src/types.rs (+19 -0)
  12. dhtpir_simulation/library/__pycache__/base_client.cpython-36.pyc (BIN)
  13. dhtpir_simulation/library/__pycache__/base_client.cpython-38.pyc (BIN)
  14. dhtpir_simulation/library/__pycache__/base_node.cpython-36.pyc (BIN)
  15. dhtpir_simulation/library/__pycache__/base_node.cpython-38.pyc (BIN)
  16. dhtpir_simulation/library/__pycache__/dht_common.cpython-36.pyc (BIN)
  17. dhtpir_simulation/library/__pycache__/dht_common.cpython-38.pyc (BIN)
  18. dhtpir_simulation/library/__pycache__/dht_simulator.cpython-36.pyc (BIN)
  19. dhtpir_simulation/library/__pycache__/dht_simulator.cpython-38.pyc (BIN)
  20. dhtpir_simulation/library/__pycache__/dhtpir_client.cpython-36.pyc (BIN)
  21. dhtpir_simulation/library/__pycache__/dhtpir_client.cpython-38.pyc (BIN)
  22. dhtpir_simulation/library/__pycache__/dhtpir_node.cpython-36.pyc (BIN)
  23. dhtpir_simulation/library/__pycache__/dhtpir_node.cpython-38.pyc (BIN)
  24. dhtpir_simulation/library/__pycache__/qp_client.cpython-36.pyc (BIN)
  25. dhtpir_simulation/library/__pycache__/qp_client.cpython-38.pyc (BIN)
  26. dhtpir_simulation/library/__pycache__/qp_node.cpython-36.pyc (BIN)
  27. dhtpir_simulation/library/__pycache__/qp_node.cpython-38.pyc (BIN)
  28. dhtpir_simulation/library/__pycache__/qplasthop_client.cpython-36.pyc (BIN)
  29. dhtpir_simulation/library/__pycache__/qplasthop_client.cpython-38.pyc (BIN)
  30. dhtpir_simulation/library/__pycache__/qplasthop_node.cpython-36.pyc (BIN)
  31. dhtpir_simulation/library/__pycache__/qplasthop_node.cpython-38.pyc (BIN)
  32. dhtpir_simulation/library/__pycache__/rcp_client.cpython-36.pyc (BIN)
  33. dhtpir_simulation/library/__pycache__/rcp_client.cpython-38.pyc (BIN)
  34. dhtpir_simulation/library/__pycache__/rcp_node.cpython-36.pyc (BIN)
  35. dhtpir_simulation/library/__pycache__/rcp_node.cpython-38.pyc (BIN)
  36. dhtpir_simulation/library/base_client.py (+141 -0)
  37. dhtpir_simulation/library/base_node.py (+133 -0)
  38. dhtpir_simulation/library/dht_common.py (+72 -0)
  39. dhtpir_simulation/library/dht_simulator.py (+244 -0)
  40. dhtpir_simulation/library/dhtpir_client.py (+57 -0)
  41. dhtpir_simulation/library/dhtpir_node.py (+124 -0)
  42. dhtpir_simulation/library/qp_client.py (+169 -0)
  43. dhtpir_simulation/library/qp_node.py (+90 -0)
  44. dhtpir_simulation/library/qplasthop_client.py (+80 -0)
  45. dhtpir_simulation/library/qplasthop_node.py (+80 -0)
  46. dhtpir_simulation/library/rcp_client.py (+166 -0)
  47. dhtpir_simulation/library/rcp_node.py (+177 -0)
  48. dhtpir_simulation/options_setup.c (+126 -0)
  49. dhtpir_simulation/run_tests.c (+101 -0)
  50. dhtpir_simulation/test_harness.py (+343 -0)
  51. dhtpir_simulation/timetests.sh (+6 -0)
  52. outputs/Base_Node/1024/1/1600/a/avg_node.out (+7 -0)
  53. outputs/Base_Node/1024/1/1600/a/avg_node_pub.out (+7 -0)
  54. outputs/Base_Node/1024/1/1600/a/client.out (+1 -0)
  55. outputs/Base_Node/1024/1/1600/a/client_pub.out (+1 -0)
  56. outputs/Base_Node/1024/1/1600/a/usage.out (+1 -0)
  57. outputs/Base_Node/1024/1/1600/b/avg_node.out (+7 -0)
  58. outputs/Base_Node/1024/1/1600/b/avg_node_pub.out (+7 -0)
  59. outputs/Base_Node/1024/1/1600/b/client.out (+1 -0)
  60. outputs/Base_Node/1024/1/1600/b/client_pub.out (+1 -0)
  61. outputs/Base_Node/1024/1/1600/b/usage.out (+1 -0)
  62. outputs/Base_Node/1024/1/1600/c/avg_node.out (+7 -0)
  63. outputs/Base_Node/1024/1/1600/c/avg_node_pub.out (+7 -0)
  64. outputs/Base_Node/1024/1/1600/c/client.out (+1 -0)
  65. outputs/Base_Node/1024/1/1600/c/client_pub.out (+1 -0)
  66. outputs/Base_Node/1024/1/1600/c/usage.out (+1 -0)
  67. outputs/Base_Node/1024/1/1600/d/avg_node.out (+7 -0)
  68. outputs/Base_Node/1024/1/1600/d/avg_node_pub.out (+7 -0)
  69. outputs/Base_Node/1024/1/1600/d/client.out (+1 -0)
  70. outputs/Base_Node/1024/1/1600/d/client_pub.out (+1 -0)
  71. outputs/Base_Node/1024/1/1600/d/usage.out (+1 -0)
  72. outputs/Base_Node/1024/1/1600/e/avg_node.out (+7 -0)
  73. outputs/Base_Node/1024/1/1600/e/avg_node_pub.out (+7 -0)
  74. outputs/Base_Node/1024/1/1600/e/client.out (+1 -0)
  75. outputs/Base_Node/1024/1/1600/e/client_pub.out (+1 -0)
  76. outputs/Base_Node/1024/1/1600/e/usage.out (+1 -0)
  77. outputs/Base_Node/1024/1/1600/f/avg_node.out (+7 -0)
  78. outputs/Base_Node/1024/1/1600/f/avg_node_pub.out (+7 -0)
  79. outputs/Base_Node/1024/1/1600/f/client.out (+1 -0)
  80. outputs/Base_Node/1024/1/1600/f/client_pub.out (+1 -0)
  81. outputs/Base_Node/1024/1/1600/f/usage.out (+1 -0)
  82. outputs/Base_Node/1024/1/1600/g/avg_node.out (+7 -0)
  83. outputs/Base_Node/1024/1/1600/g/avg_node_pub.out (+7 -0)
  84. outputs/Base_Node/1024/1/1600/g/client.out (+1 -0)
  85. outputs/Base_Node/1024/1/1600/g/client_pub.out (+1 -0)
  86. outputs/Base_Node/1024/1/1600/g/usage.out (+1 -0)
  87. outputs/Base_Node/1024/1/1600/h/avg_node.out (+7 -0)
  88. outputs/Base_Node/1024/1/1600/h/avg_node_pub.out (+7 -0)
  89. outputs/Base_Node/1024/1/1600/h/client.out (+1 -0)
  90. outputs/Base_Node/1024/1/1600/h/client_pub.out (+1 -0)
  91. outputs/Base_Node/1024/1/1600/h/usage.out (+1 -0)
  92. outputs/Base_Node/1024/1/1600/i/avg_node.out (+7 -0)
  93. outputs/Base_Node/1024/1/1600/i/avg_node_pub.out (+7 -0)
  94. outputs/Base_Node/1024/1/1600/i/client.out (+1 -0)
  95. outputs/Base_Node/1024/1/1600/i/client_pub.out (+1 -0)
  96. outputs/Base_Node/1024/1/1600/i/usage.out (+1 -0)
  97. outputs/Base_Node/1024/1/1600/j/avg_node.out (+7 -0)
  98. outputs/Base_Node/1024/1/1600/j/avg_node_pub.out (+7 -0)
  99. outputs/Base_Node/1024/1/1600/j/client.out (+1 -0)
  100. outputs/Base_Node/1024/1/1600/j/client_pub.out (+1 -0)

+ 75 - 0
README.md

@@ -0,0 +1,75 @@
+# DHTPIR
+
+#### Simulation code for research performed by Miti Mazmudar, Stan Gurtler, and Ian Goldberg at the University of Waterloo
+
+## Directories
+
+- `./dhtpir_simulation/`: The code used to simulate DHTPIR and other related works, in order to compare their respective computational and bandwidth usage. (Details on running this code in "Usage" below)
+- `./cuckoo_simulation/`: The code used to simulate behaviour of the cuckoo and commensal cuckoo rules referred to in the work, in order to obtain specific values for the relevant parameters. (Details on running this code in "Usage" below)
+- `./outputs/`: The raw outputs of the code in `./dhtpir_simulation/`, as obtained when we ran it on our machines. Subdirectories are organized by type of system, then by the number of quorums in the simulation, then by the number of nodes per quorum, then by the number of documents in the simulation, then by seed. At the leaf level, a variety of output files (depending on the system) describe the different measured values for that simulation run (see the example path after this list).
+- `./plots/`: The graphs we generated from these outputs, as well as the code to generate these graphs. Many of these graphs are in the work itself, but a few additional graphs are also present in this directory; further detail on graphs in "Graphs" below.
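+
+For example, `./outputs/Base_Node/1024/1/1600/a/` contains the output files from a BaseNode run with 1024 quorums (for BaseNode, this is the total number of nodes), 1 node per quorum, 1600 documents, and seed `a`.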
+
+## Usage
+
+### DHTPIR Simulation
+
+To perform an individual run of our simulator, you will need to be in the `./dhtpir_simulation/` directory. From there, run `./test_harness.py <type> <numDocuments> <sizeOfDocuments> <numGroups> <numNodes> --seed [seed]`, with appropriate values for each of these variables.
+
+`<type>` takes one of the following forms, depending on which type of system you intend to simulate:
+- `-b`: "BaseNode", or a basic DHT system with no additional privacy-preserving or secure behaviour of any kind simulated
+- `-r`: "RCPNode", which simulates the RCP procedures described by Young et al. on top of a basic DHT system
+- `-q`: "QPNode", which simulates the QP procedures described by Backes et al. on top of the RCP system (but does not implement any form of oblivious transfer at the final quorum, which returns the sought document)
+- `-l`: "QPNode + LastHop", which simulates the same QP procedures as `-q`, but additionally implements oblivious transfer at the final quorum (which returns the sought document)
+- `-d`: "DHTPIRNode", which simulates DHTPIR procedures (that is, QP procedures until the final quorum, and then IT-PIR among the final quorum instead of oblivious transfer)
+
+`<numDocuments>` describes the number of documents that will be in the system during simulation (as used in the paper, these are intended to simulate erasure-coded chunks of documents, rather than full documents themselves).
+
+`<sizeOfDocuments>` describes the size of documents in bytes that will be in the system during simulation (again, as used in the paper, these actually set the size of the erasure-coded chunks of documents: we used 512 bytes consistently throughout our simulation). All documents in the system will be randomly generated with exactly the specified number of bytes.
+
+`<numGroups>` describes the number of quorums in the system as a whole. For BaseNode, this describes the number of total nodes in the DHT, as BaseNode does not use quorums at all.
+
+`<numNodes>` describes the number of nodes *per quorum* in the system. For BaseNode, this value is ignored, as BaseNode does not use quorums.
+
+`[seed]` is an optional parameter, which seeds the randomness used to generate documents. Because document IDs within the system are their hashes (in our case, the SHA256 hash of the file), this impacts the distribution of documents throughout the system; in our simulations, we ran several instances of each type of node with each set of relevant parameters, varying the seed, in a Monte Carlo simulation.
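+
+For example, `./test_harness.py -d 1600 512 16 16 --seed 1` would simulate DHTPIRNode with 1600 documents of 512 bytes each, 16 quorums, and 16 nodes per quorum, using seed 1 (these particular values are purely illustrative).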
+
+In addition, `./test_harness.py -h` displays a help message similar in content to what is discussed in this README.
+
+There are two other programs in the directory, `options_setup.c` and `run_tests.c`. `options_setup.c` generates a config file used by `run_tests.c` to run several simulation runs simultaneously/in succession. Both can be compiled in the normal ways (e.g. `gcc -o options_setup options_setup.c`), without any additional installation necessary beyond gcc or clang (though `run_tests.c` will need the `-pthread` flag during compilation/linking). `options_setup.c` does not take any arguments, and `run_tests.c` takes one argument: the maximum number of processes in which it may run simulations simultaneously.
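+
+For example, `gcc -o options_setup options_setup.c` and `gcc -pthread -o run_tests run_tests.c` compile the two programs; running `./options_setup` then generates the config file, and `./run_tests 4` would run the configured simulations in at most 4 processes at a time (the `4` here is only an example value).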
+
+### Cuckoo Simulation
+
+In order to build this code, you will need Cargo installed. From the `./cuckoo_simulation/` directory, run `cargo build` in order to build the simulation code. With the code built, run `cargo run --bin cuckoo_simulation <honest> <malicious> <log2_regions> <log2_regionsPerQuorum> <k> <nodesPerRegion> <iterations> <seed>`, with appropriate values for each of these variables.
+
+`<honest>` describes the number of honest nodes in the system.
+
+`<malicious>` describes the number of malicious nodes in the system.
+
+`<log2_regions>` describes log2(the number of regions in the system).
+
+`<log2_regionsPerQuorum>` describes log2(the number of regions per quorum in the system).
+
+`<k>` describes k, where k-1 is the number of secondary joins required by the commensal cuckoo rule before a new primary join is accepted.
+
+`<nodesPerRegion>` describes the desired number of nodes in a given region.
+
+`<iterations>` describes the number of iterations to run after initialization.
+
+`<seed>` is a random seed.
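+
+For example, `cargo run --bin cuckoo_simulation 100000 10000 12 2 4 20 1000000 1` would simulate 100000 honest and 10000 malicious nodes across 2^12 = 4096 regions grouped into quorums of 2^2 = 4 regions each, with k=4, 20 desired nodes per region, 1000000 iterations, and seed 1 (these values are purely illustrative).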
+
+There is an additional file in this directory, `cuckoo_vars.py`, which was used to do arithmetic to calculate certain variables associated with the cuckoo and commensal cuckoo rules as used in the work.
+
+## Graphs
+
+The graphs present in the `./plots/` directory are generated by `make_graphs.py` from the output in the `./outputs/` directory, using `plots.json` as a config file that determines the text content of the graphs. Running `./make_graphs.py` from the `./plots/` directory will regenerate these graphs, so long as `plots.json` has not been altered (or has been altered appropriately to describe new graphs).
+
+The graphs present are organized as follows: `<y-axis>-<x-axis>-<which>.pdf`.
+
+The `<y-axis>` value can be one of the following:
+- "latencies": latency per query request (generally in seconds)
+- "num_bytes_recv": number of bytes sent by the client (received by the system) per query request
+- "num_bytes_sent": number of bytes received by the client (sent by the system) per query request
+- "num_bytes_total": sum of "num_bytes_sent" and "num_bytes_recv"
+- "num_pub_bytes_total": number of bytes required for a client to publish a document (note that, as DHTPIR does not do anything novel around publishing a document, merely around retrieving it, this graph does not show much of interest)
+- "throughputs": throughput of the system, in maximum number of lookups possible per second
+
+The `<x-axis>` value can be one of the following:
+- "num_chunks_per_quorum": Number of chunks per quorum. The `<which>` associated with these graphs identifies what the number of quorums and nodes per quorum were in the simulation runs that make the selected graph.
+- "num_quorums": Number of quorums. The `<which>` associated with these graphs identifies what the number of chunks per quorum were in the simulation runs that make the selected graph.

+ 0 - 0
cuckoo_simulation/.gitkeep


+ 120 - 0
cuckoo_simulation/Cargo.lock

@@ -0,0 +1,120 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cuckoo_simulation"
+version = "0.1.0"
+dependencies = [
+ "rand",
+ "rand_core 0.5.1",
+ "rand_pcg",
+ "rand_xoshiro",
+ "reservoir-sampling",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7eb0c4e9c72ee9d69b767adebc5f4788462a3b45624acd919475c92597bcaf4f"
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
+
+[[package]]
+name = "rand"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core 0.6.1",
+ "rand_hc",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.6.1",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
+
+[[package]]
+name = "rand_core"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
+dependencies = [
+ "rand_core 0.6.1",
+]
+
+[[package]]
+name = "rand_pcg"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7de198537002b913568a3847e53535ace266f93526caf5c360ec41d72c5787f0"
+dependencies = [
+ "rand_core 0.6.1",
+]
+
+[[package]]
+name = "rand_xoshiro"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
+dependencies = [
+ "rand_core 0.5.1",
+]
+
+[[package]]
+name = "reservoir-sampling"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ae97164005cd70c087378a9c47a47449e3bb68b8098aefd1c8e9801e608df7"
+dependencies = [
+ "rand",
+]
+
+[[package]]
+name = "wasi"
+version = "0.10.2+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"

+ 14 - 0
cuckoo_simulation/Cargo.toml

@@ -0,0 +1,14 @@
+[package]
+name = "cuckoo_simulation"
+version = "0.1.0"
+authors = ["Miti Mazmudar <mitimazmudar@gmail.com>", "Ian Goldberg <iang@uwaterloo.ca>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+rand_core = "0.5.1"
+rand_xoshiro = "0.4.0"
+reservoir-sampling = "0.3.1"
+rand = "0.8.3"
+rand_pcg = "0.3.0"

+ 99 - 0
cuckoo_simulation/cuckoo_vars.py

@@ -0,0 +1,99 @@
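+# Arithmetic helpers used to derive the cuckoo/commensal cuckoo rule parameters referred to in the work (see the top-level README).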
+import math
+
+def get_b_0(k, epsilon, ell, u):
+    #    plot([(1+(1+u)*epsilon*x)/((1-ell)*x+(1+u)*epsilon*x+1), 0.25], (x, 0, 1))
+    return ((1+(1+u)*epsilon*k)/((1-ell)*k+(1+u)*epsilon*k+1))
+
+def get_delta_e(phi_e, gamma, K, n, epsilon):
+    return math.sqrt(abs(math.log(phi_e)*32*epsilon/(gamma*K**2*math.log(n))))
+
+def get_phi_e(delta_e, gamma, K, n, epsilon):
+    return math.exp(-1*gamma*((delta_e*K)**2)*math.log(n)/(32*epsilon))
+
+def get_delta_age_ell(phi_age_ell, c, K, n):
+    C=c*math.log(n)
+    return math.sqrt((2*C*K/n) - (2*math.log(phi_age_ell)/C))
+
+def get_phi_age_ell(delta_age_ell, C, K, n):
+    return math.exp(((C**2)*K/n) - ((delta_age_ell**2)*C/2))
+
+def get_delta_age_u(u, c, K, n):
+    C=c*math.log(n)
+    #delta_age_u**2/(1+delta_age_u)=A
+    A = ((2*C*K/n) - (2*math.log(u)/C))
+    #quadratic of the form x^2 - A*x - A = 0
+    #only one positive root as A is always positive.
+    root = 0.5 * (A + math.sqrt(A**2+4*A))
+    return root
+
+def get_phi_age_u(delta_age_u, C, K, n):
+    return math.exp(((C**2)*K/n) - ((delta_age_u**2)*C/(2*(1+delta_age_u))))
+
+def get_delta_n(gamma, epsilon, K, n):
+    return gamma*K*(math.log(n))**3/(epsilon*n)
+
+def get_delta_ell(delta_e, delta_age_ell, delta_n):
+    return delta_e + delta_age_ell - (delta_e * delta_age_ell) - ((1 - delta_e) * delta_n)
+
+def get_delta_u(delta_e, delta_age_u, delta_n):
+    return delta_e + delta_age_u + delta_e * delta_age_u + ((1 + delta_e) * delta_n)
+
+def get_p_i(phi_e, epsilon, phi_age_ell, phi_age_u, C):
+    phi_e_honest = phi_e**C
+    print(phi_e_honest)
+    phi_e_byz = (phi_e**(epsilon**2))**C
+    print(phi_e_byz)
+    p1 = phi_e_honest + 2*phi_age_ell + phi_e_byz
+    p2 = phi_e_honest + 2*phi_age_u + phi_e_byz
+    p3 = phi_e_honest + phi_age_ell
+    p4 = phi_e_byz + phi_age_u
+    return [p1, p2, p3, p4]
+
+n=10**8
+K=20
+c=40
+C=c*math.log(n)
+gamma=1
+epsilon=0.2
+
+delta_e = 0.1
+delta_age_ell = 0.12
+delta_age_u = 0.11
+
+#phi_age_ell_1=0.02
+#phi_age_u_1=0.02
+#phi_e=5*(10**-30)
+
+#delta_age_u_1 = get_delta_age_u(phi_age_u_1, c, K, n)
+#print("delta_age_u is: ", delta_age_u_1)
+
+#delta_age_ell_1 = get_delta_age_ell(phi_age_ell_1, c, K, n)
+#print("delta_age_l is: ", delta_age_ell_1)
+
+delta_e_2 = get_delta_e(5*(10**-30), gamma, K, n, epsilon)
+print("delta_e_2 is: ", delta_e_2)
+
+delta_n = get_delta_n(gamma, epsilon, K, n)
+print("delta_n is: ", delta_n)
+
+delta_ell = get_delta_ell(delta_e, delta_age_ell, delta_n)
+print("delta_ell is: ", delta_ell)
+
+delta_u = get_delta_u(delta_e, delta_age_ell, delta_n)
+print("delta_u is: ", delta_u)
+
+b_0 = get_b_0(K, epsilon, delta_ell, delta_u)
+print("b_0 is: ", b_0)
+
+phi_age_ell = get_phi_age_ell(delta_age_ell, C, K, n)
+print("phi_age_ell is: ", phi_age_ell)
+
+phi_age_u = get_phi_age_u(delta_age_u, C, K, n)
+print("phi_age_u is: ", phi_age_u)
+
+phi_e = get_phi_e(delta_e, gamma, K, n, epsilon)
+print("phi_e is: ", phi_e)
+print("phi_e^(epsilon^2) is: ", phi_e**(epsilon**2))
+
+[p1, p2, p3, p4] = get_p_i(phi_e, epsilon, phi_age_ell, phi_age_u, C)
+print(p1, p2, p3, p4)

+ 417 - 0
cuckoo_simulation/src/bin/old-main.rs

@@ -0,0 +1,417 @@
+use std::env;
+use std::process;
+use rand_core::SeedableRng;
+use rand_core::RngCore;
+use rand_xoshiro::SplitMix64;
+
+type NodeCount = usize;
+type RegionCount = usize;
+type TimeCount = usize;
+
+#[derive(Debug, Clone, Copy)]
+struct Region {
+    num_honest: NodeCount,
+    num_malicious: NodeCount,
+    last_join: TimeCount,
+}
+
+#[derive(Debug, Clone, Copy)]
+struct Quorum {
+    tot_honest: NodeCount,
+    tot_malicious: NodeCount,
+    tot_last_join: TimeCount,
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+struct CurrentStats {
+    dirty: bool,
+    min_tot_nodes: NodeCount,
+    min_tot_nodes_quorum: RegionCount,
+    max_tot_nodes: NodeCount,
+    max_tot_nodes_quorum: RegionCount,
+    min_tot_honest: NodeCount,
+    min_tot_honest_quorum: RegionCount,
+    max_tot_honest: NodeCount,
+    max_tot_honest_quorum: RegionCount,
+    min_tot_malicious: NodeCount,
+    min_tot_malicious_quorum: RegionCount,
+    max_tot_malicious: NodeCount,
+    max_tot_malicious_quorum: RegionCount,
+    min_tot_last_join: TimeCount,
+    min_tot_last_join_quorum: RegionCount,
+    max_tot_last_join: TimeCount,
+    max_tot_last_join_quorum: RegionCount,
+    min_epsilon: f64,
+    min_epsilon_quorum: RegionCount,
+    max_epsilon: f64,
+    max_epsilon_quorum: RegionCount,
+}
+
+impl CurrentStats {
+    fn update(&mut self, i: RegionCount, q: &Quorum, force: bool) {
+        if self.dirty == false && (
+                self.min_tot_nodes_quorum == i ||
+                self.max_tot_nodes_quorum == i ||
+                self.min_tot_honest_quorum == i ||
+                self.max_tot_honest_quorum == i ||
+                self.min_tot_malicious_quorum == i ||
+                self.max_tot_malicious_quorum == i ||
+                self.min_tot_last_join_quorum == i ||
+                self.max_tot_last_join_quorum == i ||
+                self.min_epsilon_quorum == i ||
+                self.max_epsilon_quorum == i) {
+            self.dirty = true;
+        }
+        let nodes = q.tot_honest + q.tot_malicious;
+        if force || nodes < self.min_tot_nodes {
+            self.min_tot_nodes = nodes;
+            self.min_tot_nodes_quorum = i;
+        }
+        if force || nodes > self.max_tot_nodes {
+            self.max_tot_nodes = nodes;
+            self.max_tot_nodes_quorum = i;
+        }
+        if force || q.tot_honest < self.min_tot_honest {
+            self.min_tot_honest = q.tot_honest;
+            self.min_tot_honest_quorum = i;
+        }
+        if force || q.tot_honest > self.max_tot_honest {
+            self.max_tot_honest = q.tot_honest;
+            self.max_tot_honest_quorum = i;
+        }
+        if force || q.tot_malicious < self.min_tot_malicious {
+            self.min_tot_malicious = q.tot_malicious;
+            self.min_tot_malicious_quorum = i;
+        }
+        if force || q.tot_malicious > self.max_tot_malicious {
+            self.max_tot_malicious = q.tot_malicious;
+            self.max_tot_malicious_quorum = i;
+        }
+        if force || q.tot_last_join < self.min_tot_last_join {
+            self.min_tot_last_join = q.tot_last_join;
+            self.min_tot_last_join_quorum = i;
+        }
+        if force || q.tot_last_join > self.max_tot_last_join {
+            self.max_tot_last_join = q.tot_last_join;
+            self.max_tot_last_join_quorum = i;
+        }
+        let epsilon : f64 = if q.tot_honest > 0 {
+            (q.tot_malicious as f64) /
+            (q.tot_honest as f64)
+        } else if q.tot_malicious > 0 {
+            1000000.0
+        } else {
+            0.0
+        };
+        if force || epsilon < self.min_epsilon {
+            self.min_epsilon = epsilon;
+            self.min_epsilon_quorum = i;
+        }
+        if force || epsilon > self.max_epsilon {
+            self.max_epsilon = epsilon;
+            self.max_epsilon_quorum = i;
+        }
+    }
+
+    #[allow(dead_code)]
+    fn print(&self) {
+        print!("nodes {} ({}) {} ({}) ", self.min_tot_nodes, self.min_tot_nodes_quorum, self.max_tot_nodes, self.max_tot_nodes_quorum);
+        print!("honest {} ({}) {} ({}) ", self.min_tot_honest, self.min_tot_honest_quorum, self.max_tot_honest, self.max_tot_honest_quorum);
+        print!("malicious {} ({}) {} ({}) ", self.min_tot_malicious, self.min_tot_malicious_quorum, self.max_tot_malicious, self.max_tot_malicious_quorum);
+        print!("lastjoin {} ({}) {} ({}) ", self.min_tot_last_join, self.min_tot_last_join_quorum, self.max_tot_last_join, self.max_tot_last_join_quorum);
+        println!("epsilon {} ({}) {} ({})", self.min_epsilon, self.min_epsilon_quorum, self.max_epsilon, self.max_epsilon_quorum);
+    }
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+struct CumulativeStats {
+    min_tot_honest: NodeCount,
+    max_tot_honest: NodeCount,
+    min_tot_malicious: NodeCount,
+    max_tot_malicious: NodeCount,
+    min_tot_nodes: NodeCount,
+    max_tot_nodes: NodeCount,
+    min_age: TimeCount,
+    max_age: TimeCount,
+    min_epsilon: f64,
+    max_epsilon: f64,
+}
+
+impl CumulativeStats {
+    #[allow(dead_code)]
+    fn print(&self) {
+        print!("nodes {} {} ", self.min_tot_nodes, self.max_tot_nodes);
+        print!("honest {} {} ", self.min_tot_honest, self.max_tot_honest);
+        print!("malicious {} {} ", self.min_tot_malicious, self.max_tot_malicious);
+        print!("age {} {} ", self.min_age, self.max_age);
+        println!("epsilon {} {}", self.min_epsilon, self.max_epsilon);
+    }
+}
+
+struct Simulation {
+    done_init: bool,
+    now: TimeCount,
+    rand: SplitMix64,
+    lg_regions_per_quorum: u8,
+    num_region_mask: RegionCount,
+    regions: Vec<Region>,
+    quorums: Vec<Quorum>,
+    cur_stats: CurrentStats,
+    cum_stats: CumulativeStats,
+}
+
+impl Simulation {
+    fn rand_region(&mut self) -> RegionCount {
+        (self.rand.next_u64() as RegionCount) & self.num_region_mask
+    }
+
+    // Insert a new node into a random region in the DHT.
+    fn insert(&mut self, is_malicious: bool) {
+        let insregion = self.rand_region();
+        let quorum = insregion >> self.lg_regions_per_quorum;
+        if is_malicious {
+            self.regions[insregion].num_malicious += 1;
+            self.quorums[quorum].tot_malicious += 1;
+        } else {
+            self.regions[insregion].num_honest += 1;
+            self.quorums[quorum].tot_honest += 1;
+        }
+        if self.done_init {
+            self.cur_stats.update(quorum, &self.quorums[quorum], false);
+        }
+    }
+
+    // Insert a new node into a random region in the DHT, then
+    // evict all the other nodes in that region into random
+    // regions in the DHT (but don't evict nodes from the regions
+    // _those_ nodes land in).
+    fn cuckoo_insert(&mut self, is_malicious: bool) {
+        // Pick a random region to put the new node into. Also get the quorum for that region.
+        let insregion = self.rand_region();
+        let quorum = insregion >> self.lg_regions_per_quorum;
+        // Kick out all nodes in that region
+        // (subtract honest, malicious counts for that region from the quorum totals).
+        let num_malicious = self.regions[insregion].num_malicious;
+        let num_honest = self.regions[insregion].num_honest;
+        self.quorums[quorum].tot_malicious -= num_malicious;
+        self.quorums[quorum].tot_honest -= num_honest;
+        // Insert the new node into that region.
+        // Also update honest/malicious counts for region + quorum.
+        if is_malicious {
+            self.regions[insregion].num_malicious = 1;
+            self.quorums[quorum].tot_malicious += 1;
+            self.regions[insregion].num_honest = 0;
+        } else {
+            self.regions[insregion].num_malicious = 0;
+            self.regions[insregion].num_honest = 1;
+            self.quorums[quorum].tot_honest += 1;
+        }
+        // Re-insert each node that was kicked out earlier, into new k-regions, while maintaining
+        // honest/malicious status.
+        for _ in 0..num_honest {
+            self.insert(false);
+        }
+        for _ in 0..num_malicious {
+            self.insert(true);
+        }
+        // Update the age of the region that was emptied out.
+        let last_join = self.regions[insregion].last_join;
+        self.regions[insregion].last_join = self.now;
+        self.quorums[quorum].tot_last_join += self.now - last_join;
+        self.now += 1;
+        // Cuckoo-ing after initialization: update stats for the quorum with the cuckood-out region
+        if self.done_init {
+            self.cur_stats.update(quorum, &self.quorums[quorum], false);
+            self.collect_stats();
+            self.update_cumstats(false);
+        }
+    }
+
+    fn init(&mut self, num_honest : NodeCount, num_malicious : NodeCount) {
+        for _ in 0..num_honest {
+            self.cuckoo_insert(false);
+        }
+        for _ in 0..num_malicious {
+            self.cuckoo_insert(true);
+        }
+        self.done_init = true;
+        self.cur_stats.dirty = true;
+        self.collect_stats();
+        self.update_cumstats(true);
+    }
+
+    fn move_malicious(&mut self) {
+        // Remove an existing malicious node
+        // For now, just randomly
+        loop {
+            let region = self.rand_region();
+            let quorum = region >> self.lg_regions_per_quorum;
+            if self.regions[region].num_malicious > 0 {
+                self.regions[region].num_malicious -= 1;
+                self.quorums[quorum].tot_malicious -= 1;
+                self.cur_stats.update(quorum, &self.quorums[quorum], false);
+                break;
+            }
+        }
+        // Insert it back into the DHT
+        self.cuckoo_insert(true);
+    }
+
+    #[allow(dead_code)]
+    fn count_nodes(&mut self) {
+        let mut num_honest : NodeCount = 0;
+        let mut num_malicious : NodeCount = 0;
+        let mut last_join : TimeCount = 0;
+        for r in self.regions.iter() {
+            num_honest += r.num_honest;
+            num_malicious += r.num_malicious;
+            last_join += r.last_join;
+        }
+        print!("regions h={} m={} l={} ", num_honest, num_malicious, last_join);
+        let mut tot_honest : NodeCount = 0;
+        let mut tot_malicious : NodeCount = 0;
+        let mut tot_last_join : TimeCount = 0;
+        for q in self.quorums.iter() {
+            tot_honest += q.tot_honest;
+            tot_malicious += q.tot_malicious;
+            tot_last_join += q.tot_last_join;
+        }
+        println!("quorums h={} m={} l={}", tot_honest, tot_malicious, tot_last_join);
+    }
+
+    fn collect_stats(&mut self) {
+        if self.cur_stats.dirty {
+            for (i, q) in self.quorums.iter().enumerate() {
+                self.cur_stats.update(i, q, i==0);
+            }
+            self.cur_stats.dirty = false;
+        }
+    }
+
+    fn update_cumstats(&mut self, force: bool) {
+        let stat = &self.cur_stats;
+        if force || stat.min_tot_nodes < self.cum_stats.min_tot_nodes {
+            self.cum_stats.min_tot_nodes = stat.min_tot_nodes;
+        }
+        if force || stat.max_tot_nodes > self.cum_stats.max_tot_nodes {
+            self.cum_stats.max_tot_nodes = stat.max_tot_nodes;
+        }
+        if force || stat.min_tot_honest < self.cum_stats.min_tot_honest {
+            self.cum_stats.min_tot_honest = stat.min_tot_honest;
+        }
+        if force || stat.max_tot_honest > self.cum_stats.max_tot_honest {
+            self.cum_stats.max_tot_honest = stat.max_tot_honest;
+        }
+        if force || stat.min_tot_malicious < self.cum_stats.min_tot_malicious {
+            self.cum_stats.min_tot_malicious = stat.min_tot_malicious;
+        }
+        if force || stat.max_tot_malicious > self.cum_stats.max_tot_malicious {
+            self.cum_stats.max_tot_malicious = stat.max_tot_malicious;
+        }
+        let min_age = (self.now<<self.lg_regions_per_quorum) - stat.max_tot_last_join;
+        let max_age = (self.now<<self.lg_regions_per_quorum) - stat.min_tot_last_join;
+        if force || min_age < self.cum_stats.min_age {
+            self.cum_stats.min_age = min_age;
+        }
+        if force || max_age > self.cum_stats.max_age {
+            self.cum_stats.max_age = max_age;
+        }
+        if force || stat.min_epsilon < self.cum_stats.min_epsilon {
+            self.cum_stats.min_epsilon = stat.min_epsilon;
+        }
+        if force || stat.max_epsilon > self.cum_stats.max_epsilon {
+            self.cum_stats.max_epsilon = stat.max_epsilon;
+        }
+    }
+}
+
+fn usage(cmd: &String) {
+    eprintln!("Usage: {} h m lg_r lg_c iters seed", cmd);
+    eprintln!("h:     number of honest nodes");
+    eprintln!("m:     number of malicious nodes");
+    eprintln!("lg_r:  log_2 of the number of regions");
+    eprintln!("lg_c:  log_2 of the number of regions per quorum");
+    eprintln!("iters: number of iterations after initialization");
+    eprintln!("seed:  random seed");
+}
+
+fn main() {
+    let args: Vec<String> = env::args().collect();
+
+    if args.len() != 7 {
+        usage(&args[0]);
+        process::exit(1);
+    }
+
+    let arg_h = args[1].parse::<NodeCount>();
+    let arg_m = args[2].parse::<NodeCount>();
+    let arg_lg_r = args[3].parse::<u8>();
+    let arg_lg_c = args[4].parse::<u8>();
+    let arg_iters = args[5].parse::<TimeCount>();
+    let arg_seed = args[6].parse::<u64>();
+
+    if arg_h.is_err() || arg_m.is_err() || arg_lg_r.is_err() ||
+            arg_lg_c.is_err() || arg_iters.is_err() || arg_seed.is_err() {
+        usage(&args[0]);
+        process::exit(1);
+    }
+
+    // Number of honest nodes
+    let h : NodeCount = arg_h.unwrap();
+    // Number of malicious nodes
+    let m : NodeCount = arg_m.unwrap();
+    // TODO: In the cuckoo-rule, we don't care about each region.
+    //  We only care about a k-region at a time.
+    //   In commensal-cuckooing, we only care about the quorum that is being joined to.
+    // log_2 of number of regions
+    let lg_r : u8 = arg_lg_r.unwrap();
+    // log_2 of number of regions per quorum (must be smaller than r)
+    let lg_c : u8 = arg_lg_c.unwrap();
+    // Number of iterations after initialization
+    let iters : TimeCount = arg_iters.unwrap();
+    // 64-bit random seed
+    let seed : u64 = arg_seed.unwrap();
+
+    if lg_c > lg_r {
+        usage(&args[0]);
+        process::exit(1);
+    }
+
+    let blankregion = Region {
+        num_honest: 0,
+        num_malicious: 0,
+        last_join: 0
+    };
+    let blankquorum = Quorum {
+        tot_honest: 0,
+        tot_malicious: 0,
+        tot_last_join: 0,
+    };
+    let mut sim = Simulation {
+        done_init: false,
+        now: 0,
+        rand: SplitMix64::seed_from_u64(seed),
+        lg_regions_per_quorum: lg_c,
+        num_region_mask: (1<<lg_r)-1,
+        regions: Vec::new(),
+        quorums: Vec::new(),
+        cur_stats: CurrentStats::default(),
+        cum_stats: CumulativeStats::default(),
+    };
+    sim.regions.resize(1<<lg_r, blankregion);
+    sim.quorums.resize(1<<(lg_r-lg_c), blankquorum);
+
+    eprintln!("Starting simulation h={} m={} r={} C={} seed={}",
+            h, m, 1<<lg_r, 1<<lg_c, seed);
+
+    sim.init(h, m);
+    for iter in 0..iters {
+        sim.move_malicious();
+        if iter % 100000 == 0 {
+            eprintln!("iter {}", iter);
+        }
+    }
+
+    print!("{} {} {} {} {} {} ", h, m, lg_r, lg_c, iters, seed);
+    sim.cum_stats.print();
+}

+ 102 - 0
cuckoo_simulation/src/main.rs

@@ -0,0 +1,102 @@
+pub mod sim;
+mod stats;
+mod types;
+
+use std::{env, process};
+use crate::types::{Quorum, Region};
+use crate::stats::{CurrentStats, CumulativeStats};
+use crate::sim::Simulation;
+use rand_pcg::Pcg64;
+use rand::SeedableRng;
+
+fn usage(cmd: &String) {
+    eprintln!("Usage: {} h m r k g iters seed", cmd);
+    eprintln!("h:           number of honest nodes");
+    eprintln!("m:           number of malicious nodes");
+    eprintln!("lg_r:        log_2 of the number of regions");
+    eprintln!("lg_c:        log_2 of the number of regions per quorum");
+    eprintln!("k:           k - 1 no. of secondary joins in CCR before new primary joins are accepted");
+    eprintln!("g:           desired number of nodes in a *region*");
+    eprintln!("T:           number of iterations after initialization");
+    eprintln!("seed:        random seed");
+}
+
+fn main() {
+    let args: Vec<String> = env::args().collect();
+
+    if args.len() != 9 {
+        usage(&args[0]);
+        process::exit(1);
+    }
+
+    let valid_args: Vec<usize> = args.iter().enumerate()
+        .filter(|(i, _)| *i != 0)
+        .map(|(_, v)| v.parse::<usize>())
+        .filter(|x| x.is_ok())
+        .map(|x| x.unwrap())
+        .collect();
+
+    if valid_args.len() != 8 {
+        usage(&args[0]);
+        process::exit(1);
+    }
+
+    let h = valid_args[0];
+    let m = valid_args[1];
+    let lg_r = valid_args[2];
+    let lg_c = valid_args[3];
+    let k = valid_args[4];
+    let g = valid_args[5];
+    let iters = valid_args[6];
+    let seed = valid_args[7];
+
+    if (lg_c > lg_r) | (k > g) {
+        usage(&args[0]);
+        process::exit(1);
+    }
+
+    let blankregion = Region {
+        num_honest: 0,
+        num_malicious: 0,
+        last_join: 0
+    };
+
+    let blankquorum = Quorum {
+        tot_honest: 0,
+        tot_malicious: 0,
+        tot_last_join: 0,
+        num_nodes_since_last_primary_join: 0,
+    };
+
+    let mut sim = Simulation {
+        done_init: false,
+        now: 0,
+        rand: Pcg64::seed_from_u64(seed as u64),
+        quorums: Vec::new(),
+        regions: Vec::new(),
+        lg_regions_per_quorum: lg_c,
+        num_region_mask: (1<<lg_r)-1,
+        cur_stats: CurrentStats::default(),
+        cum_stats: CumulativeStats::default(),
+        k,
+        g,
+    };
+
+    sim.regions.resize(1<<lg_r, blankregion);
+    sim.quorums.resize(1<<(lg_r-lg_c), blankquorum);
+
+    eprintln!("Starting simulation h={} m={} 2^r={} regions 2^c={} regions/quorum k={} g={} desired quorum size T={} seed={}",
+              h, m, 1 << lg_r, 1 << lg_c, k, g, iters, seed);
+
+    sim.init(h, m);
+    for iter in 0..iters {
+        sim.move_malicious();
+        if iter % 100000 == 0 {
+            eprintln!("iter {}", iter);
+        }
+    }
+
+    println!("h={} m={} 2^r={} 2^c={} k={} g={} T={} seed={}",
+             h, m, 1 << lg_r, 1<< lg_c, k, g, iters, seed);
+    sim.cum_stats.print();
+}

+ 214 - 0
cuckoo_simulation/src/sim.rs

@@ -0,0 +1,214 @@
+use crate::types::*;
+// use rand_xoshiro::{SplitMix64, rand_core::RngCore};
+use crate::stats::{CumulativeStats, CurrentStats};
+use reservoir_sampling::unweighted::core::l;
+use rand_pcg::Pcg64;
+use rand::RngCore;
+
+pub struct Simulation {
+    pub done_init: bool,
+    pub now: TimeCount,
+    pub quorums: Vec<Quorum>,
+    pub regions: Vec<Region>,
+    pub rand: Pcg64,
+    pub lg_regions_per_quorum: RegionCount,
+    pub num_region_mask: RegionCount,
+    pub cur_stats: CurrentStats,
+    pub cum_stats: CumulativeStats,
+    pub k: NodeCount,
+    pub g: NodeCount,
+}
+
+impl Simulation {
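+    // The number of regions is a power of two, so masking a random u64 with num_region_mask yields a uniformly random region index.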
+    fn rand_region(&mut self) -> RegionCount {
+        (self.rand.next_u64() as RegionCount) & self.num_region_mask
+    }
+
+    // Reservoir-sampling to pick which nodes to kick out for CCR
+    fn pick_honest_malicious_nodes_to_kick_out(&mut self, m: NodeCount, n: NodeCount, n_bad: NodeCount) -> NodeCount {
+        let mut selected_indices: Vec<usize> = vec![0usize; m as usize];
+        // Picks m indices out of 0..n using the optimized L reservoir sampling algo.
+        l(0usize..n, selected_indices.as_mut_slice(), &mut self.rand);
+        // First 0..n_bad - 1 indices out of total are treated as malicious
+        let num_bad = selected_indices.iter().fold(0, |bad_count, &index| if index < n_bad { bad_count + 1 } else { bad_count });
+        num_bad
+    }
+
+    // Kick-out nodes as per CCR.
+    fn kick_out_nodes(&mut self, region: RegionCount) {
+        let quorum = region >> self.lg_regions_per_quorum;
+        let current_region_size = self.regions[region].num_malicious + self.regions[region].num_honest;
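+        // Evict roughly a k/g fraction of the nodes currently in this region, per the commensal cuckoo rule.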
+        let number_to_kick_out : usize = ((self.k * current_region_size / self.g ) as f64).round() as usize;
+        let num_malicious_to_kick_out = self.pick_honest_malicious_nodes_to_kick_out(number_to_kick_out, current_region_size, self.regions[region].num_malicious);
+        let num_honest_to_kick_out = number_to_kick_out - num_malicious_to_kick_out;
+
+        self.regions[region].num_malicious -= num_malicious_to_kick_out;
+        self.regions[region].num_honest -= num_honest_to_kick_out;
+        self.quorums[quorum].tot_malicious -= num_malicious_to_kick_out;
+        self.quorums[quorum].tot_honest -= num_honest_to_kick_out;
+
+        // Re-insert each node that was kicked out earlier, into new quorums, while maintaining
+        // honest/malicious status.
+        for _ in 0..num_honest_to_kick_out {
+            let secondary_join_region = self.rand_region();
+            self.insert(false, secondary_join_region, false);
+        }
+        for _ in 0..num_malicious_to_kick_out {
+            let secondary_join_region = self.rand_region();
+            self.insert(true, secondary_join_region, false);
+        }
+    }
+
+    // Insert a new node into a given region in the DHT.
+    fn insert(&mut self, is_malicious: bool, region: RegionCount, is_primary_join: bool) {
+        let quorum = region >> self.lg_regions_per_quorum;
+
+        // Insert the new node into that region.
+        // Also update honest/malicious counts for region + quorum.
+        if is_malicious {
+            self.regions[region].num_malicious += 1;
+            self.quorums[quorum].tot_malicious += 1;
+        } else {
+            self.regions[region].num_honest += 1;
+            self.quorums[quorum].tot_honest += 1;
+        }
+        if is_primary_join {
+            self.quorums[quorum].num_nodes_since_last_primary_join = 0
+        } else {
+            self.quorums[quorum].num_nodes_since_last_primary_join += 1
+        }
+        if self.done_init {
+            self.cur_stats.update(quorum, &self.quorums[quorum], false);
+        }
+    }
+
+    pub fn init(&mut self, num_honest: NodeCount, num_malicious: NodeCount) {
+        for _ in 0..num_honest {
+            // The original honest nodes are simply "mapped" to random locations -
+            let target_region = self.rand_region();
+            self.insert(false, target_region, false);
+        }
+        for _ in 0..num_malicious {
+            self.cuckoo_insert();
+        }
+        self.done_init = true;
+        self.cur_stats.dirty = true;
+        self.collect_stats();
+        self.update_cumstats(true);
+    }
+
+    // Insert a new node into a random region in the DHT, then
+    // evict a fraction of nodes in that region into random
+    // regions in the DHT (but don't evict nodes from the regions
+    // _those_ nodes land in).
+    pub fn cuckoo_insert(&mut self) {
+        loop {
+            // Pick a random region to put the new node into. Also get the quorum for that region.
+            let region = self.rand_region();
+            let quorum = region >> self.lg_regions_per_quorum;
+
+            if self.quorums[quorum].num_nodes_since_last_primary_join >= self.k - 1 {
+                self.kick_out_nodes(region);
+                self.insert(true, region, true); // True for primary join
+
+                // Update the age of the region that was emptied out.
+                // self.quorums[quorum].tot_last_join += self.now - last_join;
+                self.now += 1;
+                // Cuckoo-ing after initialization: update stats for the quorum with the cuckood-out region
+                if self.done_init {
+                    self.cur_stats.update(quorum, &self.quorums[quorum], false);
+                    self.collect_stats();
+                    self.update_cumstats(false);
+                }
+                break;
+            }
+        }
+    }
+
+    // Remove an existing malicious node from the quorum that has the lowest fraction of faulty nodes
+    pub fn move_malicious(&mut self) {
+        let find_min_b_0_quorum = || {
+            let compute_b_0 = |q: Quorum| {
+                let b_0: f64 = if q.tot_honest > 0 {
+                    (q.tot_malicious as f64) /
+                        (q.tot_honest as f64)
+                } else if q.tot_malicious > 0 {
+                    1000000.0
+                } else {
+                    0.0
+                };
+                b_0
+            };
+
+            let b_0_array: Vec<f64> = self.quorums.iter().map(|&q| compute_b_0(q)).collect();
+            let (min_b_0_quorum, _) = b_0_array.iter().enumerate().fold((0, f64::INFINITY), |(min_index, min_val), (index, &val)| if val < min_val {
+                (index, val)
+            } else {
+                (min_index, min_val)
+            });
+
+            min_b_0_quorum
+        };
+
+        // Pick quorum with least fraction of byz nodes
+        let min_b_0_quorum = find_min_b_0_quorum();
+        if self.quorums[min_b_0_quorum].tot_malicious > 0 {
+            self.quorums[min_b_0_quorum].tot_malicious -= 1;
+            self.cur_stats.update(min_b_0_quorum, &self.quorums[min_b_0_quorum], false);
+
+            // Insert it back into the DHT
+            self.cuckoo_insert();
+        }
+    }
+
+    pub fn collect_stats(&mut self) {
+        if self.cur_stats.dirty {
+            for (i, q) in self.quorums.iter().enumerate() {
+                self.cur_stats.update(i, q, i==0);
+            }
+            self.cur_stats.dirty = false;
+        }
+    }
+
+    pub fn update_cumstats(&mut self, force: bool) {
+        let stat = &self.cur_stats;
+        if force || stat.min_tot_nodes < self.cum_stats.min_tot_nodes {
+            self.cum_stats.min_tot_nodes = stat.min_tot_nodes;
+        }
+        if force || stat.max_tot_nodes > self.cum_stats.max_tot_nodes {
+            self.cum_stats.max_tot_nodes = stat.max_tot_nodes;
+        }
+        if force || stat.min_tot_honest < self.cum_stats.min_tot_honest {
+            self.cum_stats.min_tot_honest = stat.min_tot_honest;
+        }
+        if force || stat.max_tot_honest > self.cum_stats.max_tot_honest {
+            self.cum_stats.max_tot_honest = stat.max_tot_honest;
+        }
+        if force || stat.min_tot_malicious < self.cum_stats.min_tot_malicious {
+            self.cum_stats.min_tot_malicious = stat.min_tot_malicious;
+        }
+        if force || stat.max_tot_malicious > self.cum_stats.max_tot_malicious {
+            self.cum_stats.max_tot_malicious = stat.max_tot_malicious;
+        }
+/*        let min_age = (self.now<<self.lg_regions_per_quorum) - stat.max_tot_last_join;
+        let max_age = (self.now<<self.lg_regions_per_quorum) - stat.min_tot_last_join;
+        if force || min_age < self.cum_stats.min_age {
+            self.cum_stats.min_age = min_age;
+        }
+        if force || max_age > self.cum_stats.max_age {
+            self.cum_stats.max_age = max_age;
+        }
+*/        if force || stat.min_b_0 < self.cum_stats.min_b_0 {
+            self.cum_stats.min_b_0 = stat.min_b_0;
+        }
+        if force || stat.max_b_0 > self.cum_stats.max_b_0 {
+            self.cum_stats.max_b_0 = stat.max_b_0;
+
+        }
+    }
+
+
+
+
+}
+

+ 127 - 0
cuckoo_simulation/src/stats.rs

@@ -0,0 +1,127 @@
+use crate::types::*;
+
+#[derive(Debug, Clone, Copy, Default)]
+pub struct CurrentStats {
+    pub dirty: bool,
+    pub min_tot_nodes: NodeCount,
+    pub min_tot_nodes_quorum: RegionCount,
+    pub max_tot_nodes: NodeCount,
+    pub max_tot_nodes_quorum: RegionCount,
+    pub min_tot_honest: NodeCount,
+    pub min_tot_honest_quorum: RegionCount,
+    pub max_tot_honest: NodeCount,
+    pub max_tot_honest_quorum: RegionCount,
+    pub min_tot_malicious: NodeCount,
+    pub min_tot_malicious_quorum: RegionCount,
+    pub max_tot_malicious: NodeCount,
+    pub max_tot_malicious_quorum: RegionCount,
+    pub min_tot_last_join: TimeCount,
+    pub min_tot_last_join_quorum: RegionCount,
+    pub max_tot_last_join: TimeCount,
+    pub max_tot_last_join_quorum: RegionCount,
+    pub min_b_0: f64,
+    pub min_b_0_quorum: RegionCount,
+    pub max_b_0: f64,
+    pub max_epsilon_quorum: RegionCount,
+}
+
+impl CurrentStats {
+    pub fn update(&mut self, i: RegionCount, q: &Quorum, force: bool) {
+        if self.dirty == false && (
+            self.min_tot_nodes_quorum == i ||
+                self.max_tot_nodes_quorum == i ||
+                self.min_tot_honest_quorum == i ||
+                self.max_tot_honest_quorum == i ||
+                self.min_tot_malicious_quorum == i ||
+                self.max_tot_malicious_quorum == i ||
+                self.min_tot_last_join_quorum == i ||
+                self.max_tot_last_join_quorum == i ||
+                self.min_b_0_quorum == i ||
+                self.max_epsilon_quorum == i) {
+            self.dirty = true;
+        }
+        let nodes = q.tot_honest + q.tot_malicious;
+        if force || nodes < self.min_tot_nodes {
+            self.min_tot_nodes = nodes;
+            self.min_tot_nodes_quorum = i;
+        }
+        if force || nodes > self.max_tot_nodes {
+            self.max_tot_nodes = nodes;
+            self.max_tot_nodes_quorum = i;
+        }
+        if force || q.tot_honest < self.min_tot_honest {
+            self.min_tot_honest = q.tot_honest;
+            self.min_tot_honest_quorum = i;
+        }
+        if force || q.tot_honest > self.max_tot_honest {
+            self.max_tot_honest = q.tot_honest;
+            self.max_tot_honest_quorum = i;
+        }
+        if force || q.tot_malicious < self.min_tot_malicious {
+            self.min_tot_malicious = q.tot_malicious;
+            self.min_tot_malicious_quorum = i;
+        }
+        if force || q.tot_malicious > self.max_tot_malicious {
+            self.max_tot_malicious = q.tot_malicious;
+            self.max_tot_malicious_quorum = i;
+        }
+        if force || q.tot_last_join < self.min_tot_last_join {
+            self.min_tot_last_join = q.tot_last_join;
+            self.min_tot_last_join_quorum = i;
+        }
+        if force || q.tot_last_join > self.max_tot_last_join {
+            self.max_tot_last_join = q.tot_last_join;
+            self.max_tot_last_join_quorum = i;
+        }
+        let b_0: f64 = if q.tot_honest > 0 {
+            (q.tot_malicious as f64) /
+                (q.tot_honest as f64)
+        } else if q.tot_malicious > 0 {
+            1000000.0
+        } else {
+            0.0
+        };
+        if force || b_0 < self.min_b_0 {
+            self.min_b_0 = b_0;
+            self.min_b_0_quorum = i;
+        }
+        if force || b_0 > self.max_b_0 {
+            self.max_b_0 = b_0;
+            self.max_epsilon_quorum = i;
+        }
+    }
+
+    #[allow(dead_code)]
+    pub fn print(&self) {
+        print!("nodes {} ({}) {} ({}) ", self.min_tot_nodes, self.min_tot_nodes_quorum, self.max_tot_nodes, self.max_tot_nodes_quorum);
+        print!("honest {} ({}) {} ({}) ", self.min_tot_honest, self.min_tot_honest_quorum, self.max_tot_honest, self.max_tot_honest_quorum);
+        print!("malicious {} ({}) {} ({}) ", self.min_tot_malicious, self.min_tot_malicious_quorum, self.max_tot_malicious, self.max_tot_malicious_quorum);
+        print!("lastjoin {} ({}) {} ({}) ", self.min_tot_last_join, self.min_tot_last_join_quorum, self.max_tot_last_join, self.max_tot_last_join_quorum);
+        println!("b_0 {} ({}) {} ({})", self.min_b_0, self.min_b_0_quorum, self.max_b_0, self.max_epsilon_quorum);
+    }
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+pub struct CumulativeStats {
+    pub max_tot_honest: NodeCount,
+    pub min_tot_honest: NodeCount,
+    pub min_tot_malicious: NodeCount,
+    pub max_tot_malicious: NodeCount,
+    pub min_tot_nodes: NodeCount,
+    pub max_tot_nodes: NodeCount,
+    pub min_age: TimeCount,
+    pub max_age: TimeCount,
+    pub min_b_0: f64,
+    pub max_b_0: f64,
+}
+
+impl CumulativeStats {
+    #[allow(dead_code)]
+    pub fn print(&self) {
+        print!("nodes {} {} ", self.min_tot_nodes, self.max_tot_nodes);
+        print!("honest {} {} ", self.min_tot_honest, self.max_tot_honest);
+        print!("malicious {} {} ", self.min_tot_malicious, self.max_tot_malicious);
+        print!("age {} {} ", self.min_age, self.max_age);
+        println!("b_0 {} {}", self.min_b_0, self.max_b_0);
+    }
+}

+ 8 - 0
cuckoo_simulation/src/test.rs

@@ -0,0 +1,8 @@
+use reservoir_sampling::unweighted::l;
+
+pub(crate) fn hehehehe () {
+    let mut sampled_arr = vec![0usize; 10];
+    println!("Orig array: {:?}", sampled_arr);
+    l(0usize..100, sampled_arr.as_mut_slice());
+    println!("Sampled array: {:?}", sampled_arr);
+}

+ 19 - 0
cuckoo_simulation/src/types.rs

@@ -0,0 +1,19 @@
+pub type NodeCount = usize;
+pub type QuorumCount = usize;
+pub type TimeCount = usize;
+pub type RegionCount = usize;
+
+#[derive(Debug, Clone, Copy)]
+pub struct Quorum {
+    pub tot_honest: NodeCount,
+    pub tot_malicious: NodeCount,
+    pub tot_last_join: TimeCount,
+    pub num_nodes_since_last_primary_join: NodeCount,
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct Region {
+    pub(crate) num_honest: NodeCount,
+    pub(crate) num_malicious: NodeCount,
+    pub(crate) last_join: TimeCount,
+}

BIN
dhtpir_simulation/library/__pycache__/base_client.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/base_client.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/base_node.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/base_node.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/dht_common.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/dht_common.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/dht_simulator.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/dht_simulator.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/dhtpir_client.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/dhtpir_client.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/dhtpir_node.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/dhtpir_node.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/qp_client.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/qp_client.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/qp_node.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/qp_node.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/qplasthop_client.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/qplasthop_client.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/qplasthop_node.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/qplasthop_node.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/rcp_client.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/rcp_client.cpython-38.pyc


BIN
dhtpir_simulation/library/__pycache__/rcp_node.cpython-36.pyc


BIN
dhtpir_simulation/library/__pycache__/rcp_node.cpython-38.pyc


File diff suppressed because it is too large
+ 141 - 0
dhtpir_simulation/library/base_client.py


+ 133 - 0
dhtpir_simulation/library/base_node.py

@@ -0,0 +1,133 @@
+from dht_common import between, SIZE_OF_HASH, SIZE_OF_IP_ADDRESS
+
+class Base_Node(object):
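+    # A single simulated DHT node: tracks rounds, messages, and bytes sent/received; the get_recent_* methods return the change since the previous call.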
+    def __init__(self, nodeID, documentSize, numItems=0, table=[]):
+        self.nodeID = nodeID
+        self.nextNode = None
+        self.documentSize = documentSize
+        self.numItems = numItems
+        self.table = dict(table)
+
+        self.numRounds = 0
+        self.numMessagesSent = 0
+        self.numMessagesRecv = 0
+        self.numBytesSent = 0
+        self.numBytesRecv = 0
+
+        self.lastNumRounds = 0
+        self.lastNumMessagesSent = 0
+        self.lastNumMessagesRecv = 0
+        self.lastNumBytesSent = 0
+        self.lastNumBytesRecv = 0
+
+    def insert(self):
+        self.numItems += 1
+
+        self.numRounds += 1
+        self.numMessagesSent += 1
+        self.numMessagesRecv += 1
+        self.numBytesSent += SIZE_OF_HASH
+        self.numBytesRecv += self.documentSize
+
+    # There has to be some way of inserting finger table values,
+    # so this is it (Normally only the DHT should be interfacing with it)
+    def insert_relation(self, nodeID, fingerTableValue):
+        # nextNode is kept up to date so that when doing finger table searches,
+        # we can know when we own something or it needs to go somewhere else
+        if not self.nextNode or between(nodeID, self.nodeID, self.nextNode):
+            self.nextNode = nodeID
+        self.table[nodeID] = fingerTableValue
+
+    def retrieve(self):
+        self.numRounds += 1
+        self.numMessagesSent += 1
+        self.numMessagesRecv += 1
+        self.numBytesSent += self.documentSize
+        self.numBytesRecv += SIZE_OF_HASH
+
+    def get_finger_table_val(self, searchID):
+        # if we should own this ID, say so
+        if between(searchID, self.nodeID, self.nextNode):
+            retval = (self.nodeID, -1)
+        else: # otherwise, find the best option in the finger table and give that back
+            options = [x for (x,y) in self.table.items() if x <= searchID]
+            closest = max(options) if len(options) > 0 else max([x for (x,y) in self.table.items()])
+            retval = (closest, self.table[closest])
+
+        self.numRounds += 1
+        self.numMessagesSent += 1
+        self.numMessagesRecv += 1
+        self.numBytesSent += SIZE_OF_HASH + SIZE_OF_IP_ADDRESS
+        self.numBytesRecv += SIZE_OF_HASH
+
+        return retval
+
+    def get_num_rounds(self):
+        self.lastNumRounds = self.numRounds
+        return self.numRounds
+
+    def get_recent_num_rounds(self):
+        retval = self.numRounds - self.lastNumRounds
+        self.lastNumRounds = self.numRounds
+        return retval
+
+    def get_num_messages_sent(self):
+        self.lastNumMessagesSent = self.numMessagesSent
+        return self.numMessagesSent
+
+    def get_recent_num_messages_sent(self):
+        retval = self.numMessagesSent - self.lastNumMessagesSent
+        self.lastNumMessagesSent = self.numMessagesSent
+        return retval
+
+    def get_num_messages_recv(self):
+        self.lastNumMessagesRecv = self.numMessagesRecv
+        return self.numMessagesRecv
+
+    def get_recent_num_messages_recv(self):
+        retval = self.numMessagesRecv - self.lastNumMessagesRecv
+        self.lastNumMessagesRecv = self.numMessagesRecv
+        return retval
+
+    def get_num_bytes_sent(self):
+        self.lastNumBytesSent = self.numBytesSent
+        return self.numBytesSent
+
+    def get_recent_num_bytes_sent(self):
+        retval = self.numBytesSent - self.lastNumBytesSent
+        self.lastNumBytesSent = self.numBytesSent
+        return retval
+
+    def get_num_bytes_recv(self):
+        self.lastNumBytesRecv = self.numBytesRecv
+        return self.numBytesRecv
+
+    def get_recent_num_bytes_recv(self):
+        retval = self.numBytesRecv - self.lastNumBytesRecv
+        self.lastNumBytesRecv = self.numBytesRecv
+        return retval
+
+# Normally this file is a class to be used elsewhere,
+# but if you run it directly it performs some rudimentary unit tests
+# TODO: Add unit tests for size calculations
+if __name__ == "__main__":
+    SIZE_OF_DOCUMENTS_IN_TEST = 1024
+    test = Base_Node(0, SIZE_OF_DOCUMENTS_IN_TEST)
+    
+    [test.insert() for x in range(10)]
+    [test.retrieve() for x in range(10)]
+    print("Insert and retrieval fire correctly.")
+    
+    # Base_Node only tallies costs, so retrieving a "missing" entry is just another retrieval
+    test.retrieve()
+    print("Nonexistent-entry retrieval fires correctly.")
+
+    test.insert_relation(1, 1)
+    test.insert_relation(2, 2)
+    test.insert_relation(4, 4)
+    test.insert_relation(8, 8)
+    assert test.get_finger_table_val(1) == (1, 1)
+    assert test.get_finger_table_val(3) == (2, 2)
+    assert test.get_finger_table_val(4) == (4, 4)
+    assert test.get_finger_table_val(7) == (4, 4)
+    assert test.get_finger_table_val(0) == (0, -1)
+    print("Finger table stuff working correctly")

+ 72 - 0
dhtpir_simulation/library/dht_common.py

@@ -0,0 +1,72 @@
+from random import SystemRandom
+import hashlib
+
+defaultcrypto = SystemRandom()
+
+# For now, just defaulting to SHA2 (not SHA3 due to version issues)
+def compute_document_ID(document):
+    return hashlib.sha256(document).digest()
+
+# The maximum ID possible when using SHA-256
+MAX_ID = 2**256 - 1
+
+# The size (in bytes) assumed for hashes throughout the simulation
+SIZE_OF_HASH = 16
+
+# "Between" in the circular sense; helps determine what node a document belongs to
+def between(test, a, b):
+    if a <= test and test < b:
+        return True
+    if a <= test and b < a:
+        return True
+    if b < a and test < b:
+        return True
+    return False
+
+# Just generates some random bytes of a given size
+def generate_file(size, cryptogen=defaultcrypto):
+    return (''.join([chr(cryptogen.randrange(128)) for i in range(size)])).encode('utf-8')
+
+# This comes from Backes et al., which states that what you'll actually do OT for is
+# an AES key to unlock a specific row from the table (and that key will be 256 bits)
+# This is a consequence of the Naor and Pinkas construction Backes et al. recommends
+SIZE_OF_OT_VALUE = int(256 / 8)
+
+# signature and key sizes taken from https://github.com/poanetwork/threshold_crypto/, which implements BLS signatures and the extension to threshold signatures by Boldyreva
+# extensions of BLS signatures to threshold cases are specifically cited in RCP as how they anticipate their threshold signatures to be implemented
+# (as far as I can tell, this signature size applies regardless of threshold/size of message, which is not surprising to me)
+SIZE_OF_KEY = 48
+SIZE_OF_SIGNATURE = 96
+
+# For now, simulating IPv4
+SIZE_OF_IP_ADDRESS = 4
+
+# Assuming a timestamp is 32 bits (could easily be updated for Y2k38 problem)
+SIZE_OF_TIMESTAMP = 4
+
+# A variable used in test_harness; shouldn't make much of a difference what it's set to,
+# as long as it's a positive integer less than the number of nodes in the test
+KNOWN_NODE = 0
+
+# 150 ms; assuming wired but transatlantic, this is a conservative estimate
+AVERAGE_RTT_ESTIMATE = 0.15 # in s
+
+# 50 Mb/s; global average bandwidth
+AVERAGE_CLIENT_BANDWIDTH_ESTIMATE = 50.0 * 1024 * 1024 / 8 # in B/s
+
+# 150 Mb/s; servers estimated with more bandwidth than clients
+AVERAGE_SERVER_BANDWIDTH_ESTIMATE = 150.0 * 1024 * 1024 / 8 # in B/s
+
+# ~1 GB/s; approximated from 3 cycles / B to encrypt and a 3 GHz machine
+ENCRYPTION_SPEED_ESTIMATE = 1000 * 1000 * 1000  # in B/s (yes, 1000, not 1024, because 1GHz is 1000000000 Hz, not 1024^3 Hz)
+
+##
+# 4 GB/s; approximated from math Ian and Stan did during a meeting for Shamir PIR
+# (more precisely, we calculated it as 250ms/GB, but that's the same value of course)
+PIR_SPEED_ESTIMATE = 1024 * 1024 * 1024 / 0.25 # in B/s
+
+# Just what we used in the simulations
+SIZE_OF_CHUNK = 1024    # in B
+
+# 99% CI for the error bars
+Z_STAR = 2.576
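Since between() is doing circular (ring) arithmetic rather than an ordinary interval check, a few concrete cases make its wrap-around behaviour easier to see. The small integers below are for illustration only; the simulator itself passes fixed-length big-endian byte-string IDs, which compare in the same order.

from dht_common import between

assert between(5, 3, 9)          # ordinary case: 3 <= 5 < 9
assert not between(9, 3, 9)      # the upper bound is exclusive
assert between(251, 250, 10)     # wrapped interval [250, 10): high side of the ring
assert between(1, 250, 10)       # wrapped interval [250, 10): low side of the ring
assert not between(100, 250, 10) # outside the wrapped interval
print("between() handles the wrap-around cases.")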

+ 244 - 0
dhtpir_simulation/library/dht_simulator.py

@@ -0,0 +1,244 @@
+from dht_common import compute_document_ID, MAX_ID
+from base_node import Base_Node
+import math
+
+class DHT_Simulator(object):
+    def __init__(self, nodeType, numGroups, documentSize, numNodes):
+        if nodeType == Base_Node:
+            self.nodes = [nodeType(self.index_to_owner_node(i, numGroups), documentSize) for i in range(numGroups)]
+        else:
+            self.nodes = [nodeType(self.index_to_owner_node(i, numGroups), documentSize, numNodes) for i in range(numGroups)]
+        self.__init_finger_tables()
+
+    # Nobody else needs to call this, but when we put together all the nodes they need their finger table values updated
+    def __init_finger_tables(self):
+        numNodes = len(self.nodes)
+        numEntries = math.ceil(math.log(numNodes, 2))
+        for i in range(numNodes):
+            for j in range(numEntries):
+                x = (2**j + i) % numNodes
+                self.nodes[i].insert_relation(self.index_to_owner_node(x), x)
+
+    # convert an index (of its internal data structure; consider this analogous to routing information)
+    # to which node it should belong to (note that IDs are SHA3 outputs)
+    def index_to_owner_node(self, which, numNodes=None):
+        if not numNodes:
+            numNodes = len(self.nodes)
+        fullID = math.floor(which * MAX_ID / numNodes)
+        return (fullID).to_bytes(32, byteorder="big")
+
+    # convert a node ID (SHA3 output) to which index in the internal data structure it should be
+    def owner_node_to_index(self, nodeID):
+        numNodes = len(self.nodes)
+        v = int.from_bytes(nodeID, byteorder='big')
+        return math.floor(v * numNodes / MAX_ID)
+
+    # let a client "route" to the node in question (accepts indices, which are analogous to routing information)
+    def access_node(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID]
+        else:
+            return None
+
+    def get_num_rounds_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_num_rounds()
+        else:
+            return None
+
+    def get_recent_num_rounds_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_recent_num_rounds()
+        else:
+            return None
+
+    def get_num_rounds(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_num_rounds(nodeID)
+        else:
+            return None
+
+    def get_recent_num_rounds(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_recent_num_rounds(nodeID)
+        else:
+            return None
+
+    def get_num_messages_sent_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_num_messages_sent()
+        else:
+            return None
+
+    def get_recent_num_messages_sent_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_recent_num_messages_sent()
+        else:
+            return None
+
+    def get_num_messages_sent(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_num_messages_sent(nodeID)
+        else:
+            return None
+
+    def get_recent_num_messages_sent(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_recent_num_messages_sent(nodeID)
+        else:
+            return None
+
+    def get_num_messages_recv_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_num_messages_recv()
+        else:
+            return None
+
+    def get_recent_num_messages_recv_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_recent_num_messages_recv()
+        else:
+            return None
+
+    def get_num_messages_recv(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_num_messages_recv(nodeID)
+        else:
+            return None
+
+    def get_recent_num_messages_recv(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_recent_num_messages_recv(nodeID)
+        else:
+            return None
+
+    def get_num_bytes_sent_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_num_bytes_sent()
+        else:
+            return None
+
+    def get_recent_num_bytes_sent_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_recent_num_bytes_sent()
+        else:
+            return None
+
+    def get_num_bytes_sent(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_num_bytes_sent(nodeID)
+        else:
+            return None
+
+    def get_recent_num_bytes_sent(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_recent_num_bytes_sent(nodeID)
+        else:
+            return None
+
+    def get_num_bytes_recv_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_num_bytes_recv()
+        else:
+            return None
+
+    def get_recent_num_bytes_recv_base(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].get_recent_num_bytes_recv()
+        else:
+            return None
+
+    def get_num_bytes_recv(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_num_bytes_recv(nodeID)
+        else:
+            return None
+
+    def get_recent_num_bytes_recv(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_recent_num_bytes_recv(nodeID)
+        else:
+            return None
+
+    # ONLY QP Nodes and higher will have these
+
+    def get_finger_table_range_accesses(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_finger_table_range_accesses(nodeID)
+        else:
+            return None
+
+    def get_finger_table_accesses(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_finger_table_accesses(nodeID)
+        else:
+            return None
+
+    # ONLY QPLastHop Nodes will have these
+
+    def get_database_accesses(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_database_accesses(nodeID)
+        else:
+            return None
+
+    # ONLY DHTPIR Nodes will have these
+
+    def get_PHF_generations(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_PHF_generations(nodeID)
+        else:
+            return None
+
+    def get_PIR_retrievals(self, quorumID, nodeID):
+        if quorumID >= 0 and quorumID < len(self.nodes):
+            return self.nodes[quorumID].get_PIR_retrievals(nodeID)
+        else:
+            return None
+
+    # End of protocol-specific accessors used for calculations
+
+    def __access_node_tables(self, nodeID):
+        if nodeID >= 0 and nodeID < len(self.nodes):
+            return self.nodes[nodeID].table
+        else:
+            return None
+
+    def get_num_nodes(self):
+        return len(self.nodes)
+
+    def test_tables(self):
+        if self.get_num_nodes() == 10:
+            retval = True
+            table = self.__access_node_tables(5)
+            retval = retval and table[b'\x99\x99\x99\x99\x99\x99\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] == 6
+            retval = retval and table[b'\xb3333330\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] == 7
+            retval = retval and table[b'\xe6fffffh\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] == 9
+            retval = retval and table[b'L\xcc\xcc\xcc\xcc\xcc\xcc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] == 3
+            table = self.__access_node_tables(9)
+            retval = retval and table[b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] == 0
+            retval = retval and table[b'\xb3333330\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] == 7
+            retval = retval and table[b'\x19\x99\x99\x99\x99\x99\x9a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] == 1
+            retval = retval and table[b'L\xcc\xcc\xcc\xcc\xcc\xcc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] == 3
+            return retval
+        else:
+            print("Assumed testing conditions not met; no tests run.")
+            return False
+
+# Normally this file is a class to be used elsewhere,
+# but if you run it directly it performs some rudimentary unit tests
+if __name__ == "__main__":
+    from base_node import Base_Node
+
+    SIZE_OF_DOCUMENTS_IN_TEST = 1024
+    NUM_NODES_IN_TEST = 10
+    
+    # The 1 here isn't used at all within the simulator,
+    # but it is customary to invoke DHT_Simulator with that value there for Base_Node
+    test = DHT_Simulator(Base_Node, NUM_NODES_IN_TEST, SIZE_OF_DOCUMENTS_IN_TEST, 1)
+
+    assert test.index_to_owner_node(8) == b'\xcc\xcc\xcc\xcc\xcc\xcc\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+    assert test.index_to_owner_node(4) == b'ffffffh\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+    assert test.owner_node_to_index(b'3333334\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') == 2
+    assert test.owner_node_to_index(b'\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') == 5
+    print("Index to nodeID conversion functioning correctly.")
+
+    assert test.test_tables()
+    print("Finger tables correctly generated.")

File diff suppressed because it is too large
+ 57 - 0
dhtpir_simulation/library/dhtpir_client.py


+ 124 - 0
dhtpir_simulation/library/dhtpir_node.py

@@ -0,0 +1,124 @@
+from dht_common import SIZE_OF_HASH, SIZE_OF_IP_ADDRESS, SIZE_OF_OT_VALUE, SIZE_OF_KEY, SIZE_OF_SIGNATURE, SIZE_OF_TIMESTAMP
+from qp_node import QP_Quorum
+from math import sqrt, ceil
+from collections import defaultdict 
+
+class DHTPIR_Quorum(QP_Quorum):
+    def __init__(self, quorumID, documentSize, numNodes, numItems=0, table=[]):
+        QP_Quorum.__init__(self, quorumID, documentSize, numNodes, numItems, table)
+        self.PHFGenerations = defaultdict(lambda: 0)
+        self.PIRRetrievals = [defaultdict(lambda: 0) for i in range(self.numNodes)]
+
+    def get_PHF_generations(self, whichNode):
+        return self.PHFGenerations
+
+    def get_PIR_retrievals(self, whichNode):
+        return self.PIRRetrievals[whichNode]
+
+    def insert(self, numKeys, numSignatures):
+        self.numItems += 1
+
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+        # actual document sent
+        sizeOfRequest += self.documentSize
+        # signature over whole thing
+        sizeOfRequest += SIZE_OF_SIGNATURE
+
+        sizeOfResponse = SIZE_OF_HASH + SIZE_OF_SIGNATURE
+
+        # An insert is broadcast to the whole quorum, so every node's counters advance
+        self.nodeNumRounds = [x + 1 for x in self.nodeNumRounds]
+        self.nodeNumMessagesSent = [x + 1 for x in self.nodeNumMessagesSent]
+        self.nodeNumMessagesRecv = [x + 1 for x in self.nodeNumMessagesRecv]
+        self.nodeNumBytesSent = [x + sizeOfResponse for x in self.nodeNumBytesSent]
+        self.nodeNumBytesRecv = [x + sizeOfRequest for x in self.nodeNumBytesRecv]
+
+        # This should guarantee that this always gives an optimal request/response
+        # based on whether it's appropriate to block responses
+        blockingFactor = ceil(sqrt(self.numItems / self.documentSize))
+        # NOTE: This works because Python 3 promotes ints to floats for "/"
+        numRecords = ceil(self.numItems / blockingFactor)
+
+        self.PHFGenerations[numRecords] += 1
+
+    def get_hash_function(self, whichNode, numKeys, numSignatures):
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+
+        # This should guarantee that this always gives an optimal request/response
+        # based on whether it's appropriate to block responses
+        blockingFactor = ceil(sqrt(self.numItems / self.documentSize))
+        # NOTE: This works because Python 3 promotes ints to floats for "/"
+        numRecords = ceil(self.numItems / blockingFactor)
+
+        # From "Practical Perfect Hashing in Nearly Optimal Space" by Botelho et al.
+        # A conservative but practical estimate of the size of the PHF is 2.7n *bits*,
+        # for n = the number of valid keys in the PHF.
+        # I round up for hopefully obvious reasons
+        sizeOfResponse = ceil((2.7 * numRecords) / 8.0) + SIZE_OF_SIGNATURE
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += sizeOfResponse
+        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
+
+        return sizeOfResponse
+
+    # This shouldn't be used, just here to make sure you don't try the RCP_Quorum function it overrides
+    def retrieve(self):
+        return None
+
+    def PIR_retrieve(self, whichNode, numKeys, numSignatures):
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+
+        # This should guarantee that this always gives an optimal request/response
+        # based on whether it's appropriate to block responses
+        blockingFactor = ceil(sqrt(self.numItems / self.documentSize))
+        # NOTE: This works because Python 3 promotes ints to floats for "/"
+        numRecords = ceil(self.numItems / blockingFactor)
+        recordSize = blockingFactor * self.documentSize
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += recordSize
+        # This is specifically because in Goldberg PIR, you need 1B per record
+        self.nodeNumBytesRecv[whichNode] += numRecords + sizeOfRequest
+
+        self.PIRRetrievals[whichNode][numRecords] += 1
+
+        return (numRecords, recordSize)
+
+# TODO: Add unit tests for size calculations
+if __name__ == "__main__":
+    SIZE_OF_DOCUMENTS_IN_TEST = 1024
+    NUM_NODES_PER_QUORUM_IN_TEST = 10
+    test = DHTPIR_Quorum(0, SIZE_OF_DOCUMENTS_IN_TEST, NUM_NODES_PER_QUORUM_IN_TEST)
+    
+    [test.insert(0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    
+    [test.get_hash_function(x, 0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    print("Getting PHFs fires on all nodes correctly.")
+
+    [test.PIR_retrieve(x, 0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    print("PIR retrieval fires on all nodes correctly.")

File diff suppressed because it is too large
+ 169 - 0
dhtpir_simulation/library/qp_client.py


+ 90 - 0
dhtpir_simulation/library/qp_node.py

@@ -0,0 +1,90 @@
+from dht_common import SIZE_OF_HASH, SIZE_OF_IP_ADDRESS, SIZE_OF_OT_VALUE, SIZE_OF_KEY, SIZE_OF_SIGNATURE, SIZE_OF_TIMESTAMP
+from rcp_node import RCP_Quorum
+from collections import defaultdict 
+
+class QP_Quorum(RCP_Quorum):
+    def __init__(self, quorumID, documentSize, numNodes, numItems=0, table=[]):
+        RCP_Quorum.__init__(self, quorumID, documentSize, numNodes, numItems, table)
+        self.fingerTableRangeAccesses = [defaultdict(lambda: 0) for i in range(self.numNodes)]
+        self.fingerTableAccesses = [defaultdict(lambda: 0) for i in range(self.numNodes)]
+
+    def get_finger_table_range_accesses(self, whichNode):
+        return self.fingerTableRangeAccesses[whichNode]
+
+    def get_finger_table_accesses(self, whichNode):
+        return self.fingerTableAccesses[whichNode]
+
+    # This shouldn't be used, just here to make sure you don't try the RCP_Quorum function it overrides
+    def get_finger_table_val(self):
+        return None
+
+    def get_finger_table_ranges(self, whichNode, numKeys, numSignatures):
+        numEntries = len(self.table.items())
+        retval = (list(self.table.keys()), numEntries)
+
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+
+        # The set of hashes in the finger table
+        sizeOfResponse = SIZE_OF_HASH * numEntries
+        # Then the entrywise encrypted finger table routing information
+        sizeOfResponse += (SIZE_OF_IP_ADDRESS * self.numNodes + SIZE_OF_KEY) * numEntries
+        # Then the OT prime values
+        sizeOfResponse += 2 * SIZE_OF_OT_VALUE * numEntries
+        # Then, finally, a signature to tie it together
+        sizeOfResponse += SIZE_OF_SIGNATURE
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += sizeOfResponse
+        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
+
+        self.fingerTableRangeAccesses[whichNode][numEntries] += 1
+
+        return retval
+
+    def OT_get_finger_table_val(self, whichNode, index):
+        numEntries = len(self.table.items())
+        retval = self.table[index]
+
+        # OT prime value to be able to open one of the entries in the finger table
+        sizeOfRequest = SIZE_OF_OT_VALUE
+        # And a signature to confirm it's valid
+        sizeOfRequest += SIZE_OF_SIGNATURE
+
+        # The final OT prime value
+        sizeOfResponse = SIZE_OF_OT_VALUE
+        # And a signature to boot
+        sizeOfResponse += SIZE_OF_SIGNATURE
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += sizeOfResponse
+        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
+
+        self.fingerTableAccesses[whichNode][numEntries] += 1
+
+        return retval
+
+# TODO: Add unit tests for size calculations
+# TODO: Add unit test to make sure finger_table_ranges/OT_val is still firing correctly?
+if __name__ == "__main__":
+    SIZE_OF_DOCUMENTS_IN_TEST = 1024
+    NUM_NODES_PER_QUORUM_IN_TEST = 10
+    test = QP_Quorum(0, SIZE_OF_DOCUMENTS_IN_TEST, NUM_NODES_PER_QUORUM_IN_TEST)
+
+    randomNode = 0
+    [test.insert(0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    [test.retrieve(randomNode, 0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    print("Insert and retrieval on node 0 fire correctly.")
+
+    [test.retrieve(x, 0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    print("Retrieval fires on all nodes correctly.")

File diff suppressed because it is too large
+ 80 - 0
dhtpir_simulation/library/qplasthop_client.py


+ 80 - 0
dhtpir_simulation/library/qplasthop_node.py

@@ -0,0 +1,80 @@
+from dht_common import SIZE_OF_HASH, SIZE_OF_IP_ADDRESS, SIZE_OF_OT_VALUE, SIZE_OF_KEY, SIZE_OF_SIGNATURE, SIZE_OF_TIMESTAMP
+from qp_node import QP_Quorum
+from collections import defaultdict 
+
+class QPLastHop_Quorum(QP_Quorum):
+    def __init__(self, quorumID, documentSize, numNodes, numItems=0, table=[]):
+        QP_Quorum.__init__(self, quorumID, documentSize, numNodes, numItems, table)
+        self.databaseAccesses = [defaultdict(lambda: 0) for i in range(self.numNodes)]
+        
+    def get_database_accesses(self, whichNode):
+        return self.databaseAccesses[whichNode]
+
+    def get_final_table_ranges(self, whichNode, numKeys, numSignatures):
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+
+        # The set of hashes in the item store
+        sizeOfResponse = SIZE_OF_HASH * self.numItems
+        # Then the entrywise encrypted item store
+        sizeOfResponse += self.documentSize * self.numItems
+        # Then the OT prime values
+        sizeOfResponse += 2 * SIZE_OF_OT_VALUE * self.numItems
+        # Then, finally, a signature to tie it together
+        sizeOfResponse += SIZE_OF_SIGNATURE
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += sizeOfResponse
+        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
+
+        self.databaseAccesses[whichNode][self.numItems] += 1
+
+        return self.numItems
+
+    # This shouldn't be used, just here to make sure you don't try the RCP_Quorum function it overrides
+    def retrieve(self):
+        return None
+
+    def OT_retrieve(self, whichNode, numKeys, numSignatures):
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+        # actual OT crypto usage
+        sizeOfRequest += SIZE_OF_OT_VALUE
+        # signature on whole thing
+        sizeOfRequest += SIZE_OF_SIGNATURE
+
+        sizeOfResponse = SIZE_OF_OT_VALUE + SIZE_OF_SIGNATURE
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += sizeOfResponse
+        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
+
+# TODO: Add unit tests for size calculations
+if __name__ == "__main__":
+    SIZE_OF_DOCUMENTS_IN_TEST = 1024
+    NUM_NODES_PER_QUORUM_IN_TEST = 10
+    test = QPLastHop_Quorum(0, SIZE_OF_DOCUMENTS_IN_TEST, NUM_NODES_PER_QUORUM_IN_TEST)
+    
+    [test.insert(0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    
+    [test.get_final_table_ranges(x, 0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    print("Getting final table ranges fires on all nodes correctly.")
+
+    [test.OT_retrieve(x, 0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    print("OT retrieval fires on all nodes correctly.")

File diff suppressed because it is too large
+ 166 - 0
dhtpir_simulation/library/rcp_client.py


+ 177 - 0
dhtpir_simulation/library/rcp_node.py

@@ -0,0 +1,177 @@
+from dht_common import SIZE_OF_HASH, SIZE_OF_IP_ADDRESS, SIZE_OF_KEY, SIZE_OF_SIGNATURE, SIZE_OF_TIMESTAMP
+from base_node import Base_Node
+
+class RCP_Quorum(Base_Node):
+    def __init__(self, quorumID, documentSize, numNodes, numItems=0, table=[]):
+        Base_Node.__init__(self, quorumID, documentSize, numItems, table)
+        self.numNodes = numNodes
+
+        self.nodeNumRounds = [0 for i in range(self.numNodes)]
+        self.nodeNumMessagesSent = [0 for i in range(self.numNodes)]
+        self.nodeNumMessagesRecv = [0 for i in range(self.numNodes)]
+        self.nodeNumBytesSent = [0 for i in range(self.numNodes)]
+        self.nodeNumBytesRecv = [0 for i in range(self.numNodes)]
+
+        self.nodeLastNumRounds = [0 for i in range(self.numNodes)]
+        self.nodeLastNumMessagesSent = [0 for i in range(self.numNodes)]
+        self.nodeLastNumMessagesRecv = [0 for i in range(self.numNodes)]
+        self.nodeLastNumBytesSent = [0 for i in range(self.numNodes)]
+        self.nodeLastNumBytesRecv = [0 for i in range(self.numNodes)]
+
+    def insert(self, numKeys, numSignatures):
+        self.numItems += 1
+
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+        # actual document sent
+        sizeOfRequest += self.documentSize
+        # signature over whole thing
+        sizeOfRequest += SIZE_OF_SIGNATURE
+
+        sizeOfResponse = SIZE_OF_HASH + SIZE_OF_SIGNATURE
+
+        # An insert is broadcast to the whole quorum, so every node's counters advance
+        self.nodeNumRounds = [x + 1 for x in self.nodeNumRounds]
+        self.nodeNumMessagesSent = [x + 1 for x in self.nodeNumMessagesSent]
+        self.nodeNumMessagesRecv = [x + 1 for x in self.nodeNumMessagesRecv]
+        self.nodeNumBytesSent = [x + sizeOfResponse for x in self.nodeNumBytesSent]
+        self.nodeNumBytesRecv = [x + sizeOfRequest for x in self.nodeNumBytesRecv]
+
+    # This is simulating some extra stuff (signing) so that the correct size is recorded
+    def retrieve(self, whichNode, numKeys, numSignatures):
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+        # actual hash requested
+        sizeOfRequest += SIZE_OF_HASH
+        # signature over whole thing
+        sizeOfRequest += SIZE_OF_SIGNATURE
+
+        sizeOfResponse = self.documentSize + SIZE_OF_SIGNATURE
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += sizeOfResponse
+        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
+
+    def get_finger_table_val(self, whichNode, searchID, numKeys, numSignatures):
+        retval = Base_Node.get_finger_table_val(self, searchID)
+
+        # key of previous group
+        sizeOfResponse = SIZE_OF_KEY
+        # next group's ID/routing information
+        sizeOfResponse += SIZE_OF_IP_ADDRESS * self.numNodes + SIZE_OF_HASH
+        # key of next group
+        sizeOfResponse += SIZE_OF_KEY
+        # sign the whole thing
+        sizeOfResponse += SIZE_OF_SIGNATURE
+
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+        # Keys in request
+        sizeOfRequest += SIZE_OF_KEY * numKeys
+        # signatures in request
+        sizeOfRequest += SIZE_OF_SIGNATURE * numSignatures
+        # ID being searched for
+        sizeOfRequest += SIZE_OF_HASH
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += sizeOfResponse
+        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
+
+        return retval
+
+    def get_first_auth(self, whichNode):
+        # Asker's ID
+        sizeOfRequest = SIZE_OF_HASH
+        # timestamp
+        sizeOfRequest += SIZE_OF_TIMESTAMP
+
+        # response is just the whole thing signed
+        sizeOfResponse = sizeOfRequest + SIZE_OF_SIGNATURE
+
+        self.nodeNumRounds[whichNode] += 1
+        self.nodeNumMessagesSent[whichNode] += 1
+        self.nodeNumMessagesRecv[whichNode] += 1
+        self.nodeNumBytesSent[whichNode] += sizeOfResponse
+        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
+
+        return None
+
+    def get_num_nodes(self):
+        return self.numNodes
+
+    def get_num_rounds(self, whichNode):
+        self.nodeLastNumRounds[whichNode] = self.nodeNumRounds[whichNode]
+        return self.nodeNumRounds[whichNode]
+
+    def get_recent_num_rounds(self, whichNode):
+        retval = self.nodeNumRounds[whichNode] - self.nodeLastNumRounds[whichNode]
+        self.nodeLastNumRounds[whichNode] = self.nodeNumRounds[whichNode]
+        return retval
+
+    def get_num_messages_sent(self, whichNode):
+        self.nodeLastNumMessagesSent[whichNode] = self.nodeNumMessagesSent[whichNode]
+        return self.nodeNumMessagesSent[whichNode]
+
+    def get_recent_num_messages_sent(self, whichNode):
+        retval = self.nodeNumMessagesSent[whichNode] - self.nodeLastNumMessagesSent[whichNode]
+        self.nodeLastNumMessagesSent[whichNode] = self.nodeNumMessagesSent[whichNode]
+        return retval
+
+    def get_num_messages_recv(self, whichNode):
+        self.nodeLastNumMessagesRecv[whichNode] = self.nodeNumMessagesRecv[whichNode]
+        return self.nodeNumMessagesRecv[whichNode]
+
+    def get_recent_num_messages_recv(self, whichNode):
+        retval = self.nodeNumMessagesRecv[whichNode] - self.nodeLastNumMessagesRecv[whichNode]
+        self.nodeLastNumMessagesRecv[whichNode] = self.nodeNumMessagesRecv[whichNode]
+        return retval
+
+    def get_num_bytes_sent(self, whichNode):
+        self.nodeLastNumBytesSent[whichNode] = self.nodeNumBytesSent[whichNode]
+        return self.nodeNumBytesSent[whichNode]
+
+    def get_recent_num_bytes_sent(self, whichNode):
+        retval = self.nodeNumBytesSent[whichNode] - self.nodeLastNumBytesSent[whichNode]
+        self.nodeLastNumBytesSent[whichNode] = self.nodeNumBytesSent[whichNode]
+        return retval
+
+    def get_num_bytes_recv(self, whichNode):
+        self.nodeLastNumBytesRecv[whichNode] = self.nodeNumBytesRecv[whichNode]
+        return self.nodeNumBytesRecv[whichNode]
+
+    def get_recent_num_bytes_recv(self, whichNode):
+        retval = self.nodeNumBytesRecv[whichNode] - self.nodeLastNumBytesRecv[whichNode]
+        self.nodeLastNumBytesRecv[whichNode] = self.nodeNumBytesRecv[whichNode]
+        return retval
+
+# TODO: Add unit tests for size calculations
+# TODO: Add unit test to make sure finger_table_val is still firing correctly?
+if __name__ == "__main__":
+    SIZE_OF_DOCUMENTS_IN_TEST = 1024
+    NUM_NODES_PER_QUORUM_IN_TEST = 10
+    test = RCP_Quorum(0, SIZE_OF_DOCUMENTS_IN_TEST, NUM_NODES_PER_QUORUM_IN_TEST)
+    
+    randomNode = 0
+    [test.insert(0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    [test.retrieve(randomNode, 0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    print("Insert and retrieval on node 0 fire correctly.")
+
+    [test.retrieve(x, 0, 0) for x in range(NUM_NODES_PER_QUORUM_IN_TEST)]
+    print("Retrieval fires on all nodes correctly.")

+ 126 - 0
dhtpir_simulation/options_setup.c

@@ -0,0 +1,126 @@
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+// 25 seed values get consumed by the 6500 option lines below (6500 / (52 document settings * 5 node types) = 25)
+void seed_increment(char *seed) {
+    if (seed[0]) {
+        if (seed[0] == 'z') {
+            seed_increment(seed + 1);
+            seed[0] = 'a';
+        }
+        else {
+            seed[0]++;
+        }
+    }
+    else {
+        seed[1] = 0;
+        seed[0] = 'a';
+    }
+}
+
+// 3 document-count steps per power of 10 (the 1x, 2x, 5x multipliers below)
+// 4 powers of 10 (100 to 1,000,000 documents per group)
+// 4 group counts (16, 32, 64, 128)
+// 3 * 4 * 4 + 4 endpoints = 52 document settings per seed
+void next_num_documents(int *documents, int *groups, int *nodes, char *seed) {
+    int max_num = *groups * 1000000;
+
+    if (*documents >= max_num) {
+        switch (*groups) {
+            case 16:
+                *groups = 32;
+                *documents = 3200;
+                break;
+
+            case 32:
+                *groups = 64;
+                *documents = 6400;
+                break;
+
+            case 64:
+                *groups = 128;
+                *documents = 12800;
+                break;
+
+            case 128:
+                seed_increment(seed);
+                *groups = 16;
+                *documents = 1600;
+                break;
+        }
+    } else {
+        int first_digit = (*documents / *groups);
+        while (first_digit >= 10)
+            first_digit /= 10;
+
+        switch (first_digit) {
+            case 1:
+            case 5:
+                *documents *= 2;
+                break;
+
+            case 2:
+                *documents = (*documents * 5) / 2;
+                break;
+        }
+    }
+}
+
+// 5 node types per document setting
+void next_node_type(char *node_type, int *documents, int *groups, int *nodes, char *seed) {
+    switch (node_type[1]) {
+        case 'd':
+            node_type[1] = 'l';
+            break;
+
+        case 'l':
+            node_type[1] = 'q';
+            break;
+
+        case 'q':
+            node_type[1] = 'r';
+            break;
+
+        case 'r':
+            node_type[1] = 'b';
+            break;
+
+        case 'b':
+            next_num_documents(documents, groups, nodes, seed);
+            node_type[1] = 'd';
+            break;
+    }
+}
+
+int main(int argc, char *argv[]) {
+    int num_machines = 1;
+    int which_machine = 0;
+    int num_documents = 1600;
+    int document_sizes = 1024;
+    int num_groups = 16;
+    int num_nodes = 64;
+    char which_node[3];
+    char seed[3];
+    FILE *fout;
+
+    if (argc > 2) {
+        num_machines = atoi(argv[1]);
+        which_machine = atoi(argv[2]);
+    }
+
+    strcpy(which_node, "-d");
+    strcpy(seed, "a");
+    
+    fout = fopen("test_options", "w");
+    
+    for (int i = 0; i < 6500; i++) {
+        if (i % num_machines == which_machine)
+            fprintf(fout, "%d,%d,%d,%d,%s,%s,%s\n", num_documents, document_sizes, num_groups, num_nodes, "--seed", seed, which_node);
+        next_node_type(which_node, &num_documents, &num_groups, &num_nodes, seed);
+    }
+
+    fclose(fout);
+    return 0;
+}
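Putting the pieces together: on a single machine (num_machines = 1, which_machine = 0, so every line is written), the first few lines of the generated test_options file would look like the following, cycling through the five node-type flags before the document count advances:

1600,1024,16,64,--seed,a,-d
1600,1024,16,64,--seed,a,-l
1600,1024,16,64,--seed,a,-q
1600,1024,16,64,--seed,a,-r
1600,1024,16,64,--seed,a,-b
3200,1024,16,64,--seed,a,-d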

+ 101 - 0
dhtpir_simulation/run_tests.c

@@ -0,0 +1,101 @@
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <sys/wait.h>
+
+FILE *fin;
+FILE *fout;
+pthread_mutex_t read_file_lock;
+pthread_mutex_t write_file_lock;
+const int SIZE_OF_OPTIONS_STRING = 64;
+char *python_file = "./test_harness.py";
+char *delim = ",";
+
+void *runner(void *input) {
+    int flag = 1;
+    char options[SIZE_OF_OPTIONS_STRING];
+    char options_copy[SIZE_OF_OPTIONS_STRING];
+    char *saveptr = NULL;
+    char *exec_vector[9];
+    pid_t child;
+
+    exec_vector[0] = python_file;
+    exec_vector[8] = NULL;
+
+    while (flag) {
+        pthread_mutex_lock(&read_file_lock);
+        if(fgets(options, SIZE_OF_OPTIONS_STRING, fin) == NULL) {
+            pthread_mutex_unlock(&read_file_lock);
+
+            flag = 0;
+        } else {
+            pthread_mutex_unlock(&read_file_lock);
+            strncpy(options_copy, options, SIZE_OF_OPTIONS_STRING);
+
+            exec_vector[1] = strtok_r(options_copy, delim, &saveptr);
+            exec_vector[2] = strtok_r(NULL, delim, &saveptr);
+            exec_vector[3] = strtok_r(NULL, delim, &saveptr);
+            exec_vector[4] = strtok_r(NULL, delim, &saveptr);
+            exec_vector[5] = strtok_r(NULL, delim, &saveptr);
+            exec_vector[6] = strtok_r(NULL, delim, &saveptr);
+            exec_vector[7] = strtok_r(NULL, delim, &saveptr);
+            exec_vector[7][2] = 0;
+            
+            if ((child = fork())) {
+                waitpid(child, NULL, 0);
+
+                pthread_mutex_lock(&write_file_lock);
+                fputs(options, fout);
+                fflush(fout);
+                pthread_mutex_unlock(&write_file_lock);
+
+                printf("Ending simulation: %s, %s, %s, %s, %s, %s\n", exec_vector[1], exec_vector[2], exec_vector[3], exec_vector[4], exec_vector[7], exec_vector[6]);
+            } else {
+                printf("Running simulation: %s, %s, %s, %s, %s, %s\n", exec_vector[1], exec_vector[2], exec_vector[3], exec_vector[4], exec_vector[7], exec_vector[6]);
+
+                execve(python_file, exec_vector, NULL);
+            }
+        }    
+    }
+    
+    return NULL;
+}
+
+int main(int argc, char *argv[]) {
+    int num_processes = 4;
+    pthread_t *other_threads;
+    
+    if (argc > 1) {
+        num_processes = atoi(argv[1]);
+    }
+
+    printf("Removing redundant runs.\n");
+    system("echo 'comm -23 <(sort -u test_options) <(sort -u completed_options) > sorted_options' | bash");
+    system("sort -R sorted_options > curr_options");
+
+    fin = fopen("curr_options", "r");
+    fout = fopen("completed_options", "a");
+    pthread_mutex_init(&read_file_lock, NULL);
+    pthread_mutex_init(&write_file_lock, NULL);
+    other_threads = malloc(sizeof(*other_threads) * num_processes);
+
+    printf("Creating threads.\n");
+    for (int i = 0; i < num_processes; i++) {
+        pthread_create(other_threads + i, NULL, runner, NULL);
+    }
+
+    for (int i = 0; i < num_processes; i++) {
+        pthread_join(other_threads[i], NULL);
+    }
+    printf("All threads complete.\n");
+
+    free(other_threads);
+    pthread_mutex_destroy(&write_file_lock);
+    pthread_mutex_destroy(&read_file_lock);
+    fclose(fout);
+    fclose(fin);
+
+    return 0;
+}

+ 343 - 0
dhtpir_simulation/test_harness.py

@@ -0,0 +1,343 @@
+#!/usr/bin/env python3
+
+from contextlib import contextmanager
+from random import Random
+from collections import defaultdict
+import numpy as np
+import resource
+import argparse
+import sys
+import os
+
+directory = os.path.expanduser('library')
+sys.path.insert(1, directory)
+
+from dht_common import generate_file, KNOWN_NODE
+from dht_simulator import DHT_Simulator
+from base_node import Base_Node
+from base_client import Base_Client
+from rcp_node import RCP_Quorum
+from rcp_client import RCP_Client
+from qp_node import QP_Quorum
+from qp_client import QP_Client
+from qplasthop_node import QPLastHop_Quorum
+from qplasthop_client import QPLastHop_Client
+from dhtpir_node import DHTPIR_Quorum
+from dhtpir_client import DHTPIR_Client
+
+##
+# This functionality allows us to temporarily change our working directory
+#
+# @input newDir - the new directory (relative to our current position) we want to be in
+# @input makeNew - whether to create the directory if it does not already exist
+@contextmanager
+def cd(newDir, makeNew):
+    prevDir = os.getcwd()
+    directory = os.path.expanduser(newDir)
+    if not os.path.exists(directory) and makeNew:
+        os.makedirs(directory)
+    os.chdir(directory)
+    try:
+        yield
+    finally:
+        os.chdir(prevDir)
+
+##
+# This functionality allows us to temporarily change where stdout routes
+#
+# @input newOut - the file that stdout will get routed to temporarily
+@contextmanager
+def change_stdout(newOut):
+    prevOut = sys.stdout
+    sys.stdout = open(newOut, 'w')
+    try:
+        yield
+    finally:
+        sys.stdout.close()
+        sys.stdout = prevOut
+
+def main(numDocuments, documentSize, numGroups, numNodes, nodeType, clientType, seed):
+    cryptogen = Random(seed)
+
+    testbed = DHT_Simulator(nodeType, numGroups, documentSize, numNodes)
+    client = clientType(testbed, KNOWN_NODE, documentSize, numNodes)
+
+    documentIDs = []
+    print("Inserting files.")
+    for i in range(numDocuments):
+        document = generate_file(documentSize, cryptogen)
+        documentIDs.append(client.insert_file(document))
+
+    clientPubRounds = client.get_num_rounds()
+    clientPubMessagesSent = client.get_num_messages_sent()
+    clientPubMessagesRecv = client.get_num_messages_recv()
+    clientPubBytesSent = client.get_num_bytes_sent()
+    clientPubBytesRecv = client.get_num_bytes_recv()
+
+    numPubRounds = []
+    numPubMessagesSent = []
+    numPubMessagesRecv = []
+    numPubBytesSent = []
+    numPubBytesRecv = []
+    numPubNodesInSample = 0
+
+    for i in range(numGroups):
+        if nodeType != Base_Node:
+            for j in range(numNodes):
+                currNumRounds = testbed.get_num_rounds(i, j)
+                currNumMessagesSent = testbed.get_num_messages_sent(i, j)
+                currNumMessagesRecv = testbed.get_num_messages_recv(i, j)
+                currNumBytesSent = testbed.get_num_bytes_sent(i, j)
+                currNumBytesRecv = testbed.get_num_bytes_recv(i, j)
+
+                numPubRounds.append(currNumRounds)
+                numPubMessagesSent.append(currNumMessagesSent)
+                numPubMessagesRecv.append(currNumMessagesRecv)
+                numPubBytesSent.append(currNumBytesSent)
+                numPubBytesRecv.append(currNumBytesRecv)
+
+                numPubNodesInSample += 1
+        else:
+            currNumRounds = testbed.get_num_rounds_base(i)
+            currNumMessagesSent = testbed.get_num_messages_sent_base(i)
+            currNumMessagesRecv = testbed.get_num_messages_recv_base(i)
+            currNumBytesSent = testbed.get_num_bytes_sent_base(i)
+            currNumBytesRecv = testbed.get_num_bytes_recv_base(i)
+            
+            numPubRounds.append(currNumRounds)
+            numPubMessagesSent.append(currNumMessagesSent)
+            numPubMessagesRecv.append(currNumMessagesRecv)
+            numPubBytesSent.append(currNumBytesSent)
+            numPubBytesRecv.append(currNumBytesRecv)
+
+            numPubNodesInSample += 1
+
+    numPubRounds = np.array(numPubRounds)
+    numPubMessagesSent = np.array(numPubMessagesSent)
+    numPubMessagesRecv = np.array(numPubMessagesRecv)
+    numPubBytesSent = np.array(numPubBytesSent)
+    numPubBytesRecv = np.array(numPubBytesRecv)
+    
+    numPubRounds = [np.mean(numPubRounds), np.percentile(numPubRounds, 25), np.percentile(numPubRounds, 50), np.percentile(numPubRounds, 75), np.std(numPubRounds)]
+    numPubMessagesSent = [np.mean(numPubMessagesSent), np.percentile(numPubMessagesSent, 25), np.percentile(numPubMessagesSent, 50), np.percentile(numPubMessagesSent, 75), np.std(numPubMessagesSent)]
+    numPubMessagesRecv = [np.mean(numPubMessagesRecv), np.percentile(numPubMessagesRecv, 25), np.percentile(numPubMessagesRecv, 50), np.percentile(numPubMessagesRecv, 75), np.std(numPubMessagesRecv)]
+    numPubBytesSent = [np.mean(numPubBytesSent), np.percentile(numPubBytesSent, 25), np.percentile(numPubBytesSent, 50), np.percentile(numPubBytesSent, 75), np.std(numPubBytesSent)]
+    numPubBytesRecv = [np.mean(numPubBytesRecv), np.percentile(numPubBytesRecv, 25), np.percentile(numPubBytesRecv, 50), np.percentile(numPubBytesRecv, 75), np.std(numPubBytesRecv)]
+
+    print("Retrieving files.")
+    for i in range(numDocuments):
+        client.retrieve_file(documentIDs[i])
+
+    numRounds = []
+    numMessagesSent = []
+    numMessagesRecv = []
+    numBytesSent = []
+    numBytesRecv = []
+    numNodesInSample = 0
+
+    allFingerTableRangeAccesses = defaultdict(lambda: 0)
+    allFingerTableAccesses = defaultdict(lambda: 0)
+    allDatabaseAccesses = defaultdict(lambda: 0)
+    allPHFGenerations = defaultdict(lambda: 0)
+    allPIRRetrievals = defaultdict(lambda: 0)
+
+    for i in range(numGroups):
+        if nodeType != Base_Node:
+            for j in range(numNodes):
+                currNumRounds = testbed.get_num_rounds(i, j)
+                currNumMessagesSent = testbed.get_num_messages_sent(i, j)
+                currNumMessagesRecv = testbed.get_num_messages_recv(i, j)
+                currNumBytesSent = testbed.get_num_bytes_sent(i, j)
+                currNumBytesRecv = testbed.get_num_bytes_recv(i, j)
+
+                numRounds.append(currNumRounds)
+                numMessagesSent.append(currNumMessagesSent)
+                numMessagesRecv.append(currNumMessagesRecv)
+                numBytesSent.append(currNumBytesSent)
+                numBytesRecv.append(currNumBytesRecv)
+
+                numNodesInSample += 1
+
+                if nodeType != RCP_Quorum:
+                    currFingerTableRangeAccesses = testbed.get_finger_table_range_accesses(i, j)
+                    for currKey in currFingerTableRangeAccesses.keys():
+                        allFingerTableRangeAccesses[currKey] += currFingerTableRangeAccesses[currKey]
+                    
+                    currFingerTableAccesses = testbed.get_finger_table_accesses(i, j)
+                    for currKey in currFingerTableAccesses.keys():
+                        allFingerTableAccesses[currKey] += currFingerTableAccesses[currKey]
+
+                if nodeType == QPLastHop_Quorum:
+                    currDatabaseAccesses = testbed.get_database_accesses(i, j)
+                    for currKey in currDatabaseAccesses.keys():
+                        allDatabaseAccesses[currKey] += currDatabaseAccesses[currKey]
+
+                if nodeType == DHTPIR_Quorum:
+                    currPHFGenerations = testbed.get_PHF_generations(i, j)
+                    for currKey in currPHFGenerations.keys():
+                        allPHFGenerations[currKey] += currPHFGenerations[currKey]
+                    
+                    currPIRRetrievals = testbed.get_PIR_retrievals(i, j)
+                    for currKey in currPIRRetrievals.keys():
+                        allPIRRetrievals[currKey] += currPIRRetrievals[currKey]
+
+
+        else:
+            currNumRounds = testbed.get_num_rounds_base(i)
+            currNumMessagesSent = testbed.get_num_messages_sent_base(i)
+            currNumMessagesRecv = testbed.get_num_messages_recv_base(i)
+            currNumBytesSent = testbed.get_num_bytes_sent_base(i)
+            currNumBytesRecv = testbed.get_num_bytes_recv_base(i)
+            
+            numRounds.append(currNumRounds)
+            numMessagesSent.append(currNumMessagesSent)
+            numMessagesRecv.append(currNumMessagesRecv)
+            numBytesSent.append(currNumBytesSent)
+            numBytesRecv.append(currNumBytesRecv)
+
+            numNodesInSample += 1
+
+    numRounds = np.array(numRounds)
+    numMessagesSent = np.array(numMessagesSent)
+    numMessagesRecv = np.array(numMessagesRecv)
+    numBytesSent = np.array(numBytesSent)
+    numBytesRecv = np.array(numBytesRecv)
+    
+    numRounds = [np.mean(numRounds), np.percentile(numRounds, 25), np.percentile(numRounds, 50), np.percentile(numRounds, 75), np.std(numRounds)]
+    numMessagesSent = [np.mean(numMessagesSent), np.percentile(numMessagesSent, 25), np.percentile(numMessagesSent, 50), np.percentile(numMessagesSent, 75), np.std(numMessagesSent)]
+    numMessagesRecv = [np.mean(numMessagesRecv), np.percentile(numMessagesRecv, 25), np.percentile(numMessagesRecv, 50), np.percentile(numMessagesRecv, 75), np.std(numMessagesRecv)]
+    numBytesSent = [np.mean(numBytesSent), np.percentile(numBytesSent, 25), np.percentile(numBytesSent, 50), np.percentile(numBytesSent, 75), np.std(numBytesSent)]
+    numBytesRecv = [np.mean(numBytesRecv), np.percentile(numBytesRecv, 25), np.percentile(numBytesRecv, 50), np.percentile(numBytesRecv, 75), np.std(numBytesRecv)]
+
+    with cd('../outputs/' + nodeType.__name__ + '/' + str(numGroups) + '/' + str(numNodes) + '/' + str(numDocuments) + '/' + seed, True):
+        with change_stdout('avg_node.out'):
+            output = str(numNodesInSample) + "\n"
+            output += ",".join(map(lambda x: str(x), numRounds))
+            output += "\n"
+            output += ",".join(map(lambda x: str(x), numMessagesSent))
+            output += "\n"
+            output += ",".join(map(lambda x: str(x), numMessagesRecv))
+            output += "\n"
+            output += ",".join(map(lambda x: str(x), numBytesSent))
+            output += "\n"
+            output += ",".join(map(lambda x: str(x), numBytesRecv))
+            output += "\n"
+            print(output)
+        with change_stdout('client.out'):
+            currNumRounds = client.get_num_rounds()
+            currNumMessagesSent = client.get_num_messages_sent()
+            currNumMessagesRecv = client.get_num_messages_recv()
+            currNumBytesSent = client.get_num_bytes_sent()
+            currNumBytesRecv = client.get_num_bytes_recv()
+            output = ",".join(map(lambda x: str(x), [currNumRounds, currNumMessagesSent, currNumMessagesRecv, currNumBytesSent, currNumBytesRecv]))
+            print(output)
+        with change_stdout('avg_node_pub.out'):
+            output = str(numPubNodesInSample) + "\n"
+            output += ",".join(map(lambda x: str(x), numPubRounds))
+            output += "\n"
+            output += ",".join(map(lambda x: str(x), numPubMessagesSent))
+            output += "\n"
+            output += ",".join(map(lambda x: str(x), numPubMessagesRecv))
+            output += "\n"
+            output += ",".join(map(lambda x: str(x), numPubBytesSent))
+            output += "\n"
+            output += ",".join(map(lambda x: str(x), numPubBytesRecv))
+            output += "\n"
+            print(output)
+        with change_stdout('client_pub.out'):
+            output = ",".join(map(lambda x: str(x), [clientPubRounds, clientPubMessagesSent, clientPubMessagesRecv, clientPubBytesSent, clientPubBytesRecv]))
+            print(output)
+        with change_stdout('usage.out'):
+            resources_log = resource.getrusage(resource.RUSAGE_SELF)
+            maxmemmib = resources_log.ru_maxrss/1024
+            usertime = resources_log.ru_utime
+            systime = resources_log.ru_stime
+            output = ",".join(map(lambda x: str(x), [maxmemmib, usertime, systime]))
+            print(output)
+        if nodeType == QP_Quorum or nodeType == QPLastHop_Quorum or nodeType == DHTPIR_Quorum:
+            with change_stdout('client_latency.out'):
+                print("FT Range Accesses")
+                currFingerTableRangeAccesses = client.get_finger_table_range_accesses()
+                print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), currFingerTableRangeAccesses.items())))
+
+                print("FT Direct Accesses")
+                currFingerTableAccesses = client.get_finger_table_accesses()
+                print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), currFingerTableAccesses.items())))
+
+                if nodeType == QPLastHop_Quorum:
+                    print("Database OT Accesses")
+                    currDatabaseAccesses = client.get_database_accesses()
+                    print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), currDatabaseAccesses.items())))
+
+                if nodeType == DHTPIR_Quorum:
+                    print("PIR Retrievals")
+                    currPIRRetrievals = client.get_PIR_retrievals()
+                    print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), currPIRRetrievals.items())))
+
+            with change_stdout('all_node_calculations.out'):
+                print("FT Range Accesses")
+                print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), allFingerTableRangeAccesses.items())))
+
+                print("FT Direct Accesses")
+                print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), allFingerTableAccesses.items())))
+
+                if nodeType == QPLastHop_Quorum:
+                    print("Database OT Accesses")
+                    print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), allDatabaseAccesses.items())))
+
+                if nodeType == DHTPIR_Quorum:
+                    print("PHF Generations")
+                    print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), allPHFGenerations.items())))
+
+                    print("PIR Retrievals")
+                    print("\n".join(map(lambda x: str(x[0]) + "," + str(x[1]), allPIRRetrievals.items())))
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Experiment harness for DHTPIR")
+
+    parser.add_argument('numDocuments', metavar="numDocuments", type=int, help="The number of documents in the experiment")
+    parser.add_argument('sizeOfDocuments', metavar="sizeOfDocuments", type=int, help="The size of the documents in the experiment")
+    parser.add_argument('numGroups', metavar="numGroups", type=int, help="The number of groups in the experiment")
+    parser.add_argument('numNodes', metavar="numNodes", type=int, help="The number of nodes per group in the experiment (not used for Base Nodes)")
+    parser.add_argument('-b', action='store_true', help="Use Base Nodes in the experiment (if not set, defaults to DHTPIR Nodes)")
+    parser.add_argument('-r', action='store_true', help="Use RCP Nodes in the experiment (if not set, defaults to DHTPIR Nodes)")
+    parser.add_argument('-q', action='store_true', help="Use QP Nodes in the experiment (if not set, defaults to DHTPIR Nodes)")
+    parser.add_argument('-l', action='store_true', help="Use QP Nodes with last hop OT in the experiment (if not set, defaults to DHTPIR Nodes)")
+    parser.add_argument('-d', action='store_true', help="Use DHTPIR Nodes in the experiment (if not set, defaults to DHTPIR Nodes)")
+    parser.add_argument('--seed', help="Set the seed for the file generation in this run.")
+
+    args = parser.parse_args()
+
+    numNodes = 4
+    if args.numNodes >= 4:
+        numNodes = args.numNodes
+
+    numGroups = args.numGroups
+
+    if args.d:
+        nodeType = DHTPIR_Quorum
+        clientType = DHTPIR_Client
+    elif args.l:
+        nodeType = QPLastHop_Quorum
+        clientType = QPLastHop_Client
+    elif args.q:
+        nodeType = QP_Quorum
+        clientType = QP_Client
+    elif args.r:
+        nodeType = RCP_Quorum
+        clientType = RCP_Client
+    elif args.b:
+        nodeType = Base_Node
+        clientType = Base_Client
+
+        numGroups *= numNodes
+        numNodes = 1
+    else:
+        nodeType = DHTPIR_Quorum
+        clientType = DHTPIR_Client
+
+    seed = ""
+    if args.seed:
+        seed = args.seed
+
+    main(args.numDocuments, args.sizeOfDocuments, numGroups, numNodes, nodeType, clientType, seed)
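Each completed run writes its results beneath ../outputs/<NodeType>/<numGroups>/<numNodes>/<numDocuments>/<seed>/ (following the cd() call in main above), producing avg_node.out, client.out, avg_node_pub.out, client_pub.out, and usage.out, plus client_latency.out and all_node_calculations.out for the quorum-based node types. A minimal sketch for locating those per-seed result directories for one configuration; the collect_runs name and the example parameter values are illustrative only, not part of the harness:

    from pathlib import Path

    def collect_runs(node_type="Base_Node", groups=1024, nodes=1, docs=1600, root="outputs"):
        # Layout mirrors cd('../outputs/' + nodeType.__name__ + '/' + ...):
        # <root>/<NodeType>/<numGroups>/<numNodes>/<numDocuments>/<seed>/
        base = Path(root) / node_type / str(groups) / str(nodes) / str(docs)
        return sorted(p for p in base.iterdir() if p.is_dir())

    # e.g. print the seeds that produced an avg_node.out file:
    # for run in collect_runs():
    #     if (run / "avg_node.out").exists():
    #         print(run.name)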

+ 6 - 0
dhtpir_simulation/timetests.sh

@@ -0,0 +1,6 @@
+#!/bin/bash
+
+for i in {1..10}
+do
+	time ./test_harness.py -d --seed $i 100 1024 10000 20 >> test.out
+done

+ 7 - 0
outputs/Base_Node/1024/1/1600/a/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.962890625,4.0,8.0,16.0,121.886003469
+21.962890625,4.0,8.0,16.0,121.886003469
+21.962890625,4.0,8.0,16.0,121.886003469
+2001.7578125,1080.0,1380.0,2920.0,2795.51007825
+1926.40625,1072.0,1312.0,2672.0,2378.028692
+
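For reference, each avg_node.out follows the format emitted by the writer code above: the first line is the number of nodes in the sample, followed by five comma-separated rows giving mean, 25th percentile, median, 75th percentile, and standard deviation for rounds, messages sent, messages received, bytes sent, and bytes received (the final print leaves a trailing blank line). A minimal parsing sketch under that assumption; parse_avg_node is a hypothetical helper, not part of this commit:

    def parse_avg_node(path):
        # Returns (num_nodes, {metric: [mean, p25, median, p75, std]}).
        metrics = ["rounds", "messages_sent", "messages_recv", "bytes_sent", "bytes_recv"]
        with open(path) as f:
            rows = [line.strip() for line in f if line.strip()]
        num_nodes = int(rows[0])
        stats = {name: [float(x) for x in row.split(",")]
                 for name, row in zip(metrics, rows[1:6])}
        return num_nodes, stats

    # e.g. (path illustrative):
    # n, stats = parse_avg_node("outputs/Base_Node/1024/1/1600/a/avg_node.out")
    # print(n, stats["bytes_sent"][0])  # sample size and mean bytes sent per node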

+ 7 - 0
outputs/Base_Node/1024/1/1600/a/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.9814453125,2.0,4.0,8.0,60.9430017345
+10.9814453125,2.0,4.0,8.0,60.9430017345
+10.9814453125,2.0,4.0,8.0,60.9430017345
+213.37890625,36.0,72.0,144.0,1218.67255723
+1750.703125,1040.0,1152.0,2264.0,1644.76208557
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/a/client.out

@@ -0,0 +1 @@
+22490,22490,22490,1972640,2049800

+ 1 - 0
outputs/Base_Node/1024/1/1600/a/client_pub.out

@@ -0,0 +1 @@
+11245,11245,11245,1792720,218500

+ 1 - 0
outputs/Base_Node/1024/1/1600/a/usage.out

@@ -0,0 +1 @@
+23.94921875,5.50118,0.031959999999999995

+ 7 - 0
outputs/Base_Node/1024/1/1600/b/avg_node.out

@@ -0,0 +1,7 @@
+1024
+22.03515625,4.0,8.0,16.0,121.495879618
+22.03515625,4.0,8.0,16.0,121.495879618
+22.03515625,4.0,8.0,16.0,121.495879618
+2003.203125,1080.0,1460.0,2520.0,2749.90722389
+1927.5625,1072.0,1376.0,2408.0,2332.60204677
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/b/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+11.017578125,2.0,4.0,8.0,60.7479398088
+11.017578125,2.0,4.0,8.0,60.7479398088
+11.017578125,2.0,4.0,8.0,60.7479398088
+214.1015625,36.0,72.0,144.0,1214.88279617
+1751.28125,1040.0,1168.0,2168.0,1601.44777729
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/b/client.out

@@ -0,0 +1 @@
+22564,22564,22564,1973824,2051280

+ 1 - 0
outputs/Base_Node/1024/1/1600/b/client_pub.out

@@ -0,0 +1 @@
+11282,11282,11282,1793312,219240

+ 1 - 0
outputs/Base_Node/1024/1/1600/b/usage.out

@@ -0,0 +1 @@
+23.9609375,5.553722,0.044013

+ 7 - 0
outputs/Base_Node/1024/1/1600/c/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.9765625,4.0,8.0,16.0,121.99497075
+21.9765625,4.0,8.0,16.0,121.99497075
+21.9765625,4.0,8.0,16.0,121.99497075
+2002.03125,1080.0,1720.0,2620.0,2754.52110158
+1926.625,1072.0,1584.0,2496.0,2332.53705423
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/c/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.98828125,2.0,4.0,8.0,60.9974853748
+10.98828125,2.0,4.0,8.0,60.9974853748
+10.98828125,2.0,4.0,8.0,60.9974853748
+213.515625,36.0,72.0,144.0,1219.84301618
+1750.8125,1040.0,1272.0,2212.0,1589.32252543
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/c/client.out

@@ -0,0 +1 @@
+22504,22504,22504,1972864,2050080

+ 1 - 0
outputs/Base_Node/1024/1/1600/c/client_pub.out

@@ -0,0 +1 @@
+11252,11252,11252,1792832,218640

+ 1 - 0
outputs/Base_Node/1024/1/1600/c/usage.out

@@ -0,0 +1 @@
+23.9296875,5.448587,0.036003

+ 7 - 0
outputs/Base_Node/1024/1/1600/d/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.814453125,4.0,8.0,16.0,119.971614262
+21.814453125,4.0,8.0,16.0,119.971614262
+21.814453125,4.0,8.0,16.0,119.971614262
+1998.7890625,1080.0,1440.0,2530.0,2754.37948477
+1924.03125,1072.0,1344.0,2420.0,2348.98525943
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/d/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.9072265625,2.0,4.0,8.0,59.985807131
+10.9072265625,2.0,4.0,8.0,59.985807131
+10.9072265625,2.0,4.0,8.0,59.985807131
+211.89453125,36.0,72.0,144.0,1199.62558148
+1749.515625,1040.0,1152.0,2180.0,1644.66663123
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/d/client.out

@@ -0,0 +1 @@
+22338,22338,22338,1970208,2046760

+ 1 - 0
outputs/Base_Node/1024/1/1600/d/client_pub.out

@@ -0,0 +1 @@
+11169,11169,11169,1791504,216980

+ 1 - 0
outputs/Base_Node/1024/1/1600/d/usage.out

@@ -0,0 +1 @@
+23.8671875,5.223151,0.043959

+ 7 - 0
outputs/Base_Node/1024/1/1600/e/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.900390625,4.0,8.0,16.0,120.939816838
+21.900390625,4.0,8.0,16.0,120.939816838
+21.900390625,4.0,8.0,16.0,120.939816838
+2000.5078125,1080.0,1460.0,2830.0,2731.54398731
+1925.40625,1072.0,1376.0,2608.0,2317.25144783
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/e/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.9501953125,2.0,4.0,8.0,60.4699084188
+10.9501953125,2.0,4.0,8.0,60.4699084188
+10.9501953125,2.0,4.0,8.0,60.4699084188
+212.75390625,36.0,72.0,144.0,1209.36371845
+1750.203125,1040.0,1192.0,2264.0,1593.59915639
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/e/client.out

@@ -0,0 +1 @@
+22426,22426,22426,1971616,2048520

+ 1 - 0
outputs/Base_Node/1024/1/1600/e/client_pub.out

@@ -0,0 +1 @@
+11213,11213,11213,1792208,217860

+ 1 - 0
outputs/Base_Node/1024/1/1600/e/usage.out

@@ -0,0 +1 @@
+23.77734375,5.716809,0.031982

+ 7 - 0
outputs/Base_Node/1024/1/1600/f/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.6953125,4.0,8.0,16.0,120.939296407
+21.6953125,4.0,8.0,16.0,120.939296407
+21.6953125,4.0,8.0,16.0,120.939296407
+1996.40625,1080.0,1560.0,2490.0,2777.10329479
+1922.125,1072.0,1456.0,2400.0,2360.25771567
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/f/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.84765625,2.0,4.0,8.0,60.4696482037
+10.84765625,2.0,4.0,8.0,60.4696482037
+10.84765625,2.0,4.0,8.0,60.4696482037
+210.703125,36.0,72.0,144.0,1209.14604654
+1748.5625,1040.0,1232.0,2176.0,1623.93678559
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/f/client.out

@@ -0,0 +1 @@
+22216,22216,22216,1968256,2044320

+ 1 - 0
outputs/Base_Node/1024/1/1600/f/client_pub.out

@@ -0,0 +1 @@
+11108,11108,11108,1790528,215760

+ 1 - 0
outputs/Base_Node/1024/1/1600/f/usage.out

@@ -0,0 +1 @@
+23.84765625,5.616389,0.023967

+ 7 - 0
outputs/Base_Node/1024/1/1600/g/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.751953125,4.0,8.0,16.0,120.731320518
+21.751953125,4.0,8.0,16.0,120.731320518
+21.751953125,4.0,8.0,16.0,120.731320518
+1997.5390625,1080.0,1660.0,2530.0,2764.63184824
+1923.03125,1072.0,1520.0,2432.0,2347.38978687
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/g/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.8759765625,2.0,4.0,8.0,60.365660259
+10.8759765625,2.0,4.0,8.0,60.365660259
+10.8759765625,2.0,4.0,8.0,60.365660259
+211.26953125,36.0,72.0,144.0,1207.07384335
+1749.015625,1040.0,1240.0,2180.0,1609.42647968
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/g/client.out

@@ -0,0 +1 @@
+22274,22274,22274,1969184,2045480

+ 1 - 0
outputs/Base_Node/1024/1/1600/g/client_pub.out

@@ -0,0 +1 @@
+11137,11137,11137,1790992,216340

+ 1 - 0
outputs/Base_Node/1024/1/1600/g/usage.out

@@ -0,0 +1 @@
+23.85546875,5.4582370000000004,0.067928

+ 7 - 0
outputs/Base_Node/1024/1/1600/h/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.97265625,4.0,8.0,16.0,121.82633337
+21.97265625,4.0,8.0,16.0,121.82633337
+21.97265625,4.0,8.0,16.0,121.82633337
+2001.953125,1080.0,1520.0,2400.0,2775.32587011
+1926.5625,1072.0,1424.0,2336.0,2359.78609912
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/h/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.986328125,2.0,4.0,8.0,60.9131666849
+10.986328125,2.0,4.0,8.0,60.9131666849
+10.986328125,2.0,4.0,8.0,60.9131666849
+213.4765625,36.0,72.0,144.0,1218.17622689
+1750.78125,1040.0,1200.0,2160.0,1634.28761687
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/h/client.out

@@ -0,0 +1 @@
+22500,22500,22500,1972800,2050000

+ 1 - 0
outputs/Base_Node/1024/1/1600/h/client_pub.out

@@ -0,0 +1 @@
+11250,11250,11250,1792800,218600

+ 1 - 0
outputs/Base_Node/1024/1/1600/h/usage.out

@@ -0,0 +1 @@
+23.9296875,5.455235,0.047993

+ 7 - 0
outputs/Base_Node/1024/1/1600/i/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.8125,4.0,8.0,14.5,120.777575138
+21.8125,4.0,8.0,14.5,120.777575138
+21.8125,4.0,8.0,14.5,120.777575138
+1998.75,1080.0,1600.0,2480.0,2749.56786377
+1924.0,1072.0,1488.0,2400.0,2338.70434215
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/i/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.90625,2.0,4.0,7.25,60.3887875691
+10.90625,2.0,4.0,7.25,60.3887875691
+10.90625,2.0,4.0,7.25,60.3887875691
+211.875,36.0,72.0,141.0,1207.71656314
+1749.5,1040.0,1248.0,2176.0,1623.16873738
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/i/client.out

@@ -0,0 +1 @@
+22336,22336,22336,1970176,2046720

+ 1 - 0
outputs/Base_Node/1024/1/1600/i/client_pub.out

@@ -0,0 +1 @@
+11168,11168,11168,1791488,216960

+ 1 - 0
outputs/Base_Node/1024/1/1600/i/usage.out

@@ -0,0 +1 @@
+23.9296875,5.156299,0.031952

+ 7 - 0
outputs/Base_Node/1024/1/1600/j/avg_node.out

@@ -0,0 +1,7 @@
+1024
+21.626953125,4.0,8.0,16.0,120.703299944
+21.626953125,4.0,8.0,16.0,120.703299944
+21.626953125,4.0,8.0,16.0,120.703299944
+1995.0390625,1080.0,1360.0,2730.0,2876.29298344
+1921.03125,1072.0,1296.0,2568.0,2456.70662402
+

+ 7 - 0
outputs/Base_Node/1024/1/1600/j/avg_node_pub.out

@@ -0,0 +1,7 @@
+1024
+10.8134765625,2.0,4.0,8.0,60.3516499718
+10.8134765625,2.0,4.0,8.0,60.3516499718
+10.8134765625,2.0,4.0,8.0,60.3516499718
+210.01953125,36.0,72.0,144.0,1206.33620143
+1748.015625,1040.0,1136.0,2260.0,1701.16228643
+

+ 1 - 0
outputs/Base_Node/1024/1/1600/j/client.out

@@ -0,0 +1 @@
+22146,22146,22146,1967136,2042920

+ 1 - 0
outputs/Base_Node/1024/1/1600/j/client_pub.out

@@ -0,0 +1 @@
+11073,11073,11073,1789968,215060

Some files were not shown because too many files changed in this diff