Explorar o código

Nomenclature change: public/private channels become ID/token channels

Ian Goldberg hai 8 meses
pai
achega
d1cc60bfdc

+ 15 - 15
App/appconfig.cpp

@@ -76,22 +76,22 @@ bool config_parse(Config &config, const std::string configstr,
                     config.msg_size = pentry.second.get_value<uint16_t>();
                 } else if (!pentry.first.compare("user_count")) {
                     config.user_count = pentry.second.get_value<uint32_t>();
-                } else if (!pentry.first.compare("priv_out")) {
-                    config.m_priv_out = pentry.second.get_value<uint8_t>();
-                } else if (!pentry.first.compare("priv_in")) {
-                    config.m_priv_in = pentry.second.get_value<uint8_t>();
-                } else if (!pentry.first.compare("pub_out")) {
-                    config.m_pub_out = pentry.second.get_value<uint8_t>();
-                } else if (!pentry.first.compare("pub_in")) {
-                    config.m_pub_in = pentry.second.get_value<uint8_t>();
+                } else if (!pentry.first.compare("token_out")) {
+                    config.m_token_out = pentry.second.get_value<uint8_t>();
+                } else if (!pentry.first.compare("token_in")) {
+                    config.m_token_in = pentry.second.get_value<uint8_t>();
+                } else if (!pentry.first.compare("id_out")) {
+                    config.m_id_out = pentry.second.get_value<uint8_t>();
+                } else if (!pentry.first.compare("id_in")) {
+                    config.m_id_in = pentry.second.get_value<uint8_t>();
                 } else if (!pentry.first.compare("master_secret")) {
                     // Currently hardcoding an AES key for client <->
                     // server communication, but in reality, a key
                     // exchange would be done
                     std::string hex_key = pentry.second.data();
                     memcpy(config.master_secret, hex_key.c_str(), SGX_AESGCM_KEY_SIZE);
-                } else if (!pentry.first.compare("private_routing")) {
-                    config.private_routing = pentry.second.get_value<bool>();
+                } else if (!pentry.first.compare("token_channel")) {
+                    config.token_channel = pentry.second.get_value<bool>();
                 } else {
                     std::cerr << "Unknown field in params: " <<
                         pentry.first << "\n";
@@ -161,12 +161,12 @@ bool config_parse(Config &config, const std::string configstr,
     EnclaveAPIParams apiparams;
     apiparams.user_count = config.user_count;
     apiparams.msg_size = config.msg_size;
-    apiparams.m_priv_out = config.m_priv_out;
-    apiparams.m_priv_in = config.m_priv_in;
-    apiparams.m_pub_out = config.m_pub_out;
-    apiparams.m_pub_in = config.m_pub_in;
+    apiparams.m_token_out = config.m_token_out;
+    apiparams.m_token_in = config.m_token_in;
+    apiparams.m_id_out = config.m_id_out;
+    apiparams.m_id_in = config.m_id_in;
     memcpy(apiparams.master_secret, config.master_secret, SGX_AESGCM_KEY_SIZE);
-    apiparams.private_routing = config.private_routing;
+    apiparams.token_channel = config.token_channel;
     nodenum_t num_nodes = (nodenum_t)(config.nodes.size());
     std::vector<EnclaveAPINodeConfig> apinodeconfigs;
     apinodeconfigs.resize(num_nodes);

+ 5 - 5
App/appconfig.hpp

@@ -26,11 +26,11 @@ struct Config {
     // global params
     uint32_t user_count;
     uint16_t msg_size;
-    uint8_t m_priv_out;
-    uint8_t m_priv_in;
-    uint8_t m_pub_out;
-    uint8_t m_pub_in;
-    bool private_routing;
+    uint8_t m_token_out;
+    uint8_t m_token_in;
+    uint8_t m_id_out;
+    uint8_t m_id_in;
+    bool token_channel;
     uint16_t nthreads;
     // config for each node
     std::vector<NodeConfig> nodes;

+ 10 - 10
App/launch

@@ -73,13 +73,13 @@ if __name__ == "__main__":
     aparse.add_argument('-u', default=None,
         help='override max number of users')
     aparse.add_argument('-B', default=None,
-        help='override max number of outgoing private messages per user per epoch')
+        help='override max number of outgoing token channel messages per user per epoch')
     aparse.add_argument('-b', default=None,
-        help='override max number of incoming private messages per user per epoch')
+        help='override max number of incoming token channel messages per user per epoch')
     aparse.add_argument('-C', default=None,
-        help='override max number of outgoing public messages per user per epoch')
+        help='override max number of outgoing ID channel messages per user per epoch')
     aparse.add_argument('-c', default=None,
-        help='override max number of incoming public messages per user per epoch')
+        help='override max number of incoming ID channel messages per user per epoch')
     aparse.add_argument('-l', default=None,
         help='log folder to store logs of each server in an experiment')
     aparse.add_argument('-d', default=None,
@@ -89,7 +89,7 @@ if __name__ == "__main__":
     aparse.add_argument('-w', default=None,
         help='Set number of Waksman Networks to precompute before starting epochs')
     aparse.add_argument('-r', default=None,
-        help='override if routing private channel messages (or public)')
+        help='override if routing token channel messages (or ID)')
     aparse.add_argument('-n', nargs='*', help='nodes to include')
     aparse.add_argument('cmd', nargs='*', help='experiment to run')
     args = aparse.parse_args()
@@ -100,11 +100,11 @@ if __name__ == "__main__":
     params_overrides = {
         'msg_size': args.z,
         'user_count': args.u,
-        'priv_out': args.B,
-        'priv_in': args.b,
-        'pub_out': args.C,
-        'pub_in': args.c,
-        'private_routing': args.r
+        'token_out': args.B,
+        'token_in': args.b,
+        'id_out': args.C,
+        'id_in': args.c,
+        'token_channel': args.r
     }
 
     config = mkconfig.create_json(args.m, args.p, args.n, params_overrides)

+ 8 - 8
App/mkconfig.py

@@ -66,23 +66,23 @@ if __name__ == "__main__":
     aparse.add_argument('-u', default=None,
         help='override max number of users')
     aparse.add_argument('-B', default=None,
-        help='override max number of outgoing private messages per user per epoch')
+        help='override max number of outgoing token channel messages per user per epoch')
     aparse.add_argument('-b', default=None,
-        help='override max number of incoming private messages per user per epoch')
+        help='override max number of incoming token channel messages per user per epoch')
     aparse.add_argument('-C', default=None,
-        help='override max number of outgoing public messages per user per epoch')
+        help='override max number of outgoing ID channel messages per user per epoch')
     aparse.add_argument('-c', default=None,
-        help='override max number of incoming public messages per user per epoch')
+        help='override max number of incoming ID channel messages per user per epoch')
     aparse.add_argument('-n', nargs='*', help='nodes to include')
     args = aparse.parse_args()
 
     params_overrides = {
         'msg_size': args.z,
         'user_count': args.u,
-        'priv_out': args.B,
-        'priv_in': args.b,
-        'pub_out': args.C,
-        'pub_in': args.c,
+        'token_out': args.B,
+        'token_in': args.b,
+        'id_out': args.C,
+        'id_in': args.c,
     }
     json = create_json(args.m, args.p, args.n, params_overrides)
 

+ 13 - 13
App/net.cpp

@@ -283,10 +283,10 @@ void NetIO::ing_receive_msgbundle(tcp::socket* csocket, clientid_t c_simid)
 
         bool ret;
         //Ingest the message_bundle
-        if(conf.private_routing) {
-            ret = ecall_ingest_msgbundle(c_simid, msgbundle, conf.m_priv_out);
+        if(conf.token_channel) {
+            ret = ecall_ingest_msgbundle(c_simid, msgbundle, conf.m_token_out);
         } else {
-            ret = ecall_ingest_msgbundle(c_simid, msgbundle, conf.m_pub_out);
+            ret = ecall_ingest_msgbundle(c_simid, msgbundle, conf.m_id_out);
         }
         free(msgbundle);
 
@@ -601,23 +601,23 @@ NetIO::NetIO(boost::asio::io_context &io_context, const Config &config)
 
 
     auth_size = sizeof(clientid_t) + sizeof(unsigned long) + SGX_AESGCM_KEY_SIZE;
-    uint16_t priv_out, priv_in, pub_in;
-    if(config.private_routing) {
-        priv_out = conf.m_priv_out;
-        priv_in = conf.m_priv_in;
+    uint16_t token_out, token_in, id_in;
+    if(config.token_channel) {
+        token_out = conf.m_token_out;
+        token_in = conf.m_token_in;
         msgbundle_size = SGX_AESGCM_IV_SIZE
-            + (conf.m_priv_out * (conf.msg_size + TOKEN_SIZE))
+            + (conf.m_token_out * (conf.msg_size + TOKEN_SIZE))
             + SGX_AESGCM_MAC_SIZE;
-        token_bundle_size = ((priv_out * TOKEN_SIZE)
+        token_bundle_size = ((token_out * TOKEN_SIZE)
             + SGX_AESGCM_IV_SIZE + SGX_AESGCM_MAC_SIZE);
-        mailbox_size = (priv_in * conf.msg_size) + SGX_AESGCM_IV_SIZE
+        mailbox_size = (token_in * conf.msg_size) + SGX_AESGCM_IV_SIZE
             + SGX_AESGCM_MAC_SIZE;
     } else {
-        pub_in = conf.m_pub_in;
+        id_in = conf.m_id_in;
         msgbundle_size = SGX_AESGCM_IV_SIZE
-            + (conf.m_pub_out * conf.msg_size)
+            + (conf.m_id_out * conf.msg_size)
             + SGX_AESGCM_MAC_SIZE;
-        mailbox_size = (pub_in * conf.msg_size) + SGX_AESGCM_IV_SIZE
+        mailbox_size = (id_in * conf.msg_size) + SGX_AESGCM_IV_SIZE
             + SGX_AESGCM_MAC_SIZE;
     }
 

+ 2 - 2
App/start.cpp

@@ -9,7 +9,7 @@
 int num_epochs = 4;
 // Default epoch_wait_time of 5 seconds
 int epoch_wait_time = 5;
-// Default of 12 Waksman Networks (3 per private_route for 4 epochs)
+// Default of 12 Waksman Networks (3 per token channel route for 4 epochs)
 int num_WN_to_precompute = 12;
 // We'll always run the WN precomputation in the foreground
 // TODO: Later fix this to a command line param
@@ -321,7 +321,7 @@ static void route_test(NetIO &netio, char **args)
     }
 
     // The arguments are num_nodes sets of num_nodes values.  The jth
-    // value in the ith set is the number of private routing tokens
+    // value in the ith set is the number of token channel routing tokens
     // ingestion node i holds for storage node j.
 
     // We are node i = netio.me, so ignore the other sets of values.

+ 8 - 8
Client/clientlaunch

@@ -65,13 +65,13 @@ if __name__ == "__main__":
     aparse.add_argument('-u', default=None,
         help='override max number of users')
     aparse.add_argument('-B', default=None,
-        help='override max number of outgoing private messages per user per epoch')
+        help='override max number of outgoing token channel messages per user per epoch')
     aparse.add_argument('-b', default=None,
-        help='override max number of incoming private messages per user per epoch')
+        help='override max number of incoming token channel messages per user per epoch')
     aparse.add_argument('-C', default=None,
-        help='override max number of outgoing public messages per user per epoch')
+        help='override max number of outgoing ID channel messages per user per epoch')
     aparse.add_argument('-c', default=None,
-        help='override max number of incoming public messages per user per epoch')
+        help='override max number of incoming ID channel messages per user per epoch')
     aparse.add_argument('-l', default=None,
         help='log file to store client simulator log for an experiment')
     aparse.add_argument('-n', nargs='*', help='nodes to include')
@@ -84,10 +84,10 @@ if __name__ == "__main__":
     params_overrides = {
         'msg_size': args.z,
         'user_count': args.u,
-        'priv_out': args.B,
-        'priv_in': args.b,
-        'pub_out': args.C,
-        'pub_in': args.c,
+        'token_out': args.B,
+        'token_in': args.b,
+        'id_out': args.C,
+        'id_in': args.c,
     }
 
     config = mkconfig.create_json(args.m, args.p, args.n, params_overrides)

+ 55 - 55
Client/clients.cpp

@@ -23,7 +23,7 @@ std::vector<uint16_t> storage_map;
 std::vector<uint16_t> ingestion_map;
 unsigned long setup_time;
 uint16_t nthreads = 1;
-bool private_routing;
+bool token_channel;
 
 // Split a hostport string like "127.0.0.1:12000" at the rightmost colon
 // into a host part "127.0.0.1" and a port part "12000".
@@ -84,13 +84,13 @@ void displayMessage(unsigned char *msg, uint16_t msg_size,
     unsigned char *ptr = msg;
     rid = *((clientid_t*) ptr);
     ptr+=sizeof(rid);
-    if (!private_routing) {
+    if (!token_channel) {
         prio = *((uint32_t*) ptr);
         ptr+=sizeof(prio);
     }
     sid = *((clientid_t*) ptr);
     ptr+=sizeof(sid);
-    if (private_routing) {
+    if (token_channel) {
         outbuf << std::hex
             << "Cli: "
             << std::setfill('0') << std::setw(8) << client
@@ -158,37 +158,37 @@ void displayPtMessageBundle(unsigned char *bundle, uint16_t num_out,
     printf("\n");
 }
 
-static inline uint32_t encPubMsgBundleSize(uint16_t pub_out, uint16_t msg_size)
+static inline uint32_t encPubMsgBundleSize(uint16_t id_out, uint16_t msg_size)
 {
-    return SGX_AESGCM_IV_SIZE + (uint32_t(pub_out) * msg_size)
+    return SGX_AESGCM_IV_SIZE + (uint32_t(id_out) * msg_size)
         + SGX_AESGCM_MAC_SIZE;
 }
 
-static inline uint32_t ptPubMsgBundleSize(uint16_t pub_out, uint16_t msg_size)
+static inline uint32_t ptPubMsgBundleSize(uint16_t id_out, uint16_t msg_size)
 {
-    return uint32_t(pub_out) * msg_size;
+    return uint32_t(id_out) * msg_size;
 }
 
-static inline uint32_t encMsgBundleSize(uint16_t priv_out, uint16_t msg_size)
+static inline uint32_t encMsgBundleSize(uint16_t token_out, uint16_t msg_size)
 {
-    return SGX_AESGCM_IV_SIZE + (uint32_t(priv_out) * (msg_size + TOKEN_SIZE))
+    return SGX_AESGCM_IV_SIZE + (uint32_t(token_out) * (msg_size + TOKEN_SIZE))
         + SGX_AESGCM_MAC_SIZE;
 }
 
-static inline uint32_t ptMsgBundleSize(uint16_t priv_out, uint16_t msg_size)
+static inline uint32_t ptMsgBundleSize(uint16_t token_out, uint16_t msg_size)
 {
-    return uint32_t(priv_out) * (msg_size + TOKEN_SIZE);
+    return uint32_t(token_out) * (msg_size + TOKEN_SIZE);
 }
 
-static inline uint32_t encMailboxSize(uint16_t priv_in, uint16_t msg_size)
+static inline uint32_t encMailboxSize(uint16_t token_in, uint16_t msg_size)
 {
-    return SGX_AESGCM_IV_SIZE + (uint32_t(priv_in) * msg_size)
+    return SGX_AESGCM_IV_SIZE + (uint32_t(token_in) * msg_size)
         + SGX_AESGCM_MAC_SIZE;
 }
 
-static inline uint32_t ptMailboxSize(uint16_t priv_in, uint16_t msg_size)
+static inline uint32_t ptMailboxSize(uint16_t token_in, uint16_t msg_size)
 {
-    return uint32_t(priv_in) * msg_size;
+    return uint32_t(token_in) * msg_size;
 }
 
 bool config_parse(Config &config, const std::string configstr,
@@ -212,14 +212,14 @@ bool config_parse(Config &config, const std::string configstr,
                     config.msg_size = pentry.second.get_value<uint16_t>();
                 } else if (!pentry.first.compare("user_count")) {
                     config.user_count = pentry.second.get_value<uint32_t>();
-                } else if (!pentry.first.compare("priv_out")) {
-                    config.m_priv_out = pentry.second.get_value<uint8_t>();
-                } else if (!pentry.first.compare("priv_in")) {
-                    config.m_priv_in = pentry.second.get_value<uint8_t>();
-                } else if (!pentry.first.compare("pub_out")) {
-                    config.m_pub_out = pentry.second.get_value<uint8_t>();
-                } else if (!pentry.first.compare("pub_in")) {
-                    config.m_pub_in = pentry.second.get_value<uint8_t>();
+                } else if (!pentry.first.compare("token_out")) {
+                    config.m_token_out = pentry.second.get_value<uint8_t>();
+                } else if (!pentry.first.compare("token_in")) {
+                    config.m_token_in = pentry.second.get_value<uint8_t>();
+                } else if (!pentry.first.compare("id_out")) {
+                    config.m_id_out = pentry.second.get_value<uint8_t>();
+                } else if (!pentry.first.compare("id_in")) {
+                    config.m_id_in = pentry.second.get_value<uint8_t>();
                 // A stub hardcoded shared secret to derive various
                 // keys for client <-> server communications and tokens
                 // In reality, this would be a key exchange
@@ -227,8 +227,8 @@ bool config_parse(Config &config, const std::string configstr,
                     std::string hex_key = pentry.second.data();
                     memcpy(config.master_secret, hex_key.c_str(),
                         SGX_AESGCM_KEY_SIZE);
-                } else if (!pentry.first.compare("private_routing")) {
-                    config.private_routing = pentry.second.get_value<bool>();
+                } else if (!pentry.first.compare("token_channel")) {
+                    config.token_channel = pentry.second.get_value<bool>();
                 } else {
                     std::cerr << "Unknown field in params: " <<
                         pentry.first << "\n";
@@ -403,7 +403,7 @@ void Client::initClient(clientid_t cid, uint16_t stg_id,
     id = stg_id << DEST_UID_BITS;
     id += (cid/num_storage_nodes);
 
-    token_list = new token[config.m_priv_out];
+    token_list = new token[config.m_token_out];
     memcpy(ing_key, ikey, SGX_AESGCM_KEY_SIZE);
     memcpy(stg_key, skey, SGX_AESGCM_KEY_SIZE);
 }
@@ -552,11 +552,11 @@ void Client::initializeIngSocket(boost::asio::io_context &ioc,
     Populates the buffer pt_msgbundle with a valid message pt_msgbundle.
     Assumes that it is supplied with a pt_msgbundle buffer of the correct length
 
-    num_out is either priv_out or pub_out, depending on whether we're
-    doing private or public routing
+    num_out is either token_out or id_out, depending on whether we're
+    doing token channel or ID channel routing
 
     Correct length for pt_msgbundle  = (num_out)*(msg_size) +
-        (only for private routing) (num_out)*TOKEN_SIZE
+        (only for token channel routing) (num_out)*TOKEN_SIZE
 
 */
 void Client::generateMessageBundle(uint8_t num_out, uint32_t msg_size,
@@ -573,8 +573,8 @@ void Client::generateMessageBundle(uint8_t num_out, uint32_t msg_size,
         uint32_t rid = id;
 #if 0
         uint32_t dest_uid_mask = (1 << DEST_UID_BITS) - 1;
-        if (!private_routing) {
-            // If we're testing public routing, have each user send a
+        if (!token_channel) {
+            // If we're testing ID channel routing, have each user send a
             // message to the user with the same local id, but on
             // storage server 0.
             rid &= dest_uid_mask;
@@ -584,11 +584,11 @@ void Client::generateMessageBundle(uint8_t num_out, uint32_t msg_size,
         memcpy(ptr, &rid, sizeof(rid));
         ptr += sizeof(rid);
 
-        // Priority (for public routing only)
-        if (!private_routing) {
+        // Priority (for ID channel routing only)
+        if (!token_channel) {
             uint32_t priority = 0;
 #ifdef SHOW_RECEIVED_MESSAGES
-            // If we're testing public routing, set the priority so that
+            // If we're testing ID channel routing, set the priority so that
             // messages to different users will have the highest
             // priority messages sent from users at different servers
             uint32_t id_low_bits = id &
@@ -613,9 +613,9 @@ void Client::generateMessageBundle(uint8_t num_out, uint32_t msg_size,
         ptr+=(remaining_message_size);
     }
 
-    if(private_routing) {
+    if(token_channel) {
         // Add the tokens for this msgbundle
-        memcpy(ptr, token_list, config.m_priv_out * TOKEN_SIZE);
+        memcpy(ptr, token_list, config.m_token_out * TOKEN_SIZE);
     }
 }
 
@@ -688,33 +688,33 @@ static thread_local LimitLogger
 
 void Client::sendMessageBundle()
 {
-    uint16_t priv_out = config.m_priv_out;
-    uint16_t pub_out = config.m_pub_out;
+    uint16_t token_out = config.m_token_out;
+    uint16_t id_out = config.m_id_out;
     uint16_t msg_size = config.msg_size;
     uint32_t send_pt_msgbundle_size, send_enc_msgbundle_size;
 
-    if(private_routing) {
-        send_pt_msgbundle_size = ptMsgBundleSize(priv_out, msg_size);
-        send_enc_msgbundle_size = encMsgBundleSize(priv_out, msg_size);
+    if(token_channel) {
+        send_pt_msgbundle_size = ptMsgBundleSize(token_out, msg_size);
+        send_enc_msgbundle_size = encMsgBundleSize(token_out, msg_size);
     } else {
-        send_pt_msgbundle_size = ptPubMsgBundleSize(pub_out, msg_size);
-        send_enc_msgbundle_size = encPubMsgBundleSize(pub_out, msg_size);
+        send_pt_msgbundle_size = ptPubMsgBundleSize(id_out, msg_size);
+        send_enc_msgbundle_size = encPubMsgBundleSize(id_out, msg_size);
     }
 
     unsigned char *send_pt_msgbundle =
         (unsigned char*) malloc (send_pt_msgbundle_size);
     unsigned char *send_enc_msgbundle =
         (unsigned char*) malloc (send_enc_msgbundle_size);
-    if(private_routing) {
-        generateMessageBundle(priv_out, msg_size, send_pt_msgbundle);
+    if(token_channel) {
+        generateMessageBundle(token_out, msg_size, send_pt_msgbundle);
     } else {
-        generateMessageBundle(pub_out, msg_size, send_pt_msgbundle);
+        generateMessageBundle(id_out, msg_size, send_pt_msgbundle);
     }
     encryptMessageBundle(send_enc_msgbundle_size, send_pt_msgbundle,
         send_enc_msgbundle);
 
 #ifdef VERBOSE_CLIENT
-    displayPtMessageBundle(send_pt_msgbundle, priv_out, msg_size);
+    displayPtMessageBundle(send_pt_msgbundle, token_out, msg_size);
 #endif
 
     free(send_pt_msgbundle);
@@ -935,19 +935,19 @@ using the tokens they received in this epoch
 
 void Client::epoch_process() {
 
-    uint32_t pt_token_size = uint32_t(config.m_priv_out) * TOKEN_SIZE;
+    uint32_t pt_token_size = uint32_t(config.m_token_out) * TOKEN_SIZE;
     uint32_t token_bundle_size = pt_token_size + SGX_AESGCM_IV_SIZE
         + SGX_AESGCM_MAC_SIZE;
     unsigned char *enc_tokens = nullptr;
-    uint16_t num_in = config.m_pub_in;
+    uint16_t num_in = config.m_id_in;
 
     std::vector<boost::asio::mutable_buffer> toreceive;
 
-    if (private_routing) {
+    if (token_channel) {
         enc_tokens = (unsigned char*) malloc (token_bundle_size);
         toreceive.push_back(boost::asio::buffer(enc_tokens,
             token_bundle_size));
-        num_in = config.m_priv_in;
+        num_in = config.m_token_in;
     }
 
     uint16_t msg_size = config.msg_size;
@@ -961,8 +961,8 @@ void Client::epoch_process() {
     toreceive.push_back(boost::asio::buffer(recv_enc_mailbox,
         recv_enc_mailbox_size));
 
-    // Async read the encrypted tokens (for private routing only) and
-    // encrypted mailbox (both private and public routing) for this
+    // Async read the encrypted tokens (for token channel routing only) and
+    // encrypted mailbox (both token and ID channels) for this
     // epoch
     boost::asio::async_read(*storage_sock, toreceive,
         [this, enc_tokens, token_bundle_size, pt_token_size, num_in,
@@ -999,7 +999,7 @@ void Client::epoch_process() {
         }
 #endif
 
-        if (private_routing) {
+        if (token_channel) {
             // Decrypt the token bundle
             unsigned char *enc_tkn_ptr = enc_tokens + SGX_AESGCM_IV_SIZE;
             unsigned char *enc_tkn_tag = enc_tokens + SGX_AESGCM_IV_SIZE +
@@ -1124,7 +1124,7 @@ int main(int argc, char **argv)
        exit(1);
     }
 
-    private_routing = config.private_routing;
+    token_channel = config.token_channel;
     clients = new Client[config.user_count];
 #ifdef VERBOSE_CLIENT
     printf("Number of ingestion_nodes = %ld, Number of storage_node = %ld\n",

+ 2 - 2
Client/clients.hpp

@@ -24,7 +24,7 @@ typedef uint8_t aes_key[SGX_AESGCM_KEY_SIZE];
 
   2) Messages
      Format: IV, AESGCM([CM_1], [CM_2], ..., [CM_k]), TAG
-      - each CM = Client Message for private channel has the format :
+      - each CM = Client Message for token channel has the format :
         4-byte Sender ID, 4-byte Recipient ID, 16-byte Token,
         <Upto msg_size - 24> - bytes of message data
         where the Sender ID and Recipient ID are the TEEMS client id
@@ -89,7 +89,7 @@ private:
     int sendIngAuthMessage(unsigned long epoch_no);
     int sendStgAuthMessage(unsigned long epoch_no);
 
-    void generateMessageBundle(uint8_t priv_out, uint32_t msg_size,
+    void generateMessageBundle(uint8_t token_out, uint32_t msg_size,
         unsigned char *pt_msgbundle);
 
     bool encryptMessageBundle(uint32_t bundle_size, unsigned char *pt_msgbundle,

+ 1 - 1
Enclave/client.hpp

@@ -10,7 +10,7 @@ struct IngClient {
 struct StgClient{
     sgx_aes_gcm_128bit_key_t key;
     clientid_t my_id;
-    std::vector<clientid_t> priv_friends;
+    std::vector<clientid_t> token_friends;
     uint8_t iv[SGX_AESGCM_IV_SIZE];
 };
 

+ 7 - 7
Enclave/config.cpp

@@ -70,13 +70,13 @@ bool ecall_config_load(threadid_t nthreads,
     g_teems_config.my_node_num = my_node_num;
     g_teems_config.user_count = apiparams->user_count;
     g_teems_config.msg_size = apiparams->msg_size;
-    g_teems_config.m_priv_out = apiparams->m_priv_out;
-    g_teems_config.m_priv_in = apiparams->m_priv_in;
-    g_teems_config.m_pub_out = apiparams->m_pub_out;
-    g_teems_config.m_pub_in = apiparams->m_pub_in;
+    g_teems_config.m_token_out = apiparams->m_token_out;
+    g_teems_config.m_token_in = apiparams->m_token_in;
+    g_teems_config.m_id_out = apiparams->m_id_out;
+    g_teems_config.m_id_in = apiparams->m_id_in;
     memcpy(g_teems_config.master_secret, apiparams->master_secret,
         SGX_AESGCM_KEY_SIZE);
-    g_teems_config.private_routing = apiparams->private_routing;
+    g_teems_config.token_channel = apiparams->token_channel;
     // Temporary vectors to store node numbers for nodes of different
     // types, where the node numbers are smaller than our own node
     // number
@@ -101,8 +101,8 @@ bool ecall_config_load(threadid_t nthreads,
             }
         }
         if (apinodeconfigs[i].roles & ROLE_ROUTING) {
-            // Only use weights in private routing
-            if (g_teems_config.private_routing) {
+            // Only use weights in token channel routing
+            if (g_teems_config.token_channel) {
                 nw.weight = apinodeconfigs[i].weight;
             } else  {
                 nw.weight = 1;

+ 5 - 5
Enclave/config.hpp

@@ -24,12 +24,12 @@ struct Config {
     uint32_t user_count;
     uint16_t msg_size;
     uint16_t tot_weight;
-    uint8_t m_priv_out;
-    uint8_t m_priv_in;
-    uint8_t m_pub_out;
-    uint8_t m_pub_in;
+    uint8_t m_token_out;
+    uint8_t m_token_in;
+    uint8_t m_id_out;
+    uint8_t m_id_in;
     uint8_t my_weight;
-    bool private_routing;
+    bool token_channel;
     std::vector<uint8_t> roles;
     std::vector<NodeWeight> weights;
     std::vector<nodenum_t> ingestion_nodes;

+ 5 - 5
Enclave/enclave_api.h

@@ -11,12 +11,12 @@ typedef uint32_t clientid_t;
 struct EnclaveAPIParams {
     uint32_t user_count;
     uint16_t msg_size;
-    uint8_t m_priv_out;
-    uint8_t m_priv_in;
-    uint8_t m_pub_out;
-    uint8_t m_pub_in;
+    uint8_t m_token_out;
+    uint8_t m_token_in;
+    uint8_t m_id_out;
+    uint8_t m_id_in;
     sgx_aes_gcm_128bit_key_t master_secret;
-    bool private_routing;
+    bool token_channel;
 };
 
 #define ROLE_INGESTION 0x01

+ 6 - 6
Enclave/ingest.cpp

@@ -30,10 +30,10 @@ void Ingestion::initialize(uint32_t num, uint32_t start,
 
     generateClientKeys(ESK);
 
-    if(g_teems_config.private_routing) {
-        max_buffer_size = g_teems_config.m_priv_out * cnum;
+    if(g_teems_config.token_channel) {
+        max_buffer_size = g_teems_config.m_token_out * cnum;
     } else {
-        max_buffer_size = g_teems_config.m_pub_out * cnum;
+        max_buffer_size = g_teems_config.m_id_out * cnum;
     }
     buffer = &(route_state.ingbuf);
 }
@@ -61,7 +61,7 @@ bool Ingestion::processMsgBundle(clientid_t cid, unsigned char *msgbundle,
 
     uint16_t msg_size = g_teems_config.msg_size;
     uint32_t msgbundle_size;
-    if(g_teems_config.private_routing) {
+    if(g_teems_config.token_channel) {
         msgbundle_size = num_msgs * (msg_size + TOKEN_SIZE);
     } else {
         msgbundle_size = num_msgs * msg_size;
@@ -87,7 +87,7 @@ bool Ingestion::processMsgBundle(clientid_t cid, unsigned char *msgbundle,
     // benchmark, we'll let the clients choose them arbitrarily.
 
     unsigned char *sid_ptr = dec_msgbundle + sizeof(clientid_t);
-    if (!g_teems_config.private_routing) {
+    if (!g_teems_config.token_channel) {
         // Leave room for the priority
         sid_ptr += sizeof(uint32_t);
     }
@@ -98,7 +98,7 @@ bool Ingestion::processMsgBundle(clientid_t cid, unsigned char *msgbundle,
 
     // Verify the tokens from end of the msgbundle
 
-    if (g_teems_config.private_routing) {
+    if (g_teems_config.token_channel) {
         unsigned char token_body[TOKEN_SIZE];
         const sgx_aes_gcm_128bit_key_t *pTSK = &(g_teems_config.TSK);
         unsigned char *dm_ptr = dec_msgbundle;

+ 30 - 30
Enclave/route.cpp

@@ -28,7 +28,7 @@ static void show_messages(const char *label, const unsigned char *buffer,
     }
     for (size_t i=0; i<num && i<300; ++i) {
         const uint32_t *ibuf = (const uint32_t *)buffer;
-        if (g_teems_config.private_routing) {
+        if (g_teems_config.token_channel) {
             printf("%3d R:%08x S:%08x [%08x]\n", i, ibuf[0], ibuf[1],
                 ibuf[2]);
         } else {
@@ -51,18 +51,18 @@ bool route_init()
 
     // Each ingestion node will have at most
     // ceil(user_count/num_ingestion_nodes) users, and each user will
-    // send at most m_priv_out messages.
+    // send at most m_token_out messages.
     uint32_t users_per_ing = CEILDIV(g_teems_config.user_count,
         g_teems_config.num_ingestion_nodes);
     uint32_t tot_msg_per_ing;
-    if (g_teems_config.private_routing) {
-        tot_msg_per_ing = users_per_ing * g_teems_config.m_priv_out;
+    if (g_teems_config.token_channel) {
+        tot_msg_per_ing = users_per_ing * g_teems_config.m_token_out;
     } else {
-        tot_msg_per_ing = users_per_ing * g_teems_config.m_pub_out;
+        tot_msg_per_ing = users_per_ing * g_teems_config.m_id_out;
     }
 
     // Compute the maximum number of messages we could receive in round 1
-    // In private routing, each ingestion node will send us an
+    // In token channel routing, each ingestion node will send us an
     // our_weight/tot_weight fraction of the messages they hold
     uint32_t max_msg_from_each_ing;
     max_msg_from_each_ing = CEILDIV(tot_msg_per_ing, g_teems_config.tot_weight) *
@@ -81,10 +81,10 @@ bool route_init()
 
     // And so can receive at most this many messages
     uint32_t tot_msg_per_stg;
-    if (g_teems_config.private_routing) {
-        tot_msg_per_stg = users_per_stg * g_teems_config.m_priv_in;
+    if (g_teems_config.token_channel) {
+        tot_msg_per_stg = users_per_stg * g_teems_config.m_token_in;
     } else {
-        tot_msg_per_stg = users_per_stg * g_teems_config.m_pub_in;
+        tot_msg_per_stg = users_per_stg * g_teems_config.m_id_in;
     }
 
     // Which will be at most this many from us
@@ -114,8 +114,8 @@ bool route_init()
     max_stg_msgs = (tot_msg_per_stg/g_teems_config.tot_weight
         + g_teems_config.tot_weight) * g_teems_config.tot_weight;
 
-    // Calculating public-routing buffer sizes
-    // Weights are not used in public routing
+    // Calculating ID channel buffer sizes
+    // Weights are not used in ID channel routing
     // Round up to a multiple of num_routing_nodes
     uint32_t max_round1b_msgs_to_adj_rtr = CEILDIV(
         (g_teems_config.num_routing_nodes-1)*(g_teems_config.num_routing_nodes-1),
@@ -154,7 +154,7 @@ bool route_init()
             printf("route_init alloc %u msgs\n", max_round2_msgs);
             printf("route_init H3 heap %u\n", g_peak_heap_used);
 #endif
-            if (!g_teems_config.private_routing) {
+            if (!g_teems_config.token_channel) {
                 route_state.round1a.alloc(max_round1a_msgs);
                 route_state.round1a_sorted.alloc(max_round1a_msgs +
                     max_round1b_msgs_to_adj_rtr);
@@ -205,7 +205,7 @@ bool route_init()
     }
     if (my_roles & ROLE_ROUTING) {
         sort_precompute_evalplan(max_round2_msgs, nthreads);
-        if(!g_teems_config.private_routing) {
+        if(!g_teems_config.token_channel) {
             sort_precompute_evalplan(max_round1a_msgs, nthreads);
             sort_precompute_evalplan(2*max_round1b_msgs_to_adj_rtr, nthreads);
         }
@@ -264,14 +264,14 @@ size_t ecall_precompute_sort(int sizeidx)
         }
         if (my_roles & ROLE_ROUTING) {
             used_sizes.push_back(route_state.max_round2_msgs);
-            if(!g_teems_config.private_routing) {
+            if(!g_teems_config.token_channel) {
                 used_sizes.push_back(route_state.max_round1a_msgs);
                 used_sizes.push_back(2*route_state.max_round1b_msgs_to_adj_rtr);
             }
         }
         if (my_roles & ROLE_STORAGE) {
             used_sizes.push_back(route_state.max_stg_msgs);
-            if(!g_teems_config.private_routing) {
+            if(!g_teems_config.token_channel) {
                 used_sizes.push_back(route_state.max_stg_msgs);
             }
         }
@@ -342,7 +342,7 @@ static void round1_received(NodeCommState &nodest,
     pthread_mutex_unlock(&route_state.round1.mutex);
 
     // What is the next message we expect from this node?
-    if (g_teems_config.private_routing) {
+    if (g_teems_config.token_channel) {
         if ((our_roles & ROLE_STORAGE) && (their_roles & ROLE_ROUTING)) {
             nodest.in_msg_get_buf = [&](NodeCommState &commst,
                     uint32_t tot_enc_chunk_size) {
@@ -743,7 +743,7 @@ static void send_round_robin_msgs(MsgBuffer &round, const uint8_t *msgs,
     }
 }
 
-// Send the round 1a messages from the round 1 buffer, which only occurs in public-channel routing.
+// Send the round 1a messages from the round 1 buffer, which only occurs in ID-channel routing.
 // msgs points to the message buffer, indices points to the sorted indices, and N is the number
 // of non-padding items.
 static void send_round1a_msgs(const uint8_t *msgs, const UidPriorityKey *indices, uint32_t N) {
@@ -805,7 +805,7 @@ static void send_round1a_msgs(const uint8_t *msgs, const UidPriorityKey *indices
     }
 }
 
-// Send the round 1b messages from the round 1a buffer, which only occurs in public-channel routing.
+// Send the round 1b messages from the round 1a buffer, which only occurs in ID-channel routing.
 // msgs points to the message buffer, and N is the number of non-padding items.
 // Return the number of messages sent
 static uint32_t send_round1b_msgs(const uint8_t *msgs, uint32_t N) {
@@ -823,7 +823,7 @@ static uint32_t send_round1b_msgs(const uint8_t *msgs, uint32_t N) {
         // bytes for the receiver id in the next message we _didn't_
         // send, and 1 byte for the number of messages we have at the
         // beginning of the buffer of messages we didn't send (max
-        // pub_in) with the same receiver id
+        // id_in) with the same receiver id
         nodecom.message_start(num_msgs * msg_size + 5);
         nodecom.message_data(msgs, num_msgs * msg_size);
         uint32_t next_receiver_id = 0xffffffff;
@@ -835,15 +835,15 @@ static uint32_t send_round1b_msgs(const uint8_t *msgs, uint32_t N) {
                 num_msgs * msg_size);
             next_rid_count = 1;
 
-            // If pub_in > 1, obliviously scan messages num_msgs+1 ..
-            // num_msgs+(pub_in-1) and as long as they have the same
+            // If id_in > 1, obliviously scan messages num_msgs+1 ..
+            // num_msgs+(id_in-1) and as long as they have the same
             // receiver id as next_receiver_id, add 1 to next_rid_count (but
             // don't go past message N of course)
 
             // This count _includes_ the first message already scanned
             // above.  It is not private.
             uint8_t num_to_scan = uint8_t(std::min(N - num_msgs,
-                uint32_t(g_teems_config.m_pub_in)));
+                uint32_t(g_teems_config.m_id_in)));
             const unsigned char *scan_msg = msgs +
                 (num_msgs + 1) * msg_size;
             for (uint8_t i=1; i<num_to_scan; ++i) {
@@ -1190,7 +1190,7 @@ static void round1c_processing(void *cbpointer) {
         // priority.  Going from the end of the buffer to the beginning
         // (so as to encounter and keep the highest-priority messages
         // for any given receiver first), obliviously turn any messages
-        // over the limit of pub_in for any given receiver into padding.
+        // over the limit of id_in for any given receiver into padding.
         // Also keep track of which messages are not padding for use in
         // later compaction.
         bool *is_not_padding = new bool[round1a_sorted.inserted];
@@ -1198,7 +1198,7 @@ static void round1c_processing(void *cbpointer) {
             uint8_t *header = round1a_sorted.buf +
                 msg_size * (round1a_sorted.inserted - 1 - i);
             uint32_t receiver_id = *(uint32_t*)header;
-            uint32_t pub_in = uint32_t(g_teems_config.m_pub_in);
+            uint32_t id_in = uint32_t(g_teems_config.m_id_in);
 
             // These are the possible cases and what we need to do in
             // each case, but we have to evaluate them obliviously
@@ -1207,11 +1207,11 @@ static void round1c_processing(void *cbpointer) {
             //    next_receiver_id = receiver_id
             //    next_rid_count = 1
             //    become_padding = 0
-            // receiver_id == next_receiver_id && next_rid_count < pub_in:
+            // receiver_id == next_receiver_id && next_rid_count < id_in:
             //    next_receiver_id = receiver_id
             //    next_rid_count = next_rid_count + 1
             //    become_padding = 0
-            // receiver_id == next_receiver_id && next_rid_count >= pub_in:
+            // receiver_id == next_receiver_id && next_rid_count >= id_in:
             //    next_receiver_id = receiver_id
             //    next_rid_count = next_rid_count
             //    become_padding = 1
@@ -1222,7 +1222,7 @@ static void round1c_processing(void *cbpointer) {
             // This method (AND with -same_receiver_id) is more likely
             // to be constant time than multiplying by same_receiver_id.
             next_rid_count &= (-(uint32_t(same_receiver_id)));
-            bool become_padding = (next_rid_count >= pub_in);
+            bool become_padding = (next_rid_count >= id_in);
             next_rid_count += !become_padding;
             next_receiver_id = receiver_id;
 
@@ -1443,7 +1443,7 @@ void ecall_routing_proceed(void *cbpointer)
                 printf_with_rtclock("begin oblivious sort (%u,%u)\n", inserted,
                     route_state.tot_msg_per_ing);
 #endif
-            if (g_teems_config.private_routing) {
+            if (g_teems_config.token_channel) {
                 sort_mtobliv<UidKey>(g_teems_config.nthreads, ingbuf.buf,
                     g_teems_config.msg_size, ingbuf.inserted,
                     route_state.tot_msg_per_ing,
@@ -1491,9 +1491,9 @@ void ecall_routing_proceed(void *cbpointer)
             ocall_routing_round_complete(cbpointer, 1);
         }
     } else if (route_state.step == ROUTE_ROUND_1) {
-        if (g_teems_config.private_routing) { // private routing next round
+        if (g_teems_config.token_channel) { // Token channel routing next round
             round2_processing(my_roles, cbpointer, route_state.round1);
-        } else { // public routing next round
+        } else { // ID channel routing next round
             round1a_processing(cbpointer);
         }
     } else if (route_state.step == ROUTE_ROUND_1A) {

+ 2 - 2
Enclave/sort.hpp

@@ -92,7 +92,7 @@ struct UidKey {
     inline uint32_t index() const { return (uint32_t) uid_index; }
 };
 
-// The above and also the priority (for public routing)
+// The above and also the priority (for ID channel routing)
 struct UidPriorityKey {
     uint64_t uid_priority;
     uint32_t idx;
@@ -101,7 +101,7 @@ struct UidPriorityKey {
 };
 
 // Just the nodeid (not the per-node userid) and the priority (for
-// public routing)
+// ID channel routing)
 struct NidPriorityKey {
     uint64_t nid_priority;
     uint32_t idx;

+ 16 - 16
Enclave/storage.cpp

@@ -29,10 +29,10 @@ static bool storage_generateClientKeys(uint32_t num_clients,
     for(uint32_t i =0; i < num_clients; i++) {
         uint32_t mid = storage_state.my_storage_node_id + i;
         clients[i].my_id = mid;
-        clients[i].priv_friends.resize(g_teems_config.m_priv_out);
-        // Initialize this client's private channel friends as themself
-        for(int j = 0; j < g_teems_config.m_priv_out; j++) {
-            (clients[i].priv_friends)[j] = mid;
+        clients[i].token_friends.resize(g_teems_config.m_token_out);
+        // Initialize this client's token channel friends as themself
+        for(int j = 0; j < g_teems_config.m_token_out; j++) {
+            (clients[i].token_friends)[j] = mid;
         }
     }
 
@@ -83,7 +83,7 @@ static void* generate_all_tokens_launch(void *voidargs)
 {
     UserRange *args = (UserRange *)voidargs;
 
-    uint32_t pt_tokens_size = (g_teems_config.m_priv_out * SGX_CMAC_MAC_SIZE);
+    uint32_t pt_tokens_size = (g_teems_config.m_token_out * SGX_CMAC_MAC_SIZE);
     uint32_t enc_tokens_size = pt_tokens_size +
         SGX_AESGCM_IV_SIZE + SGX_AESGCM_MAC_SIZE;
     unsigned char token_body[pt_tokens_size];
@@ -113,11 +113,11 @@ static void* generate_all_tokens_launch(void *voidargs)
         sgx_status_t ret = SGX_SUCCESS;
         unsigned char *ptr = tkn_ptr;
         unsigned char *tkn_body_ptr = token_body;
-        for(int i = 0; i<g_teems_config.m_priv_out; i++)
+        for(int i = 0; i<g_teems_config.m_token_out; i++)
         {
             memcpy(ptr, (&(clients[lcid].my_id)), sizeof(clientid_t));
             memcpy(ptr + sizeof(clientid_t),
-                (&(clients[lcid].priv_friends[i])), sizeof(clientid_t));
+                (&(clients[lcid].token_friends[i])), sizeof(clientid_t));
             memcpy(ptr + 2 * sizeof(clientid_t),
                 &epoch_val, sizeof(epoch_val));
 
@@ -221,8 +221,8 @@ bool generate_all_tokens() {
 // if it is a padding message (the low DEST_UID_BITS of the receiver id
 // are all 1).  The function is oblivious as to whether the message is a
 // padding message.  msg_size must be a multiple of 16.
-// sender_id_offset should be set to 4 for private-channel routing, or 8
-// for public-channel routing.
+// sender_id_offset should be set to 4 for token-channel routing, or 8
+// for ID-channel routing.
 static inline void padding_sanitize_msg(unsigned char *msg,
     size_t msg_size, size_t sender_id_offset)
 {
@@ -243,7 +243,7 @@ static inline void padding_sanitize_msg(unsigned char *msg,
     uint64_t content_mask = (!((~receiver_id) & uid_mask))-1;
 
     // Mask the first 16 bytes, which includes the 8 or 12 byte header
-    // (depending on private or public-channel routing).  The first 4
+    // (depending on token or ID-channel routing).  The first 4
     // bytes are the destination id; set those to 0xffffffff if this is
     // a padding message.  Set the rest of the first 16 bytes to 0 if
     // this is a padding message (all obliviously).
@@ -275,11 +275,11 @@ static void *processMsgs_launch(void *voidargs) {
 
     uint32_t mailbox_size;
     size_t sender_id_offset;
-    if (g_teems_config.private_routing) {
-        mailbox_size = g_teems_config.m_priv_in * msg_size;
+    if (g_teems_config.token_channel) {
+        mailbox_size = g_teems_config.m_token_in * msg_size;
         sender_id_offset = 4;
     } else {
-        mailbox_size = g_teems_config.m_pub_in * msg_size;
+        mailbox_size = g_teems_config.m_id_in * msg_size;
         sender_id_offset = 8;
     }
 
@@ -460,8 +460,8 @@ void storage_received(MsgBuffer &storage_buf)
     uint32_t *dests = storage_state.dest.data();
     uint32_t stg_size = storage_state.stg_buf.bufsize;
     uint8_t *buf = storage_state.stg_buf.buf;
-    uint32_t m_in = g_teems_config.private_routing ?
-        g_teems_config.m_priv_in : g_teems_config.m_pub_in;
+    uint32_t m_in = g_teems_config.token_channel ?
+        g_teems_config.m_token_in : g_teems_config.m_id_in;
 
     uint32_t uid = *(uint32_t*)(buf);
     uid &= uid_mask;
@@ -523,7 +523,7 @@ void storage_received(MsgBuffer &storage_buf)
     storage_buf.reset();
     pthread_mutex_unlock(&storage_buf.mutex);
 
-    if (g_teems_config.private_routing) {
+    if (g_teems_config.token_channel) {
         generate_all_tokens();
     }
 

+ 21 - 21
core-time-tradeoff

@@ -1,37 +1,37 @@
 #!/usr/bin/env python3
 
 """Compute the core-time tradeoff given the experimental results for
-the private and public channels"""
+the token and ID channels"""
 
 import sys
 import csv
 import math
 
 if len(sys.argv)!=3:
-    print(f"Usage: {sys.argv[0]} <PRIV.csv file> <PUB.csv file>")
+    print(f"Usage: {sys.argv[0]} <token-channel.csv file> <id-channel.csv file>")
     sys.exit(0)
 
-priv_file = open(sys.argv[1], 'r')
-pub_file = open(sys.argv[2], 'r')
-priv_input = csv.DictReader(priv_file)
-pub_input = csv.DictReader(pub_file)
+token_file = open(sys.argv[1], 'r')
+id_file = open(sys.argv[2], 'r')
+token_input = csv.DictReader(token_file)
+id_input = csv.DictReader(id_file)
 
-priv_data = sorted(
-    filter(lambda row: row['T']=='1' and row['N']=='1048576', priv_input),
+token_data = sorted(
+    filter(lambda row: row['T']=='1' and row['N']=='1048576', token_input),
     key = lambda row: (int(row['T']), int(row['M']), int(row['N'])))
-pub_data = sorted(
-    filter(lambda row: row['T']=='1' and row['N']=='1048576', pub_input),
+id_data = sorted(
+    filter(lambda row: row['T']=='1' and row['N']=='1048576', id_input),
     key = lambda row: (int(row['T']), int(row['M']), int(row['N'])))
 
-priv_times = [ ( int(row['M']),
+token_times = [ ( int(row['M']),
     float(row['epoch_mean']) + float(row['bytes_max'])*8/13000000000,
-    float(row['wn_mean']) ) for row in priv_data ]
-pub_times = [ ( int(row['M']),
+    float(row['wn_mean']) ) for row in token_data ]
+id_times = [ ( int(row['M']),
     float(row['epoch_mean']) + float(row['bytes_max'])*8/13000000000,
-    float(row['wn_mean']) ) for row in pub_data ]
+    float(row['wn_mean']) ) for row in id_data ]
 
 # Sort the list of all times appearing in the data
-all_times = sorted([ tpl[1] for tpl in priv_times + pub_times ])
+all_times = sorted([ tpl[1] for tpl in token_times + id_times ])
 
 # Put integer times in there as well
 max_int_time = int(all_times[-1])
@@ -48,12 +48,12 @@ for target_time in interp_times:
     # Find the smallest token and ID channel configurations that
     # are at or below the target time
     try:
-        priv_conf = next(filter(lambda tpl: tpl[1] <= target_time, priv_times))
-        pub_conf = next(filter(lambda tpl: tpl[1] <= target_time, pub_times))
-        priv_cores = math.ceil((1 + priv_conf[2] / target_time) * priv_conf[0])
-        pub_cores = math.ceil((1 + pub_conf[2] / target_time) * pub_conf[0])
-        print (target_time, priv_conf[0], pub_conf[0], priv_cores,
-            pub_cores, priv_cores + pub_cores)
+        token_conf = next(filter(lambda tpl: tpl[1] <= target_time, token_times))
+        id_conf = next(filter(lambda tpl: tpl[1] <= target_time, id_times))
+        token_cores = math.ceil((1 + token_conf[2] / target_time) * token_conf[0])
+        id_cores = math.ceil((1 + id_conf[2] / target_time) * id_conf[0])
+        print (target_time, token_conf[0], id_conf[0], token_cores,
+            id_cores, token_cores + id_cores)
     except:
         # One of the channels couldn't reach a time as small as the
         # other channel could

+ 12 - 12
docker/repro

@@ -15,26 +15,26 @@ fi
 
 ./start-docker
 docker exec -it ${TEEMS_DOCKER_PREFIX}teems $DOCKER_ENV ./run_all_experiments.py
-docker exec -it ${TEEMS_DOCKER_PREFIX}teems ./logs_to_csv.py Experiments/Public Experiments/public.csv
-docker exec -it ${TEEMS_DOCKER_PREFIX}teems ./logs_to_csv.py Experiments/Private Experiments/private.csv
-docker cp ${TEEMS_DOCKER_PREFIX}teems:/root/teems/Experiments/public.csv public.csv
-docker cp ${TEEMS_DOCKER_PREFIX}teems:/root/teems/Experiments/private.csv private.csv
+docker exec -it ${TEEMS_DOCKER_PREFIX}teems ./logs_to_csv.py Experiments/ID Experiments/id-channel.csv
+docker exec -it ${TEEMS_DOCKER_PREFIX}teems ./logs_to_csv.py Experiments/Token Experiments/token-channel.csv
+docker cp ${TEEMS_DOCKER_PREFIX}teems:/root/teems/Experiments/id-channel.csv id-channel.csv
+docker cp ${TEEMS_DOCKER_PREFIX}teems:/root/teems/Experiments/token-channel.csv token-channel.csv
 echo
-echo "=== Figure 5 Public channel ==="
+echo "=== Figure 7 ID channel ==="
 echo
-( head -1 public.csv ; grep ,4,4,256 public.csv | sort -t, -n -k1,1 ) | cut -d, -f1-7
+( head -1 id-channel.csv ; grep ,4,4,256 id-channel.csv | sort -t, -n -k1,1 ) | cut -d, -f1-7
 echo
-echo "=== Figure 5 Private channel ==="
+echo "=== Figure 7 Token channel ==="
 echo
-( head -1 private.csv ; grep ,4,4,256 private.csv | sort -t, -n -k1,1 ) | cut -d, -f1-7
+( head -1 token-channel.csv ; grep ,4,4,256 token-channel.csv | sort -t, -n -k1,1 ) | cut -d, -f1-7
 echo
-echo "=== Figure 6 Public channel ==="
+echo "=== Figure 8 ID channel ==="
 echo
-( head -1 public.csv ; grep ,1,256 public.csv | sort -t, -n -k2,2 ) | cut -d, -f1-7
+( head -1 id-channel.csv ; grep ,1,256 id-channel.csv | sort -t, -n -k2,2 ) | cut -d, -f1-7
 echo
-echo "=== Figure 6 Private channel ==="
+echo "=== Figure 8 Token channel ==="
 echo
-( head -1 private.csv ; grep ,1,256 private.csv | sort -t, -n -k2,2 ) | cut -d, -f1-7
+( head -1 token-channel.csv ; grep ,1,256 token-channel.csv | sort -t, -n -k2,2 ) | cut -d, -f1-7
 echo
 if [ "$OVERLOAD_CORES" == "1" ]; then
     echo "*** Reminder: OVERLOAD_CORES is set to 1; performance may be lowered"

+ 2 - 2
docker/short-test

@@ -15,11 +15,11 @@ fi
 
 ./start-docker
 docker exec -it ${TEEMS_DOCKER_PREFIX}teems $DOCKER_ENV ./short_exp.py
-docker exec -it ${TEEMS_DOCKER_PREFIX}teems ./logs_to_csv.py Short_test/Public Short_test/public.csv
+docker exec -it ${TEEMS_DOCKER_PREFIX}teems ./logs_to_csv.py Short_test/ID Short_test/id-channel.csv
 echo
 echo "=== Short test output ==="
 echo
-docker exec -it ${TEEMS_DOCKER_PREFIX}teems cat Short_test/public.csv
+docker exec -it ${TEEMS_DOCKER_PREFIX}teems cat Short_test/id-channel.csv
 echo
 
 ./stop-docker >/dev/null

+ 18 - 18
gen_enclave_config.py

@@ -17,36 +17,36 @@ CONFIG_FILE = "Enclave/Enclave.config.xml"
     M: number of servers
     T: number of threads for each server
     B: msg_size
-    PRIVATE_ROUTE: Private (True) / Public (False) route
-    PRO: PRivate Out
-    PRI: PRivate In
-    PUO: PUblic Out
-    PUI: PUblic In
+    TOKEN_CHANNEL: Token channel (True) / ID channel (False) routing
+    TOKO: TOken channel Out
+    TOKI: TOken channel In
+    IDO: ID channel Out
+    IDI: ID channel In
     num_WN_to_precompute: The default num_WN_to_precompute is 12 in App/start.cpp
 '''
 
-def get_heap_size(N, M, T, B, PRIVATE_ROUTE=True, PRO=1, PRI=1, PUO=1, PUI=1, num_WN_to_precompute=12):
+def get_heap_size(N, M, T, B, TOKEN_CHANNEL=True, TOKO=1, TOKI=1, IDO=1, IDI=1, num_WN_to_precompute=12):
     clients_per_server = math.ceil(N/M)
 
     # Base heap of 2 MB per thread
     heap_size = 2000000 * T
 
-    num_out_mult = PRO
-    if not PRIVATE_ROUTE:
-        num_out_mult = PUO
-    num_in_mult = PRI
-    if not PRIVATE_ROUTE:
-        num_in_mult = PUI
+    num_out_mult = TOKO
+    if not TOKEN_CHANNEL:
+        num_out_mult = IDO
+    num_in_mult = TOKI
+    if not TOKEN_CHANNEL:
+        num_in_mult = IDI
 
     # Storage and Ingestion data stored per_client = 52 bytes
     heap_size += clients_per_server * (B + 60)
 
     # 2 Buffers of clients_per_server items of B size each, plus 1 of
-    # size (clients_per_server + M) items, for private routing
+    # size (clients_per_server + M) items, for token channel routing
     heap_size += (clients_per_server * B * 2) * num_out_mult
     heap_size += ((clients_per_server + M) * B) * num_in_mult
 
-    # Additional buffers for public routing
+    # Additional buffers for ID channel routing
 
     # Round (M-1)^2 up to a multiple of M
     round1b_size = (M-1)*M
@@ -54,7 +54,7 @@ def get_heap_size(N, M, T, B, PRIVATE_ROUTE=True, PRO=1, PRI=1, PUO=1, PUI=1, nu
     # Round up to a multiple of M
     colsort_size = int((wn_size + M - 1) / M) * M
 
-    if not PRIVATE_ROUTE:
+    if not TOKEN_CHANNEL:
         heap_size += (colsort_size * B * 3) + (2 * round1b_size * B)
 
     # num_WN_to_precompute times size of each WN
@@ -64,11 +64,11 @@ def get_heap_size(N, M, T, B, PRIVATE_ROUTE=True, PRO=1, PRI=1, PUO=1, PUI=1, nu
     heap_size_page_aligned = math.ceil(heap_size/4096) * 4096
     return heap_size_page_aligned
 
-def generate_config(N, M, T, B, PRIVATE_ROUTE=True, PRO=1, PRI=1, PUO=1, PUI=1, num_WN_to_precompute=12):
+def generate_config(N, M, T, B, TOKEN_CHANNEL=True, TOKO=1, TOKI=1, IDO=1, IDI=1, num_WN_to_precompute=12):
 
     cf = open(CONFIG_FILE, 'w+')
-    heap_size_page_aligned = get_heap_size(N, M, T, B, PRIVATE_ROUTE,
-        PRO, PRI, PUO, PUI, num_WN_to_precompute)
+    heap_size_page_aligned = get_heap_size(N, M, T, B, TOKEN_CHANNEL,
+        TOKO, TOKI, IDO, IDI, num_WN_to_precompute)
     hex_heap_size = hex(heap_size_page_aligned)
 
     enclave_config = '''<!-- Please refer to User's Guide for the explanation of each field -->

+ 13 - 13
gen_manifest.py

@@ -17,7 +17,7 @@ MANIFEST_FILE = "App/manifest.yaml"
     T: number of threads for each server
     B: msg_size
 '''
-def generate_manifest(N, M, T, B, PRIVATE_ROUTE = True, priv_out=1, priv_in=1, pub_out=1, pub_in=1):
+def generate_manifest(N, M, T, B, TOKEN_CHANNEL = True, token_out=1, token_in=1, id_out=1, id_in=1):
     (servers_allocation, client_allocation) = core_allocation(M, T)
     assert servers_allocation is not None
     assert client_allocation is not None
@@ -27,20 +27,20 @@ def generate_manifest(N, M, T, B, PRIVATE_ROUTE = True, priv_out=1, priv_in=1, p
   user_count: {N}
   # The size of a message in bytes
   msg_size: {B}
-  # The number of private messages each user can send per epoch
-  priv_out: {pro}
-  # The number of private messages each user can receive per epoch
-  priv_in: {pri}
-  # The number of public messages each user can send per epoch
-  pub_out: {puo}
-  # The number of public messages each user can receive per epoch
-  pub_in: {pui}
-  # Private or public routing protocol selection
-  private_routing: {PRIVATE_ROUTE}
+  # The number of token channel messages each user can send per epoch
+  token_out: {toko}
+  # The number of token channel messages each user can receive per epoch
+  token_in: {toki}
+  # The number of ID channel messages each user can send per epoch
+  id_out: {ido}
+  # The number of ID channel messages each user can receive per epoch
+  id_in: {idi}
+  # Token channel or ID channel routing protocol selection
+  token_channel: {TOKEN_CHANNEL}
   # Currently hardcoding an AES key for client -> server communications,
   # but in reality, a key exchange would be done
-  master_secret: \"AHardCodedAESKey\"\n'''.format(N = str(N), B = str(B), PRIVATE_ROUTE=str(PRIVATE_ROUTE),\
-  pro = str(priv_out), pri = str(priv_in), puo = str(pub_out), pui = str(pub_in))
+  master_secret: \"AHardCodedAESKey\"\n'''.format(N = str(N), B = str(B), TOKEN_CHANNEL=str(TOKEN_CHANNEL),\
+  toko = str(token_out), toki = str(token_in), ido = str(id_out), idi = str(id_in))
 
     # print (manifest_params)
     mf.write(manifest_params)

+ 1 - 1
mkzip

@@ -1,6 +1,6 @@
 #!/bin/bash -x
 
-BRANCH=Clients_public
+BRANCH=main
 
 rm -rf artifact_folder
 mkdir artifact_folder || exit 1

+ 16 - 16
run_all_experiments.py

@@ -30,10 +30,10 @@ epc_gib = get_max_epc_bytes() / (1<<30)
 LOG_FOLDER = "Experiments/"
 
 NUM_EPOCHS = 10
-PRIV_OUT = 1
-PRIV_IN = 1
-PUB_OUT = 1
-PUB_IN = 1
+TOKEN_OUT = 1
+TOKEN_IN = 1
+ID_OUT = 1
+ID_IN = 1
 # B = message size (bytes)
 B = 256
 N_MAX = 1<<20
@@ -81,34 +81,34 @@ if epc_gib < 3.65 or free_gib < 20:
 """)
         sys.exit(1)
 
-## Figure 5 Public
-PRIVATE_ROUTE = False
+## Figure 7 ID channel
+TOKEN_CHANNEL = False
 N = [n for n in [1<<15, 1<<16, 1<<17, 1<<18, 1<<19, 1<<20] if n <= N_MAX]
 M = [4]
 T = [4]
 
-run_exp(LOG_FOLDER, PRIVATE_ROUTE, NUM_EPOCHS, N, M, T, B, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN)
+run_exp(LOG_FOLDER, TOKEN_CHANNEL, NUM_EPOCHS, N, M, T, B, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN)
 
-## Figure 6 Public
-PRIVATE_ROUTE = False
+## Figure 8 ID channel
+TOKEN_CHANNEL = False
 N = [N_MAX]
 M = [m for m in [72, 64, 48, 36, 32, 24, 16, 8, 6, 4] if m <= M_MAX]
 T = [1]
 
-run_exp(LOG_FOLDER, PRIVATE_ROUTE, NUM_EPOCHS, N, M, T, B, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN)
+run_exp(LOG_FOLDER, TOKEN_CHANNEL, NUM_EPOCHS, N, M, T, B, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN)
 
-## Figure 5 Private
-PRIVATE_ROUTE = True
+## Figure 7 Token channel
+TOKEN_CHANNEL = True
 N = [n for n in [1<<15, 1<<16, 1<<17, 1<<18, 1<<19, 1<<20] if n <= N_MAX]
 M = [4]
 T = [4]
 
-run_exp(LOG_FOLDER, PRIVATE_ROUTE, NUM_EPOCHS, N, M, T, B, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN)
+run_exp(LOG_FOLDER, TOKEN_CHANNEL, NUM_EPOCHS, N, M, T, B, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN)
 
-## Figure 6 Private
-PRIVATE_ROUTE = True
+## Figure 8 Token channel
+TOKEN_CHANNEL = True
 N = [N_MAX]
 M = [m for m in [72, 64, 48, 36, 32, 24, 16, 8, 6, 4] if m <= M_MAX]
 T = [1]
 
-run_exp(LOG_FOLDER, PRIVATE_ROUTE, NUM_EPOCHS, N, M, T, B, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN)
+run_exp(LOG_FOLDER, TOKEN_CHANNEL, NUM_EPOCHS, N, M, T, B, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN)

+ 20 - 20
run_experiments.py

@@ -27,27 +27,27 @@ def epoch_time_estimate(n, m, t, b):
     etime_precompute = 1 * math.ceil(clients_per_server/100000)
 
     # Costs for Waksman network precompute
-    # Public routing needs 7 WN, private routing needs 3 WNs
+    # The ID channel needs 6 WNs, the token channel needs 3 WNs
     etime_precompute *=5
 
     # Client time:
     # Takes about 30 sec for handling 2^20 clients
     etime_client = 3 * math.ceil(clients_per_server/50000)
-    if(m==2 or m==3):
+    if m==2 or m==3:
         etime_client += 60
 
     etime = etime_base + etime_precompute + etime_route_compute + etime_client
     return int(etime)
 
 
-def run_exp(LOG_FOLDER, PRIVATE_ROUTE, NUM_EPOCHS, N, M, T, B, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN):
+def run_exp(LOG_FOLDER, TOKEN_CHANNEL, NUM_EPOCHS, N, M, T, B, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN):
 
     if not os.path.exists(LOG_FOLDER):
         os.mkdir(LOG_FOLDER)
-    if(PRIVATE_ROUTE):
-        LOG_FOLDER = LOG_FOLDER + "Private/"
+    if TOKEN_CHANNEL:
+        LOG_FOLDER = LOG_FOLDER + "Token/"
     else:
-        LOG_FOLDER = LOG_FOLDER + "Public/"
+        LOG_FOLDER = LOG_FOLDER + "ID/"
     if not os.path.exists(LOG_FOLDER):
         os.mkdir(LOG_FOLDER)
 
@@ -78,45 +78,45 @@ def run_exp(LOG_FOLDER, PRIVATE_ROUTE, NUM_EPOCHS, N, M, T, B, PRIV_OUT, PRIV_IN
 """ % (m,t))
                         continue
                     num_WN_to_precompute = 0
-                    if(PRIVATE_ROUTE):
+                    if TOKEN_CHANNEL:
                         num_WN_to_precompute = 2 * 3
                     else:
                         num_WN_to_precompute = 2 * 6
 
                     # Make the correct output folder for diagnostic/experiment
                     experiment_name = str(n) + "_" + str(m) + "_" + str(t) + "_" + str(b) + "/"
-                    if(run == "diagnostic"):
+                    if run == "diagnostic":
                         log_subfolder = DIAGNOSTIC_FOLDER
-                    elif(run == "experiment"):
+                    elif run == "experiment":
                         log_subfolder = LOG_FOLDER
                     log_subfolder = log_subfolder + experiment_name
                     if not os.path.exists(log_subfolder):
                         os.mkdir(log_subfolder)
 
-                    if(run == "diagnostic"):
+                    if run == "diagnostic":
                         print("\n\n   Running %s diagnostic t = %d, m = %d, n = %d \n\n" %
-                            ("private routing" if PRIVATE_ROUTE else "public routing", t, m, n))
+                            ("token channel" if TOKEN_CHANNEL else "ID channel", t, m, n))
                         # Manifest generated by diagnostic can be reused by the actual experiment
-                        generate_manifest(n, m, t, b, PRIVATE_ROUTE, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN)
-                        generate_config(n, m, t, b, PRIVATE_ROUTE, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN, num_WN_to_precompute)
+                        generate_manifest(n, m, t, b, TOKEN_CHANNEL, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN)
+                        generate_config(n, m, t, b, TOKEN_CHANNEL, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN, num_WN_to_precompute)
                         epoch_param = epoch_time_estimate(n, m, t, b)
-                    elif(run == "experiment"):
+                    elif run == "experiment":
                         #print("Waiting for 2 mins to reset sockets")
                         #time.sleep(120)
                         #num_sizes, pwn_max, epoch_max, scm_max = parse_output_logs(DIAGNOSTIC_FOLDER, experiment_name)
                         #print("From logs_to_csv: num_sizes = %d, pwn_max = %f, epoch_max = %f, scm_max = %f"
                         #% (num_sizes, pwn_max, epoch_max, scm_max))
                         print("\n\n   Running %s experiment t = %d, m = %d, n = %d \n\n" %
-                            ("private routing" if PRIVATE_ROUTE else "public routing", t, m, n))
+                            ("token channel" if TOKEN_CHANNEL else "ID channel", t, m, n))
                         #num_WN_to_precompute = math.ceil((num_sizes * pwn_max)/epoch_max)
                         #print("num_WN_to_precompute = %d" %(num_WN_to_precompute))
-                        #if(num_WN_to_precompute < 2 * num_sizes):
+                        #if num_WN_to_precompute < 2 * num_sizes:
                         #    num_WN_to_precompute = 2 * num_sizes
                         #print("num_WN_to_precompute (pushed up to min 2 sets) = %d" %(num_WN_to_precompute))
                         #epoch_param = math.ceil(epoch_max + 10 * m * scm_max)
-                        generate_manifest(n, m, t, b, PRIVATE_ROUTE, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN)
+                        generate_manifest(n, m, t, b, TOKEN_CHANNEL, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN)
                         epoch_param = epoch_time_estimate(n, m, t, b)
-                        generate_config(n, m, t, b, PRIVATE_ROUTE, PRIV_IN, PUB_OUT, PUB_IN, num_WN_to_precompute)
+                        generate_config(n, m, t, b, TOKEN_CHANNEL, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN, num_WN_to_precompute)
 
                     # Either estimate from epoch_time_estimate for diagnostic
                     # or the one we got from diagnostic run
@@ -140,11 +140,11 @@ def run_exp(LOG_FOLDER, PRIVATE_ROUTE, NUM_EPOCHS, N, M, T, B, PRIV_OUT, PRIV_IN
                     slaunch.append("-d")
                     slaunch.append(str(epoch_wait_time))
                     slaunch.append("-e")
-                    if(run == "experiment"):
+                    if run == "experiment":
                         slaunch.append(str(NUM_EPOCHS))
                     else:
                         slaunch.append(str(NUM_DIAGNOSTIC_EPOCHS))
-                    if(run == "experiment"):
+                    if run == "experiment":
                         slaunch.append("-w")
                         slaunch.append(str(num_WN_to_precompute))
 

+ 6 - 6
short_exp.py

@@ -5,16 +5,16 @@ from run_experiments import run_exp
 LOG_FOLDER = "Short_test/"
 
 NUM_EPOCHS = 3
-PRIV_OUT = 1
-PRIV_IN = 1
-PUB_OUT = 1
-PUB_IN = 1
+TOKEN_OUT = 1
+TOKEN_IN = 1
+ID_OUT = 1
+ID_IN = 1
 # B = message size (bytes)
 B = 256
 
-PRIVATE_ROUTE = False
+TOKEN_CHANNEL = False
 N = [1<<7]
 M = [4]
 T = [1]
 
-run_exp(LOG_FOLDER, PRIVATE_ROUTE, NUM_EPOCHS, N, M, T, B, PRIV_OUT, PRIV_IN, PUB_OUT, PUB_IN)
+run_exp(LOG_FOLDER, TOKEN_CHANNEL, NUM_EPOCHS, N, M, T, B, TOKEN_OUT, TOKEN_IN, ID_OUT, ID_IN)