|
|
@@ -16,17 +16,19 @@ The notation follows that of the paper "Hyphae: Social Secret Sharing"
|
|
|
#![allow(non_snake_case)]
|
|
|
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
-use ed25519_dalek::{Signature, SignatureError, SigningKey, Verifier, VerifyingKey};
|
|
|
+use ed25519_dalek::{Signature, SignatureError, Signer, SigningKey, Verifier, VerifyingKey};
|
|
|
use subtle::ConstantTimeEq;
|
|
|
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
-use chrono::{DateTime, Utc};
|
|
|
+use chrono::{DateTime, Duration, Utc};
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
use cmz::*;
|
|
|
use curve25519_dalek::ristretto::RistrettoPoint as G;
|
|
|
use group::Group;
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
-use rand::rngs::OsRng;
|
|
|
+use rand::{rngs::OsRng, Rng};
|
|
|
+#[cfg(feature = "bridgeauth")]
|
|
|
+use std::collections::HashMap;
|
|
|
type Scalar = <G as Group>::Scalar;
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
use sha2::Sha512;
|
|
|
@@ -35,6 +37,7 @@ pub mod bridge_table;
|
|
|
pub mod dup_filter;
|
|
|
pub mod lox_creds;
|
|
|
pub mod migration_table;
|
|
|
+pub mod mock_auth;
|
|
|
pub mod proto {
|
|
|
pub mod blockage_migration;
|
|
|
pub mod check_blockage;
|
|
|
@@ -50,9 +53,9 @@ pub mod proto {
|
|
|
}
|
|
|
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
-use bridge_table::BridgeTable;
|
|
|
-// BridgeLine, EncryptedBucket, MAX_BRIDGES_PER_BUCKET, MIN_BUCKET_REACHABILITY,
|
|
|
-//};
|
|
|
+use bridge_table::{
|
|
|
+ BridgeLine, BridgeTable, EncryptedBucket, MAX_BRIDGES_PER_BUCKET, MIN_BUCKET_REACHABILITY,
|
|
|
+};
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
use lox_creds::*;
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
@@ -61,6 +64,52 @@ use migration_table::{MigrationTable, MigrationType};
|
|
|
use serde::{Deserialize, Serialize};
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
use std::collections::HashSet;
|
|
|
+#[cfg(any(feature = "bridgeauth", test))]
|
|
|
+use thiserror::Error;
|
|
|
+
|
|
|
+// EXPIRY_DATE is set to EXPIRY_DATE days for open-entry and blocked buckets in order to match
|
|
|
+// the expiry date for Lox credentials. This particular value (EXPIRY_DATE) is chosen because
|
|
|
+// values that are 2^k − 1 make range proofs more efficient, but this can be changed to any value
|
|
|
+pub const EXPIRY_DATE: u32 = 511;
|
|
|
+
|
|
|
+/// ReplaceSuccess sends a signal to the lox-distributor to inform
|
|
|
+/// whether or not a bridge was successfully replaced
|
|
|
+#[derive(PartialEq, Eq)]
|
|
|
+#[cfg(feature = "bridgeauth")]
|
|
|
+pub enum ReplaceSuccess {
|
|
|
+ NotFound = 0,
|
|
|
+ NotReplaced = 1,
|
|
|
+ Replaced = 2,
|
|
|
+ Removed = 3,
|
|
|
+}
|
|
|
+
|
|
|
+/// This error is thrown if the number of buckets/keys in the bridge table
|
|
|
+/// exceeds u32 MAX. It is unlikely this error will ever occur.
|
|
|
+#[derive(Error, Debug)]
|
|
|
+#[cfg(feature = "bridgeauth")]
|
|
|
+pub enum NoAvailableIDError {
|
|
|
+ #[error("Find key exhausted with no available index found!")]
|
|
|
+ ExhaustedIndexer,
|
|
|
+}
|
|
|
+
|
|
|
+/// This error is thrown after the MAX_DAILY_BRIDGES threshold for bridges
|
|
|
+/// distributed in a day has been reached
|
|
|
+#[derive(Error, Debug)]
|
|
|
+#[cfg(any(feature = "bridgeauth", test))]
|
|
|
+pub enum OpenInvitationError {
|
|
|
+ #[error("The maximum number of bridges has already been distributed today, please try again tomorrow!")]
|
|
|
+ ExceededMaxBridges,
|
|
|
+
|
|
|
+ #[error("There are no bridges available for open invitations.")]
|
|
|
+ NoBridgesAvailable,
|
|
|
+}
|
|
|
+
|
|
|
+#[derive(Error, Debug)]
|
|
|
+#[cfg(feature = "bridgeauth")]
|
|
|
+pub enum BridgeTableError {
|
|
|
+ #[error("The bucket corresponding to key {0} was not in the bridge table")]
|
|
|
+ MissingBucket(u32),
|
|
|
+}
|
|
|
|
|
|
/// Number of times a given invitation is ditributed
|
|
|
pub const OPENINV_K: u32 = 10;
|
|
|
@@ -151,6 +200,86 @@ impl BridgeDb {
|
|
|
daily_bridges_distributed: 0,
|
|
|
}
|
|
|
}
|
|
|
+
|
|
|
+ pub fn openinv_length(&mut self) -> usize {
|
|
|
+ self.openinv_buckets.len()
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Rotate Open Invitation keys
|
|
|
+ pub fn rotate_open_inv_keys(&mut self) -> VerifyingKey {
|
|
|
+ let mut csprng = OsRng {};
|
|
|
+ self.keypair = SigningKey::generate(&mut csprng);
|
|
|
+ self.pubkey = self.keypair.verifying_key();
|
|
|
+ self.pubkey
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Insert an open-invitation bucket into the set
|
|
|
+ pub fn insert_openinv(&mut self, bucket: u32) {
|
|
|
+ self.openinv_buckets.insert(bucket);
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Remove an open-invitation bucket from the set
|
|
|
+ pub fn remove_openinv(&mut self, bucket: &u32) {
|
|
|
+ self.openinv_buckets.remove(bucket);
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Remove open invitation and/or otherwise distributed buckets that have
|
|
|
+ /// become blocked or are expired to free up the index for a new bucket
|
|
|
+ pub fn remove_blocked_or_expired_buckets(&mut self, bucket: &u32) {
|
|
|
+ if self.openinv_buckets.contains(bucket) {
|
|
|
+ println!("Removing a bucket that has not been distributed yet!");
|
|
|
+ self.openinv_buckets.remove(bucket);
|
|
|
+ } else if self.distributed_buckets.contains(bucket) {
|
|
|
+ self.distributed_buckets.retain(|&x| x != *bucket);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Mark a bucket as distributed
|
|
|
+ pub fn mark_distributed(&mut self, bucket: u32) {
|
|
|
+ self.distributed_buckets.push(bucket);
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Produce an open invitation such that the next k users, where k is <
|
|
|
+ /// OPENINV_K, will receive the same open invitation bucket
|
|
|
+ /// chosen randomly from the set of open-invitation buckets.
|
|
|
+ pub fn invite(&mut self) -> Result<[u8; OPENINV_LENGTH], OpenInvitationError> {
|
|
|
+ let mut res: [u8; OPENINV_LENGTH] = [0; OPENINV_LENGTH];
|
|
|
+ let mut rng = rand::rngs::OsRng;
|
|
|
+ // Choose a random invitation id (a Scalar) and serialize it
|
|
|
+ let id = Scalar::random(&mut rng);
|
|
|
+ res[0..32].copy_from_slice(&id.to_bytes());
|
|
|
+ let bucket_num: u32;
|
|
|
+ if Utc::now() >= (self.today + Duration::days(1)) {
|
|
|
+ self.today = Utc::now();
|
|
|
+ self.daily_bridges_distributed = 0;
|
|
|
+ }
|
|
|
+ if self.daily_bridges_distributed < MAX_DAILY_BRIDGES {
|
|
|
+ if self.current_k < OPENINV_K && !self.distributed_buckets.is_empty() {
|
|
|
+ bucket_num = *self.distributed_buckets.last().unwrap();
|
|
|
+ self.current_k += 1;
|
|
|
+ } else {
|
|
|
+ if self.openinv_buckets.is_empty() {
|
|
|
+ return Err(OpenInvitationError::NoBridgesAvailable);
|
|
|
+ }
|
|
|
+ // Choose a random bucket number (from the set of open
|
|
|
+ // invitation buckets) and serialize it
|
|
|
+ let openinv_vec: Vec<&u32> = self.openinv_buckets.iter().collect();
|
|
|
+ bucket_num = *openinv_vec[rng.gen_range(0..openinv_vec.len())];
|
|
|
+ self.mark_distributed(bucket_num);
|
|
|
+ self.remove_openinv(&bucket_num);
|
|
|
+ self.current_k = 1;
|
|
|
+ self.daily_bridges_distributed += 1;
|
|
|
+ }
|
|
|
+ res[32..(32 + 4)].copy_from_slice(&bucket_num.to_le_bytes());
|
|
|
+ // Sign the first 36 bytes and serialize it
|
|
|
+ let sig = self.keypair.sign(&res[0..(32 + 4)]);
|
|
|
+ res[(32 + 4)..].copy_from_slice(&sig.to_bytes());
|
|
|
+ Ok(res)
|
|
|
+ } else {
|
|
|
+ Err(OpenInvitationError::ExceededMaxBridges)
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
/// Verify an open invitation. Returns the invitation id and the
|
|
|
/// bucket number if the signature checked out. It is up to the
|
|
|
/// caller to then check that the invitation id has not been used
|
|
|
@@ -178,7 +307,7 @@ impl BridgeDb {
|
|
|
|
|
|
/// The bridge authority. This will typically be a singleton object.
|
|
|
#[cfg(feature = "bridgeauth")]
|
|
|
-#[derive(Debug, Serialize, Deserialize)]
|
|
|
+#[derive(Default, Debug, Serialize, Deserialize)]
|
|
|
pub struct BridgeAuth {
|
|
|
/// The private key for the main Lox credential
|
|
|
lox_priv: CMZPrivkey<G>,
|
|
|
@@ -271,6 +400,512 @@ impl BridgeAuth {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+ /// Insert a set of open invitation bridges.
|
|
|
+ ///
|
|
|
+ /// Each of the bridges will be given its own open invitation
|
|
|
+ /// bucket, and the BridgeDb will be informed. A single bucket
|
|
|
+ /// containing all of the bridges will also be created, with a trust
|
|
|
+ /// upgrade migration from each of the single-bridge buckets.
|
|
|
+ pub fn add_openinv_bridges(
|
|
|
+ &mut self,
|
|
|
+ bridges: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
|
|
|
+ bdb: &mut BridgeDb,
|
|
|
+ ) -> Result<(), NoAvailableIDError> {
|
|
|
+ let bindex = self.find_next_available_key(bdb)?;
|
|
|
+ self.bridge_table.new_bucket(bindex, &bridges);
|
|
|
+ let mut single = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
|
|
|
+ for b in bridges.iter() {
|
|
|
+ let sindex = self.find_next_available_key(bdb)?;
|
|
|
+ single[0] = *b;
|
|
|
+ self.bridge_table.new_bucket(sindex, &single);
|
|
|
+ self.bridge_table.open_inv_keys.push((sindex, self.today()));
|
|
|
+ bdb.insert_openinv(sindex);
|
|
|
+ self.trustup_migration_table.table.insert(sindex, bindex);
|
|
|
+ }
|
|
|
+ Ok(())
|
|
|
+ }
|
|
|
+
|
|
|
+ pub fn is_empty(&self) -> bool {
|
|
|
+ self.bridge_table.buckets.is_empty()
|
|
|
+ }
|
|
|
+
|
|
|
+ pub fn reachable_length(&self) -> usize {
|
|
|
+ self.bridge_table.reachable.len()
|
|
|
+ }
|
|
|
+
|
|
|
+ pub fn unallocated_length(&self) -> usize {
|
|
|
+ self.bridge_table.unallocated_bridges.len()
|
|
|
+ }
|
|
|
+
|
|
|
+ pub fn spares_length(&self) -> usize {
|
|
|
+ self.bridge_table.spares.len()
|
|
|
+ }
|
|
|
+
|
|
|
+ pub fn openinv_length(&self, bdb: &mut BridgeDb) -> usize {
|
|
|
+ bdb.openinv_length()
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Insert a hot spare bucket of bridges
|
|
|
+ pub fn add_spare_bucket(
|
|
|
+ &mut self,
|
|
|
+ bucket: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
|
|
|
+ bdb: &mut BridgeDb,
|
|
|
+ ) -> Result<(), NoAvailableIDError> {
|
|
|
+ let index = self.find_next_available_key(bdb)?;
|
|
|
+ self.bridge_table.new_bucket(index, &bucket);
|
|
|
+ self.bridge_table.spares.insert(index);
|
|
|
+ Ok(())
|
|
|
+ }
|
|
|
+
|
|
|
+ /// When syncing the Lox bridge table with rdsys, this function returns any bridges
|
|
|
+ /// that are found in the Lox bridge table that are not found in the Vector
|
|
|
+ /// of bridges received from rdsys through the Lox distributor.
|
|
|
+ pub fn find_and_remove_unaccounted_for_bridges(
|
|
|
+ &mut self,
|
|
|
+ accounted_for_bridges: Vec<u64>,
|
|
|
+ ) -> Vec<BridgeLine> {
|
|
|
+ let mut unaccounted_for: Vec<BridgeLine> = Vec::new();
|
|
|
+ for (k, _v) in self.bridge_table.reachable.clone() {
|
|
|
+ if !accounted_for_bridges.contains(&k.uid_fingerprint) {
|
|
|
+ unaccounted_for.push(k);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ unaccounted_for
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Allocate single left over bridges to an open invitation bucket
|
|
|
+ pub fn allocate_bridges(
|
|
|
+ &mut self,
|
|
|
+ distributor_bridges: &mut Vec<BridgeLine>,
|
|
|
+ bdb: &mut BridgeDb,
|
|
|
+ ) {
|
|
|
+ while let Some(bridge) = distributor_bridges.pop() {
|
|
|
+ self.bridge_table.unallocated_bridges.push(bridge);
|
|
|
+ }
|
|
|
+ while self.bridge_table.unallocated_bridges.len() >= MAX_BRIDGES_PER_BUCKET {
|
|
|
+ let mut bucket = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
|
|
|
+ for bridge in bucket.iter_mut() {
|
|
|
+ *bridge = self.bridge_table.unallocated_bridges.pop().unwrap();
|
|
|
+ }
|
|
|
+ match self.add_openinv_bridges(bucket, bdb) {
|
|
|
+ Ok(_) => continue,
|
|
|
+ Err(e) => {
|
|
|
+ println!("Error: {:?}", e);
|
|
|
+ for bridge in bucket {
|
|
|
+ self.bridge_table.unallocated_bridges.push(bridge);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // Update the details of a bridge in the bridge table. This assumes that the IP and Port
|
|
|
+ // of a given bridge remains the same and thus can be updated.
|
|
|
+ // First we must retrieve the list of reachable bridges, then we must search for any matching our partial key
|
|
|
+ // which will include the IP and Port. Finally we can replace the original bridge with the updated bridge.
|
|
|
+ // Returns true if the bridge has successfully updated
|
|
|
+ pub fn bridge_update(&mut self, bridge: &BridgeLine) -> bool {
|
|
|
+ let mut res: bool = false; //default False to assume that update failed
|
|
|
+ let reachable_bridges = self.bridge_table.reachable.clone();
|
|
|
+ for reachable_bridge in reachable_bridges {
|
|
|
+ if reachable_bridge.0.uid_fingerprint == bridge.uid_fingerprint {
|
|
|
+ // Now we must remove the old bridge from the table and insert the new bridge in its place
|
|
|
+ // i.e., in the same bucket and with the same permissions.
|
|
|
+ let positions = self.bridge_table.reachable.get(&reachable_bridge.0);
|
|
|
+ if let Some(v) = positions {
|
|
|
+ for (bucketnum, offset) in v.iter() {
|
|
|
+ let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
|
|
|
+ Some(bridgelines) => *bridgelines,
|
|
|
+ None => return res,
|
|
|
+ };
|
|
|
+ assert!(bridgelines[*offset] == reachable_bridge.0);
|
|
|
+ bridgelines[*offset] = *bridge;
|
|
|
+ self.bridge_table.buckets.insert(*bucketnum, bridgelines);
|
|
|
+ /* #[cfg(feature = "blockage-detection")]
|
|
|
+ let (fingerprint_str, bucket) =
|
|
|
+ self.get_tp_bucket_and_fingerprint(bridge, bucketnum);
|
|
|
+ // Add bucket to existing entry or add new entry
|
|
|
+ #[cfg(feature = "blockage-detection")]
|
|
|
+ match self.tp_bridge_infos.get_mut(&fingerprint_str) {
|
|
|
+ Some(info) => {
|
|
|
+ info.buckets.insert(bucket);
|
|
|
+ }
|
|
|
+ None => {
|
|
|
+ let mut buckets = HashSet::<Scalar>::new();
|
|
|
+ buckets.insert(bucket);
|
|
|
+ self.tp_bridge_infos.insert(
|
|
|
+ fingerprint_str,
|
|
|
+ BridgeVerificationInfo {
|
|
|
+ bridge_line: *bridge,
|
|
|
+ buckets,
|
|
|
+ },
|
|
|
+ );
|
|
|
+ }
|
|
|
+ }; */
|
|
|
+ if !self.bridge_table.buckets.contains_key(bucketnum) {
|
|
|
+ return res;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ res = true;
|
|
|
+ } else {
|
|
|
+ return res;
|
|
|
+ }
|
|
|
+ // We must also remove the old bridge from the reachable bridges table
|
|
|
+ // and add the new bridge
|
|
|
+ self.bridge_table.reachable.remove(&reachable_bridge.0);
|
|
|
+ self.bridge_table
|
|
|
+ .reachable
|
|
|
+ .insert(*bridge, reachable_bridge.1);
|
|
|
+ return res;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ // Also check the unallocated bridges just in case there is a bridge that should be updated there
|
|
|
+ let unallocated_bridges = self.bridge_table.unallocated_bridges.clone();
|
|
|
+ for (i, unallocated_bridge) in unallocated_bridges.iter().enumerate() {
|
|
|
+ if unallocated_bridge.uid_fingerprint == bridge.uid_fingerprint {
|
|
|
+ // Now we must remove the old bridge from the unallocated bridges and insert the new bridge
|
|
|
+ // in its place
|
|
|
+ self.bridge_table.unallocated_bridges.remove(i);
|
|
|
+ self.bridge_table.unallocated_bridges.push(*bridge);
|
|
|
+ res = true;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ // If this is returned, we assume that the bridge wasn't found in the bridge table
|
|
|
+ // and therefore should be treated as a "new bridge"
|
|
|
+ res
|
|
|
+ }
|
|
|
+
|
|
|
+ // Repurpose a bucket of spares into unallocated bridges
|
|
|
+ pub fn dissolve_spare_bucket(&mut self, key: u32) -> Result<(), BridgeTableError> {
|
|
|
+ self.bridge_table.spares.remove(&key);
|
|
|
+ // Get the actual bridges from the spare bucket
|
|
|
+ let spare_bucket = self
|
|
|
+ .bridge_table
|
|
|
+ .buckets
|
|
|
+ .remove(&key)
|
|
|
+ .ok_or(BridgeTableError::MissingBucket(key))?;
|
|
|
+ for bridge in spare_bucket.iter() {
|
|
|
+ self.bridge_table.unallocated_bridges.push(*bridge);
|
|
|
+ // Mark bucket as unreachable while it is unallocated
|
|
|
+ self.bridge_table.reachable.remove(bridge);
|
|
|
+ }
|
|
|
+ self.bridge_table.keys.remove(&key);
|
|
|
+ self.bridge_table.recycleable_keys.push(key);
|
|
|
+ Ok(())
|
|
|
+ }
|
|
|
+
|
|
|
+ // Removes an unallocated bridge and returns it if it was present
|
|
|
+ pub fn remove_unallocated(&mut self, bridge: &BridgeLine) -> Option<BridgeLine> {
|
|
|
+ // #[cfg(feature = "blockage-detection")]
|
|
|
+ // let fingerprint_str = self.fingerprint_hasher(bridge.unhashed_fingerprint);
|
|
|
+ match self
|
|
|
+ .bridge_table
|
|
|
+ .unallocated_bridges
|
|
|
+ .iter()
|
|
|
+ .position(|x| x == bridge)
|
|
|
+ {
|
|
|
+ Some(index) => Some({
|
|
|
+ // #[cfg(feature = "blockage-detection")]
|
|
|
+ // self.tp_bridge_infos.remove_entry(&fingerprint_str);
|
|
|
+ self.bridge_table.unallocated_bridges.swap_remove(index)
|
|
|
+ }),
|
|
|
+ None => None,
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Attempt to remove a bridge that is failing tests and replace it with a bridge from
|
|
|
+ /// available_bridge or from a spare bucket
|
|
|
+ pub fn bridge_replace(
|
|
|
+ &mut self,
|
|
|
+ bridge: &BridgeLine,
|
|
|
+ available_bridge: Option<BridgeLine>,
|
|
|
+ ) -> ReplaceSuccess {
|
|
|
+ let reachable_bridges = &self.bridge_table.reachable.clone();
|
|
|
+ let Some(positions) = reachable_bridges.get(bridge) else {
|
|
|
+ match self.remove_unallocated(bridge) {
|
|
|
+ Some(_) => {
|
|
|
+ return ReplaceSuccess::Removed;
|
|
|
+ }
|
|
|
+ None => {
|
|
|
+ return ReplaceSuccess::NotFound;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ };
|
|
|
+ // Check if the bridge is in a spare bucket first, if it is, dissolve the bucket
|
|
|
+ if let Some(spare) = self
|
|
|
+ .bridge_table
|
|
|
+ .spares
|
|
|
+ .iter()
|
|
|
+ .find(|x| positions.iter().any(|(bucketnum, _)| &bucketnum == x))
|
|
|
+ .cloned()
|
|
|
+ {
|
|
|
+ let Ok(_) = self.dissolve_spare_bucket(spare) else {
|
|
|
+ return ReplaceSuccess::NotReplaced;
|
|
|
+ };
|
|
|
+ // Next Check if the bridge is in the unallocated bridges and remove the bridge if so
|
|
|
+ // Bridges in spare buckets should have been moved to the unallocated bridges
|
|
|
+ match self.remove_unallocated(bridge) {
|
|
|
+ Some(_) => {
|
|
|
+ return ReplaceSuccess::Removed;
|
|
|
+ }
|
|
|
+ None => {
|
|
|
+ return ReplaceSuccess::NotFound;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ // select replacement:
|
|
|
+ // - first try the given bridge
|
|
|
+ // - second try to pick one from the set of available bridges
|
|
|
+ // - third dissolve a spare bucket to create more available bridges
|
|
|
+ let Some(replacement) = available_bridge.or_else(|| {
|
|
|
+ self.bridge_table.unallocated_bridges.pop().or_else(|| {
|
|
|
+ let spare = self
|
|
|
+ .bridge_table
|
|
|
+ .spares
|
|
|
+ .iter()
|
|
|
+ // in case bridge is a spare, avoid replacing it with itself
|
|
|
+ .find(|x| !positions.iter().any(|(bucketnum, _)| &bucketnum == x))
|
|
|
+ .cloned()?;
|
|
|
+ let Ok(_) = self.dissolve_spare_bucket(spare) else {
|
|
|
+ return None;
|
|
|
+ };
|
|
|
+ self.bridge_table.unallocated_bridges.pop()
|
|
|
+ })
|
|
|
+ }) else {
|
|
|
+ // If there are no available bridges that can be assigned here, the only thing
|
|
|
+ // that can be done is return an indication that updating the gone bridge
|
|
|
+ // didn't work.
|
|
|
+ // In this case, we do not mark the bridge as unreachable or remove it from the
|
|
|
+ // reachable bridges so that we can still find it when a new bridge does become available
|
|
|
+ println!("No available bridges");
|
|
|
+ return ReplaceSuccess::NotReplaced;
|
|
|
+ };
|
|
|
+ for (bucketnum, offset) in positions.iter() {
|
|
|
+ let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
|
|
|
+ Some(bridgelines) => *bridgelines,
|
|
|
+ None => return ReplaceSuccess::NotFound,
|
|
|
+ };
|
|
|
+ assert!(bridgelines[*offset] == *bridge);
|
|
|
+ bridgelines[*offset] = replacement;
|
|
|
+ self.bridge_table.buckets.insert(*bucketnum, bridgelines);
|
|
|
+ // Remove the bridge from the reachable bridges and add new bridge
|
|
|
+ self.bridge_table
|
|
|
+ .reachable
|
|
|
+ .insert(replacement, positions.clone());
|
|
|
+ // Remove the bridge from the bucket
|
|
|
+ self.bridge_table.reachable.remove(bridge);
|
|
|
+ }
|
|
|
+ ReplaceSuccess::Replaced
|
|
|
+ }
|
|
|
+
|
|
|
+ /* pub fn get_bridge_verification_info(
|
|
|
+ &mut self,
|
|
|
+ bridge_str: &String,
|
|
|
+ ) -> Option<&BridgeVerificationInfo> {
|
|
|
+ self.tp_bridge_infos.get(bridge_str)
|
|
|
+ }
|
|
|
+
|
|
|
+ // Remove Bridge Info for blocked bridge and return the bridgeline with the given fingerprint
|
|
|
+ pub fn block_by_string(&mut self, bridge_str: &String) -> Option<BridgeLine> {
|
|
|
+ if let Some(bridge_verification_info) = self.tp_bridge_infos.remove(bridge_str) {
|
|
|
+ return Some(bridge_verification_info.bridge_line);
|
|
|
+ }
|
|
|
+ None
|
|
|
+ }
|
|
|
+ */
|
|
|
+
|
|
|
+ /// Mark a bridge as blocked
|
|
|
+ ///
|
|
|
+ /// This bridge will be removed from each of the buckets that
|
|
|
+ /// contains it. If any of those are open-invitation buckets, the
|
|
|
+ /// trust upgrade migration for that bucket will be removed and the
|
|
|
+ /// BridgeDb will be informed to stop handing out that bridge. If
|
|
|
+ /// any of those are trusted buckets where the number of reachable
|
|
|
+ /// bridges has fallen below the threshold, a blockage migration
|
|
|
+ /// from that bucket to a spare bucket will be added, and the spare
|
|
|
+ /// bucket will be removed from the list of hot spares. In
|
|
|
+ /// addition, if the blocked bucket was the _target_ of a blockage
|
|
|
+ /// migration, change the target to the new (formerly spare) bucket.
|
|
|
+ /// Returns true if sucessful, or false if it needed a hot spare but
|
|
|
+ /// there was none available.
|
|
|
+ pub fn bridge_blocked(&mut self, bridge: &BridgeLine, bdb: &mut BridgeDb) -> bool {
|
|
|
+ let mut res: bool = true;
|
|
|
+ if self.remove_unallocated(bridge).is_some() {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if let Some(positions) = self.bridge_table.reachable.get(bridge) {
|
|
|
+ for (bucketnum, offset) in positions.iter() {
|
|
|
+ // Count how many bridges in this bucket are reachable
|
|
|
+ let mut bucket = match self.bridge_table.buckets.get(bucketnum) {
|
|
|
+ Some(bridgelines) => *bridgelines,
|
|
|
+ None => return false, // This should not happen
|
|
|
+ };
|
|
|
+ // Remove the bridge from the bucket
|
|
|
+ assert!(bucket[*offset] == *bridge);
|
|
|
+ bucket[*offset] = BridgeLine::default();
|
|
|
+
|
|
|
+ // If this is an open invitation bucket, there is only one bridge, remove bucket
|
|
|
+ if bdb.openinv_buckets.contains(bucketnum)
|
|
|
+ || bdb.distributed_buckets.contains(bucketnum)
|
|
|
+ {
|
|
|
+ bdb.remove_blocked_or_expired_buckets(bucketnum);
|
|
|
+ self.trustup_migration_table.table.remove(bucketnum);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ // If this bucket still has an acceptable number of bridges, continue
|
|
|
+ let numreachable = bucket
|
|
|
+ .iter()
|
|
|
+ .filter(|br| self.bridge_table.reachable.contains_key(br))
|
|
|
+ .count();
|
|
|
+ if numreachable != MIN_BUCKET_REACHABILITY {
|
|
|
+ // No
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Remove any trust upgrade migrations to this bucket
|
|
|
+ self.trustup_migration_table
|
|
|
+ .table
|
|
|
+ .retain(|_, &mut v| v != *bucketnum);
|
|
|
+
|
|
|
+ // If there are no spares, delete blockage migrations leading to this bucket
|
|
|
+ if self.bridge_table.spares.is_empty() {
|
|
|
+ res = false;
|
|
|
+ self.blockage_migration_table
|
|
|
+ .table
|
|
|
+ .retain(|_, &mut v| v != *bucketnum);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ // Get the first spare and remove it from the spares
|
|
|
+ // set.
|
|
|
+ let spare = *self.bridge_table.spares.iter().next().unwrap();
|
|
|
+ self.bridge_table.spares.remove(&spare);
|
|
|
+ self.bridge_table
|
|
|
+ .blocked_keys
|
|
|
+ .push((*bucketnum, self.today()));
|
|
|
+ // Add a blockage migration from this bucket to the spare
|
|
|
+ self.blockage_migration_table
|
|
|
+ .table
|
|
|
+ .insert(*bucketnum, spare);
|
|
|
+ // Change any blockage migrations with this bucket
|
|
|
+ // as the destination to the spare
|
|
|
+ for (_, v) in self.blockage_migration_table.table.iter_mut() {
|
|
|
+ if *v == *bucketnum {
|
|
|
+ *v = spare;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ self.bridge_table.reachable.remove(bridge);
|
|
|
+
|
|
|
+ res
|
|
|
+ }
|
|
|
+
|
|
|
+ // Since buckets are moved around in the bridge_table, finding a lookup key that
|
|
|
+ // does not overwrite existing bridges could become an issue. We keep a list
|
|
|
+ // of recycleable lookup keys from buckets that have been removed and prioritize
|
|
|
+ // this list before increasing the counter
|
|
|
+ fn find_next_available_key(&mut self, bdb: &mut BridgeDb) -> Result<u32, NoAvailableIDError> {
|
|
|
+ self.clean_up_expired_buckets(bdb);
|
|
|
+ if self.bridge_table.recycleable_keys.is_empty() {
|
|
|
+ let mut test_index = 1;
|
|
|
+ let mut test_counter = self.bridge_table.counter.wrapping_add(test_index);
|
|
|
+ let mut i = 0;
|
|
|
+ while self.bridge_table.buckets.contains_key(&test_counter) && i < 5000 {
|
|
|
+ test_index += 1;
|
|
|
+ test_counter = self.bridge_table.counter.wrapping_add(test_index);
|
|
|
+ i += 1;
|
|
|
+ if i == 5000 {
|
|
|
+ return Err(NoAvailableIDError::ExhaustedIndexer);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ self.bridge_table.counter = self.bridge_table.counter.wrapping_add(test_index);
|
|
|
+ Ok(self.bridge_table.counter)
|
|
|
+ } else {
|
|
|
+ Ok(self.bridge_table.recycleable_keys.pop().unwrap())
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // This function looks for and removes buckets so their indexes can be reused
|
|
|
+ // This should include buckets that have been blocked for a sufficiently long period
|
|
|
+ // that we no longer want to allow migration to, or else, open-entry buckets that
|
|
|
+ // have been unblocked long enough to become trusted and who's users' credentials
|
|
|
+ // would have expired (after EXPIRY_DATE)
|
|
|
+ pub fn clean_up_expired_buckets(&mut self, bdb: &mut BridgeDb) {
|
|
|
+ // First check if there are any blocked indexes that are old enough to be replaced
|
|
|
+ self.clean_up_blocked();
|
|
|
+ // Next do the same for open_invitations buckets
|
|
|
+ self.clean_up_open_entry(bdb);
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Cleans up expired blocked buckets
|
|
|
+ fn clean_up_blocked(&mut self) {
|
|
|
+ // If there are expired blockages, separate them from the fresh blockages
|
|
|
+ #[allow(clippy::type_complexity)]
|
|
|
+ let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
|
|
|
+ .bridge_table
|
|
|
+ .blocked_keys
|
|
|
+ .iter()
|
|
|
+ .partition(|&x| x.1 + EXPIRY_DATE < self.today());
|
|
|
+ for item in expired {
|
|
|
+ let key = item.0;
|
|
|
+ // check each single bridge line and ensure none are still marked as reachable.
|
|
|
+ // if any are still reachable, remove from reachable bridges.
|
|
|
+ // When syncing resources, we will likely have to reallocate this bridge but if it hasn't already been
|
|
|
+ // blocked, this might be fine?
|
|
|
+ let bridgelines = self.bridge_table.buckets.get(&key).unwrap();
|
|
|
+ for bridgeline in bridgelines {
|
|
|
+ // If the bridge hasn't been set to default, assume it's still reachable
|
|
|
+ if bridgeline.port > 0 {
|
|
|
+ // Move to unallocated bridges
|
|
|
+ self.bridge_table.unallocated_bridges.push(*bridgeline);
|
|
|
+ // Make sure bridge is removed from reachable bridges
|
|
|
+ self.bridge_table.reachable.remove(bridgeline);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ // Then remove the bucket and keys at the specified index
|
|
|
+ self.bridge_table.buckets.remove(&key);
|
|
|
+ self.bridge_table.keys.remove(&key);
|
|
|
+ //and add them to the recyclable keys
|
|
|
+ self.bridge_table.recycleable_keys.push(key);
|
|
|
+ // Remove the expired blocked bucket from the blockage migration table,
|
|
|
+ // assuming that anyone that has still not attempted to migrate from their
|
|
|
+ // blocked bridge after the EXPIRY_DATE probably doesn't still need to migrate.
|
|
|
+ self.blockage_migration_table.table.retain(|&k, _| k != key);
|
|
|
+ }
|
|
|
+ // Finally, update the blocked_keys vector to only include the fresh keys
|
|
|
+ self.bridge_table.blocked_keys = fresh
|
|
|
+ }
|
|
|
+
|
|
|
+ /// Cleans up expired open invitation buckets
|
|
|
+ fn clean_up_open_entry(&mut self, bdb: &mut BridgeDb) {
|
|
|
+ // Separate expired from fresh open invitation indexes
|
|
|
+ #[allow(clippy::type_complexity)]
|
|
|
+ let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
|
|
|
+ .bridge_table
|
|
|
+ .open_inv_keys
|
|
|
+ .iter()
|
|
|
+ .partition(|&x| x.1 + EXPIRY_DATE < self.today());
|
|
|
+ for item in expired {
|
|
|
+ let key = item.0;
|
|
|
+ // We should check that the items were actually distributed before they are removed
|
|
|
+ if !bdb.distributed_buckets.contains(&key) {
|
|
|
+ // TODO: Add prometheus metric for this?
|
|
|
+ println!("This bucket was not actually distributed!");
|
|
|
+ }
|
|
|
+ bdb.remove_blocked_or_expired_buckets(&key);
|
|
|
+ // Remove any trust upgrade migrations from this
|
|
|
+ // bucket
|
|
|
+ self.trustup_migration_table.table.retain(|&k, _| k != key);
|
|
|
+ self.bridge_table.buckets.remove(&key);
|
|
|
+ self.bridge_table.keys.remove(&key);
|
|
|
+ //and add them to the recyclable keys
|
|
|
+ self.bridge_table.recycleable_keys.push(key);
|
|
|
+ }
|
|
|
+ // update the open_inv_keys vector to only include the fresh keys
|
|
|
+ self.bridge_table.open_inv_keys = fresh
|
|
|
+ }
|
|
|
+
|
|
|
/// Get today's (real or simulated) date as u32
|
|
|
pub fn today(&self) -> u32 {
|
|
|
// We will not encounter negative Julian dates (~6700 years ago)
|
|
|
@@ -285,9 +920,22 @@ impl BridgeAuth {
|
|
|
pub fn today_date(&self) -> DateTime<Utc> {
|
|
|
Utc::now()
|
|
|
}
|
|
|
+
|
|
|
+ /// Get a reference to the encrypted bridge table.
|
|
|
+ ///
|
|
|
+ /// Be sure to call this function when you want the latest version
|
|
|
+ /// of the table, since it will put fresh Bucket Reachability
|
|
|
+ /// credentials in the buckets each day.
|
|
|
+ pub fn enc_bridge_table(&mut self) -> &HashMap<u32, EncryptedBucket> {
|
|
|
+ let today = self.today();
|
|
|
+ if self.bridge_table.date_last_enc != today {
|
|
|
+ self.bridge_table
|
|
|
+ .encrypt_table(today, &self.reachability_priv);
|
|
|
+ }
|
|
|
+ &self.bridge_table.encbuckets
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
-// Try to extract a u32 from a Scalar
|
|
|
pub fn scalar_u32(s: &Scalar) -> Option<u32> {
|
|
|
// Check that the top 28 bytes of the Scalar are 0
|
|
|
let sbytes: &[u8; 32] = s.as_bytes();
|