diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..8681c85 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,26 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug zkbtc", + "cargo": { + "args": [ + "build", + "--bin=zkbtc", + "--package=zkbitcoin" + ], + "filter": { + "name": "zkbtc", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + ] +} diff --git a/docs/mpc.md b/docs/mpc.md index c9e8ce4..b9276b0 100644 --- a/docs/mpc.md +++ b/docs/mpc.md @@ -68,7 +68,7 @@ pub struct LocalSigningTask { Members keep track of such signing tasks in a local hashmap: ```rust -signing_tasks: RwLock<HashMap<Txid, LocalSigningTask>> +signing_tasks: RwLock<CappedHashMap<Txid, LocalSigningTask>> ``` The commitments created at this point are sent back to the orchestrator: diff --git a/src/capped_hashmap.rs b/src/capped_hashmap.rs new file mode 100644 index 0000000..a9efd2f --- /dev/null +++ b/src/capped_hashmap.rs @@ -0,0 +1,188 @@ +use std::{ + cmp::Eq, + collections::{HashMap, VecDeque}, + hash::Hash, +}; + +use log::info; + +pub struct CappedHashMap<K, V> +where + K: Hash + Eq + Copy + Clone, +{ + max_size: usize, + inner: HashMap<K, V>, + last_items: VecDeque<K>, +} + +impl<K, V> CappedHashMap<K, V> +where + K: Hash + Eq + Copy + Clone, +{ + pub fn new(max_size: usize) -> Self { + Self { + max_size, + inner: HashMap::with_capacity(max_size), + last_items: VecDeque::with_capacity(max_size), + } + } + + fn log(&self) { + let count = self.last_items.len(); + + if count >= self.max_size * 90 / 100 { + info!("Over 90% full"); + } else if count >= self.max_size / 2 { + info!("Over 50% full"); + } else if count >= self.max_size / 4 { + info!("Over 25% full"); + } + } + + /// Inserts a new key-value pair into the collection. 
Return Some(key) where key is the + /// key that was removed when we reach the max capacity. Otherwise returns None. + pub fn add_entry(&mut self, k: K, v: V) -> Option<K> { + let mut ret = None; + let new_key = !self.inner.contains_key(&k); + + if new_key && self.last_items.len() >= self.max_size { + // remove the oldest item. We can safely unwrap because we know the last_items is not empty at this point + let key = self.last_items.pop_back().unwrap(); + assert!(self.remove(&key).is_some()); + + ret = Some(key); + } + + // replacing a value should not push any new items to last_items + if self.inner.insert(k, v).is_none() { + self.last_items.push_front(k); + } + + self.log(); + ret + } + + /// Removes a key from the map, returning the value at the key if the key was previously in the map. + pub fn remove(&mut self, k: &K) -> Option<V> { + let Some(v) = self.inner.remove(k) else { + return None; + }; + + self.last_items + .iter() + .position(|key| key == k) + .and_then(|pos| self.last_items.remove(pos)); + + Some(v) + } + + /// Returns a reference to the value corresponding to the key. + pub fn get(&self, k: &K) -> Option<&V> { + self.inner.get(k) + } + + /// Returns a mutable reference to the value corresponding to the key. + pub fn get_mut(&mut self, k: &K) -> Option<&mut V> { + self.inner.get_mut(k) + } + + /// Returns the number of elements in the collection + pub fn size(&self) -> usize { + self.inner.len() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_insert_into_non_full_collection() { + let mut col: CappedHashMap<u8, u8> = CappedHashMap::new(10); + col.add_entry(1, 1); + col.add_entry(2, 2); + col.add_entry(3, 3); + + assert_eq!(*col.get(&1).unwrap(), 1); + assert_eq!(*col.get(&2).unwrap(), 2); + assert_eq!(*col.get(&3).unwrap(), 3); + } + + #[test] + fn test_insert_should_return_removed_key() { + // The real capacity will be 14. 
Read here for how this is calculated https://stackoverflow.com/a/76114888/512783 + let mut col: CappedHashMap<u8, u8> = CappedHashMap::new(10); + + for i in 0..10 { + col.add_entry(i, i); + } + + for i in 10..30 { + // the nth oldest key will be removed + let key_removed = col.add_entry(i, i); + // our hashmap and VecDeque should never grow i.e. capacity doesn't change + assert_eq!(col.last_items.capacity(), 10); + + assert!(key_removed.is_some()); + assert_eq!(key_removed.unwrap(), i - 10); + assert_eq!(col.size(), 10); + } + + // Note that we should have the last 10 keys in the collection i.e. 20-29. All the previous + // were replaced by these new ones + for i in 0..20 { + assert!(col.get(&i).is_none()); + } + + // after cyclic inserts we still have a full capacity collection. We can remove one item... + assert!(col.remove(&20).is_some()); + assert_eq!(col.size(), 9); + + // ... and now inserting a new item will not replace any existing one + assert!(col.add_entry(31, 31).is_none()); + } + + #[test] + fn test_insert_duplicate() { + let mut col: CappedHashMap<u8, u8> = CappedHashMap::new(10); + + for i in 0..10 { + col.add_entry(i, i); + } + + assert_eq!(*col.get(&0).unwrap(), 0); + assert_eq!(col.size(), 10); + + // replacing should simply replace the value and not affect the size. 
+ // so although our col is full capacity, replacing an existing should not remove the oldest item + assert!(col.add_entry(0, 2).is_none()); + assert_eq!(*col.get(&0).unwrap(), 2); + assert_eq!(col.size(), 10); + + // but inserting a new one should + let key_removed = col.add_entry(10, 10); + assert!(key_removed.is_some()); + assert_eq!(key_removed.unwrap(), 0); + assert_eq!(col.size(), 10); + } + + #[test] + fn test_remove() { + let mut col: CappedHashMap<u8, u8> = CappedHashMap::new(10); + + for i in 0..10 { + col.add_entry(i, i); + } + + for i in 0..10 { + let v = col.remove(&i); + assert!(v.is_some()); + assert_eq!(v.unwrap(), i); + assert_eq!(col.size() as u8, 10 - i - 1); + } + + // the collection is empty so the next remove should return None + let v = col.remove(&0); + assert!(v.is_none()); + } +} diff --git a/src/committee/node.rs b/src/committee/node.rs index 3360815..a4dc692 100644 --- a/src/committee/node.rs +++ b/src/committee/node.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeMap, HashMap}, + collections::BTreeMap, net::SocketAddr, sync::{Arc, RwLock}, }; @@ -18,6 +18,8 @@ use serde::{Deserialize, Serialize}; use crate::{ bob_request::{BobRequest, SmartContract}, + capped_hashmap::CappedHashMap, + constants::MAX_SIGNING_TASK, frost, mpc_sign_tx::get_digest_to_hash, }; @@ -34,8 +36,8 @@ pub struct NodeState { /// The public key stuff they need. pub pubkey_package: frost::PublicKeyPackage, - // TODO: ensure that this cannot grow like crazy? prune old tasks? 
- pub signing_tasks: RwLock<HashMap<Txid, LocalSigningTask>>, + /// The current pending signing tasks + pub signing_tasks: RwLock<CappedHashMap<Txid, LocalSigningTask>>, } #[derive(Clone)] @@ -98,7 +100,7 @@ async fn round_1_signing( // store it locally { let mut signing_tasks = context.signing_tasks.write().unwrap(); - signing_tasks.insert( + signing_tasks.add_entry( txid, LocalSigningTask { proof_hash: bob_request.proof.hash(), @@ -233,7 +235,7 @@ pub async fn run_server( let ctx = NodeState { key_package, pubkey_package, - signing_tasks: RwLock::new(HashMap::new()), + signing_tasks: RwLock::new(CappedHashMap::new(MAX_SIGNING_TASK)), }; let server = Server::builder() diff --git a/src/constants.rs b/src/constants.rs index d66468f..108091f 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -43,3 +43,5 @@ pub const KEEPALIVE_WAIT_SECONDS: u64 = 5; /// The total number of fibonacci backoff retries before considering an MPC node offline pub const KEEPALIVE_MAX_RETRIES: u8 = 10; + +pub const MAX_SIGNING_TASK: usize = 100; diff --git a/src/lib.rs b/src/lib.rs index 366acb7..f4d2cff 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,7 @@ use anyhow::Context; use secp256k1::hashes::Hash; +pub mod capped_hashmap; pub mod committee; pub mod constants; pub mod frost;