Skip to content

Commit

Permalink
Persist is_encoded_longer_than_hash_len property of a Node (#474)
Browse files Browse the repository at this point in the history
  • Loading branch information
xinifinity authored Jan 8, 2024
1 parent fbe03e9 commit 3cff033
Show file tree
Hide file tree
Showing 3 changed files with 63 additions and 16 deletions.
4 changes: 4 additions & 0 deletions firewood/src/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -824,6 +824,9 @@ impl Db {
}
}
})?;

// Calculate the root hash before flushing so it can be persisted.
let root_hash = rev.kv_root_hash()?;
#[allow(clippy::unwrap_used)]
rev.flush_dirty().unwrap();

Expand All @@ -835,6 +838,7 @@ impl Db {
rev,
store,
committed: Arc::new(Mutex::new(false)),
root_hash,
parent,
})
}
Expand Down
18 changes: 9 additions & 9 deletions firewood/src/db/proposal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ pub struct Proposal {
pub(super) rev: DbRev<MutStore>,
pub(super) store: Universe<StoreRevMut>,
pub(super) committed: Arc<Mutex<bool>>,
pub(super) root_hash: TrieHash,

pub(super) parent: ProposalBase,
}
Expand Down Expand Up @@ -106,6 +107,9 @@ impl Proposal {
}
}
})?;

// Calculate the root hash before flushing so it can be persisted.
let hash = rev.kv_root_hash()?;
#[allow(clippy::unwrap_used)]
rev.flush_dirty().unwrap();

Expand All @@ -118,6 +122,7 @@ impl Proposal {
rev,
store,
committed: Arc::new(Mutex::new(false)),
root_hash: hash,
parent,
})
}
Expand All @@ -132,6 +137,7 @@ impl Proposal {
rev,
store,
committed,
root_hash: hash,
parent,
} = self;

Expand All @@ -152,10 +158,7 @@ impl Proposal {
committed_root_hash.expect("committed_root_hash should not be none");
match &parent {
ProposalBase::Proposal(p) => {
let parent_root_hash = p.rev.kv_root_hash().ok();
let parent_root_hash =
parent_root_hash.expect("parent_root_hash should not be none");
if parent_root_hash != committed_root_hash {
if p.root_hash != committed_root_hash {
return Err(DbError::InvalidProposal);
}
}
Expand All @@ -169,9 +172,6 @@ impl Proposal {
}
};

let kv_root_hash = rev.kv_root_hash().ok();
let kv_root_hash = kv_root_hash.expect("kv_root_hash should not be none");

// clear the staging layer and apply changes to the CachedSpace
let (merkle_payload_redo, merkle_payload_wal) = store.merkle.payload.delta();
let (merkle_meta_redo, merkle_meta_wal) = store.merkle.meta.delta();
Expand Down Expand Up @@ -218,14 +218,14 @@ impl Proposal {
revisions.base_revision = Arc::new(rev.into());

// update the rolling window of root hashes
revisions.root_hashes.push_front(kv_root_hash.clone());
revisions.root_hashes.push_front(hash.clone());
if revisions.root_hashes.len() > max_revisions {
revisions
.root_hashes
.resize(max_revisions, TrieHash([0; TRIE_HASH_LEN]));
}

rev_inner.root_hash_staging.write(0, &kv_root_hash.0);
rev_inner.root_hash_staging.write(0, &hash.0);
let (root_hash_redo, root_hash_wal) = rev_inner.root_hash_staging.delta();

// schedule writes to the disk
Expand Down
57 changes: 50 additions & 7 deletions firewood/src/merkle/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,7 @@ impl NodeType {
pub struct Node {
pub(super) root_hash: OnceLock<TrieHash>,
encoded: OnceLock<Vec<u8>>,
is_encoded_longer_than_hash_len: OnceLock<bool>,
// lazy_dirty is an atomicbool, but only writers ever set it
// Therefore, we can always use Relaxed ordering. It's atomic
// just to ensure Sync + Send.
Expand All @@ -173,6 +174,7 @@ impl PartialEq for Node {
let Node {
root_hash,
encoded,
is_encoded_longer_than_hash_len: _,
lazy_dirty: _,
inner,
} = self;
Expand All @@ -187,6 +189,7 @@ impl Clone for Node {
fn clone(&self) -> Self {
Self {
root_hash: self.root_hash.clone(),
is_encoded_longer_than_hash_len: self.is_encoded_longer_than_hash_len.clone(),
encoded: self.encoded.clone(),
lazy_dirty: AtomicBool::new(self.is_dirty()),
inner: self.inner.clone(),
Expand All @@ -199,6 +202,7 @@ impl From<NodeType> for Node {
let mut s = Self {
root_hash: OnceLock::new(),
encoded: OnceLock::new(),
is_encoded_longer_than_hash_len: OnceLock::new(),
inner,
lazy_dirty: AtomicBool::new(false),
};
Expand All @@ -222,6 +226,7 @@ impl Node {
Self {
root_hash: OnceLock::new(),
encoded: OnceLock::new(),
is_encoded_longer_than_hash_len: OnceLock::new(),
inner: NodeType::Branch(
BranchNode {
// path: vec![].into(),
Expand Down Expand Up @@ -249,7 +254,9 @@ impl Node {
}

fn is_encoded_longer_than_hash_len<S: ShaleStore<Node>>(&self, store: &S) -> bool {
self.get_encoded(store).len() >= TRIE_HASH_LEN
*self
.is_encoded_longer_than_hash_len
.get_or_init(|| self.get_encoded(store).len() >= TRIE_HASH_LEN)
}

pub(super) fn rehash(&mut self) {
Expand All @@ -273,13 +280,21 @@ impl Node {
&mut self.inner
}

pub(super) fn new_from_hash(root_hash: Option<TrieHash>, inner: NodeType) -> Self {
pub(super) fn new_from_hash(
root_hash: Option<TrieHash>,
is_encoded_longer_than_hash_len: Option<bool>,
inner: NodeType,
) -> Self {
Self {
root_hash: match root_hash {
Some(h) => OnceLock::from(h),
None => OnceLock::new(),
},
encoded: OnceLock::new(),
is_encoded_longer_than_hash_len: match is_encoded_longer_than_hash_len {
Some(v) => OnceLock::from(v),
None => OnceLock::new(),
},
inner,
lazy_dirty: AtomicBool::new(false),
}
Expand All @@ -298,7 +313,6 @@ impl Node {
struct Meta {
root_hash: [u8; TRIE_HASH_LEN],
attrs: NodeAttributes,
is_encoded_longer_than_hash_len: Option<bool>,
}

impl Meta {
Expand Down Expand Up @@ -360,24 +374,46 @@ impl Storable for Node {
None
};

let is_encoded_longer_than_hash_len =
if attrs.contains(NodeAttributes::IS_ENCODED_BIG_VALID) {
Some(false)
} else if attrs.contains(NodeAttributes::LONG) {
Some(true)
} else {
None
};

let meta_raw = mem
.get_view(offset, 1_u64)
.ok_or(ShaleError::InvalidCacheView {
offset,
size: 1_u64,
})?;

offset += 1;

#[allow(clippy::indexing_slicing)]
match meta_raw.as_deref()[TRIE_HASH_LEN + 1].try_into()? {
match meta_raw.as_deref()[0].try_into()? {
NodeTypeId::Branch => {
let inner = NodeType::Branch(Box::new(BranchNode::deserialize(offset, mem)?));

Ok(Self::new_from_hash(root_hash, inner))
Ok(Self::new_from_hash(
root_hash,
is_encoded_longer_than_hash_len,
inner,
))
}

NodeTypeId::Extension => {
let inner = NodeType::Extension(ExtNode::deserialize(offset, mem)?);
let node = Self::new_from_hash(root_hash, inner);
let node = Self::new_from_hash(root_hash, is_encoded_longer_than_hash_len, inner);

Ok(node)
}

NodeTypeId::Leaf => {
let inner = NodeType::Leaf(LeafNode::deserialize(offset, mem)?);
let node = Self::new_from_hash(root_hash, inner);
let node = Self::new_from_hash(root_hash, is_encoded_longer_than_hash_len, inner);

Ok(node)
}
Expand All @@ -386,6 +422,7 @@ impl Storable for Node {

fn serialized_len(&self) -> u64 {
Meta::SIZE as u64
+ 1
+ match &self.inner {
NodeType::Branch(n) => {
// TODO: add path
Expand Down Expand Up @@ -416,6 +453,12 @@ impl Storable for Node {
} else {
NodeAttributes::IS_ENCODED_BIG_VALID
});
} else if let Some(b) = self.is_encoded_longer_than_hash_len.get() {
attrs.insert(if *b {
NodeAttributes::LONG
} else {
NodeAttributes::IS_ENCODED_BIG_VALID
});
}

#[allow(clippy::unwrap_used)]
Expand Down

0 comments on commit 3cff033

Please sign in to comment.