Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor(pkg/trie/triedb): introduce HashDB interface and integrate into TrieDB with added iterators #4315

Merged
merged 9 commits into from
Nov 13, 2024
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 10 additions & 10 deletions internal/client/state-db/noncanonical.go
Original file line number Diff line number Diff line change
Expand Up @@ -146,10 +146,10 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Insert(
})
nco.lastCanonicalized = &lastCanonicalized
} else if nco.lastCanonicalized != nil {
if number < frontBlockNumber || number > frontBlockNumber+uint64(nco.levels.Len()) {
if number < frontBlockNumber || number > frontBlockNumber+uint64(nco.levels.Len()) { //nolint:gosec
log.Printf(
"TRACE: Failed to insert block %v, current is %v .. %v)\n",
number, frontBlockNumber, frontBlockNumber+uint64(nco.levels.Len()))
number, frontBlockNumber, frontBlockNumber+uint64(nco.levels.Len())) //nolint:gosec
return CommitSet[Key]{}, ErrInvalidBlockNumber
}
// check for valid parent if inserting on second level or higher
Expand All @@ -163,13 +163,13 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Insert(
}
var level overlayLevel[BlockHash, Key] = newOverlayLevel[BlockHash, Key]()
var levelIndex int
if nco.levels.Len() == 0 || number == frontBlockNumber+uint64(nco.levels.Len()) {
if nco.levels.Len() == 0 || number == frontBlockNumber+uint64(nco.levels.Len()) { //nolint:gosec
nco.levels.PushBack(newOverlayLevel[BlockHash, Key]())
level = nco.levels.Back()
levelIndex = nco.levels.Len() - 1
} else {
level = nco.levels.At(int(number - frontBlockNumber))
levelIndex = int(number - frontBlockNumber)
level = nco.levels.At(int(number - frontBlockNumber)) //nolint:gosec
levelIndex = int(number - frontBlockNumber) //nolint:gosec
}

if len(level.blocks) >= int(maxBlocksPerLevel) {
Expand Down Expand Up @@ -221,10 +221,10 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Insert(

func (nco *nonCanonicalOverlay[BlockHash, Key]) discardJournals(
levelIndex uint, discardedJournals *[][]byte, hash BlockHash) {
if levelIndex >= uint(nco.levels.Len()) {
if levelIndex >= uint(nco.levels.Len()) { //nolint:gosec
return
}
level := nco.levels.At(int(levelIndex))
level := nco.levels.At(int(levelIndex)) //nolint:gosec
for _, overlay := range level.blocks {
parent, ok := nco.parents[overlay.hash]
if !ok {
Expand Down Expand Up @@ -418,7 +418,7 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Remove(hash BlockHash) *CommitSe
}
}
}
overlay := level.remove(uint(index))
overlay := level.remove(uint(index)) //nolint:gosec
nco.levels.Set(levelIndex, level)
commit.Meta.Deleted = append(commit.Meta.Deleted, overlay.journalKey)
delete(nco.parents, overlay.hash)
Expand Down Expand Up @@ -496,7 +496,7 @@ func (ol *overlayLevel[BlockHash, Key]) push(overlay blockOverlay[BlockHash, Key
}

func (ol *overlayLevel[BlockHash, Key]) availableIndex() uint64 {
return uint64(bits.TrailingZeros64(^ol.usedIndices))
return uint64(bits.TrailingZeros64(^ol.usedIndices)) //nolint:gosec
}

func (ol *overlayLevel[BlockHash, Key]) remove(index uint) blockOverlay[BlockHash, Key] {
Expand Down Expand Up @@ -639,7 +639,7 @@ func discardDescendants[BlockHash Hash, Key Hash](
panic("there is a parent entry for each entry in levels; qed")
}
if h == hash {
index = uint(i)
index = uint(i) //nolint:gosec
overlay := level.remove(index)
numPinned := discardDescendants(remainder, values, parents, pinned, pinnedInsertions, overlay.hash)
if _, ok := pinned[overlay.hash]; ok {
Expand Down
22 changes: 11 additions & 11 deletions internal/client/state-db/pruning.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,10 @@ const defaultMaxBlockConstraint uint32 = 256
// the death list.
// The changes are journaled in the DB.
type pruningWindow[BlockHash Hash, Key Hash] struct {
/// A queue of blocks keep tracking keys that should be deleted for each block in the
/// pruning window.
// A queue of blocks keep tracking keys that should be deleted for each block in the
// pruning window.
queue deathRowQueue[BlockHash, Key]
/// Block number that is next to be pruned.
// Block number that is next to be pruned.
base uint64
}

Expand Down Expand Up @@ -156,9 +156,9 @@ type deathRowQueue[BlockHash Hash, Key Hash] interface {
}

type inMemDeathRowQueue[BlockHash Hash, Key Hash] struct {
/// A queue of keys that should be deleted for each block in the pruning window.
// A queue of keys that should be deleted for each block in the pruning window.
deathRows deque.Deque[deathRow[BlockHash, Key]]
/// An index that maps each key from `death_rows` to block number.
// An index that maps each key from `death_rows` to block number.
deathIndex map[Key]uint64
}

Expand Down Expand Up @@ -207,11 +207,11 @@ func (drqim *inMemDeathRowQueue[BlockHash, Key]) Import(
block, ok := drqim.deathIndex[k]
if ok {
delete(drqim.deathIndex, k)
delete(drqim.deathRows.At(int(block-base)).deleted, k)
delete(drqim.deathRows.At(int(block-base)).deleted, k) //nolint:gosec
}
}
// add new keys
importedBlock := base + uint64(drqim.deathRows.Len())
importedBlock := base + uint64(drqim.deathRows.Len()) //nolint:gosec
deletedMap := make(map[Key]any)
for _, k := range deleted {
drqim.deathIndex[k] = importedBlock
Expand All @@ -236,15 +236,15 @@ func (drqim *inMemDeathRowQueue[BlockHash, Key]) PopFront(base uint64) (*deathRo
// Check if the block at the given `index` of the queue exist
// it is the caller's responsibility to ensure `index` won't be out of bounds
func (drqim *inMemDeathRowQueue[BlockHash, Key]) HaveBlock(hash BlockHash, index uint) haveBlock {
if drqim.deathRows.At(int(index)).hash == hash {
if drqim.deathRows.At(int(index)).hash == hash { //nolint:gosec
return haveBlockYes
}
return haveBlockNo
}

// Return the number of block in the pruning window
func (drqim *inMemDeathRowQueue[BlockHash, Key]) Len(base uint64) uint64 {
return uint64(drqim.deathRows.Len())
return uint64(drqim.deathRows.Len()) //nolint:gosec
}

// Get the hash of the next pruning block
Expand Down Expand Up @@ -276,8 +276,8 @@ func toPruningJournalKey(block uint64) []byte {
type haveBlock uint

const (
/// Definitely don't have this block.
// Definitely don't have this block.
haveBlockNo haveBlock = iota
/// Definitely has this block
// Definitely has this block
haveBlockYes
)
54 changes: 54 additions & 0 deletions internal/hash-db/hash_db.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
// Copyright 2024 ChainSafe Systems (ON)
// SPDX-License-Identifier: LGPL-3.0-only

package hashdb

import "golang.org/x/exp/constraints"

// Prefix is a trie node prefix: the nibble path from the trie root
// to the trie node.
// For a node containing no partial key value it is the full key.
// For a value node or node containing a partial key, it is the full key minus its node partial
// nibbles (the node key can be split into prefix and node partial).
// Therefore it is always the leftmost portion of the node key, so its internal representation
// is a non expanded byte slice followed by a last padded byte representation.
// The padded byte is an optional padded value.
type Prefix struct {
	// Key is the non expanded, byte-aligned portion of the prefix.
	Key []byte
	// Padded optionally holds the last, partially filled (padded) byte of nibbles.
	Padded *byte
}

// EmptyPrefix is the empty prefix constant.
// Can be used when the prefix is not used internally or for root nodes.
var EmptyPrefix = Prefix{}

// Hasher is an interface describing an object that can hash a slice of bytes. Used to abstract
// other types over the hashing algorithm. Defines a single hash method and an
// Out associated type with the necessary bounds.
type Hasher[Out constraints.Ordered] interface {
	// Hash computes the hash of the provided slice of bytes, returning the Out type of the Hasher.
	Hash(x []byte) Out
}

// HashDB is an interface modelling a datastore keyed by a hash defined by the Hasher.
type HashDB[Hash comparable] interface {
	// Get looks up a given hash into the bytes that hash to it, returning nil if the
	// hash is not known.
	Get(key Hash, prefix Prefix) []byte

	// Contains checks for the existence of a hash-key.
	Contains(key Hash, prefix Prefix) bool

	// Insert inserts a datum item into the DB and returns the datum's hash for a later lookup.
	// Insertions are counted and the equivalent number of Remove()s must be performed before
	// the data is considered dead.
	Insert(prefix Prefix, value []byte) Hash

	// Emplace is like Insert(), except you provide the key and the data is all moved.
	Emplace(key Hash, prefix Prefix, value []byte)

	// Remove removes a datum previously inserted. Insertions can be "owed" such that the same
	// number of inserts may happen without the data eventually being inserted into the DB.
	// It can be "owed" more than once.
	Remove(key Hash, prefix Prefix)
}
Loading
Loading