diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index 649b3178da..fae066866c 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -2179,7 +2179,7 @@ func golangBindings(t *testing.T, overload bool) {
if out, err := replacer.CombinedOutput(); err != nil {
t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out)
}
- replacer = exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ava-labs/libevm@v0.0.0", "-replace", "github.com/ava-labs/libevm=github.com/ava-labs/libevm@v0.0.0-20250122094956-11c780f117f8")
+ replacer = exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ava-labs/libevm@v0.0.0", "-replace", "github.com/ava-labs/libevm=github.com/ava-labs/libevm@v0.0.0-20250131144451-c0f677c030ad")
replacer.Dir = pkg
if out, err := replacer.CombinedOutput(); err != nil {
t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out)
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
deleted file mode 100644
index f0b5053356..0000000000
--- a/core/rawdb/accessors_chain.go
+++ /dev/null
@@ -1,980 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "math/big"
- "slices"
-
- "github.com/ava-labs/coreth/consensus/misc/eip4844"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
-func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
- var data []byte
- db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
- data, _ = reader.Ancient(ChainFreezerHashTable, number)
- if len(data) == 0 {
- // Not in ancients, read the hash from leveldb
- data, _ = db.Get(headerHashKey(number))
- }
- return nil
- })
- return common.BytesToHash(data)
-}
-
-// WriteCanonicalHash stores the hash assigned to a canonical block number.
-func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
- log.Crit("Failed to store number to hash mapping", "err", err)
- }
-}
-
-// DeleteCanonicalHash removes the number to hash canonical mapping.
-func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
- if err := db.Delete(headerHashKey(number)); err != nil {
- log.Crit("Failed to delete number to hash mapping", "err", err)
- }
-}
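-
-// A minimal round-trip sketch of the three accessors above (hypothetical
-// caller code; `db`, `hash` and `number` are assumed, not part of this file):
-//
-//    WriteCanonicalHash(db, hash, number)
-//    stored := ReadCanonicalHash(db, number) // stored == hash
-//    DeleteCanonicalHash(db, number)         // mapping is gone again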
-
-// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
-// both canonical and reorged forks included.
-func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
- prefix := headerKeyPrefix(number)
-
- hashes := make([]common.Hash, 0, 1)
- it := db.NewIterator(prefix, nil)
- defer it.Release()
-
- for it.Next() {
- if key := it.Key(); len(key) == len(prefix)+32 {
- hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
- }
- }
- return hashes
-}
-
-type NumberHash struct {
- Number uint64
- Hash common.Hash
-}
-
-// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
-// heights, both canonical and reorged forks included.
-// This method considers both limits to be _inclusive_.
-func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
- var (
- start = encodeBlockNumber(first)
- keyLength = len(headerPrefix) + 8 + 32
- hashes = make([]*NumberHash, 0, 1+last-first)
- it = db.NewIterator(headerPrefix, start)
- )
- defer it.Release()
- for it.Next() {
- key := it.Key()
- if len(key) != keyLength {
- continue
- }
- num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
- if num > last {
- break
- }
- hash := common.BytesToHash(key[len(key)-32:])
- hashes = append(hashes, &NumberHash{num, hash})
- }
- return hashes
-}
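-
-// Hypothetical usage sketch, making the inclusive-bounds contract concrete
-// (assumes an iterable `db`):
-//
-//    // Blocks 100 and 200 are both included in the scan.
-//    for _, nh := range ReadAllHashesInRange(db, 100, 200) {
-//        fmt.Println(nh.Number, nh.Hash)
-//    }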
-
-// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in the
-// given chain range. If the number of accumulated entries reaches the given
-// threshold, the iteration is aborted and the partial result is returned.
-func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
- // Short circuit if the limit is 0.
- if limit == 0 {
- return nil, nil
- }
- var (
- numbers []uint64
- hashes []common.Hash
- )
- // Construct the key prefix of the start point.
- start, end := headerHashKey(from), headerHashKey(to)
- it := db.NewIterator(nil, start)
- defer it.Release()
-
- for it.Next() {
- if bytes.Compare(it.Key(), end) >= 0 {
- break
- }
- if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
- numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
- hashes = append(hashes, common.BytesToHash(it.Value()))
- // If the accumulated entries reach the limit threshold, return.
- if len(numbers) >= limit {
- break
- }
- }
- }
- return numbers, hashes
-}
-
-// ReadHeaderNumber returns the header number assigned to a hash.
-func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
- data, _ := db.Get(headerNumberKey(hash))
- if len(data) != 8 {
- return nil
- }
- number := binary.BigEndian.Uint64(data)
- return &number
-}
-
-// WriteHeaderNumber stores the hash->number mapping.
-func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- key := headerNumberKey(hash)
- enc := encodeBlockNumber(number)
- if err := db.Put(key, enc); err != nil {
- log.Crit("Failed to store hash to number mapping", "err", err)
- }
-}
-
-// DeleteHeaderNumber removes hash->number mapping.
-func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(headerNumberKey(hash)); err != nil {
- log.Crit("Failed to delete hash to number mapping", "err", err)
- }
-}
-
-// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
-func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(headHeaderKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteHeadHeaderHash stores the hash of the current canonical head header.
-func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last header's hash", "err", err)
- }
-}
-
-// ReadHeadBlockHash retrieves the hash of the current canonical head block.
-func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(headBlockKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteHeadBlockHash stores the head block's hash.
-func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last block's hash", "err", err)
- }
-}
-
-// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
-func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(headFastBlockKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
-func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last fast block's hash", "err", err)
- }
-}
-
-// ReadFinalizedBlockHash retrieves the hash of the finalized block.
-func ReadFinalizedBlockHash(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(headFinalizedBlockKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteFinalizedBlockHash stores the hash of the finalized block.
-func WriteFinalizedBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Put(headFinalizedBlockKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last finalized block's hash", "err", err)
- }
-}
-
-// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
-// is fully synced, the last pivot will always be nil.
-func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
- data, _ := db.Get(lastPivotKey)
- if len(data) == 0 {
- return nil
- }
- var pivot uint64
- if err := rlp.DecodeBytes(data, &pivot); err != nil {
- log.Error("Invalid pivot block number in database", "err", err)
- return nil
- }
- return &pivot
-}
-
-// WriteLastPivotNumber stores the number of the last pivot block.
-func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
- enc, err := rlp.EncodeToBytes(pivot)
- if err != nil {
- log.Crit("Failed to encode pivot block number", "err", err)
- }
- if err := db.Put(lastPivotKey, enc); err != nil {
- log.Crit("Failed to store pivot block number", "err", err)
- }
-}
-
-// ReadTxIndexTail retrieves the number of the oldest block whose transaction
-// indices have been indexed.
-func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
- data, _ := db.Get(txIndexTailKey)
- if len(data) != 8 {
- return nil
- }
- number := binary.BigEndian.Uint64(data)
- return &number
-}
-
-// WriteTxIndexTail stores the number of the oldest indexed block
-// into the database.
-func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
- if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
- log.Crit("Failed to store the transaction index tail", "err", err)
- }
-}
-
-// ReadHeaderRange returns the rlp-encoded headers, starting at 'number', and going
-// backwards towards genesis. This method assumes that the caller already has
-// placed a cap on count, to prevent DoS issues.
-// Since this method operates in head-towards-genesis mode, it will return an empty
-// slice in case the head ('number') is missing. Hence, the caller must ensure that
-// the head ('number') argument is actually an existing header.
-//
-// N.B.: Since the input is a number, as opposed to a hash, it's implicit that
-// this method only operates on canon headers.
-func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValue {
- var rlpHeaders []rlp.RawValue
- if count == 0 {
- return rlpHeaders
- }
- i := number
- if count-1 > number {
- // Requesting past genesis is fine: clamp to number+1 items (down to block 0)
- count = number + 1
- }
- limit, _ := db.Ancients()
- // First read live blocks
- if i >= limit {
- // If we need to read live blocks, we need to figure out the hash first
- hash := ReadCanonicalHash(db, number)
- for ; i >= limit && count > 0; i-- {
- if data, _ := db.Get(headerKey(i, hash)); len(data) > 0 {
- rlpHeaders = append(rlpHeaders, data)
- // Get the parent hash for next query
- hash = types.HeaderParentHashFromRLP(data)
- } else {
- break // Maybe got moved to ancients
- }
- count--
- }
- }
- if count == 0 {
- return rlpHeaders
- }
- // read remaining from ancients
- data, err := db.AncientRange(ChainFreezerHeaderTable, i+1-count, count, 0)
- if err != nil {
- log.Error("Failed to read headers from freezer", "err", err)
- return rlpHeaders
- }
- if uint64(len(data)) != count {
- log.Warn("Incomplete read of headers from freezer", "wanted", count, "read", len(data))
- return rlpHeaders
- }
- // The data is in the order [h, h+1, .., n] -- reordering needed
- for i := range data {
- rlpHeaders = append(rlpHeaders, data[len(data)-1-i])
- }
- return rlpHeaders
-}
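-
-// Sketch of the intended call pattern (hedged; the cap of 192 is an arbitrary
-// illustration, not a value mandated by this file):
-//
-//    // The caller has already verified that header `number` exists and
-//    // capped the count to avoid DoS.
-//    headers := ReadHeaderRange(db, number, 192) // number, number-1, ...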
-
-// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
-func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- var data []byte
- db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
- // First try to look up the data in ancient database. Extra hash
- // comparison is necessary since ancient database only maintains
- // the canonical data.
- data, _ = reader.Ancient(ChainFreezerHeaderTable, number)
- if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
- return nil
- }
- // If not, try reading from leveldb
- data, _ = db.Get(headerKey(number, hash))
- return nil
- })
- return data
-}
-
-// HasHeader verifies the existence of a block header corresponding to the hash.
-func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
- if isCanon(db, number, hash) {
- return true
- }
- if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
- return false
- }
- return true
-}
-
-// ReadHeader retrieves the block header corresponding to the hash.
-func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
- data := ReadHeaderRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- header := new(types.Header)
- if err := rlp.DecodeBytes(data, header); err != nil {
- log.Error("Invalid block header RLP", "hash", hash, "err", err)
- return nil
- }
- return header
-}
-
-// WriteHeader stores a block header into the database and also stores the hash-
-// to-number mapping.
-func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
- var (
- hash = header.Hash()
- number = header.Number.Uint64()
- )
- // Write the hash -> number mapping
- WriteHeaderNumber(db, hash, number)
-
- // Write the encoded header
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- log.Crit("Failed to RLP encode header", "err", err)
- }
- key := headerKey(number, hash)
- if err := db.Put(key, data); err != nil {
- log.Crit("Failed to store header", "err", err)
- }
-}
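-
-// Illustrative round trip through the two functions above (hypothetical
-// caller code):
-//
-//    WriteHeader(db, header) // stores hash->number plus the RLP-encoded header
-//    got := ReadHeader(db, header.Hash(), header.Number.Uint64())
-//    // got.Hash() == header.Hash(), since the header encoding round-trips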
-
-// DeleteHeader removes all block header data associated with a hash.
-func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- deleteHeaderWithoutNumber(db, hash, number)
- if err := db.Delete(headerNumberKey(hash)); err != nil {
- log.Crit("Failed to delete hash to number mapping", "err", err)
- }
-}
-
-// deleteHeaderWithoutNumber removes only the block header but does not remove
-// the hash to number mapping.
-func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Delete(headerKey(number, hash)); err != nil {
- log.Crit("Failed to delete header", "err", err)
- }
-}
-
-// isCanon is an internal utility method that checks whether the given
-// number/hash pair is part of the ancient (canon) set.
-func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
- h, err := reader.Ancient(ChainFreezerHashTable, number)
- if err != nil {
- return false
- }
- return bytes.Equal(h, hash[:])
-}
-
-// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
-func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- // First try to look up the data in ancient database. Extra hash
- // comparison is necessary since ancient database only maintains
- // the canonical data.
- var data []byte
- db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
- // Check if the data is in ancients
- if isCanon(reader, number, hash) {
- data, _ = reader.Ancient(ChainFreezerBodiesTable, number)
- return nil
- }
- // If not, try reading from leveldb
- data, _ = db.Get(blockBodyKey(number, hash))
- return nil
- })
- return data
-}
-
-// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
-// block at number, in RLP encoding.
-func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
- var data []byte
- db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
- data, _ = reader.Ancient(ChainFreezerBodiesTable, number)
- if len(data) > 0 {
- return nil
- }
- // Block is not in ancients, read from leveldb by hash and number.
- // Note: ReadCanonicalHash cannot be used here because it also
- // calls ReadAncients internally.
- hash, _ := db.Get(headerHashKey(number))
- data, _ = db.Get(blockBodyKey(number, common.BytesToHash(hash)))
- return nil
- })
- return data
-}
-
-// WriteBodyRLP stores an RLP encoded block body into the database.
-func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
- if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
- log.Crit("Failed to store block body", "err", err)
- }
-}
-
-// HasBody verifies the existence of a block body corresponding to the hash.
-func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
- if isCanon(db, number, hash) {
- return true
- }
- if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
- return false
- }
- return true
-}
-
-// ReadBody retrieves the block body corresponding to the hash.
-func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
- data := ReadBodyRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- body := new(types.Body)
- if err := rlp.DecodeBytes(data, body); err != nil {
- log.Error("Invalid block body RLP", "hash", hash, "err", err)
- return nil
- }
- return body
-}
-
-// WriteBody stores a block body into the database.
-func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
- data, err := rlp.EncodeToBytes(body)
- if err != nil {
- log.Crit("Failed to RLP encode body", "err", err)
- }
- WriteBodyRLP(db, hash, number, data)
-}
-
-// DeleteBody removes all block body data associated with a hash.
-func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Delete(blockBodyKey(number, hash)); err != nil {
- log.Crit("Failed to delete block body", "err", err)
- }
-}
-
-// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
-func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- var data []byte
- db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
- // Check if the data is in ancients
- if isCanon(reader, number, hash) {
- data, _ = reader.Ancient(ChainFreezerDifficultyTable, number)
- return nil
- }
- // If not, try reading from leveldb
- data, _ = db.Get(headerTDKey(number, hash))
- return nil
- })
- return data
-}
-
-// ReadTd retrieves a block's total difficulty corresponding to the hash.
-func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
- data := ReadTdRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- td := new(big.Int)
- if err := rlp.DecodeBytes(data, td); err != nil {
- log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
- return nil
- }
- return td
-}
-
-// WriteTd stores the total difficulty of a block into the database.
-func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
- data, err := rlp.EncodeToBytes(td)
- if err != nil {
- log.Crit("Failed to RLP encode block total difficulty", "err", err)
- }
- if err := db.Put(headerTDKey(number, hash), data); err != nil {
- log.Crit("Failed to store block total difficulty", "err", err)
- }
-}
-
-// DeleteTd removes all block total difficulty data associated with a hash.
-func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Delete(headerTDKey(number, hash)); err != nil {
- log.Crit("Failed to delete block total difficulty", "err", err)
- }
-}
-
-// HasReceipts verifies the existence of all the transaction receipts belonging
-// to a block.
-func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
- if isCanon(db, number, hash) {
- return true
- }
- if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
- return false
- }
- return true
-}
-
-// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
-func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- var data []byte
- db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
- // Check if the data is in ancients
- if isCanon(reader, number, hash) {
- data, _ = reader.Ancient(ChainFreezerReceiptTable, number)
- return nil
- }
- // If not, try reading from leveldb
- data, _ = db.Get(blockReceiptsKey(number, hash))
- return nil
- })
- return data
-}
-
-// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
-// The receipt metadata fields are not guaranteed to be populated, so they
-// should not be used. Use ReadReceipts instead if the metadata is needed.
-func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
- // Retrieve the flattened receipt slice
- data := ReadReceiptsRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- // Convert the receipts from their storage form to their internal representation
- storageReceipts := []*types.ReceiptForStorage{}
- if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
- log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
- return nil
- }
- receipts := make(types.Receipts, len(storageReceipts))
- for i, storageReceipt := range storageReceipts {
- receipts[i] = (*types.Receipt)(storageReceipt)
- }
- return receipts
-}
-
-// ReadReceipts retrieves all the transaction receipts belonging to a block, including
-// its corresponding metadata fields. If it is unable to populate these metadata
-// fields then nil is returned.
-//
-// The current implementation populates these metadata fields by reading the receipts'
-// corresponding block body, so if the block body is not found it will return nil even
-// if the receipt itself is stored.
-func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64, config *params.ChainConfig) types.Receipts {
- // We're deriving many fields from the block body, so retrieve it alongside the receipts
- receipts := ReadRawReceipts(db, hash, number)
- if receipts == nil {
- return nil
- }
- body := ReadBody(db, hash, number)
- if body == nil {
- log.Error("Missing body but have receipt", "hash", hash, "number", number)
- return nil
- }
- header := ReadHeader(db, hash, number)
-
- var baseFee *big.Int
- if header == nil {
- baseFee = big.NewInt(0)
- } else {
- baseFee = header.BaseFee
- }
- // Compute effective blob gas price.
- var blobGasPrice *big.Int
- if header != nil && header.ExcessBlobGas != nil {
- blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas)
- }
- if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil {
- log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
- return nil
- }
- return receipts
-}
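-
-// A hedged sketch of the caveat spelled out above: receipts vanish from
-// ReadReceipts as soon as the body is deleted, even though the raw RLP is
-// still stored (hypothetical caller code):
-//
-//    DeleteBody(db, hash, number)
-//    rs := ReadReceipts(db, hash, number, time, config) // nil: body missing
-//    raw := ReadRawReceipts(db, hash, number)           // still non-nil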
-
-// WriteReceipts stores all the transaction receipts belonging to a block.
-func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
- // Convert the receipts into their storage form and serialize them
- storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
- for i, receipt := range receipts {
- storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
- }
- bytes, err := rlp.EncodeToBytes(storageReceipts)
- if err != nil {
- log.Crit("Failed to encode block receipts", "err", err)
- }
- // Store the flattened receipt slice
- if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
- log.Crit("Failed to store block receipts", "err", err)
- }
-}
-
-// DeleteReceipts removes all receipt data associated with a block hash.
-func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
- log.Crit("Failed to delete block receipts", "err", err)
- }
-}
-
-// storedReceiptRLP is the storage encoding of a receipt.
-// Duplicated from core/types/receipt.go.
-// TODO: Re-use the existing definition.
-type storedReceiptRLP struct {
- PostStateOrStatus []byte
- CumulativeGasUsed uint64
- Logs []*types.Log
-}
-
-// receiptLogs is a barebones version of ReceiptForStorage which only keeps
-// the list of logs. When decoding a stored receipt into this object we
-// avoid creating the bloom filter.
-type receiptLogs struct {
- Logs []*types.Log
-}
-
-// DecodeRLP implements rlp.Decoder.
-func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
- var stored storedReceiptRLP
- if err := s.Decode(&stored); err != nil {
- return err
- }
- r.Logs = stored.Logs
- return nil
-}
-
-// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
-func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
- logIndex := uint(0)
- if len(txs) != len(receipts) {
- return errors.New("transaction and receipt count mismatch")
- }
- for i := 0; i < len(receipts); i++ {
- txHash := txs[i].Hash()
- // The derived log fields can simply be set from the block and transaction
- for j := 0; j < len(receipts[i].Logs); j++ {
- receipts[i].Logs[j].BlockNumber = number
- receipts[i].Logs[j].BlockHash = hash
- receipts[i].Logs[j].TxHash = txHash
- receipts[i].Logs[j].TxIndex = uint(i)
- receipts[i].Logs[j].Index = logIndex
- logIndex++
- }
- }
- return nil
-}
-
-// ReadLogs retrieves the logs for all transactions in a block. If the
-// receipts are not found, nil is returned.
-// Note: ReadLogs does not derive unstored log fields.
-func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
- // Retrieve the flattened receipt slice
- data := ReadReceiptsRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- receipts := []*receiptLogs{}
- if err := rlp.DecodeBytes(data, &receipts); err != nil {
- log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
- return nil
- }
-
- logs := make([][]*types.Log, len(receipts))
- for i, receipt := range receipts {
- logs[i] = receipt.Logs
- }
- return logs
-}
-
-// ReadBlock retrieves an entire block corresponding to the hash, assembling it
-// back from the stored header and body. If either the header or body could not
-// be retrieved, nil is returned.
-//
-// Note, due to concurrent download of the header and block body, the header
-// (and thus the canonical hash) can be stored in the database while the body
-// data is not (yet).
-func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
- header := ReadHeader(db, hash, number)
- if header == nil {
- return nil
- }
- body := ReadBody(db, hash, number)
- if body == nil {
- return nil
- }
- block := types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
- bodyExtra := types.GetBodyExtra(body)
- block = types.WithBlockExtra(block, bodyExtra.Version, bodyExtra.ExtData, false)
- return block
-}
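-
-// Hypothetical check mirroring the note above: a nil result only means one of
-// the two pieces is missing, not that the hash is unknown:
-//
-//    if block := ReadBlock(db, hash, number); block == nil {
-//        // header or body not (yet) stored; retry once sync catches up
-//    }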
-
-// WriteBlock serializes a block into the database, header and body separately.
-func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
- WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
- WriteHeader(db, block.Header())
-}
-
-// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
-func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
- var (
- tdSum = new(big.Int).Set(td)
- stReceipts []*types.ReceiptForStorage
- )
- return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for i, block := range blocks {
- // Convert receipts to storage format and sum up total difficulty.
- stReceipts = stReceipts[:0]
- for _, receipt := range receipts[i] {
- stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
- }
- header := block.Header()
- if i > 0 {
- tdSum.Add(tdSum, header.Difficulty)
- }
- if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
- return err
- }
- }
- return nil
- })
-}
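-
-// Hedged usage sketch: `td` is the total difficulty of blocks[0]; the
-// difficulties of the later blocks are accumulated internally by the loop
-// above:
-//
-//    size, err := WriteAncientBlocks(db, blocks, receipts, tdOfFirstBlock)
-//    // size is the total amount of data appended to the freezer tables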
-
-func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
- num := block.NumberU64()
- if err := op.AppendRaw(ChainFreezerHashTable, num, block.Hash().Bytes()); err != nil {
- return fmt.Errorf("can't add block %d hash: %v", num, err)
- }
- if err := op.Append(ChainFreezerHeaderTable, num, header); err != nil {
- return fmt.Errorf("can't append block header %d: %v", num, err)
- }
- if err := op.Append(ChainFreezerBodiesTable, num, block.Body()); err != nil {
- return fmt.Errorf("can't append block body %d: %v", num, err)
- }
- if err := op.Append(ChainFreezerReceiptTable, num, receipts); err != nil {
- return fmt.Errorf("can't append block %d receipts: %v", num, err)
- }
- if err := op.Append(ChainFreezerDifficultyTable, num, td); err != nil {
- return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
- }
- return nil
-}
-
-// DeleteBlock removes all block data associated with a hash.
-func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- DeleteReceipts(db, hash, number)
- DeleteHeader(db, hash, number)
- DeleteBody(db, hash, number)
- DeleteTd(db, hash, number)
-}
-
-// DeleteBlockWithoutNumber removes all block data associated with a hash, except
-// the hash to number mapping.
-func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- DeleteReceipts(db, hash, number)
- deleteHeaderWithoutNumber(db, hash, number)
- DeleteBody(db, hash, number)
- DeleteTd(db, hash, number)
-}
-
-const badBlockToKeep = 10
-
-type badBlock struct {
- Header *types.Header
- Body *types.Body
-}
-
-// ReadBadBlock retrieves the bad block with the corresponding block hash.
-func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
- blob, err := db.Get(badBlockKey)
- if err != nil {
- return nil
- }
- var badBlocks []*badBlock
- if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
- return nil
- }
- for _, bad := range badBlocks {
- if bad.Header.Hash() == hash {
- return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals)
- }
- }
- return nil
-}
-
-// ReadAllBadBlocks retrieves all the bad blocks in the database.
-// All returned blocks are sorted in reverse order by number.
-func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
- blob, err := db.Get(badBlockKey)
- if err != nil {
- return nil
- }
- var badBlocks []*badBlock
- if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
- return nil
- }
- var blocks []*types.Block
- for _, bad := range badBlocks {
- blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals))
- }
- return blocks
-}
-
-// WriteBadBlock serializes the bad block into the database. If the number of
-// accumulated bad blocks exceeds the limit, the oldest ones are dropped.
-func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
- blob, err := db.Get(badBlockKey)
- if err != nil {
- log.Warn("Failed to load old bad blocks", "error", err)
- }
- var badBlocks []*badBlock
- if len(blob) > 0 {
- if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
- log.Crit("Failed to decode old bad blocks", "error", err)
- }
- }
- for _, b := range badBlocks {
- if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
- log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
- return
- }
- }
- badBlocks = append(badBlocks, &badBlock{
- Header: block.Header(),
- Body: block.Body(),
- })
- slices.SortFunc(badBlocks, func(a, b *badBlock) int {
- // Note: sorting in descending number order.
- return -a.Header.Number.Cmp(b.Header.Number)
- })
- if len(badBlocks) > badBlockToKeep {
- badBlocks = badBlocks[:badBlockToKeep]
- }
- data, err := rlp.EncodeToBytes(badBlocks)
- if err != nil {
- log.Crit("Failed to encode bad blocks", "err", err)
- }
- if err := db.Put(badBlockKey, data); err != nil {
- log.Crit("Failed to write bad blocks", "err", err)
- }
-}
-
-// DeleteBadBlocks deletes all the bad blocks from the database.
-func DeleteBadBlocks(db ethdb.KeyValueWriter) {
- if err := db.Delete(badBlockKey); err != nil {
- log.Crit("Failed to delete bad blocks", "err", err)
- }
-}
-
-// FindCommonAncestor returns the last common ancestor of two block headers.
-func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
- for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
- a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
- if a == nil {
- return nil
- }
- }
- for an := a.Number.Uint64(); an < b.Number.Uint64(); {
- b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
- if b == nil {
- return nil
- }
- }
- for a.Hash() != b.Hash() {
- a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
- if a == nil {
- return nil
- }
- b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
- if b == nil {
- return nil
- }
- }
- return a
-}
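-
-// Worked example (hypothetical): with a at height 10 on a side fork and b at
-// height 8 on the canonical chain, a is first walked back to height 8, after
-// which both headers step back in lockstep until their hashes match; that
-// header is the last common ancestor.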
-
-// ReadHeadHeader returns the current canonical head header.
-func ReadHeadHeader(db ethdb.Reader) *types.Header {
- headHeaderHash := ReadHeadHeaderHash(db)
- if headHeaderHash == (common.Hash{}) {
- return nil
- }
- headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
- if headHeaderNumber == nil {
- return nil
- }
- return ReadHeader(db, headHeaderHash, *headHeaderNumber)
-}
-
-// ReadHeadBlock returns the current canonical head block.
-func ReadHeadBlock(db ethdb.Reader) *types.Block {
- headBlockHash := ReadHeadBlockHash(db)
- if headBlockHash == (common.Hash{}) {
- return nil
- }
- headBlockNumber := ReadHeaderNumber(db, headBlockHash)
- if headBlockNumber == nil {
- return nil
- }
- return ReadBlock(db, headBlockHash, *headBlockNumber)
-}
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
deleted file mode 100644
index 530db77c82..0000000000
--- a/core/rawdb/accessors_chain_test.go
+++ /dev/null
@@ -1,933 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "math/big"
- "math/rand"
- "os"
- "reflect"
- "testing"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
- "golang.org/x/crypto/sha3"
-)
-
-// Tests block header storage and retrieval operations.
-func TestHeaderStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test header to move around the database and make sure it's really new
- header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")}
- if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
- t.Fatalf("Non existent header returned: %v", entry)
- }
- // Write and verify the header in the database
- WriteHeader(db, header)
- if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
- t.Fatalf("Stored header not found")
- } else if entry.Hash() != header.Hash() {
- t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
- }
- if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
- t.Fatalf("Stored header RLP not found")
- } else {
- hasher := sha3.NewLegacyKeccak256()
- hasher.Write(entry)
-
- if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
- t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
- }
- }
- // Delete the header and verify the execution
- DeleteHeader(db, header.Hash(), header.Number.Uint64())
- if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
- t.Fatalf("Deleted header returned: %v", entry)
- }
-}
-
-// Tests block body storage and retrieval operations.
-func TestBodyStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test body to move around the database and make sure it's really new
- body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
-
- hasher := sha3.NewLegacyKeccak256()
- rlp.Encode(hasher, body)
- hash := common.BytesToHash(hasher.Sum(nil))
-
- if entry := ReadBody(db, hash, 0); entry != nil {
- t.Fatalf("Non existent body returned: %v", entry)
- }
- // Write and verify the body in the database
- WriteBody(db, hash, 0, body)
- if entry := ReadBody(db, hash, 0); entry == nil {
- t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
- t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
- }
- if entry := ReadBodyRLP(db, hash, 0); entry == nil {
- t.Fatalf("Stored body RLP not found")
- } else {
- hasher := sha3.NewLegacyKeccak256()
- hasher.Write(entry)
-
- if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
- t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
- }
- }
- // Delete the body and verify the execution
- DeleteBody(db, hash, 0)
- if entry := ReadBody(db, hash, 0); entry != nil {
- t.Fatalf("Deleted body returned: %v", entry)
- }
-}
-
-// Tests block storage and retrieval operations.
-func TestBlockStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test block to move around the database and make sure it's really new
- block := types.NewBlockWithHeader(&types.Header{
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent header returned: %v", entry)
- }
- if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent body returned: %v", entry)
- }
- // Write and verify the block in the database
- WriteBlock(db, block)
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored block not found")
- } else if entry.Hash() != block.Hash() {
- t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
- }
- if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored header not found")
- } else if entry.Hash() != block.Header().Hash() {
- t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
- }
- if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
- t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
- }
- // Delete the block and verify the execution
- DeleteBlock(db, block.Hash(), block.NumberU64())
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted block returned: %v", entry)
- }
- if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted header returned: %v", entry)
- }
- if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted body returned: %v", entry)
- }
-}
-
-// Tests that partial block contents don't get reassembled into full blocks.
-func TestPartialBlockStorage(t *testing.T) {
- db := NewMemoryDatabase()
- block := types.NewBlockWithHeader(&types.Header{
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- // Store a header and check that it's not recognized as a block
- WriteHeader(db, block.Header())
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- DeleteHeader(db, block.Hash(), block.NumberU64())
-
- // Store a body and check that it's not recognized as a block
- WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- DeleteBody(db, block.Hash(), block.NumberU64())
-
- // Store a header and a body separately and check reassembly
- WriteHeader(db, block.Header())
- WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
-
- if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored block not found")
- } else if entry.Hash() != block.Hash() {
- t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
- }
-}
-
-// Tests bad block storage and retrieval operations.
-func TestBadBlockStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test block to move around the database and make sure it's really new
- block := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- Extra: []byte("bad block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- if entry := ReadBadBlock(db, block.Hash()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- // Write and verify the block in the database
- WriteBadBlock(db, block)
- if entry := ReadBadBlock(db, block.Hash()); entry == nil {
- t.Fatalf("Stored block not found")
- } else if entry.Hash() != block.Hash() {
- t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
- }
- // Write one more bad block
- blockTwo := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(2),
- Extra: []byte("bad block two"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- WriteBadBlock(db, blockTwo)
-
- // Write block one again; it should be filtered out.
- WriteBadBlock(db, block)
- badBlocks := ReadAllBadBlocks(db)
- if len(badBlocks) != 2 {
- t.Fatalf("Failed to load all bad blocks")
- }
-
- // Write a bunch of bad blocks; all the blocks should be sorted
- // in reverse order. The extra blocks should be truncated.
- for _, n := range rand.Perm(100) {
- block := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(int64(n)),
- Extra: []byte("bad block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- WriteBadBlock(db, block)
- }
- badBlocks = ReadAllBadBlocks(db)
- if len(badBlocks) != badBlockToKeep {
- t.Fatalf("The number of persised bad blocks in incorrect %d", len(badBlocks))
- }
- for i := 0; i < len(badBlocks)-1; i++ {
- if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() {
- t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, i+1, badBlocks[i].NumberU64(), badBlocks[i+1].NumberU64())
- }
- }
-
- // Delete all bad blocks
- DeleteBadBlocks(db)
- badBlocks = ReadAllBadBlocks(db)
- if len(badBlocks) != 0 {
- t.Fatalf("Failed to delete bad blocks")
- }
-}
-
-// Tests block total difficulty storage and retrieval operations.
-func TestTdStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test TD to move around the database and make sure it's really new
- hash, td := common.Hash{}, big.NewInt(314)
- if entry := ReadTd(db, hash, 0); entry != nil {
- t.Fatalf("Non existent TD returned: %v", entry)
- }
- // Write and verify the TD in the database
- WriteTd(db, hash, 0, td)
- if entry := ReadTd(db, hash, 0); entry == nil {
- t.Fatalf("Stored TD not found")
- } else if entry.Cmp(td) != 0 {
- t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
- }
- // Delete the TD and verify the execution
- DeleteTd(db, hash, 0)
- if entry := ReadTd(db, hash, 0); entry != nil {
- t.Fatalf("Deleted TD returned: %v", entry)
- }
-}
-
-// Tests that canonical numbers can be mapped to hashes and retrieved.
-func TestCanonicalMappingStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a test canonical number and assigned hash to move around
- hash, number := common.Hash{0: 0xff}, uint64(314)
- if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
- t.Fatalf("Non existent canonical mapping returned: %v", entry)
- }
- // Write and verify the canonical mapping in the database
- WriteCanonicalHash(db, hash, number)
- if entry := ReadCanonicalHash(db, number); entry == (common.Hash{}) {
- t.Fatalf("Stored canonical mapping not found")
- } else if entry != hash {
- t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
- }
- // Delete the canonical mapping and verify the execution
- DeleteCanonicalHash(db, number)
- if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
- t.Fatalf("Deleted canonical mapping returned: %v", entry)
- }
-}
-
-// Tests that head headers and head blocks can be assigned, individually.
-func TestHeadStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
- blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
- blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})
-
- // Check that no head entries are in a pristine database
- if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) {
- t.Fatalf("Non head header entry returned: %v", entry)
- }
- if entry := ReadHeadBlockHash(db); entry != (common.Hash{}) {
- t.Fatalf("Non head block entry returned: %v", entry)
- }
- if entry := ReadHeadFastBlockHash(db); entry != (common.Hash{}) {
- t.Fatalf("Non fast head block entry returned: %v", entry)
- }
- // Assign separate entries for the head header and block
- WriteHeadHeaderHash(db, blockHead.Hash())
- WriteHeadBlockHash(db, blockFull.Hash())
- WriteHeadFastBlockHash(db, blockFast.Hash())
-
- // Check that all three heads are present, and different (i.e. multiple heads maintained)
- if entry := ReadHeadHeaderHash(db); entry != blockHead.Hash() {
- t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
- }
- if entry := ReadHeadBlockHash(db); entry != blockFull.Hash() {
- t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
- }
- if entry := ReadHeadFastBlockHash(db); entry != blockFast.Hash() {
- t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
- }
-}
-
-// Tests that receipts associated with a single block can be stored and retrieved.
-func TestBlockReceiptStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a live block since we need metadata to reconstruct the receipt
- tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
- tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
-
- body := &types.Body{Transactions: types.Transactions{tx1, tx2}}
-
- // Create the two receipts to manage afterwards
- receipt1 := &types.Receipt{
- Status: types.ReceiptStatusFailed,
- CumulativeGasUsed: 1,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x11})},
- {Address: common.BytesToAddress([]byte{0x01, 0x11})},
- },
- TxHash: tx1.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
- GasUsed: 111111,
- }
- receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
-
- receipt2 := &types.Receipt{
- PostState: common.Hash{2}.Bytes(),
- CumulativeGasUsed: 2,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x22})},
- {Address: common.BytesToAddress([]byte{0x02, 0x22})},
- },
- TxHash: tx2.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
- GasUsed: 222222,
- }
- receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
- receipts := []*types.Receipt{receipt1, receipt2}
-
- // Check that no receipt entries are in a pristine database
- hash := common.BytesToHash([]byte{0x03, 0x14})
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
- t.Fatalf("non existent receipts returned: %v", rs)
- }
- // Insert the body that corresponds to the receipts
- WriteBody(db, hash, 0, body)
-
- // Insert the receipt slice into the database and check presence
- WriteReceipts(db, hash, 0, receipts)
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 {
- t.Fatalf("no receipts returned")
- } else {
- if err := checkReceiptsRLP(rs, receipts); err != nil {
- t.Fatal(err)
- }
- }
- // Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed)
- DeleteBody(db, hash, 0)
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); rs != nil {
- t.Fatalf("receipts returned when body was deleted: %v", rs)
- }
- // Ensure that receipts without metadata can be returned without the block body too
- if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil {
- t.Fatal(err)
- }
- // Sanity check: with the body restored but the receipts deleted, nothing is returned
- WriteBody(db, hash, 0, body)
-
- DeleteReceipts(db, hash, 0)
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
- t.Fatalf("deleted receipts returned: %v", rs)
- }
-}
-
-func checkReceiptsRLP(have, want types.Receipts) error {
- if len(have) != len(want) {
- return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want))
- }
- for i := 0; i < len(want); i++ {
- rlpHave, err := rlp.EncodeToBytes(have[i])
- if err != nil {
- return err
- }
- rlpWant, err := rlp.EncodeToBytes(want[i])
- if err != nil {
- return err
- }
- if !bytes.Equal(rlpHave, rlpWant) {
- return fmt.Errorf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
- }
- }
- return nil
-}
-
-func TestAncientStorage(t *testing.T) {
- // Freezer style fast import the chain.
- frdir := t.TempDir()
- db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
- if err != nil {
- t.Fatalf("failed to create database with ancient backend")
- }
- defer db.Close()
-
- // Create a test block
- block := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(0),
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- // Ensure nothing non-existent will be read
- hash, number := block.Hash(), block.NumberU64()
- if blob := ReadHeaderRLP(db, hash, number); len(blob) > 0 {
- t.Fatalf("non existent header returned")
- }
- if blob := ReadBodyRLP(db, hash, number); len(blob) > 0 {
- t.Fatalf("non existent body returned")
- }
- if blob := ReadReceiptsRLP(db, hash, number); len(blob) > 0 {
- t.Fatalf("non existent receipts returned")
- }
- if blob := ReadTdRLP(db, hash, number); len(blob) > 0 {
- t.Fatalf("non existent td returned")
- }
-
- // Write and verify the header in the database
- WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100))
-
- if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
- t.Fatalf("no header returned")
- }
- if blob := ReadBodyRLP(db, hash, number); len(blob) == 0 {
- t.Fatalf("no body returned")
- }
- if blob := ReadReceiptsRLP(db, hash, number); len(blob) == 0 {
- t.Fatalf("no receipts returned")
- }
- if blob := ReadTdRLP(db, hash, number); len(blob) == 0 {
- t.Fatalf("no td returned")
- }
-
- // Use a fake hash for data retrieval; nothing should be returned.
- fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
- if blob := ReadHeaderRLP(db, fakeHash, number); len(blob) != 0 {
- t.Fatalf("invalid header returned")
- }
- if blob := ReadBodyRLP(db, fakeHash, number); len(blob) != 0 {
- t.Fatalf("invalid body returned")
- }
- if blob := ReadReceiptsRLP(db, fakeHash, number); len(blob) != 0 {
- t.Fatalf("invalid receipts returned")
- }
- if blob := ReadTdRLP(db, fakeHash, number); len(blob) != 0 {
- t.Fatalf("invalid td returned")
- }
-}
-
-func TestCanonicalHashIteration(t *testing.T) {
- var cases = []struct {
- from, to uint64
- limit int
- expect []uint64
- }{
- {1, 8, 0, nil},
- {1, 8, 1, []uint64{1}},
- {1, 8, 10, []uint64{1, 2, 3, 4, 5, 6, 7}},
- {1, 9, 10, []uint64{1, 2, 3, 4, 5, 6, 7, 8}},
- {2, 9, 10, []uint64{2, 3, 4, 5, 6, 7, 8}},
- {9, 10, 10, nil},
- }
- // Test empty db iteration
- db := NewMemoryDatabase()
- numbers, _ := ReadAllCanonicalHashes(db, 0, 10, 10)
- if len(numbers) != 0 {
- t.Fatalf("No entry should be returned to iterate an empty db")
- }
- // Fill database with testing data.
- for i := uint64(1); i <= 8; i++ {
- WriteCanonicalHash(db, common.Hash{}, i)
- WriteTd(db, common.Hash{}, i, big.NewInt(10)) // Write some interfering data
- }
- for i, c := range cases {
- numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit)
- if !reflect.DeepEqual(numbers, c.expect) {
- t.Fatalf("Case %d failed, want %v, got %v", i, c.expect, numbers)
- }
- }
-}
-
-func TestHashesInRange(t *testing.T) {
- mkHeader := func(number, seq int) *types.Header {
- h := types.Header{
- Difficulty: new(big.Int),
- Number: big.NewInt(int64(number)),
- GasLimit: uint64(seq),
- }
- return &h
- }
- db := NewMemoryDatabase()
- // For each number, write N versions of that particular number
- total := 0
- for i := 0; i < 15; i++ {
- for ii := 0; ii < i; ii++ {
- WriteHeader(db, mkHeader(i, ii))
- total++
- }
- }
- if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashes(db, 10)), 10; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashes(db, 16)), 0; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
- if have, want := len(ReadAllHashes(db, 1)), 1; have != want {
- t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
- }
-}
-
-// This measures the write speed of the WriteAncientBlocks operation.
-func BenchmarkWriteAncientBlocks(b *testing.B) {
- // Open freezer database.
- frdir := b.TempDir()
- db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
- if err != nil {
- b.Fatalf("failed to create database with ancient backend")
- }
- defer db.Close()
-
- // Create the data to insert. The blocks must have consecutive numbers, so we create
- // all of them ahead of time. However, there is no need to create receipts
- // individually for each block, just make one batch here and reuse it for all writes.
- const batchSize = 128
- const blockTxs = 20
- allBlocks := makeTestBlocks(b.N, blockTxs)
- batchReceipts := makeTestReceipts(batchSize, blockTxs)
- b.ResetTimer()
-
- // The benchmark loop writes batches of blocks, but note that the total block count is
- // b.N. This means the resulting ns/op measurement is the time it takes to write a
- // single block and its associated data.
- var td = big.NewInt(55)
- var totalSize int64
- for i := 0; i < b.N; i += batchSize {
- length := batchSize
- if i+batchSize > b.N {
- length = b.N - i
- }
-
- blocks := allBlocks[i : i+length]
- receipts := batchReceipts[:length]
- writeSize, err := WriteAncientBlocks(db, blocks, receipts, td)
- if err != nil {
- b.Fatal(err)
- }
- totalSize += writeSize
- }
-
- // Enable MB/s reporting.
- b.SetBytes(totalSize / int64(b.N))
-}
-
-// makeTestBlocks creates fake blocks for the ancient write benchmark.
-func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
- key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- signer := types.LatestSignerForChainID(big.NewInt(8))
-
- // Create transactions.
- txs := make([]*types.Transaction, txsPerBlock)
- for i := 0; i < len(txs); i++ {
- var err error
- to := common.Address{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
- txs[i], err = types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: 2,
- GasPrice: big.NewInt(30000),
- Gas: 0x45454545,
- To: &to,
- })
- if err != nil {
- panic(err)
- }
- }
-
- // Create the blocks.
- blocks := make([]*types.Block, nblock)
- for i := 0; i < nblock; i++ {
- header := &types.Header{
- Number: big.NewInt(int64(i)),
- Extra: []byte("test block"),
- }
- blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil)
- blocks[i].Hash() // pre-cache the block hash
- }
- return blocks
-}
-
-// makeTestReceipts creates fake receipts for the ancient write benchmark.
-func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
- receipts := make([]*types.Receipt, nPerBlock)
- for i := 0; i < len(receipts); i++ {
- receipts[i] = &types.Receipt{
- Status: types.ReceiptStatusSuccessful,
- CumulativeGasUsed: 0x888888888,
- Logs: make([]*types.Log, 5),
- }
- }
- allReceipts := make([]types.Receipts, n)
- for i := 0; i < n; i++ {
- allReceipts[i] = receipts
- }
- return allReceipts
-}
-
-type fullLogRLP struct {
- Address common.Address
- Topics []common.Hash
- Data []byte
- BlockNumber uint64
- TxHash common.Hash
- TxIndex uint
- BlockHash common.Hash
- Index uint
-}
-
-func newFullLogRLP(l *types.Log) *fullLogRLP {
- return &fullLogRLP{
- Address: l.Address,
- Topics: l.Topics,
- Data: l.Data,
- BlockNumber: l.BlockNumber,
- TxHash: l.TxHash,
- TxIndex: l.TxIndex,
- BlockHash: l.BlockHash,
- Index: l.Index,
- }
-}
-
-// Tests that logs associated with a single block can be retrieved.
-func TestReadLogs(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create a live block since we need metadata to reconstruct the receipt
- tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
- tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
-
- body := &types.Body{Transactions: types.Transactions{tx1, tx2}}
-
- // Create the two receipts to manage afterwards
- receipt1 := &types.Receipt{
- Status: types.ReceiptStatusFailed,
- CumulativeGasUsed: 1,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x11})},
- {Address: common.BytesToAddress([]byte{0x01, 0x11})},
- },
- TxHash: tx1.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
- GasUsed: 111111,
- }
- receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
-
- receipt2 := &types.Receipt{
- PostState: common.Hash{2}.Bytes(),
- CumulativeGasUsed: 2,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x22})},
- {Address: common.BytesToAddress([]byte{0x02, 0x22})},
- },
- TxHash: tx2.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
- GasUsed: 222222,
- }
- receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
- receipts := []*types.Receipt{receipt1, receipt2}
-
- hash := common.BytesToHash([]byte{0x03, 0x14})
- // Check that no receipt entries are in a pristine database
- if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
- t.Fatalf("non existent receipts returned: %v", rs)
- }
- // Insert the body that corresponds to the receipts
- WriteBody(db, hash, 0, body)
-
- // Insert the receipt slice into the database and check presence
- WriteReceipts(db, hash, 0, receipts)
-
- logs := ReadLogs(db, hash, 0)
- if len(logs) == 0 {
- t.Fatalf("no logs returned")
- }
- if have, want := len(logs), 2; have != want {
- t.Fatalf("unexpected number of logs returned, have %d want %d", have, want)
- }
- if have, want := len(logs[0]), 2; have != want {
- t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want)
- }
- if have, want := len(logs[1]), 2; have != want {
- t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want)
- }
-
- for i, pr := range receipts {
- for j, pl := range pr.Logs {
- rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j]))
- if err != nil {
- t.Fatal(err)
- }
- rlpWant, err := rlp.EncodeToBytes(newFullLogRLP(pl))
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(rlpHave, rlpWant) {
- t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
- }
- }
- }
-}
-
-func TestDeriveLogFields(t *testing.T) {
- // Create a few transactions to have receipts for
- to2 := common.HexToAddress("0x2")
- to3 := common.HexToAddress("0x3")
- txs := types.Transactions{
- types.NewTx(&types.LegacyTx{
- Nonce: 1,
- Value: big.NewInt(1),
- Gas: 1,
- GasPrice: big.NewInt(1),
- }),
- types.NewTx(&types.LegacyTx{
- To: &to2,
- Nonce: 2,
- Value: big.NewInt(2),
- Gas: 2,
- GasPrice: big.NewInt(2),
- }),
- types.NewTx(&types.AccessListTx{
- To: &to3,
- Nonce: 3,
- Value: big.NewInt(3),
- Gas: 3,
- GasPrice: big.NewInt(3),
- }),
- }
- // Create the corresponding receipts
- receipts := []*receiptLogs{
- {
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x11})},
- {Address: common.BytesToAddress([]byte{0x01, 0x11})},
- },
- },
- {
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x22})},
- {Address: common.BytesToAddress([]byte{0x02, 0x22})},
- },
- },
- {
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x33})},
- {Address: common.BytesToAddress([]byte{0x03, 0x33})},
- },
- },
- }
-
- // Derive log metadata fields
- number := big.NewInt(1)
- hash := common.BytesToHash([]byte{0x03, 0x14})
- if err := deriveLogFields(receipts, hash, number.Uint64(), txs); err != nil {
- t.Fatal(err)
- }
-
- // Iterate over all the computed fields and check that they're correct
- logIndex := uint(0)
- for i := range receipts {
- for j := range receipts[i].Logs {
- if receipts[i].Logs[j].BlockNumber != number.Uint64() {
- t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
- }
- if receipts[i].Logs[j].BlockHash != hash {
- t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
- }
- if receipts[i].Logs[j].TxHash != txs[i].Hash() {
- t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
- }
- if receipts[i].Logs[j].TxIndex != uint(i) {
- t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
- }
- if receipts[i].Logs[j].Index != logIndex {
- t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
- }
- logIndex++
- }
- }
-}
-
-func BenchmarkDecodeRLPLogs(b *testing.B) {
- // Encoded receipts from block 0x14ee094309fbe8f70b65f45ebcc08fb33f126942d97464aad5eb91cfd1e2d269
- buf, err := os.ReadFile("testdata/stored_receipts.bin")
- if err != nil {
- b.Fatal(err)
- }
- b.Run("ReceiptForStorage", func(b *testing.B) {
- b.ReportAllocs()
- var r []*types.ReceiptForStorage
- for i := 0; i < b.N; i++ {
- if err := rlp.DecodeBytes(buf, &r); err != nil {
- b.Fatal(err)
- }
- }
- })
- b.Run("rlpLogs", func(b *testing.B) {
- b.ReportAllocs()
- var r []*receiptLogs
- for i := 0; i < b.N; i++ {
- if err := rlp.DecodeBytes(buf, &r); err != nil {
- b.Fatal(err)
- }
- }
- })
-}
-
-func TestHeadersRLPStorage(t *testing.T) {
- // Have N headers in the freezer
- frdir := t.TempDir()
-
- db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
- if err != nil {
- t.Fatalf("failed to create database with ancient backend")
- }
- defer db.Close()
- // Create blocks
- var chain []*types.Block
- var pHash common.Hash
- for i := 0; i < 100; i++ {
- block := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(int64(i)),
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- ParentHash: pHash,
- })
- chain = append(chain, block)
- pHash = block.Hash()
- }
-	receipts := make([]types.Receipts, 100)
- // Write first half to ancients
- WriteAncientBlocks(db, chain[:50], receipts[:50], big.NewInt(100))
- // Write second half to db
- for i := 50; i < 100; i++ {
- WriteCanonicalHash(db, chain[i].Hash(), chain[i].NumberU64())
- WriteBlock(db, chain[i])
- }
- checkSequence := func(from, amount int) {
- headersRlp := ReadHeaderRange(db, uint64(from), uint64(amount))
- if have, want := len(headersRlp), amount; have != want {
- t.Fatalf("have %d headers, want %d", have, want)
- }
- for i, headerRlp := range headersRlp {
- var header types.Header
- if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
- t.Fatal(err)
- }
- if have, want := header.Number.Uint64(), uint64(from-i); have != want {
- t.Fatalf("wrong number, have %d want %d", have, want)
- }
- }
- }
- checkSequence(99, 20) // Latest block and 19 parents
- checkSequence(99, 50) // Latest block -> all db blocks
- checkSequence(99, 51) // Latest block -> one from ancients
-	checkSequence(99, 52)  // Latest block -> two from ancients
- checkSequence(50, 2) // One from db, one from ancients
- checkSequence(49, 1) // One from ancients
- checkSequence(49, 50) // All ancient ones
- checkSequence(99, 100) // All blocks
- checkSequence(0, 1) // Only genesis
- checkSequence(1, 1) // Only block 1
- checkSequence(1, 2) // Genesis + block 1
-}
diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go
deleted file mode 100644
index afb50354c9..0000000000
--- a/core/rawdb/accessors_indexes.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "math/big"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// ReadTxLookupEntry retrieves the positional metadata associated with a transaction
-// hash to allow retrieving the transaction or receipt by hash.
-func ReadTxLookupEntry(db ethdb.Reader, hash common.Hash) *uint64 {
- data, _ := db.Get(txLookupKey(hash))
- if len(data) == 0 {
- return nil
- }
- // Database v6 tx lookup just stores the block number
- if len(data) < common.HashLength {
- number := new(big.Int).SetBytes(data).Uint64()
- return &number
- }
- // Database v4-v5 tx lookup format just stores the hash
- if len(data) == common.HashLength {
- return ReadHeaderNumber(db, common.BytesToHash(data))
- }
- // Finally try database v3 tx lookup format
- var entry LegacyTxLookupEntry
- if err := rlp.DecodeBytes(data, &entry); err != nil {
- log.Error("Invalid transaction lookup entry RLP", "hash", hash, "blob", data, "err", err)
- return nil
- }
- return &entry.BlockIndex
-}
-
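For orientation, here is a minimal sketch of how a lookup entry round-trips against the pre-removal coreth API (the transaction hash is made up for illustration). A v6 entry stores only the block number's big-endian bytes, so the value is shorter than a 32-byte hash and takes the first branch above:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/libevm/common"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	txHash := common.HexToHash("0x01") // hypothetical transaction hash

	// v6 format: only the block number's bytes are stored under the key.
	rawdb.WriteTxLookupEntries(db, 42, []common.Hash{txHash})

	if number := rawdb.ReadTxLookupEntry(db, txHash); number != nil {
		fmt.Println("tx recorded in block", *number) // prints 42
	}
}
```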
-// writeTxLookupEntry stores positional metadata for a transaction,
-// enabling hash based transaction and receipt lookups.
-func writeTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash, numberBytes []byte) {
- if err := db.Put(txLookupKey(hash), numberBytes); err != nil {
- log.Crit("Failed to store transaction lookup entry", "err", err)
- }
-}
-
-// WriteTxLookupEntries is identical to writeTxLookupEntry, but it works on
-// a list of hashes.
-func WriteTxLookupEntries(db ethdb.KeyValueWriter, number uint64, hashes []common.Hash) {
- numberBytes := new(big.Int).SetUint64(number).Bytes()
- for _, hash := range hashes {
- writeTxLookupEntry(db, hash, numberBytes)
- }
-}
-
-// WriteTxLookupEntriesByBlock stores positional metadata for every transaction from
-// a block, enabling hash based transaction and receipt lookups.
-func WriteTxLookupEntriesByBlock(db ethdb.KeyValueWriter, block *types.Block) {
- numberBytes := block.Number().Bytes()
- for _, tx := range block.Transactions() {
- writeTxLookupEntry(db, tx.Hash(), numberBytes)
- }
-}
-
-// DeleteTxLookupEntry removes the transaction lookup entry associated with a hash.
-func DeleteTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(txLookupKey(hash)); err != nil {
- log.Crit("Failed to delete transaction lookup entry", "err", err)
- }
-}
-
-// DeleteTxLookupEntries removes all transaction lookups for a given block.
-func DeleteTxLookupEntries(db ethdb.KeyValueWriter, hashes []common.Hash) {
- for _, hash := range hashes {
- DeleteTxLookupEntry(db, hash)
- }
-}
-
-// ReadTransaction retrieves a specific transaction from the database, along with
-// its added positional metadata.
-func ReadTransaction(db ethdb.Reader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
- blockNumber := ReadTxLookupEntry(db, hash)
- if blockNumber == nil {
- return nil, common.Hash{}, 0, 0
- }
- blockHash := ReadCanonicalHash(db, *blockNumber)
- if blockHash == (common.Hash{}) {
- return nil, common.Hash{}, 0, 0
- }
- body := ReadBody(db, blockHash, *blockNumber)
- if body == nil {
- log.Error("Transaction referenced missing", "number", *blockNumber, "hash", blockHash)
- return nil, common.Hash{}, 0, 0
- }
- for txIndex, tx := range body.Transactions {
- if tx.Hash() == hash {
- return tx, blockHash, *blockNumber, uint64(txIndex)
- }
- }
- log.Error("Transaction not found", "number", *blockNumber, "hash", blockHash, "txhash", hash)
- return nil, common.Hash{}, 0, 0
-}
-
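A lookup entry alone cannot serve ReadTransaction; the canonical hash mapping and the block body must also be present, as the deleted TestLookupStorage below exercises. A hedged sketch of the full write path (the helper name and wiring are illustrative):

```go
package example

import (
	"fmt"

	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/coreth/core/types"
	"github.com/ava-labs/libevm/ethdb"
)

// storeAndLookup writes everything ReadTransaction needs, then resolves the
// block's first transaction by hash.
func storeAndLookup(db ethdb.Database, block *types.Block) {
	rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
	rawdb.WriteBlock(db, block)
	rawdb.WriteTxLookupEntriesByBlock(db, block)

	txHash := block.Transactions()[0].Hash()
	tx, blockHash, number, index := rawdb.ReadTransaction(db, txHash)
	fmt.Println(tx != nil, blockHash == block.Hash(), number, index)
}
```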
-// ReadReceipt retrieves a specific transaction receipt from the database, along with
-// its added positional metadata.
-func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig) (*types.Receipt, common.Hash, uint64, uint64) {
- // Retrieve the context of the receipt based on the transaction hash
- blockNumber := ReadTxLookupEntry(db, hash)
- if blockNumber == nil {
- return nil, common.Hash{}, 0, 0
- }
- blockHash := ReadCanonicalHash(db, *blockNumber)
- if blockHash == (common.Hash{}) {
- return nil, common.Hash{}, 0, 0
- }
- blockHeader := ReadHeader(db, blockHash, *blockNumber)
- if blockHeader == nil {
- return nil, common.Hash{}, 0, 0
- }
- // Read all the receipts from the block and return the one with the matching hash
- receipts := ReadReceipts(db, blockHash, *blockNumber, blockHeader.Time, config)
- for receiptIndex, receipt := range receipts {
- if receipt.TxHash == hash {
- return receipt, blockHash, *blockNumber, uint64(receiptIndex)
- }
- }
- log.Error("Receipt not found", "number", *blockNumber, "hash", blockHash, "txhash", hash)
- return nil, common.Hash{}, 0, 0
-}
-
-// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
-// section and bit index from the database.
-func ReadBloomBits(db ethdb.KeyValueReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
- return db.Get(bloomBitsKey(bit, section, head))
-}
-
-// WriteBloomBits stores the compressed bloom bit vector belonging to the given
-// section and bit index.
-func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) {
- if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
- log.Crit("Failed to store bloom bits", "err", err)
- }
-}
-
-// DeleteBloombits removes all compressed bloom bit vectors belonging to the
-// given section range and bit index.
-func DeleteBloombits(db ethdb.Database, bit uint, from uint64, to uint64) {
- start, end := bloomBitsKey(bit, from, common.Hash{}), bloomBitsKey(bit, to, common.Hash{})
- it := db.NewIterator(nil, start)
- defer it.Release()
-
- for it.Next() {
- if bytes.Compare(it.Key(), end) >= 0 {
- break
- }
- if len(it.Key()) != len(bloomBitsPrefix)+2+8+32 {
- continue
- }
- db.Delete(it.Key())
- }
- if it.Error() != nil {
- log.Crit("Failed to delete bloom bits", "err", it.Error())
- }
-}
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
deleted file mode 100644
index d9a391f730..0000000000
--- a/core/rawdb/accessors_indexes_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "math/big"
- "testing"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/internal/blocktest"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/params"
- "github.com/ava-labs/libevm/rlp"
-)
-
-var newTestHasher = blocktest.NewHasher
-
-// Tests that positional lookup metadata can be stored and retrieved.
-func TestLookupStorage(t *testing.T) {
- tests := []struct {
- name string
- writeTxLookupEntriesByBlock func(ethdb.Writer, *types.Block)
- }{
- {
- "DatabaseV6",
- func(db ethdb.Writer, block *types.Block) {
- WriteTxLookupEntriesByBlock(db, block)
- },
- },
- {
- "DatabaseV4-V5",
- func(db ethdb.Writer, block *types.Block) {
- for _, tx := range block.Transactions() {
- db.Put(txLookupKey(tx.Hash()), block.Hash().Bytes())
- }
- },
- },
- {
- "DatabaseV3",
- func(db ethdb.Writer, block *types.Block) {
- for index, tx := range block.Transactions() {
- entry := LegacyTxLookupEntry{
- BlockHash: block.Hash(),
- BlockIndex: block.NumberU64(),
- Index: uint64(index),
- }
- data, _ := rlp.EncodeToBytes(entry)
- db.Put(txLookupKey(tx.Hash()), data)
- }
- },
- },
- }
-
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- db := NewMemoryDatabase()
-
- tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
- tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), 2222, big.NewInt(22222), []byte{0x22, 0x22, 0x22})
- tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
- txs := []*types.Transaction{tx1, tx2, tx3}
-
- block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher())
-
-			// Check that no transaction entries are in a pristine database
- for i, tx := range txs {
- if txn, _, _, _ := ReadTransaction(db, tx.Hash()); txn != nil {
- t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn)
- }
- }
- // Insert all the transactions into the database, and verify contents
- WriteCanonicalHash(db, block.Hash(), block.NumberU64())
- WriteBlock(db, block)
- tc.writeTxLookupEntriesByBlock(db, block)
-
- for i, tx := range txs {
- if txn, hash, number, index := ReadTransaction(db, tx.Hash()); txn == nil {
- t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash())
- } else {
- if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) {
- t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i)
- }
- if tx.Hash() != txn.Hash() {
- t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx)
- }
- }
- }
- // Delete the transactions and check purge
- for i, tx := range txs {
- DeleteTxLookupEntry(db, tx.Hash())
- if txn, _, _, _ := ReadTransaction(db, tx.Hash()); txn != nil {
- t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn)
- }
- }
- })
- }
-}
-
-func TestDeleteBloomBits(t *testing.T) {
- // Prepare testing data
- db := NewMemoryDatabase()
- for i := uint(0); i < 2; i++ {
- for s := uint64(0); s < 2; s++ {
- WriteBloomBits(db, i, s, params.MainnetGenesisHash, []byte{0x01, 0x02})
- WriteBloomBits(db, i, s, params.SepoliaGenesisHash, []byte{0x01, 0x02})
- }
- }
- check := func(bit uint, section uint64, head common.Hash, exist bool) {
- bits, _ := ReadBloomBits(db, bit, section, head)
- if exist && !bytes.Equal(bits, []byte{0x01, 0x02}) {
- t.Fatalf("Bloombits mismatch")
- }
- if !exist && len(bits) > 0 {
- t.Fatalf("Bloombits should be removed")
- }
- }
- // Check the existence of written data.
- check(0, 0, params.MainnetGenesisHash, true)
- check(0, 0, params.SepoliaGenesisHash, true)
-
-	// Delete part of the data and check that it is gone.
- DeleteBloombits(db, 0, 0, 1)
- check(0, 0, params.MainnetGenesisHash, false)
- check(0, 0, params.SepoliaGenesisHash, false)
- check(0, 1, params.MainnetGenesisHash, true)
- check(0, 1, params.SepoliaGenesisHash, true)
-
-	// Delete the remaining data and check that it is gone.
- DeleteBloombits(db, 0, 0, 2)
- check(0, 0, params.MainnetGenesisHash, false)
- check(0, 0, params.SepoliaGenesisHash, false)
- check(0, 1, params.MainnetGenesisHash, false)
- check(0, 1, params.SepoliaGenesisHash, false)
-
-	// Bit 1 shouldn't be affected.
- check(1, 0, params.MainnetGenesisHash, true)
- check(1, 0, params.SepoliaGenesisHash, true)
- check(1, 1, params.MainnetGenesisHash, true)
- check(1, 1, params.SepoliaGenesisHash, true)
-}
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
deleted file mode 100644
index 844ef25b48..0000000000
--- a/core/rawdb/accessors_metadata.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "encoding/json"
- "time"
-
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// ReadDatabaseVersion retrieves the version number of the database.
-func ReadDatabaseVersion(db ethdb.KeyValueReader) *uint64 {
- var version uint64
-
- enc, _ := db.Get(databaseVersionKey)
- if len(enc) == 0 {
- return nil
- }
- if err := rlp.DecodeBytes(enc, &version); err != nil {
- return nil
- }
-
- return &version
-}
-
-// WriteDatabaseVersion stores the version number of the database
-func WriteDatabaseVersion(db ethdb.KeyValueWriter, version uint64) {
- enc, err := rlp.EncodeToBytes(version)
- if err != nil {
- log.Crit("Failed to encode database version", "err", err)
- }
- if err = db.Put(databaseVersionKey, enc); err != nil {
- log.Crit("Failed to store the database version", "err", err)
- }
-}
-
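A quick round trip using only the two helpers above (an in-memory database is used for illustration); on a fresh database the version is absent and the read returns nil:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/coreth/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	fmt.Println(rawdb.ReadDatabaseVersion(db)) // <nil> on a fresh database

	rawdb.WriteDatabaseVersion(db, 8)
	if v := rawdb.ReadDatabaseVersion(db); v != nil {
		fmt.Println(*v) // 8
	}
}
```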
-// ReadChainConfig retrieves the consensus settings based on the given genesis hash.
-func ReadChainConfig(db ethdb.KeyValueReader, hash common.Hash) *params.ChainConfig {
- data, _ := db.Get(configKey(hash))
- if len(data) == 0 {
- return nil
- }
- var config params.ChainConfig
- if err := json.Unmarshal(data, &config); err != nil {
- log.Error("Invalid chain config JSON", "hash", hash, "err", err)
- return nil
- }
- return &config
-}
-
-// WriteChainConfig writes the chain config settings to the database.
-func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.ChainConfig) {
- if cfg == nil {
- return
- }
- data, err := json.Marshal(cfg)
- if err != nil {
- log.Crit("Failed to JSON encode chain config", "err", err)
- }
- if err := db.Put(configKey(hash), data); err != nil {
- log.Crit("Failed to store chain config", "err", err)
- }
-}
-
-// ReadGenesisStateSpec retrieves the genesis state specification based on the
-// given genesis (block-)hash.
-func ReadGenesisStateSpec(db ethdb.KeyValueReader, blockhash common.Hash) []byte {
- data, _ := db.Get(genesisStateSpecKey(blockhash))
- return data
-}
-
-// WriteGenesisStateSpec writes the genesis state specification to the database.
-func WriteGenesisStateSpec(db ethdb.KeyValueWriter, blockhash common.Hash, data []byte) {
- if err := db.Put(genesisStateSpecKey(blockhash), data); err != nil {
- log.Crit("Failed to store genesis state", "err", err)
- }
-}
-
-// crashList is a list of unclean shutdown markers, RLP-encoded for storage in
-// the database.
-type crashList struct {
-	Discarded uint64 // how many unclean shutdowns have been discarded
- Recent []uint64 // unix timestamps of 10 latest unclean shutdowns
-}
-
-const crashesToKeep = 10
-
-// PushUncleanShutdownMarker appends a new unclean shutdown marker and returns
-// the previous data
-// - a list of timestamps
-// - a count of how many old unclean-shutdowns have been discarded
-func PushUncleanShutdownMarker(db ethdb.KeyValueStore) ([]uint64, uint64, error) {
- var uncleanShutdowns crashList
- // Read old data
- if data, err := db.Get(uncleanShutdownKey); err == nil {
- if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
- return nil, 0, err
- }
- }
- var discarded = uncleanShutdowns.Discarded
- var previous = make([]uint64, len(uncleanShutdowns.Recent))
- copy(previous, uncleanShutdowns.Recent)
-	// Add a new marker (but cap the list length)
- uncleanShutdowns.Recent = append(uncleanShutdowns.Recent, uint64(time.Now().Unix()))
- if count := len(uncleanShutdowns.Recent); count > crashesToKeep+1 {
- numDel := count - (crashesToKeep + 1)
- uncleanShutdowns.Recent = uncleanShutdowns.Recent[numDel:]
- uncleanShutdowns.Discarded += uint64(numDel)
- }
- // And save it again
- data, _ := rlp.EncodeToBytes(uncleanShutdowns)
- if err := db.Put(uncleanShutdownKey, data); err != nil {
- log.Warn("Failed to write unclean-shutdown marker", "err", err)
- return nil, 0, err
- }
- return previous, discarded, nil
-}
-
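Sketch of the intended lifecycle, assuming an in-memory stand-in for the node's real key-value store: push on startup (the return values describe what previous runs left behind), refresh while running, pop on clean shutdown so the marker never ages into a "crash":

```go
package main

import (
	"fmt"

	"github.com/ava-labs/coreth/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase() // stand-in for the node's real store

	// On startup: previous is non-empty only if earlier runs died uncleanly.
	previous, discarded, err := rawdb.PushUncleanShutdownMarker(db)
	if err == nil && len(previous) > 0 {
		fmt.Println("unclean shutdowns:", len(previous), "discarded:", discarded)
	}

	rawdb.UpdateUncleanShutdownMarker(db) // refresh periodically while running
	rawdb.PopUncleanShutdownMarker(db)    // clean shutdown: drop our marker
}
```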
-// PopUncleanShutdownMarker removes the last unclean shutdown marker
-func PopUncleanShutdownMarker(db ethdb.KeyValueStore) {
- var uncleanShutdowns crashList
- // Read old data
- if data, err := db.Get(uncleanShutdownKey); err != nil {
- log.Warn("Error reading unclean shutdown markers", "error", err)
- } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
-		log.Error("Error decoding unclean shutdown markers", "error", err) // Should definitely _not_ happen
- }
- if l := len(uncleanShutdowns.Recent); l > 0 {
- uncleanShutdowns.Recent = uncleanShutdowns.Recent[:l-1]
- }
- data, _ := rlp.EncodeToBytes(uncleanShutdowns)
- if err := db.Put(uncleanShutdownKey, data); err != nil {
- log.Warn("Failed to clear unclean-shutdown marker", "err", err)
- }
-}
-
-// UpdateUncleanShutdownMarker updates the last marker's timestamp to now.
-func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) {
- var uncleanShutdowns crashList
- // Read old data
- if data, err := db.Get(uncleanShutdownKey); err != nil {
- log.Warn("Error reading unclean shutdown markers", "error", err)
- } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
- log.Warn("Error decoding unclean shutdown markers", "error", err)
- }
- // This shouldn't happen because we push a marker on Backend instantiation
- count := len(uncleanShutdowns.Recent)
- if count == 0 {
- log.Warn("No unclean shutdown marker to update")
- return
- }
- uncleanShutdowns.Recent[count-1] = uint64(time.Now().Unix())
- data, _ := rlp.EncodeToBytes(uncleanShutdowns)
- if err := db.Put(uncleanShutdownKey, data); err != nil {
- log.Warn("Failed to write unclean-shutdown marker", "err", err)
- }
-}
-
-// ReadTransitionStatus retrieves the eth2 transition status from the database
-func ReadTransitionStatus(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(transitionStatusKey)
- return data
-}
-
-// WriteTransitionStatus stores the eth2 transition status to the database
-func WriteTransitionStatus(db ethdb.KeyValueWriter, data []byte) {
- if err := db.Put(transitionStatusKey, data); err != nil {
- log.Crit("Failed to store the eth2 transition status", "err", err)
- }
-}
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
deleted file mode 100644
index e924b4247b..0000000000
--- a/core/rawdb/accessors_snapshot.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "encoding/binary"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-// ReadSnapshotDisabled retrieves whether snapshot maintenance is disabled.
-func ReadSnapshotDisabled(db ethdb.KeyValueReader) bool {
- disabled, _ := db.Has(snapshotDisabledKey)
- return disabled
-}
-
-// WriteSnapshotDisabled stores the snapshot pause flag.
-func WriteSnapshotDisabled(db ethdb.KeyValueWriter) {
- if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil {
- log.Crit("Failed to store snapshot disabled flag", "err", err)
- }
-}
-
-// DeleteSnapshotDisabled deletes the flag keeping the snapshot maintenance disabled.
-func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) {
- if err := db.Delete(snapshotDisabledKey); err != nil {
- log.Crit("Failed to remove snapshot disabled flag", "err", err)
- }
-}
-
-// ReadSnapshotRoot retrieves the root of the block whose state is contained in
-// the persisted snapshot.
-func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
- data, _ := db.Get(SnapshotRootKey)
- if len(data) != common.HashLength {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// WriteSnapshotRoot stores the root of the block whose state is contained in
-// the persisted snapshot.
-func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
- if err := db.Put(SnapshotRootKey, root[:]); err != nil {
- log.Crit("Failed to store snapshot root", "err", err)
- }
-}
-
-// DeleteSnapshotRoot deletes the hash of the block whose state is contained in
-// the persisted snapshot. Since snapshots are not immutable, this method is
-// used during updates so that a crash or failure marks the entire snapshot
-// invalid.
-func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
- if err := db.Delete(SnapshotRootKey); err != nil {
- log.Crit("Failed to remove snapshot root", "err", err)
- }
-}
-
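The comment above describes an invalidate-then-revalidate idiom; here is a hedged sketch of how a caller would typically wrap a snapshot mutation (the function and its arguments are illustrative, not coreth API):

```go
package example

import (
	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/ethdb"
)

// updateSnapshot mutates snapshot entries non-atomically, so the root is
// deleted first; a crash mid-update then leaves the snapshot marked invalid
// rather than silently inconsistent.
func updateSnapshot(db ethdb.KeyValueWriter, newRoot common.Hash, apply func()) {
	rawdb.DeleteSnapshotRoot(db) // invalidate before touching any entries
	apply()                      // write/delete account and storage entries
	rawdb.WriteSnapshotRoot(db, newRoot) // re-validate once consistent
}
```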
-// ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
-func ReadAccountSnapshot(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, _ := db.Get(accountSnapshotKey(hash))
- return data
-}
-
-// WriteAccountSnapshot stores the snapshot entry of an account trie leaf.
-func WriteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash, entry []byte) {
- if err := db.Put(accountSnapshotKey(hash), entry); err != nil {
- log.Crit("Failed to store account snapshot", "err", err)
- }
-}
-
-// DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
-func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(accountSnapshotKey(hash)); err != nil {
- log.Crit("Failed to delete account snapshot", "err", err)
- }
-}
-
-// ReadStorageSnapshot retrieves the snapshot entry of a storage trie leaf.
-func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash common.Hash) []byte {
- data, _ := db.Get(storageSnapshotKey(accountHash, storageHash))
- return data
-}
-
-// WriteStorageSnapshot stores the snapshot entry of a storage trie leaf.
-func WriteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) {
- if err := db.Put(storageSnapshotKey(accountHash, storageHash), entry); err != nil {
- log.Crit("Failed to store storage snapshot", "err", err)
- }
-}
-
-// DeleteStorageSnapshot removes the snapshot entry of a storage trie leaf.
-func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash) {
- if err := db.Delete(storageSnapshotKey(accountHash, storageHash)); err != nil {
- log.Crit("Failed to delete storage snapshot", "err", err)
- }
-}
-
-// IterateStorageSnapshots returns an iterator for walking the entire storage
-// space of a specific account.
-func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator {
- return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength)
-}
-
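Usage sketch for the iterator (the account hash is assumed to be at hand); releasing the iterator is the caller's responsibility:

```go
package example

import (
	"fmt"

	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/ethdb"
)

func dumpStorageSnapshot(db ethdb.Database, accountHash common.Hash) {
	it := rawdb.IterateStorageSnapshots(db, accountHash)
	defer it.Release() // always release the underlying iterator

	for it.Next() {
		// Key layout: prefix ++ accountHash ++ storageHash; the value is
		// the snapshot entry of a single storage slot.
		key := it.Key()
		storageHash := common.BytesToHash(key[len(key)-common.HashLength:])
		fmt.Printf("%x => %x\n", storageHash, it.Value())
	}
}
```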
-// ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
-// the last shutdown. The blob is expected to be at most a few tens of megabytes.
-func ReadSnapshotJournal(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(snapshotJournalKey)
- return data
-}
-
-// WriteSnapshotJournal stores the serialized in-memory diff layers to save at
-// shutdown. The blob is expected to be at most a few tens of megabytes.
-func WriteSnapshotJournal(db ethdb.KeyValueWriter, journal []byte) {
- if err := db.Put(snapshotJournalKey, journal); err != nil {
- log.Crit("Failed to store snapshot journal", "err", err)
- }
-}
-
-// DeleteSnapshotJournal deletes the serialized in-memory diff layers saved at
-// the last shutdown.
-func DeleteSnapshotJournal(db ethdb.KeyValueWriter) {
- if err := db.Delete(snapshotJournalKey); err != nil {
- log.Crit("Failed to remove snapshot journal", "err", err)
- }
-}
-
-// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
-// the last shutdown.
-func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(snapshotGeneratorKey)
- return data
-}
-
-// WriteSnapshotGenerator stores the serialized snapshot generator to save at
-// shutdown.
-func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) {
- if err := db.Put(snapshotGeneratorKey, generator); err != nil {
- log.Crit("Failed to store snapshot generator", "err", err)
- }
-}
-
-// DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
-// the last shutdown.
-func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) {
- if err := db.Delete(snapshotGeneratorKey); err != nil {
- log.Crit("Failed to remove snapshot generator", "err", err)
- }
-}
-
-// ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
-// snapshot layer.
-func ReadSnapshotRecoveryNumber(db ethdb.KeyValueReader) *uint64 {
- data, _ := db.Get(snapshotRecoveryKey)
- if len(data) == 0 {
- return nil
- }
- if len(data) != 8 {
- return nil
- }
- number := binary.BigEndian.Uint64(data)
- return &number
-}
-
-// WriteSnapshotRecoveryNumber stores the block number of the last persisted
-// snapshot layer.
-func WriteSnapshotRecoveryNumber(db ethdb.KeyValueWriter, number uint64) {
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], number)
- if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
- log.Crit("Failed to store snapshot recovery number", "err", err)
- }
-}
-
-// DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
-// snapshot layer.
-func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) {
- if err := db.Delete(snapshotRecoveryKey); err != nil {
- log.Crit("Failed to remove snapshot recovery number", "err", err)
- }
-}
-
-// ReadSnapshotSyncStatus retrieves the serialized sync status saved at shutdown.
-func ReadSnapshotSyncStatus(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(snapshotSyncStatusKey)
- return data
-}
-
-// WriteSnapshotSyncStatus stores the serialized sync status to save at shutdown.
-func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) {
- if err := db.Put(snapshotSyncStatusKey, status); err != nil {
- log.Crit("Failed to store snapshot sync status", "err", err)
- }
-}
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
deleted file mode 100644
index 41e7d30f89..0000000000
--- a/core/rawdb/accessors_state.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "encoding/binary"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-// ReadPreimage retrieves a single preimage of the provided hash.
-func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, _ := db.Get(preimageKey(hash))
- return data
-}
-
-// WritePreimages writes the provided set of preimages to the database.
-func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
- for hash, preimage := range preimages {
- if err := db.Put(preimageKey(hash), preimage); err != nil {
- log.Crit("Failed to store trie preimage", "err", err)
- }
- }
- preimageCounter.Inc(int64(len(preimages)))
- preimageHitCounter.Inc(int64(len(preimages)))
-}
-
-// ReadCode retrieves the contract code of the provided code hash.
-func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
-	// Try with the prefixed code scheme first; if not found, try the legacy
-	// scheme.
- data := ReadCodeWithPrefix(db, hash)
- if len(data) != 0 {
- return data
- }
- data, _ = db.Get(hash.Bytes())
- return data
-}
-
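Sketch of the fallback, assuming the in-memory database: code written under the legacy layout (raw hash as key) is invisible to ReadCodeWithPrefix but still found by ReadCode:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/libevm/crypto"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	code := []byte{0x60, 0x00} // PUSH1 0x00, just some bytecode
	hash := crypto.Keccak256Hash(code)

	db.Put(hash.Bytes(), code) // legacy, unprefixed layout

	fmt.Println(len(rawdb.ReadCodeWithPrefix(db, hash))) // 0: prefixed key absent
	fmt.Println(len(rawdb.ReadCode(db, hash)))           // 2: found via fallback
}
```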
-// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
-// The main difference between this function and ReadCode is that this function
-// only checks for existence under the latest (prefixed) scheme.
-func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, _ := db.Get(codeKey(hash))
- return data
-}
-
-// HasCode checks if the contract code corresponding to the
-// provided code hash is present in the db.
-func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
-	// Try with the prefixed code scheme first; if not found, try the legacy
-	// scheme.
- if ok := HasCodeWithPrefix(db, hash); ok {
- return true
- }
- ok, _ := db.Has(hash.Bytes())
- return ok
-}
-
-// HasCodeWithPrefix checks if the contract code corresponding to the
-// provided code hash is present in the db. This function will only check
-// presence using the prefix-scheme.
-func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
- ok, _ := db.Has(codeKey(hash))
- return ok
-}
-
-// WriteCode writes the provided contract code to the database.
-func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
- if err := db.Put(codeKey(hash), code); err != nil {
- log.Crit("Failed to store contract code", "err", err)
- }
-}
-
-// DeleteCode deletes the specified contract code from the database.
-func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(codeKey(hash)); err != nil {
- log.Crit("Failed to delete contract code", "err", err)
- }
-}
-
-// ReadStateID retrieves the state id with the provided state root.
-func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 {
- data, err := db.Get(stateIDKey(root))
- if err != nil || len(data) == 0 {
- return nil
- }
- number := binary.BigEndian.Uint64(data)
- return &number
-}
-
-// WriteStateID writes the provided state lookup to the database.
-func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) {
- var buff [8]byte
- binary.BigEndian.PutUint64(buff[:], id)
- if err := db.Put(stateIDKey(root), buff[:]); err != nil {
- log.Crit("Failed to store state ID", "err", err)
- }
-}
-
-// DeleteStateID deletes the specified state lookup from the database.
-func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) {
- if err := db.Delete(stateIDKey(root)); err != nil {
- log.Crit("Failed to delete state ID", "err", err)
- }
-}
-
-// ReadPersistentStateID retrieves the id of the persistent state from the database.
-func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 {
- data, _ := db.Get(persistentStateIDKey)
- if len(data) != 8 {
- return 0
- }
- return binary.BigEndian.Uint64(data)
-}
-
-// WritePersistentStateID stores the id of the persistent state into the database.
-func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) {
- if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil {
- log.Crit("Failed to store the persistent state ID", "err", err)
- }
-}
-
-// ReadTrieJournal retrieves the serialized in-memory trie nodes of layers saved at
-// the last shutdown.
-func ReadTrieJournal(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(trieJournalKey)
- return data
-}
-
-// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at
-// shutdown.
-func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) {
- if err := db.Put(trieJournalKey, journal); err != nil {
- log.Crit("Failed to store tries journal", "err", err)
- }
-}
-
-// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at
-// the last shutdown.
-func DeleteTrieJournal(db ethdb.KeyValueWriter) {
- if err := db.Delete(trieJournalKey); err != nil {
- log.Crit("Failed to remove tries journal", "err", err)
- }
-}
-
-// ReadStateHistoryMeta retrieves the metadata corresponding to the specified
-// state history. The position of the state history in the freezer is its id
-// minus one, since state history ids start from one (zero is reserved for
-// the initial state).
-func ReadStateHistoryMeta(db ethdb.AncientReaderOp, id uint64) []byte {
- blob, err := db.Ancient(stateHistoryMeta, id-1)
- if err != nil {
- return nil
- }
- return blob
-}
-
-// ReadStateHistoryMetaList retrieves a batch of meta objects with the specified
-// start position and count. The position of a state history in the freezer
-// is its id minus one, since state history ids start from one (zero is
-// reserved for the initial state).
-func ReadStateHistoryMetaList(db ethdb.AncientReaderOp, start uint64, count uint64) ([][]byte, error) {
- return db.AncientRange(stateHistoryMeta, start-1, count, 0)
-}
-
-// ReadStateAccountIndex retrieves the account index of the specified state
-// history. The position of the state history in the freezer is its id minus
-// one, since state history ids start from one (zero is reserved for the
-// initial state).
-func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte {
- blob, err := db.Ancient(stateHistoryAccountIndex, id-1)
- if err != nil {
- return nil
- }
- return blob
-}
-
-// ReadStateStorageIndex retrieves the storage index of the specified state
-// history. The position of the state history in the freezer is its id minus
-// one, since state history ids start from one (zero is reserved for the
-// initial state).
-func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64) []byte {
- blob, err := db.Ancient(stateHistoryStorageIndex, id-1)
- if err != nil {
- return nil
- }
- return blob
-}
-
-// ReadStateAccountHistory retrieves the account history data of the specified
-// state history. The position of the state history in the freezer is its id
-// minus one, since state history ids start from one (zero is reserved for the
-// initial state).
-func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte {
- blob, err := db.Ancient(stateHistoryAccountData, id-1)
- if err != nil {
- return nil
- }
- return blob
-}
-
-// ReadStateStorageHistory retrieves the storage history data of the specified
-// state history. The position of the state history in the freezer is its id
-// minus one, since state history ids start from one (zero is reserved for the
-// initial state).
-func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64) []byte {
- blob, err := db.Ancient(stateHistoryStorageData, id-1)
- if err != nil {
- return nil
- }
- return blob
-}
-
-// ReadStateHistory retrieves the state history with the provided id from the
-// database. The position of the state history in the freezer is its id minus
-// one, since state history ids start from one (zero is reserved for the
-// initial state).
-func ReadStateHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []byte, []byte, []byte, error) {
- meta, err := db.Ancient(stateHistoryMeta, id-1)
- if err != nil {
- return nil, nil, nil, nil, nil, err
- }
- accountIndex, err := db.Ancient(stateHistoryAccountIndex, id-1)
- if err != nil {
- return nil, nil, nil, nil, nil, err
- }
- storageIndex, err := db.Ancient(stateHistoryStorageIndex, id-1)
- if err != nil {
- return nil, nil, nil, nil, nil, err
- }
- accountData, err := db.Ancient(stateHistoryAccountData, id-1)
- if err != nil {
- return nil, nil, nil, nil, nil, err
- }
- storageData, err := db.Ancient(stateHistoryStorageData, id-1)
- if err != nil {
- return nil, nil, nil, nil, nil, err
- }
- return meta, accountIndex, storageIndex, accountData, storageData, nil
-}
-
-// WriteStateHistory writes the provided state history to the database. The
-// position of the state history in the freezer is its id minus one, since
-// state history ids start from one (zero is reserved for the initial state).
-func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIndex []byte, storageIndex []byte, accounts []byte, storages []byte) {
- db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- op.AppendRaw(stateHistoryMeta, id-1, meta)
- op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex)
- op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex)
- op.AppendRaw(stateHistoryAccountData, id-1, accounts)
- op.AppendRaw(stateHistoryStorageData, id-1, storages)
- return nil
- })
-}
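All the state-history accessors above share this off-by-one convention, so external ids stay stable across write and read. A minimal sketch, assuming `frdb` is an ancient store that actually carries the state-history tables (the default chain freezer may not):

```go
package example

import (
	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/libevm/ethdb"
)

// historyRoundTrip writes the first state history (id 1, stored internally at
// freezer index 0) and reads its metadata back using the same external id.
func historyRoundTrip(frdb ethdb.Database, meta, accIdx, stoIdx, accs, stos []byte) []byte {
	rawdb.WriteStateHistory(frdb, 1, meta, accIdx, stoIdx, accs, stos)
	return rawdb.ReadStateHistoryMeta(frdb, 1) // the -1 translation is internal
}
```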
diff --git a/core/rawdb/accessors_sync.go b/core/rawdb/accessors_sync.go
deleted file mode 100644
index 8cf9c53fdb..0000000000
--- a/core/rawdb/accessors_sync.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
-func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(skeletonSyncStatusKey)
- return data
-}
-
-// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
-func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) {
- if err := db.Put(skeletonSyncStatusKey, status); err != nil {
- log.Crit("Failed to store skeleton sync status", "err", err)
- }
-}
-
-// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
-// shutdown.
-func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) {
- if err := db.Delete(skeletonSyncStatusKey); err != nil {
- log.Crit("Failed to remove skeleton sync status", "err", err)
- }
-}
-
-// ReadSkeletonHeader retrieves a block header from the skeleton sync store.
-func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header {
- data, _ := db.Get(skeletonHeaderKey(number))
- if len(data) == 0 {
- return nil
- }
- header := new(types.Header)
- if err := rlp.DecodeBytes(data, header); err != nil {
- log.Error("Invalid skeleton header RLP", "number", number, "err", err)
- return nil
- }
- return header
-}
-
-// WriteSkeletonHeader stores a block header into the skeleton sync store.
-func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) {
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- log.Crit("Failed to RLP encode header", "err", err)
- }
- key := skeletonHeaderKey(header.Number.Uint64())
- if err := db.Put(key, data); err != nil {
- log.Crit("Failed to store skeleton header", "err", err)
- }
-}
-
-// DeleteSkeletonHeader removes the skeleton header associated with a block number.
-func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) {
- if err := db.Delete(skeletonHeaderKey(number)); err != nil {
- log.Crit("Failed to delete skeleton header", "err", err)
- }
-}
-
-const (
-	StateSyncUnknown  = uint8(0) // flags that the state snap sync status is unknown
-	StateSyncRunning  = uint8(1) // flags that the state snap sync is not yet completed
-	StateSyncFinished = uint8(2) // flags that the state snap sync is completed
-)
-
-// ReadSnapSyncStatusFlag retrieves the state snap sync status flag.
-func ReadSnapSyncStatusFlag(db ethdb.KeyValueReader) uint8 {
- blob, err := db.Get(snapSyncStatusFlagKey)
- if err != nil || len(blob) != 1 {
- return StateSyncUnknown
- }
- return blob[0]
-}
-
-// WriteSnapSyncStatusFlag stores the state snap sync status flag into database.
-func WriteSnapSyncStatusFlag(db ethdb.KeyValueWriter, flag uint8) {
- if err := db.Put(snapSyncStatusFlagKey, []byte{flag}); err != nil {
- log.Crit("Failed to store sync status flag", "err", err)
- }
-}
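Round-trip sketch using only the flag helpers above; a missing or malformed value degrades to StateSyncUnknown rather than an error:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/coreth/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Nothing stored yet: the read degrades to the unknown flag.
	fmt.Println(rawdb.ReadSnapSyncStatusFlag(db) == rawdb.StateSyncUnknown) // true

	rawdb.WriteSnapSyncStatusFlag(db, rawdb.StateSyncRunning)
	fmt.Println(rawdb.ReadSnapSyncStatusFlag(db) == rawdb.StateSyncRunning) // true
}
```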
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
deleted file mode 100644
index 742a462c7c..0000000000
--- a/core/rawdb/accessors_trie.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// (c) 2023, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "fmt"
- "sync"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "golang.org/x/crypto/sha3"
-)
-
-// HashScheme is the legacy hash-based state scheme, in which trie nodes are
-// stored on disk with the node hash as the database key. The advantage of this
-// scheme is that different versions of trie nodes can coexist on disk, which
-// is very beneficial for constructing archive nodes. The drawback is that it
-// stores different trie nodes on the same path at scattered locations on disk
-// with no data locality, and it makes state pruning hard to design.
-//
-// This scheme is still kept for backward compatibility, and it will be used
-// for archive nodes and some other tries (e.g. the light trie).
-const HashScheme = "hash"
-
-// PathScheme is the new path-based state scheme, in which trie nodes are stored
-// on disk with the node path as the database key. This scheme only stores one
-// version of the state data on disk, which makes state pruning a native
-// operation. It also places adjacent trie nodes in the same area of the disk,
-// giving good data locality. However, it needs to rely on extra state diffs to
-// survive deep reorgs.
-const PathScheme = "path"
-
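The practical difference between the two schemes shows up in how nodes are addressed. A hedged sketch using accessors defined below in this file, one keyed by hash and one keyed by path:

```go
package example

import (
	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/ethdb"
)

func lookupBothSchemes(db ethdb.KeyValueReader, nodeHash common.Hash, path []byte) {
	// Hash scheme: the key is the node hash itself, so many versions of a
	// node at the same logical path can coexist under different hashes.
	legacy := rawdb.ReadLegacyTrieNode(db, nodeHash)

	// Path scheme: the key is the trie path, so writing the same path
	// overwrites the previous version in place.
	node, hash := rawdb.ReadAccountTrieNode(db, path)

	_, _, _ = legacy, node, hash
}
```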
-// hasher is used to compute the Keccak-256 hash of the provided data.
-type hasher struct{ sha crypto.KeccakState }
-
-var hasherPool = sync.Pool{
- New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
-}
-
-func newHasher() *hasher {
- return hasherPool.Get().(*hasher)
-}
-
-func (h *hasher) hash(data []byte) common.Hash {
- return crypto.HashData(h.sha, data)
-}
-
-func (h *hasher) release() {
- hasherPool.Put(h)
-}
-
-// ReadAccountTrieNode retrieves the account trie node and the associated node
-// hash with the specified node path.
-func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) {
- data, err := db.Get(accountTrieNodeKey(path))
- if err != nil {
- return nil, common.Hash{}
- }
- h := newHasher()
- defer h.release()
- return data, h.hash(data)
-}
-
-// HasAccountTrieNode checks the account trie node presence with the specified
-// node path and the associated node hash.
-func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool {
- data, err := db.Get(accountTrieNodeKey(path))
- if err != nil {
- return false
- }
- h := newHasher()
- defer h.release()
- return h.hash(data) == hash
-}
-
-// ExistsAccountTrieNode checks the presence of the account trie node with the
-// specified node path, regardless of the node hash.
-func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool {
- has, err := db.Has(accountTrieNodeKey(path))
- if err != nil {
- return false
- }
- return has
-}
-
-// WriteAccountTrieNode writes the provided account trie node into database.
-func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
- if err := db.Put(accountTrieNodeKey(path), node); err != nil {
- log.Crit("Failed to store account trie node", "err", err)
- }
-}
-
-// DeleteAccountTrieNode deletes the specified account trie node from the database.
-func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) {
- if err := db.Delete(accountTrieNodeKey(path)); err != nil {
- log.Crit("Failed to delete account trie node", "err", err)
- }
-}
-
-// ReadStorageTrieNode retrieves the storage trie node and the associated node
-// hash with the specified node path.
-func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) {
- data, err := db.Get(storageTrieNodeKey(accountHash, path))
- if err != nil {
- return nil, common.Hash{}
- }
- h := newHasher()
- defer h.release()
- return data, h.hash(data)
-}
-
-// HasStorageTrieNode checks the storage trie node presence with the provided
-// node path and the associated node hash.
-func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool {
- data, err := db.Get(storageTrieNodeKey(accountHash, path))
- if err != nil {
- return false
- }
- h := newHasher()
- defer h.release()
- return h.hash(data) == hash
-}
-
-// ExistsStorageTrieNode checks the presence of the storage trie node with the
-// specified account hash and node path, regardless of the node hash.
-func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool {
- has, err := db.Has(storageTrieNodeKey(accountHash, path))
- if err != nil {
- return false
- }
- return has
-}
-
-// WriteStorageTrieNode writes the provided storage trie node into database.
-func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) {
- if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil {
- log.Crit("Failed to store storage trie node", "err", err)
- }
-}
-
-// DeleteStorageTrieNode deletes the specified storage trie node from the database.
-func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) {
- if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil {
- log.Crit("Failed to delete storage trie node", "err", err)
- }
-}
-
-// ReadLegacyTrieNode retrieves the legacy trie node with the given
-// associated node hash.
-func ReadLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, err := db.Get(hash.Bytes())
- if err != nil {
- return nil
- }
- return data
-}
-
-// HasLegacyTrieNode checks if the trie node with the provided hash is present in db.
-func HasLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
- ok, _ := db.Has(hash.Bytes())
- return ok
-}
-
-// WriteLegacyTrieNode writes the provided legacy trie node to database.
-func WriteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
- if err := db.Put(hash.Bytes(), node); err != nil {
- log.Crit("Failed to store legacy trie node", "err", err)
- }
-}
-
-// DeleteLegacyTrieNode deletes the specified legacy trie node from database.
-func DeleteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(hash.Bytes()); err != nil {
- log.Crit("Failed to delete legacy trie node", "err", err)
- }
-}
-
-// HasTrieNode checks the trie node presence with the provided node info and
-// the associated node hash.
-func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) bool {
- switch scheme {
- case HashScheme:
- return HasLegacyTrieNode(db, hash)
- case PathScheme:
- if owner == (common.Hash{}) {
- return HasAccountTrieNode(db, path, hash)
- }
- return HasStorageTrieNode(db, owner, path, hash)
- default:
- panic(fmt.Sprintf("Unknown scheme %v", scheme))
- }
-}
-
-// ReadTrieNode retrieves the trie node from database with the provided node info
-// and associated node hash.
-// hashScheme-based lookup requires the following:
-// - hash
-//
-// pathScheme-based lookup requires the following:
-// - owner
-// - path
-func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte {
- switch scheme {
- case HashScheme:
- return ReadLegacyTrieNode(db, hash)
- case PathScheme:
- var (
- blob []byte
- nHash common.Hash
- )
- if owner == (common.Hash{}) {
- blob, nHash = ReadAccountTrieNode(db, path)
- } else {
- blob, nHash = ReadStorageTrieNode(db, owner, path)
- }
- if nHash != hash {
- return nil
- }
- return blob
- default:
- panic(fmt.Sprintf("Unknown scheme %v", scheme))
- }
-}
-
-// WriteTrieNode writes the trie node into database with the provided node info
-// and associated node hash.
-// hashScheme-based lookup requires the following:
-// - hash
-//
-// pathScheme-based lookup requires the following:
-// - owner
-// - path
-func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) {
- switch scheme {
- case HashScheme:
- WriteLegacyTrieNode(db, hash, node)
- case PathScheme:
- if owner == (common.Hash{}) {
- WriteAccountTrieNode(db, path, node)
- } else {
- WriteStorageTrieNode(db, owner, path, node)
- }
- default:
- panic(fmt.Sprintf("Unknown scheme %v", scheme))
- }
-}
-
-// DeleteTrieNode deletes the trie node from database with the provided node info
-// and associated node hash.
-// hashScheme-based lookup requires the following:
-// - hash
-//
-// pathScheme-based lookup requires the following:
-// - owner
-// - path
-func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) {
- switch scheme {
- case HashScheme:
- DeleteLegacyTrieNode(db, hash)
- case PathScheme:
- if owner == (common.Hash{}) {
- DeleteAccountTrieNode(db, path)
- } else {
- DeleteStorageTrieNode(db, owner, path)
- }
- default:
- panic(fmt.Sprintf("Unknown scheme %v", scheme))
- }
-}
-
-// ReadStateScheme reads the state scheme of the persistent state, or returns
-// the empty string if no state is present in the database.
-func ReadStateScheme(db ethdb.Reader) string {
- // Check if state in path-based scheme is present
- blob, _ := ReadAccountTrieNode(db, nil)
- if len(blob) != 0 {
- return PathScheme
- }
- // The root node might be deleted during the initial snap sync, check
- // the persistent state id then.
- if id := ReadPersistentStateID(db); id != 0 {
- return PathScheme
- }
- // In a hash-based scheme, the genesis state is consistently stored
- // on the disk. To assess the scheme of the persistent state, it
- // suffices to inspect the scheme of the genesis state.
- header := ReadHeader(db, ReadCanonicalHash(db, 0), 0)
- if header == nil {
- return "" // empty datadir
- }
- blob = ReadLegacyTrieNode(db, header.Root)
- if len(blob) == 0 {
- return "" // no state in disk
- }
- return HashScheme
-}
-
-// ParseStateScheme checks if the specified state scheme is compatible with
-// the stored state.
-//
-// - If the provided scheme is none, use the scheme consistent with the
-// persistent state, or fall back to the hash-based scheme if the state is
-// empty.
-//
-// - If the provided scheme is hash, use the hash-based scheme or error out
-// if not compatible with the persistent state scheme.
-//
-// - If the provided scheme is path, use the path-based scheme or error out
-// if not compatible with the persistent state scheme.
-func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
- // If state scheme is not specified, use the scheme consistent
- // with persistent state, or fallback to hash mode if database
- // is empty.
- stored := ReadStateScheme(disk)
- if provided == "" {
- if stored == "" {
- // Use the default scheme for an empty database; flip this when
- // path mode becomes the default.
- log.Info("State scheme set to default", "scheme", "hash")
- return HashScheme, nil
- }
- log.Info("State scheme set to already existing", "scheme", stored)
- return stored, nil // reuse the scheme of the persistent state
- }
- // If state scheme is specified, ensure it's compatible with
- // persistent state.
- if stored == "" || provided == stored {
- log.Info("State scheme set by user", "scheme", provided)
- return provided, nil
- }
- return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, provided)
-}
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
deleted file mode 100644
index e88867af0e..0000000000
--- a/core/rawdb/ancient_scheme.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import "path/filepath"
-
-// The list of table names of chain freezer.
-const (
- // ChainFreezerHeaderTable indicates the name of the freezer header table.
- ChainFreezerHeaderTable = "headers"
-
- // ChainFreezerHashTable indicates the name of the freezer canonical hash table.
- ChainFreezerHashTable = "hashes"
-
- // ChainFreezerBodiesTable indicates the name of the freezer block body table.
- ChainFreezerBodiesTable = "bodies"
-
- // ChainFreezerReceiptTable indicates the name of the freezer receipts table.
- ChainFreezerReceiptTable = "receipts"
-
- // ChainFreezerDifficultyTable indicates the name of the freezer total difficulty table.
- ChainFreezerDifficultyTable = "diffs"
-)
-
-// chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables.
-// Hashes and difficulties don't compress well.
-var chainFreezerNoSnappy = map[string]bool{
- ChainFreezerHeaderTable: false,
- ChainFreezerHashTable: true,
- ChainFreezerBodiesTable: false,
- ChainFreezerReceiptTable: false,
- ChainFreezerDifficultyTable: true,
-}
-
-const (
- // stateHistoryTableSize defines the maximum size of freezer data files.
- stateHistoryTableSize = 2 * 1000 * 1000 * 1000
-
- // The names of the tables that make up the freezer state history.
- stateHistoryMeta = "history.meta"
- stateHistoryAccountIndex = "account.index"
- stateHistoryStorageIndex = "storage.index"
- stateHistoryAccountData = "account.data"
- stateHistoryStorageData = "storage.data"
-)
-
-var stateFreezerNoSnappy = map[string]bool{
- stateHistoryMeta: true,
- stateHistoryAccountIndex: false,
- stateHistoryStorageIndex: false,
- stateHistoryAccountData: false,
- stateHistoryStorageData: false,
-}
-
-// The list of identifiers of ancient stores.
-var (
- ChainFreezerName = "chain" // the folder name of chain segment ancient store.
- StateFreezerName = "state" // the folder name of reverse diff ancient store.
-)
-
-// freezers is the collection of all builtin freezers.
-var freezers = []string{ChainFreezerName, StateFreezerName}
-
-// NewStateFreezer initializes the freezer for state history.
-func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) {
- return NewResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy)
-}
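The no-snappy maps above are keyed by table name; a table marked true is stored raw because its contents (fixed-size hashes, difficulties) compress poorly. A self-contained sketch of how the map is consulted, with the map values reproduced for illustration:

```go
package main

import "fmt"

// Reproduction of the chainFreezerNoSnappy map from the deleted file,
// keyed by the freezer table names.
var chainFreezerNoSnappy = map[string]bool{
	"headers": false, "hashes": true, "bodies": false,
	"receipts": false, "diffs": true,
}

func main() {
	for table, noSnappy := range chainFreezerNoSnappy {
		if noSnappy {
			fmt.Printf("%-8s stored raw (compresses poorly)\n", table)
		} else {
			fmt.Printf("%-8s snappy-compressed\n", table)
		}
	}
}
```
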
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
deleted file mode 100644
index 0b15234690..0000000000
--- a/core/rawdb/ancient_utils.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "fmt"
- "path/filepath"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
-)
-
-type tableSize struct {
- name string
- size common.StorageSize
-}
-
-// freezerInfo contains the basic information of the freezer.
-type freezerInfo struct {
- name string // The identifier of freezer
- head uint64 // The number of last stored item in the freezer
- tail uint64 // The number of first stored item in the freezer
- sizes []tableSize // The storage size per table
-}
-
-// count returns the number of stored items in the freezer.
-func (info *freezerInfo) count() uint64 {
- return info.head - info.tail + 1
-}
-
-// size returns the storage size of the entire freezer.
-func (info *freezerInfo) size() common.StorageSize {
- var total common.StorageSize
- for _, table := range info.sizes {
- total += table.size
- }
- return total
-}
-
-func inspect(name string, order map[string]bool, reader ethdb.AncientReader) (freezerInfo, error) {
- info := freezerInfo{name: name}
- for t := range order {
- size, err := reader.AncientSize(t)
- if err != nil {
- return freezerInfo{}, err
- }
- info.sizes = append(info.sizes, tableSize{name: t, size: common.StorageSize(size)})
- }
- // Retrieve the number of last stored item
- ancients, err := reader.Ancients()
- if err != nil {
- return freezerInfo{}, err
- }
- info.head = ancients - 1
-
- // Retrieve the number of first stored item
- tail, err := reader.Tail()
- if err != nil {
- return freezerInfo{}, err
- }
- info.tail = tail
- return info, nil
-}
-
-// inspectFreezers inspects all freezers registered in the system.
-func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
- var infos []freezerInfo
- for _, freezer := range freezers {
- switch freezer {
- case ChainFreezerName:
- info, err := inspect(ChainFreezerName, chainFreezerNoSnappy, db)
- if err != nil {
- return nil, err
- }
- infos = append(infos, info)
-
- case StateFreezerName:
- if ReadStateScheme(db) != PathScheme {
- continue
- }
- datadir, err := db.AncientDatadir()
- if err != nil {
- return nil, err
- }
- f, err := NewStateFreezer(datadir, true)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- info, err := inspect(StateFreezerName, stateFreezerNoSnappy, f)
- if err != nil {
- return nil, err
- }
- infos = append(infos, info)
-
- default:
- return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
- }
- }
- return infos, nil
-}
-
-// InspectFreezerTable dumps out the index of a specific freezer table. The passed
-// ancient indicates the path of root ancient directory where the chain freezer can
-// be opened. Start and end specify the range for dumping out indexes.
-// Note this function can only be used for debugging purposes.
-func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
- var (
- path string
- tables map[string]bool
- )
- switch freezerName {
- case ChainFreezerName:
- path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
- case StateFreezerName:
- path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
- default:
- return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
- }
- noSnappy, exist := tables[tableName]
- if !exist {
- var names []string
- for name := range tables {
- names = append(names, name)
- }
- return fmt.Errorf("unknown table, supported ones: %v", names)
- }
- table, err := newFreezerTable(path, tableName, noSnappy, true)
- if err != nil {
- return err
- }
- table.dumpIndexStdout(start, end)
- return nil
-}
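Note the count/size arithmetic above is inclusive of both ends: head and tail are item numbers, not counts. A small sketch with the unexported types reproduced just to show the arithmetic:

```go
package main

import "fmt"

// Minimal reproductions of the unexported types from ancient_utils.go.
type tableSize struct {
	name string
	size uint64 // common.StorageSize in the real code
}

type freezerInfo struct {
	name       string
	head, tail uint64
	sizes      []tableSize
}

func (info *freezerInfo) count() uint64 { return info.head - info.tail + 1 }

func (info *freezerInfo) size() (total uint64) {
	for _, t := range info.sizes {
		total += t.size
	}
	return total
}

func main() {
	// A freezer holding items 5..9: both ends inclusive, so five items.
	info := freezerInfo{name: "chain", head: 9, tail: 5,
		sizes: []tableSize{{"headers", 1024}, {"bodies", 4096}}}
	fmt.Println(info.count()) // 5
	fmt.Println(info.size())  // 5120
}
```
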
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
deleted file mode 100644
index 850ecc27b6..0000000000
--- a/core/rawdb/chain_freezer.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/params"
-)
-
-const (
- // freezerRecheckInterval is the frequency to check the key-value database for
- // chain progression that might permit new blocks to be frozen into immutable
- // storage.
- freezerRecheckInterval = time.Minute
-
- // freezerBatchLimit is the maximum number of blocks to freeze in one batch
- // before doing an fsync and deleting it from the key-value store.
- freezerBatchLimit = 30000
-)
-
-// chainFreezer is a wrapper of freezer with additional chain freezing feature.
-// The background thread will keep moving ancient chain segments from key-value
-// database to flat files for saving space on live database.
-type chainFreezer struct {
- threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
-
- *Freezer
- quit chan struct{}
- wg sync.WaitGroup
- trigger chan chan struct{} // Manual blocking freeze trigger, test determinism
-}
-
-// newChainFreezer initializes the freezer for ancient chain data.
-func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFreezer, error) {
- freezer, err := NewChainFreezer(datadir, namespace, readonly)
- if err != nil {
- return nil, err
- }
- cf := chainFreezer{
- Freezer: freezer,
- quit: make(chan struct{}),
- trigger: make(chan chan struct{}),
- }
- cf.threshold.Store(params.FullImmutabilityThreshold)
- return &cf, nil
-}
-
-// Close closes the chain freezer instance and terminates the background thread.
-func (f *chainFreezer) Close() error {
- select {
- case <-f.quit:
- default:
- close(f.quit)
- }
- f.wg.Wait()
- return f.Freezer.Close()
-}
-
-// freeze is a background thread that periodically checks the blockchain for any
-// import progress and moves ancient data from the fast database into the freezer.
-//
-// This functionality is deliberately broken off from block importing to avoid
-// incurring additional data shuffling delays on block propagation.
-func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
- var (
- backoff bool
- triggered chan struct{} // Used in tests
- nfdb = &nofreezedb{KeyValueStore: db}
- )
- timer := time.NewTimer(freezerRecheckInterval)
- defer timer.Stop()
-
- for {
- select {
- case <-f.quit:
- log.Info("Freezer shutting down")
- return
- default:
- }
- if backoff {
- // If we were doing a manual trigger, notify it
- if triggered != nil {
- triggered <- struct{}{}
- triggered = nil
- }
- select {
- case <-timer.C:
- backoff = false
- timer.Reset(freezerRecheckInterval)
- case triggered = <-f.trigger:
- backoff = false
- case <-f.quit:
- return
- }
- }
- // Retrieve the freezing threshold.
- hash := ReadHeadBlockHash(nfdb)
- if hash == (common.Hash{}) {
- log.Debug("Current full block hash unavailable") // new chain, empty database
- backoff = true
- continue
- }
- number := ReadHeaderNumber(nfdb, hash)
- threshold := f.threshold.Load()
- frozen := f.frozen.Load()
- switch {
- case number == nil:
- log.Error("Current full block number unavailable", "hash", hash)
- backoff = true
- continue
-
- case *number < threshold:
- log.Debug("Current full block not old enough to freeze", "number", *number, "hash", hash, "delay", threshold)
- backoff = true
- continue
-
- case *number-threshold <= frozen:
- log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", frozen)
- backoff = true
- continue
- }
- head := ReadHeader(nfdb, hash, *number)
- if head == nil {
- log.Error("Current full block unavailable", "number", *number, "hash", hash)
- backoff = true
- continue
- }
-
- // Seems we have data ready to be frozen, process in usable batches
- var (
- start = time.Now()
- first, _ = f.Ancients()
- limit = *number - threshold
- )
- if limit-first > freezerBatchLimit {
- limit = first + freezerBatchLimit
- }
- ancients, err := f.freezeRange(nfdb, first, limit)
- if err != nil {
- log.Error("Error in block freeze operation", "err", err)
- backoff = true
- continue
- }
-
- // Batch of blocks have been frozen, flush them before wiping from leveldb
- if err := f.Sync(); err != nil {
- log.Crit("Failed to flush frozen tables", "err", err)
- }
-
- // Wipe out all data from the active database
- batch := db.NewBatch()
- for i := 0; i < len(ancients); i++ {
- // Always keep the genesis block in active database
- if first+uint64(i) != 0 {
- DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i))
- DeleteCanonicalHash(batch, first+uint64(i))
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete frozen canonical blocks", "err", err)
- }
- batch.Reset()
-
- // Wipe out side chains also and track dangling side chains
- var dangling []common.Hash
- frozen = f.frozen.Load() // Needs reloading after freezeRange
- for number := first; number < frozen; number++ {
- // Always keep the genesis block in active database
- if number != 0 {
- dangling = ReadAllHashes(db, number)
- for _, hash := range dangling {
- log.Trace("Deleting side chain", "number", number, "hash", hash)
- DeleteBlock(batch, hash, number)
- }
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete frozen side blocks", "err", err)
- }
- batch.Reset()
-
- // Step into the future and delete any dangling side chains
- if frozen > 0 {
- tip := frozen
- for len(dangling) > 0 {
- drop := make(map[common.Hash]struct{})
- for _, hash := range dangling {
- log.Debug("Dangling parent from Freezer", "number", tip-1, "hash", hash)
- drop[hash] = struct{}{}
- }
- children := ReadAllHashes(db, tip)
- for i := 0; i < len(children); i++ {
- // Dig up the child and ensure it's dangling
- child := ReadHeader(nfdb, children[i], tip)
- if child == nil {
- log.Error("Missing dangling header", "number", tip, "hash", children[i])
- continue
- }
- if _, ok := drop[child.ParentHash]; !ok {
- children = append(children[:i], children[i+1:]...)
- i--
- continue
- }
- // Delete all block data associated with the child
- log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
- DeleteBlock(batch, children[i], tip)
- }
- dangling = children
- tip++
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete dangling side blocks", "err", err)
- }
- }
-
- // Log something friendly for the user
- context := []interface{}{
- "blocks", frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", frozen - 1,
- }
- if n := len(ancients); n > 0 {
- context = append(context, []interface{}{"hash", ancients[n-1]}...)
- }
- log.Debug("Deep froze chain segment", context...)
-
- // Avoid database thrashing with tiny writes
- if frozen-first < freezerBatchLimit {
- backoff = true
- }
- }
-}
-
-func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) {
- hashes = make([]common.Hash, 0, limit-number)
-
- _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for ; number <= limit; number++ {
- // Retrieve all the components of the canonical block.
- hash := ReadCanonicalHash(nfdb, number)
- if hash == (common.Hash{}) {
- return fmt.Errorf("canonical hash missing, can't freeze block %d", number)
- }
- header := ReadHeaderRLP(nfdb, hash, number)
- if len(header) == 0 {
- return fmt.Errorf("block header missing, can't freeze block %d", number)
- }
- body := ReadBodyRLP(nfdb, hash, number)
- if len(body) == 0 {
- return fmt.Errorf("block body missing, can't freeze block %d", number)
- }
- receipts := ReadReceiptsRLP(nfdb, hash, number)
- if len(receipts) == 0 {
- return fmt.Errorf("block receipts missing, can't freeze block %d", number)
- }
- td := ReadTdRLP(nfdb, hash, number)
- if len(td) == 0 {
- return fmt.Errorf("total difficulty missing, can't freeze block %d", number)
- }
-
- // Write to the batch.
- if err := op.AppendRaw(ChainFreezerHashTable, number, hash[:]); err != nil {
- return fmt.Errorf("can't write hash to Freezer: %v", err)
- }
- if err := op.AppendRaw(ChainFreezerHeaderTable, number, header); err != nil {
- return fmt.Errorf("can't write header to Freezer: %v", err)
- }
- if err := op.AppendRaw(ChainFreezerBodiesTable, number, body); err != nil {
- return fmt.Errorf("can't write body to Freezer: %v", err)
- }
- if err := op.AppendRaw(ChainFreezerReceiptTable, number, receipts); err != nil {
- return fmt.Errorf("can't write receipts to Freezer: %v", err)
- }
- if err := op.AppendRaw(ChainFreezerDifficultyTable, number, td); err != nil {
- return fmt.Errorf("can't write td to Freezer: %v", err)
- }
-
- hashes = append(hashes, hash)
- }
- return nil
- })
-
- return hashes, err
-}
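The freeze loop above only moves blocks that are at least `threshold` behind the current head, at most freezerBatchLimit per cycle. The window arithmetic as a standalone sketch with illustrative numbers:

```go
package main

import "fmt"

func main() {
	// Illustrative numbers; in production threshold is
	// params.FullImmutabilityThreshold and first comes from f.Ancients().
	head := uint64(100_000)     // current full block number
	threshold := uint64(90_000) // recent blocks kept out of the freezer
	first := uint64(0)          // next item expected by the freezer
	limit := head - threshold   // blocks below this are old enough to freeze

	const freezerBatchLimit = 30_000 // per-cycle cap, as in the file above
	if limit-first > freezerBatchLimit {
		limit = first + freezerBatchLimit
	}
	fmt.Println(first, limit) // 0 10000: blocks [0, 10000) get frozen
}
```
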
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
deleted file mode 100644
index f3247e679e..0000000000
--- a/core/rawdb/chain_iterator.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// (c) 2019-2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "runtime"
- "sync/atomic"
- "time"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/common/prque"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
-// of frozen ancient blocks. The method iterates over all the frozen blocks and
-// injects into the database the block hash->number mappings.
-func InitDatabaseFromFreezer(db ethdb.Database) {
- // If we can't access the freezer or it's empty, abort
- frozen, err := db.Ancients()
- if err != nil || frozen == 0 {
- return
- }
- var (
- batch = db.NewBatch()
- start = time.Now()
- logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
- hash common.Hash
- )
- for i := uint64(0); i < frozen; {
- // We read 100K hashes at a time, i.e. about 3.2MB of data per batch
- count := uint64(100_000)
- if i+count > frozen {
- count = frozen - i
- }
- data, err := db.AncientRange(ChainFreezerHashTable, i, count, 32*count)
- if err != nil {
- log.Crit("Failed to init database from freezer", "err", err)
- }
- for j, h := range data {
- number := i + uint64(j)
- hash = common.BytesToHash(h)
- WriteHeaderNumber(batch, hash, number)
- // If enough data was accumulated in memory or we're at the last block, dump to disk
- if batch.ValueSize() > ethdb.IdealBatchSize {
- if err := batch.Write(); err != nil {
- log.Crit("Failed to write data to db", "err", err)
- }
- batch.Reset()
- }
- }
- i += uint64(len(data))
- // If we've spent too much time already, notify the user of what we're doing
- if time.Since(logged) > 8*time.Second {
- log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to write data to db", "err", err)
- }
- batch.Reset()
-
- WriteHeadHeaderHash(db, hash)
- WriteHeadFastBlockHash(db, hash)
- log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start)))
-}
-
-type blockTxHashes struct {
- number uint64
- hashes []common.Hash
-}
-
-// iterateTransactions iterates over all transactions in the (canon) block
-// number(s) given, and yields the hashes on a channel. If a signal is
-// received on the interrupt channel, the iteration is aborted and the
-// result channel is closed.
-func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool, interrupt chan struct{}) chan *blockTxHashes {
- // One thread sequentially reads data from db
- type numberRlp struct {
- number uint64
- rlp rlp.RawValue
- }
- if to == from {
- return nil
- }
- threads := to - from
- if cpus := runtime.NumCPU(); threads > uint64(cpus) {
- threads = uint64(cpus)
- }
- var (
- rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel
- hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
- )
- // lookup runs in one instance
- lookup := func() {
- n, end := from, to
- if reverse {
- n, end = to-1, from-1
- }
- defer close(rlpCh)
- for n != end {
- data := ReadCanonicalBodyRLP(db, n)
- // Feed the block to the aggregator, or abort on interrupt
- select {
- case rlpCh <- &numberRlp{n, data}:
- case <-interrupt:
- return
- }
- if reverse {
- n--
- } else {
- n++
- }
- }
- }
- // process runs in parallel
- var nThreadsAlive atomic.Int32
- nThreadsAlive.Store(int32(threads))
- process := func() {
- defer func() {
- // Last processor closes the result channel
- if nThreadsAlive.Add(-1) == 0 {
- close(hashesCh)
- }
- }()
- for data := range rlpCh {
- var body types.Body
- if err := rlp.DecodeBytes(data.rlp, &body); err != nil {
- log.Warn("Failed to decode block body", "block", data.number, "error", err)
- return
- }
- var hashes []common.Hash
- for _, tx := range body.Transactions {
- hashes = append(hashes, tx.Hash())
- }
- result := &blockTxHashes{
- hashes: hashes,
- number: data.number,
- }
- // Feed the block to the aggregator, or abort on interrupt
- select {
- case hashesCh <- result:
- case <-interrupt:
- return
- }
- }
- }
- go lookup() // start the sequential db accessor
- for i := 0; i < int(threads); i++ {
- go process()
- }
- return hashesCh
-}
-
-// indexTransactions creates txlookup indices of the specified block range.
-//
-// This function iterates over the canonical chain in reverse order, which has
-// one main advantage: the tx index tail flag can be written periodically even
-// before the whole indexing procedure finishes, so indexing can resume quickly
-// next time.
-//
-// The whole procedure is interrupted if any signal is received on the passed
-// channel.
-func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) {
- // short circuit for invalid range
- if from >= to {
- return
- }
- var (
- hashesCh = iterateTransactions(db, from, to, true, interrupt)
- batch = db.NewBatch()
- start = time.Now()
- logged = start.Add(-7 * time.Second)
-
- // Since we iterate in reverse, we expect the first number to come
- // in to be [to-1]. Therefore, setting lastNum to 'to' means that the
- // queue gap-evaluation will work correctly.
- lastNum = to
- queue = prque.New[int64, *blockTxHashes](nil)
- blocks, txs = 0, 0 // for stats reporting
- )
- for chanDelivery := range hashesCh {
- // Push the delivery into the queue and process contiguous ranges.
- // Since we iterate in reverse, lower numbers have lower priority, and
- // we can use the number directly as the priority marker.
- queue.Push(chanDelivery, int64(chanDelivery.number))
- for !queue.Empty() {
- // If the next available item is gapped, return
- if _, priority := queue.Peek(); priority != int64(lastNum-1) {
- break
- }
- // For testing
- if hook != nil && !hook(lastNum-1) {
- break
- }
- // Next block available, pop it off and index it
- delivery := queue.PopItem()
- lastNum = delivery.number
- WriteTxLookupEntries(batch, delivery.number, delivery.hashes)
- blocks++
- txs += len(delivery.hashes)
- // If enough data was accumulated in memory or we're at the last block, dump to disk
- if batch.ValueSize() > ethdb.IdealBatchSize {
- WriteTxIndexTail(batch, lastNum) // Also write the tail here
- if err := batch.Write(); err != nil {
- log.Crit("Failed writing batch to db", "error", err)
- return
- }
- batch.Reset()
- }
- // If we've spent too much time already, notify the user of what we're doing
- if time.Since(logged) > 8*time.Second {
- log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- }
- }
- // Flush the new indexing tail and the last committed data. It can also
- // happen that the last batch is empty because there was nothing to index,
- // but the tail has to be flushed anyway.
- WriteTxIndexTail(batch, lastNum)
- if err := batch.Write(); err != nil {
- log.Crit("Failed writing batch to db", "error", err)
- return
- }
- logger := log.Debug
- if report {
- logger = log.Info
- }
- select {
- case <-interrupt:
- logger("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
- default:
- logger("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
- }
-}
-
-// IndexTransactions creates txlookup indices of the specified block range. The from
-// is included while to is excluded.
-//
-// This function iterates over the canonical chain in reverse order, which has
-// one main advantage: the tx index tail flag can be written periodically even
-// before the whole indexing procedure finishes, so indexing can resume quickly
-// next time.
-//
-// The whole procedure is interrupted if any signal is received on the passed
-// channel.
-func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, report bool) {
- indexTransactions(db, from, to, interrupt, nil, report)
-}
-
-// indexTransactionsForTesting is the internal debug version with an additional hook.
-func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
- indexTransactions(db, from, to, interrupt, hook, false)
-}
-
-// unindexTransactions removes txlookup indices of the specified block range.
-//
-// The whole procedure is interrupted if any signal is received on the passed
-// channel.
-func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) {
- // short circuit for invalid range
- if from >= to {
- return
- }
- var (
- hashesCh = iterateTransactions(db, from, to, false, interrupt)
- batch = db.NewBatch()
- start = time.Now()
- logged = start.Add(-7 * time.Second)
-
- // we expect the first number to come in to be [from]. Therefore, setting
- // nextNum to from means that the queue gap-evaluation will work correctly
- nextNum = from
- queue = prque.New[int64, *blockTxHashes](nil)
- blocks, txs = 0, 0 // for stats reporting
- )
- // Otherwise spin up the concurrent iterator and unindexer
- for delivery := range hashesCh {
- // Push the delivery into the queue and process contiguous ranges.
- queue.Push(delivery, -int64(delivery.number))
- for !queue.Empty() {
- // If the next available item is gapped, return
- if _, priority := queue.Peek(); -priority != int64(nextNum) {
- break
- }
- // For testing
- if hook != nil && !hook(nextNum) {
- break
- }
- delivery := queue.PopItem()
- nextNum = delivery.number + 1
- DeleteTxLookupEntries(batch, delivery.hashes)
- txs += len(delivery.hashes)
- blocks++
-
- // If enough data was accumulated in memory or we're at the last block, dump to disk
- // A batch counts the size of a deletion as '1', so ValueSize is not a
- // useful flush trigger here; flush on the block count instead.
- if blocks%1000 == 0 {
- WriteTxIndexTail(batch, nextNum)
- if err := batch.Write(); err != nil {
- log.Crit("Failed writing batch to db", "error", err)
- return
- }
- batch.Reset()
- }
- // If we've spent too much time already, notify the user of what we're doing
- if time.Since(logged) > 8*time.Second {
- log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- }
- }
- // Flush the new indexing tail and the last committed data. It can also
- // happen that the last batch is empty because there was nothing to unindex,
- // but the tail has to be flushed anyway.
- WriteTxIndexTail(batch, nextNum)
- if err := batch.Write(); err != nil {
- log.Crit("Failed writing batch to db", "error", err)
- return
- }
- logger := log.Debug
- if report {
- logger = log.Info
- }
- select {
- case <-interrupt:
- logger("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
- default:
- logger("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
- }
-}
-
-// UnindexTransactions removes txlookup indices of the specified block range.
-// The from is included while to is excluded.
-//
-// The whole procedure is interrupted if any signal is received on the passed
-// channel.
-func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, report bool) {
- unindexTransactions(db, from, to, interrupt, nil, report)
-}
-
-// unindexTransactionsForTesting is the internal debug version with an additional hook.
-func unindexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
- unindexTransactions(db, from, to, interrupt, hook, false)
-}
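The queue logic in indexTransactions/unindexTransactions above releases deliveries only once they form a contiguous run of block numbers. A minimal sketch of the same gap-evaluation pattern, using the prque package imported by the deleted file:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/libevm/common/prque"
)

func main() {
	// Reverse indexing expects to-1 first; deliveries may arrive out of order.
	queue := prque.New[int64, uint64](nil)
	lastNum := uint64(11) // i.e. to == 11

	for _, n := range []uint64{9, 10, 7} {
		queue.Push(n, int64(n))
		for !queue.Empty() {
			if _, prio := queue.Peek(); prio != int64(lastNum-1) {
				break // gapped: wait until the missing number arrives
			}
			lastNum = queue.PopItem()
			fmt.Println("indexed block", lastNum)
		}
	}
	// Prints 10 then 9; block 7 stays queued until 8 is delivered.
}
```
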
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go
deleted file mode 100644
index 2086fa72b5..0000000000
--- a/core/rawdb/chain_iterator_test.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// (c) 2019-2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "math/big"
- "reflect"
- "sort"
- "sync"
- "testing"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/libevm/common"
-)
-
-func TestChainIterator(t *testing.T) {
- // Construct test chain db
- chainDb := NewMemoryDatabase()
-
- var block *types.Block
- var txs []*types.Transaction
- to := common.BytesToAddress([]byte{0x11})
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block
- WriteBlock(chainDb, block)
- WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
- for i := uint64(1); i <= 10; i++ {
- var tx *types.Transaction
- if i%2 == 0 {
- tx = types.NewTx(&types.LegacyTx{
- Nonce: i,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &to,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- } else {
- tx = types.NewTx(&types.AccessListTx{
- ChainID: big.NewInt(1337),
- Nonce: i,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &to,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- }
- txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher())
- WriteBlock(chainDb, block)
- WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
- }
-
- var cases = []struct {
- from, to uint64
- reverse bool
- expect []int
- }{
- {0, 11, true, []int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}},
- {0, 0, true, nil},
- {0, 5, true, []int{4, 3, 2, 1, 0}},
- {10, 11, true, []int{10}},
- {0, 11, false, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
- {0, 0, false, nil},
- {10, 11, false, []int{10}},
- }
- for i, c := range cases {
- var numbers []int
- hashCh := iterateTransactions(chainDb, c.from, c.to, c.reverse, nil)
- if hashCh != nil {
- for h := range hashCh {
- numbers = append(numbers, int(h.number))
- if len(h.hashes) > 0 {
- if got, exp := h.hashes[0], txs[h.number-1].Hash(); got != exp {
- t.Fatalf("block %d: hash wrong, got %x exp %x", h.number, got, exp)
- }
- }
- }
- }
- if !c.reverse {
- sort.Ints(numbers)
- } else {
- sort.Sort(sort.Reverse(sort.IntSlice(numbers)))
- }
- if !reflect.DeepEqual(numbers, c.expect) {
- t.Fatalf("Case %d failed, visit element mismatch, want %v, got %v", i, c.expect, numbers)
- }
- }
-}
-
-func TestIndexTransactions(t *testing.T) {
- // Construct test chain db
- chainDb := NewMemoryDatabase()
-
- var block *types.Block
- var txs []*types.Transaction
- to := common.BytesToAddress([]byte{0x11})
-
- // Write empty genesis block
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher())
- WriteBlock(chainDb, block)
- WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
-
- for i := uint64(1); i <= 10; i++ {
- var tx *types.Transaction
- if i%2 == 0 {
- tx = types.NewTx(&types.LegacyTx{
- Nonce: i,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &to,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- } else {
- tx = types.NewTx(&types.AccessListTx{
- ChainID: big.NewInt(1337),
- Nonce: i,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &to,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- }
- txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher())
- WriteBlock(chainDb, block)
- WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
- }
- // verify checks whether the tx indices in the range [from, to)
- // is expected.
- verify := func(from, to int, exist bool, tail uint64) {
- for i := from; i < to; i++ {
- if i == 0 {
- continue
- }
- number := ReadTxLookupEntry(chainDb, txs[i-1].Hash())
- if exist && number == nil {
- t.Fatalf("Transaction index %d missing", i)
- }
- if !exist && number != nil {
- t.Fatalf("Transaction index %d is not deleted", i)
- }
- }
- number := ReadTxIndexTail(chainDb)
- if number == nil || *number != tail {
- t.Fatalf("Transaction tail mismatch")
- }
- }
- IndexTransactions(chainDb, 5, 11, nil, false)
- verify(5, 11, true, 5)
- verify(0, 5, false, 5)
-
- IndexTransactions(chainDb, 0, 5, nil, false)
- verify(0, 11, true, 0)
-
- UnindexTransactions(chainDb, 0, 5, nil, false)
- verify(5, 11, true, 5)
- verify(0, 5, false, 5)
-
- UnindexTransactions(chainDb, 5, 11, nil, false)
- verify(0, 11, false, 11)
-
- // Testing corner cases
- signal := make(chan struct{})
- var once sync.Once
- indexTransactionsForTesting(chainDb, 5, 11, signal, func(n uint64) bool {
- if n <= 8 {
- once.Do(func() {
- close(signal)
- })
- return false
- }
- return true
- })
- verify(9, 11, true, 9)
- verify(0, 9, false, 9)
- IndexTransactions(chainDb, 0, 9, nil, false)
-
- signal = make(chan struct{})
- var once2 sync.Once
- unindexTransactionsForTesting(chainDb, 0, 11, signal, func(n uint64) bool {
- if n >= 8 {
- once2.Do(func() {
- close(signal)
- })
- return false
- }
- return true
- })
- verify(8, 11, true, 8)
- verify(0, 8, false, 8)
-}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
deleted file mode 100644
index ed275b1a24..0000000000
--- a/core/rawdb/database.go
+++ /dev/null
@@ -1,496 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "bytes"
- "errors"
- "fmt"
- "os"
- "path"
- "path/filepath"
- "strings"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/ethdb/leveldb"
- "github.com/ava-labs/libevm/ethdb/memorydb"
- "github.com/ava-labs/libevm/ethdb/pebble"
- "github.com/ava-labs/libevm/log"
-)
-
-// freezerdb is a database wrapper that enables freezer data retrievals.
-type freezerdb struct {
- ancientRoot string
- ethdb.KeyValueStore
- ethdb.AncientStore
-}
-
-// AncientDatadir returns the path of root ancient directory.
-func (frdb *freezerdb) AncientDatadir() (string, error) {
- return frdb.ancientRoot, nil
-}
-
-// Close implements io.Closer, closing both the fast key-value store as well as
-// the slow ancient tables.
-func (frdb *freezerdb) Close() error {
- var errs []error
- if err := frdb.AncientStore.Close(); err != nil {
- errs = append(errs, err)
- }
- if err := frdb.KeyValueStore.Close(); err != nil {
- errs = append(errs, err)
- }
- if len(errs) != 0 {
- return fmt.Errorf("%v", errs)
- }
- return nil
-}
-
-// Freeze is a helper method used for external testing to trigger and block until
-// a freeze cycle completes, without having to sleep for a minute to trigger the
-// automatic background run.
-func (frdb *freezerdb) Freeze(threshold uint64) error {
- if frdb.AncientStore.(*chainFreezer).readonly {
- return errReadOnly
- }
- // Set the freezer threshold to a temporary value
- defer func(old uint64) {
- frdb.AncientStore.(*chainFreezer).threshold.Store(old)
- }(frdb.AncientStore.(*chainFreezer).threshold.Load())
- frdb.AncientStore.(*chainFreezer).threshold.Store(threshold)
-
- // Trigger a freeze cycle and block until it's done
- trigger := make(chan struct{}, 1)
- frdb.AncientStore.(*chainFreezer).trigger <- trigger
- <-trigger
- return nil
-}
-
-// nofreezedb is a database wrapper that disables freezer data retrievals.
-type nofreezedb struct {
- ethdb.KeyValueStore
-}
-
-// HasAncient returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
- return false, errNotSupported
-}
-
-// Ancient returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
- return nil, errNotSupported
-}
-
-// AncientRange returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
- return nil, errNotSupported
-}
-
-// Ancients returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Ancients() (uint64, error) {
- return 0, errNotSupported
-}
-
-// Tail returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Tail() (uint64, error) {
- return 0, errNotSupported
-}
-
-// AncientSize returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
- return 0, errNotSupported
-}
-
-// ModifyAncients is not supported.
-func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
- return 0, errNotSupported
-}
-
-// TruncateHead returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) {
- return 0, errNotSupported
-}
-
-// TruncateTail returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) {
- return 0, errNotSupported
-}
-
-// Sync returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Sync() error {
- return errNotSupported
-}
-
-func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
- // Unlike other ancient-related methods, this method does not return
- // errNotSupported when invoked.
- // The reason for this is that the caller might want to do several things:
- // 1. Check if something is in the freezer,
- // 2. If not, check leveldb.
- //
- // This will work, since the ancient-checks inside 'fn' will return errors,
- // and the leveldb work will continue.
- //
- // If we instead were to return errNotSupported here, then the caller would
- // have to explicitly check for that, having an extra clause to do the
- // non-ancient operations.
- return fn(db)
-}
-
-// MigrateTable processes the entries in a given table in sequence
-// converting them to a new format if they're of an old format.
-func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
- return errNotSupported
-}
-
-// AncientDatadir returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) AncientDatadir() (string, error) {
- return "", errNotSupported
-}
-
-// NewDatabase creates a high level database on top of a given key-value data
-// store without a freezer moving immutable chain segments into cold storage.
-func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
- return &nofreezedb{KeyValueStore: db}
-}
-
-// resolveChainFreezerDir is a helper function which resolves the absolute path
-// of chain freezer by considering backward compatibility.
-func resolveChainFreezerDir(ancient string) string {
- // Check if the chain freezer is already present in the specified
- // sub folder; if not, there are two possibilities:
- // - chain freezer is not initialized
- // - chain freezer exists in legacy location (root ancient folder)
- freezer := path.Join(ancient, ChainFreezerName)
- if !common.FileExist(freezer) {
- if !common.FileExist(ancient) {
- // The entire ancient store is not initialized, still use the sub
- // folder for initialization.
- } else {
- // Ancient root is already initialized, then we hold the assumption
- // that chain freezer is also initialized and located in root folder.
- // In this case fallback to legacy location.
- freezer = ancient
- log.Info("Found legacy ancient chain path", "location", ancient)
- }
- }
- return freezer
-}
-
-// NewDatabaseWithFreezer creates a high level database on top of a given key-
-// value data store with a freezer moving immutable chain segments into cold
-// storage. The passed ancient indicates the path of root ancient directory
-// where the chain freezer can be opened.
-func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
- // Create the idle freezer instance
- frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly)
- if err != nil {
- printChainMetadata(db)
- return nil, err
- }
- // Since the freezer can be stored separately from the user's key-value database,
- // there's a fairly high probability that the user requests invalid combinations
- // of the freezer and database. Ensure that we don't shoot ourselves in the foot
- // by serving up conflicting data, leading to both datastores getting corrupted.
- //
- // - If both the freezer and key-value store are empty (no genesis), we just
- // initialized a new empty freezer, so everything's fine.
- // - If the key-value store is empty, but the freezer is not, we need to make
- // sure the user's genesis matches the freezer. That will be checked in the
- // blockchain, since we don't have the genesis block here (nor should we at
- // this point care, the key-value/freezer combo is valid).
- // - If neither the key-value store nor the freezer is empty, cross validate
- // the genesis hashes to make sure they are compatible. If they are, also
- // ensure that there's no gap between the freezer and subsequently leveldb.
- // - If the key-value store is not empty, but the freezer is, we might just be
- // upgrading to the freezer release, or we might have had a small chain and
- // not frozen anything yet. Ensure that no blocks are missing yet from the
- // key-value store, since that would mean we already had an old freezer.
-
- // If the genesis hash is empty, we have a new key-value store, so nothing to
- // validate in this method. If, however, the genesis hash is not nil, compare
- // it to the freezer content.
- if kvgenesis, _ := db.Get(headerHashKey(0)); len(kvgenesis) > 0 {
- if frozen, _ := frdb.Ancients(); frozen > 0 {
- // If the freezer already contains something, ensure that the genesis blocks
- // match, otherwise we might mix up freezers across chains and destroy both
- // the freezer and the key-value store.
- frgenesis, err := frdb.Ancient(ChainFreezerHashTable, 0)
- if err != nil {
- printChainMetadata(db)
- return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
- } else if !bytes.Equal(kvgenesis, frgenesis) {
- printChainMetadata(db)
- return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
- }
- // Key-value store and freezer belong to the same network. Ensure that they
- // are contiguous, otherwise we might end up with a non-functional freezer.
- if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
- // Subsequent header after the freezer limit is missing from the database.
- // Reject startup if the database has a more recent head.
- if head := *ReadHeaderNumber(db, ReadHeadHeaderHash(db)); head > frozen-1 {
- // Find the smallest block stored in the key-value store
- // in range of [frozen, head]
- var number uint64
- for number = frozen; number <= head; number++ {
- if present, _ := db.Has(headerHashKey(number)); present {
- break
- }
- }
- // We are about to exit on error. Print database metadata before exiting
- printChainMetadata(db)
- return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ",
- frozen-1, number, head)
- }
- // Database contains only older data than the freezer, this happens if the
- // state was wiped and reinited from an existing freezer.
- }
- // Otherwise, key-value store continues where the freezer left off, all is fine.
- // We might have duplicate blocks (crash after freezer write but before key-value
- // store deletion, but that's fine).
- } else {
- // If the freezer is empty, ensure nothing was moved yet from the key-value
- // store, otherwise we'll end up missing data. We check block #1 to decide
- // if we froze anything previously or not, but do take care of databases with
- // only the genesis block.
- if ReadHeadHeaderHash(db) != common.BytesToHash(kvgenesis) {
- // Key-value store contains more data than the genesis block, make sure we
- // didn't freeze anything yet.
- if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
- printChainMetadata(db)
- return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
- }
- // Block #1 is still in the database, we're allowed to init a new freezer
- }
- // Otherwise, the head header is still the genesis, we're allowed to init a new
- // freezer.
- }
- }
- // Freezer is consistent with the key-value database, permit combining the two
- if !frdb.readonly {
- frdb.wg.Add(1)
- go func() {
- frdb.freeze(db)
- frdb.wg.Done()
- }()
- }
- return &freezerdb{
- ancientRoot: ancient,
- KeyValueStore: db,
- AncientStore: frdb,
- }, nil
-}
-
-// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
-// freezer moving immutable chain segments into cold storage.
-func NewMemoryDatabase() ethdb.Database {
- return NewDatabase(memorydb.New())
-}
-
-// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
-// with an initial starting capacity, but without a freezer moving immutable
-// chain segments into cold storage.
-func NewMemoryDatabaseWithCap(size int) ethdb.Database {
- return NewDatabase(memorydb.NewWithCap(size))
-}
-
-// NewLevelDBDatabase creates a persistent key-value database without a freezer
-// moving immutable chain segments into cold storage.
-func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
- db, err := leveldb.New(file, cache, handles, namespace, readonly)
- if err != nil {
- return nil, err
- }
- log.Info("Using LevelDB as the backing database")
- return NewDatabase(db), nil
-}
-
-// NewPebbleDBDatabase creates a persistent key-value database without a freezer
-// moving immutable chain segments into cold storage.
-func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
- db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
- if err != nil {
- return nil, err
- }
- return NewDatabase(db), nil
-}
-
-const (
- dbPebble = "pebble"
- dbLeveldb = "leveldb"
-)
-
-// PreexistingDatabase checks whether a database is already instantiated in the
-// given data directory, and if so, returns the type of database (or the
-// empty string).
-func PreexistingDatabase(path string) string {
- if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
- return "" // No pre-existing db
- }
- if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil {
- if err != nil {
- panic(err) // only possible if the pattern is malformed
- }
- return dbPebble
- }
- return dbLeveldb
-}
-
-// OpenOptions contains the options to apply when opening a database.
-// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used.
-type OpenOptions struct {
- Type string // "leveldb" | "pebble"
- Directory string // the datadir
- AncientsDirectory string // the ancients-dir
- Namespace string // the namespace for database relevant metrics
- Cache int // the capacity(in megabytes) of the data caching
- Handles int // number of files to be open simultaneously
- ReadOnly bool
- // Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of
- // a crash is not important. This option should typically be used in tests.
- Ephemeral bool
-}
-
-// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
-//
-//                       type == null          type != null
-//                    +------------------------------------------------------
-// db is non-existent |  pebble default    |  specified type
-// db is existent     |  from db           |  specified type (if compatible)
-func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
- // Reject any unsupported database type
- if len(o.Type) != 0 && o.Type != dbLeveldb && o.Type != dbPebble {
- return nil, fmt.Errorf("unknown db.engine %v", o.Type)
- }
- // Retrieve any pre-existing database's type and use that or the requested one
- // as long as there's no conflict between the two types
- existingDb := PreexistingDatabase(o.Directory)
- if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb {
- return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
- }
- if o.Type == dbPebble || existingDb == dbPebble {
- log.Info("Using pebble as the backing database")
- return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
- }
- if o.Type == dbLeveldb || existingDb == dbLeveldb {
- log.Info("Using leveldb as the backing database")
- return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
- }
- // No pre-existing database, no user-requested one either. Default to Pebble.
- log.Info("Defaulting to pebble as the backing database")
- return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
-}
-
-// Open opens a disk-based key-value database such as leveldb or pebble and
-// also integrates it with a freezer database -- if the AncientsDirectory
-// option has been set on the provided OpenOptions.
-// The passed o.AncientsDirectory indicates the path of the root ancient
-// directory where the chain freezer can be opened.
-func Open(o OpenOptions) (ethdb.Database, error) {
- kvdb, err := openKeyValueDatabase(o)
- if err != nil {
- return nil, err
- }
- if len(o.AncientsDirectory) == 0 {
- return kvdb, nil
- }
- frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly)
- if err != nil {
- kvdb.Close()
- return nil, err
- }
- return frdb, nil
-}
-
-type counter uint64
-
-func (c counter) String() string {
- return fmt.Sprintf("%d", c)
-}
-
-func (c counter) Percentage(current uint64) string {
- return fmt.Sprintf("%d", current*100/uint64(c))
-}
-
-// stat stores sizes and count for a parameter
-type stat struct {
- size common.StorageSize
- count counter
-}
-
-// Add size to the stat and increase the counter by 1
-func (s *stat) Add(size common.StorageSize) {
- s.size += size
- s.count++
-}
-
-func (s *stat) Size() string {
- return s.size.String()
-}
-
-func (s *stat) Count() string {
- return s.count.String()
-}
-
-// printChainMetadata prints out chain metadata to stderr.
-func printChainMetadata(db ethdb.KeyValueStore) {
- fmt.Fprintf(os.Stderr, "Chain metadata\n")
- for _, v := range ReadChainMetadata(db) {
- fmt.Fprintf(os.Stderr, " %s\n", strings.Join(v, ": "))
- }
- fmt.Fprintf(os.Stderr, "\n\n")
-}
-
-// ReadChainMetadata returns a set of key/value pairs that contains information
-// about the database chain status. This can be used for diagnostic purposes
-// when investigating the state of the node.
-func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
- pp := func(val *uint64) string {
- if val == nil {
- return ""
- }
- return fmt.Sprintf("%d (%#x)", *val, *val)
- }
- data := [][]string{
- {"databaseVersion", pp(ReadDatabaseVersion(db))},
- {"headBlockHash", fmt.Sprintf("%v", ReadHeadBlockHash(db))},
- {"headFastBlockHash", fmt.Sprintf("%v", ReadHeadFastBlockHash(db))},
- {"headHeaderHash", fmt.Sprintf("%v", ReadHeadHeaderHash(db))},
- {"lastPivotNumber", pp(ReadLastPivotNumber(db))},
- {"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(ReadSnapshotSyncStatus(db)))},
- {"snapshotDisabled", fmt.Sprintf("%v", ReadSnapshotDisabled(db))},
- {"snapshotJournal", fmt.Sprintf("%d bytes", len(ReadSnapshotJournal(db)))},
- {"snapshotRecoveryNumber", pp(ReadSnapshotRecoveryNumber(db))},
- {"snapshotRoot", fmt.Sprintf("%v", ReadSnapshotRoot(db))},
- {"txIndexTail", pp(ReadTxIndexTail(db))},
- }
- if b := ReadSkeletonSyncStatus(db); b != nil {
- data = append(data, []string{"SkeletonSyncStatus", string(b)})
- }
- return data
-}
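
Aside: the consistency rules spelled out in the NewDatabaseWithFreezer comment block above reduce to two checks: the genesis hashes of the two stores must match, and the first block after the frozen range must still be present in the key-value store. The following minimal, self-contained sketch isolates that idea; the map-backed store and all names here are illustrative stand-ins, not the real ethdb interfaces.

package main

import (
	"bytes"
	"fmt"
)

// kvStore is a hypothetical stand-in for the key-value database,
// mapping block number -> canonical header hash.
type kvStore map[uint64][]byte

// crossValidate mirrors the shape of the checks above: matching genesis
// hashes, and no gap between the frozen range [0, frozen) and the first
// block held in the key-value store.
func crossValidate(kv kvStore, frozenGenesis []byte, frozen uint64) error {
	kvGenesis, ok := kv[0]
	if !ok || frozen == 0 {
		return nil // fresh store or empty freezer: nothing to cross-check
	}
	if !bytes.Equal(kvGenesis, frozenGenesis) {
		return fmt.Errorf("genesis mismatch: %#x (kv) != %#x (ancients)", kvGenesis, frozenGenesis)
	}
	if _, ok := kv[frozen]; !ok {
		return fmt.Errorf("gap: ancients end at #%d but #%d is missing from kv", frozen-1, frozen)
	}
	return nil
}

func main() {
	kv := kvStore{0: {0xaa}, 2: {0xcc}}             // block #1 is missing
	fmt.Println(crossValidate(kv, []byte{0xaa}, 1)) // reports the gap at #1
}
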
diff --git a/core/rawdb/database_ext.go b/core/rawdb/database_ext.go
index da7936644c..5c5c86997e 100644
--- a/core/rawdb/database_ext.go
+++ b/core/rawdb/database_ext.go
@@ -3,15 +3,10 @@ package rawdb
import (
"bytes"
- "fmt"
- "os"
- "strings"
- "time"
"github.com/ava-labs/libevm/common"
+ ethrawdb "github.com/ava-labs/libevm/core/rawdb"
"github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/olekukonko/tablewriter"
)
// ClearPrefix removes all keys in db that begin with prefix and match an
@@ -46,193 +41,32 @@ func ClearPrefix(db ethdb.KeyValueStore, prefix []byte, keyLen int) error {
// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
- it := db.NewIterator(keyPrefix, keyStart)
- defer it.Release()
-
- var (
- count int64
- start = time.Now()
- logged = time.Now()
-
- // Key-value store statistics
- headers stat
- bodies stat
- receipts stat
- tds stat
- numHashPairings stat
- hashNumPairings stat
- legacyTries stat
- stateLookups stat
- accountTries stat
- storageTries stat
- codes stat
- txLookups stat
- accountSnaps stat
- storageSnaps stat
- preimages stat
- bloomBits stat
- beaconHeaders stat
- cliqueSnaps stat
-
- // State sync statistics
- codeToFetch stat
- syncProgress stat
- syncSegments stat
- syncPerformed stat
-
- // Les statistic
- chtTrieNodes stat
- bloomTrieNodes stat
-
- // Meta- and unaccounted data
- metadata stat
- unaccounted stat
-
- // Totals
- total common.StorageSize
- )
- // Inspect key-value database first.
- for it.Next() {
- var (
- key = it.Key()
- size = common.StorageSize(len(key) + len(it.Value()))
- )
- total += size
- switch {
- case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
- headers.Add(size)
- case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
- bodies.Add(size)
- case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
- receipts.Add(size)
- case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
- tds.Add(size)
- case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
- numHashPairings.Add(size)
- case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
- hashNumPairings.Add(size)
- case IsLegacyTrieNode(key, it.Value()):
- legacyTries.Add(size)
- case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
- stateLookups.Add(size)
- case IsAccountTrieNode(key):
- accountTries.Add(size)
- case IsStorageTrieNode(key):
- storageTries.Add(size)
- case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
- codes.Add(size)
- case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
- txLookups.Add(size)
- case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
- accountSnaps.Add(size)
- case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
- storageSnaps.Add(size)
- case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
- preimages.Add(size)
- case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
- metadata.Add(size)
- case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
- metadata.Add(size)
- case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
- bloomBits.Add(size)
- case bytes.HasPrefix(key, BloomBitsIndexPrefix):
- bloomBits.Add(size)
- case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
- beaconHeaders.Add(size)
- case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
- cliqueSnaps.Add(size)
- case bytes.HasPrefix(key, ChtTablePrefix) ||
- bytes.HasPrefix(key, ChtIndexTablePrefix) ||
- bytes.HasPrefix(key, ChtPrefix): // Canonical hash trie
- chtTrieNodes.Add(size)
- case bytes.HasPrefix(key, BloomTrieTablePrefix) ||
- bytes.HasPrefix(key, BloomTrieIndexPrefix) ||
- bytes.HasPrefix(key, BloomTriePrefix): // Bloomtrie sub
- bloomTrieNodes.Add(size)
- case bytes.HasPrefix(key, syncStorageTriesPrefix) && len(key) == syncStorageTriesKeyLength:
- syncProgress.Add(size)
- case bytes.HasPrefix(key, syncSegmentsPrefix) && len(key) == syncSegmentsKeyLength:
- syncSegments.Add(size)
- case bytes.HasPrefix(key, CodeToFetchPrefix) && len(key) == codeToFetchKeyLength:
- codeToFetch.Add(size)
- case bytes.HasPrefix(key, syncPerformedPrefix) && len(key) == syncPerformedKeyLength:
- syncPerformed.Add(size)
- default:
- var accounted bool
- for _, meta := range [][]byte{
- databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
- lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
- snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
- uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
- persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
- snapshotBlockHashKey, syncRootKey,
- } {
- if bytes.Equal(key, meta) {
- metadata.Add(size)
- accounted = true
- break
- }
- }
- if !accounted {
- unaccounted.Add(size)
- }
- }
- count++
- if count%1000 == 0 && time.Since(logged) > 8*time.Second {
- log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
+ options := []ethrawdb.InspectDatabaseOption{
+ ethrawdb.WithInspectDatabaseExtraMeta(snapshotBlockHashKey),
+ ethrawdb.WithInspectDatabaseExtraMeta(syncRootKey),
+ ethrawdb.WithInspectDatabaseRemoval("Key-Value store", "Difficulties"),
+ ethrawdb.WithInspectDatabaseRemoval("Key-Value store", "Beacon sync headers"),
+ ethrawdb.WithInspectDatabaseExtraStat(
+ "State sync", "Trie segments", func(key []byte) bool {
+ return bytes.HasPrefix(key, syncSegmentsPrefix) && len(key) == syncSegmentsKeyLength
+ },
+ ),
+ ethrawdb.WithInspectDatabaseExtraStat(
+ "State sync", "Storage tries to fetch", func(key []byte) bool {
+ return bytes.HasPrefix(key, syncStorageTriesPrefix) && len(key) == syncStorageTriesKeyLength
+ },
+ ),
+ ethrawdb.WithInspectDatabaseExtraStat(
+ "State sync", "Code to fetch", func(key []byte) bool {
+ return bytes.HasPrefix(key, CodeToFetchPrefix) && len(key) == codeToFetchKeyLength
+ },
+ ),
+ ethrawdb.WithInspectDatabaseExtraStat(
+ "State sync", "Block numbers synced to", func(key []byte) bool {
+ return bytes.HasPrefix(key, syncPerformedPrefix) && len(key) == syncPerformedKeyLength
+ },
+ ),
}
- // Display the database statistic of key-value store.
- stats := [][]string{
- {"Key-Value store", "Headers", headers.Size(), headers.Count()},
- {"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
- {"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
- {"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
- {"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
- {"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
- {"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
- {"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
- {"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
- {"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
- {"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()},
- {"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()},
- {"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
- {"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
- {"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
- {"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
- {"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
- {"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
- {"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
- {"State sync", "Trie segments", syncSegments.Size(), syncSegments.Count()},
- {"State sync", "Storage tries to fetch", syncProgress.Size(), syncProgress.Count()},
- {"State sync", "Code to fetch", codeToFetch.Size(), codeToFetch.Count()},
- {"State sync", "Block numbers synced to", syncPerformed.Size(), syncPerformed.Count()},
- }
- // Inspect all registered append-only file store then.
- ancients, err := inspectFreezers(db)
- if err != nil {
- return err
- }
- for _, ancient := range ancients {
- for _, table := range ancient.sizes {
- stats = append(stats, []string{
- fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
- strings.Title(table.name),
- table.size.String(),
- fmt.Sprintf("%d", ancient.count()),
- })
- }
- total += ancient.size()
- }
- table := tablewriter.NewWriter(os.Stdout)
- table.SetHeader([]string{"Database", "Category", "Size", "Items"})
- table.SetFooter([]string{"", "Total", total.String(), " "})
- table.AppendBulk(stats)
- table.Render()
- if unaccounted.size > 0 {
- log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
- }
- return nil
+ return ethrawdb.InspectDatabase(db, keyPrefix, keyStart, options...)
}
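
The rewritten InspectDatabase above delegates the traversal to libevm and only registers coreth-specific categories through functional options. As a rough illustration of that pattern, the sketch below shows how such options compose; the Option, matcher and inspect names are invented for the sketch and are not libevm's actual API.

package main

import (
	"bytes"
	"fmt"
)

// matcher pairs a stats bucket with a key predicate, mirroring the shape
// of the WithInspectDatabaseExtraStat registrations above.
type matcher struct {
	database, category string
	match              func(key []byte) bool
}

type inspector struct{ extra []matcher }

// Option mutates the inspector before the key scan starts.
type Option func(*inspector)

func WithExtraStat(db, cat string, match func([]byte) bool) Option {
	return func(i *inspector) {
		i.extra = append(i.extra, matcher{db, cat, match})
	}
}

// inspect walks the keys once and attributes each to the first bucket
// whose predicate matches.
func inspect(keys [][]byte, opts ...Option) {
	var ins inspector
	for _, opt := range opts {
		opt(&ins)
	}
	counts := make(map[string]int)
	for _, key := range keys {
		for _, m := range ins.extra {
			if m.match(key) {
				counts[m.database+" / "+m.category]++
				break
			}
		}
	}
	fmt.Println(counts)
}

func main() {
	syncPrefix := []byte("sync:") // hypothetical key prefix
	inspect(
		[][]byte{[]byte("sync:a"), []byte("sync:b"), []byte("other")},
		WithExtraStat("State sync", "Trie segments", func(k []byte) bool {
			return bytes.HasPrefix(k, syncPrefix)
		}),
	)
}
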
diff --git a/core/rawdb/database_test.go b/core/rawdb/database_test.go
deleted file mode 100644
index a0d7b5ec66..0000000000
--- a/core/rawdb/database_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
deleted file mode 100644
index a93d97dfad..0000000000
--- a/core/rawdb/freezer.go
+++ /dev/null
@@ -1,509 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "errors"
- "fmt"
- "math"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
- "github.com/gofrs/flock"
-)
-
-var (
- // errReadOnly is returned if the freezer is opened in read only mode. All the
- // mutations are disallowed.
- errReadOnly = errors.New("read only")
-
- // errUnknownTable is returned if the user attempts to read from a table that is
- // not tracked by the freezer.
- errUnknownTable = errors.New("unknown table")
-
- // errOutOrderInsertion is returned if the user attempts to inject out-of-order
- // binary blobs into the freezer.
- errOutOrderInsertion = errors.New("the append operation is out-order")
-
- // errSymlinkDatadir is returned if the ancient directory specified by user
- // is a symbolic link.
- errSymlinkDatadir = errors.New("symbolic link datadir is not supported")
-)
-
-// freezerTableSize defines the maximum size of freezer data files.
-const freezerTableSize = 2 * 1000 * 1000 * 1000
-
-// Freezer is a memory mapped append-only database to store immutable ordered
-// data into flat files:
-//
-// - The append-only nature ensures that disk writes are minimized.
-// - The memory mapping ensures we can max out system memory for caching without
-// reserving it for go-ethereum. This would also reduce the memory requirements
-// of Geth, and thus also GC overhead.
-type Freezer struct {
- frozen atomic.Uint64 // Number of blocks already frozen
- tail atomic.Uint64 // Number of the first stored item in the freezer
-
- // This lock synchronizes writers and the truncate operation, as well as
- // the "atomic" (batched) read operations.
- writeLock sync.RWMutex
- writeBatch *freezerBatch
-
- readonly bool
- tables map[string]*freezerTable // Data tables for storing everything
- instanceLock *flock.Flock // File-system lock to prevent double opens
- closeOnce sync.Once
-}
-
-// NewChainFreezer is a small utility method around NewFreezer that sets the
-// default parameters for the chain storage.
-func NewChainFreezer(datadir string, namespace string, readonly bool) (*Freezer, error) {
- return NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerNoSnappy)
-}
-
-// NewFreezer creates a freezer instance for maintaining immutable ordered
-// data according to the given parameters.
-//
-// The 'tables' argument defines the data tables. If the value of a map
-// entry is true, snappy compression is disabled for the table.
-func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*Freezer, error) {
- // Create the initial freezer object
- var (
- readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
- writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
- sizeGauge = metrics.NewRegisteredGauge(namespace+"ancient/size", nil)
- )
- // Ensure the datadir is not a symbolic link if it exists.
- if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
- if info.Mode()&os.ModeSymlink != 0 {
- log.Warn("Symbolic link ancient database is not supported", "path", datadir)
- return nil, errSymlinkDatadir
- }
- }
- flockFile := filepath.Join(datadir, "FLOCK")
- if err := os.MkdirAll(filepath.Dir(flockFile), 0755); err != nil {
- return nil, err
- }
- // Leveldb uses LOCK as the filelock filename. To prevent the
- // name collision, we use FLOCK as the lock name.
- lock := flock.New(flockFile)
- tryLock := lock.TryLock
- if readonly {
- tryLock = lock.TryRLock
- }
- if locked, err := tryLock(); err != nil {
- return nil, err
- } else if !locked {
- return nil, errors.New("locking failed")
- }
- // Open all the supported data tables
- freezer := &Freezer{
- readonly: readonly,
- tables: make(map[string]*freezerTable),
- instanceLock: lock,
- }
-
- // Create the tables.
- for name, disableSnappy := range tables {
- table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly)
- if err != nil {
- for _, table := range freezer.tables {
- table.Close()
- }
- lock.Unlock()
- return nil, err
- }
- freezer.tables[name] = table
- }
- var err error
- if freezer.readonly {
- // In readonly mode only validate, don't truncate.
- // validate also sets `freezer.frozen`.
- err = freezer.validate()
- } else {
- // Truncate all tables to common length.
- err = freezer.repair()
- }
- if err != nil {
- for _, table := range freezer.tables {
- table.Close()
- }
- lock.Unlock()
- return nil, err
- }
-
- // Create the write batch.
- freezer.writeBatch = newFreezerBatch(freezer)
-
- log.Info("Opened ancient database", "database", datadir, "readonly", readonly)
- return freezer, nil
-}
-
-// Close terminates the chain freezer, unmapping all the data files.
-func (f *Freezer) Close() error {
- f.writeLock.Lock()
- defer f.writeLock.Unlock()
-
- var errs []error
- f.closeOnce.Do(func() {
- for _, table := range f.tables {
- if err := table.Close(); err != nil {
- errs = append(errs, err)
- }
- }
- if err := f.instanceLock.Unlock(); err != nil {
- errs = append(errs, err)
- }
- })
- if errs != nil {
- return fmt.Errorf("%v", errs)
- }
- return nil
-}
-
-// HasAncient reports whether the specified ancient data exists in the freezer.
-func (f *Freezer) HasAncient(kind string, number uint64) (bool, error) {
- if table := f.tables[kind]; table != nil {
- return table.has(number), nil
- }
- return false, nil
-}
-
-// Ancient retrieves an ancient binary blob from the append-only immutable files.
-func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
- if table := f.tables[kind]; table != nil {
- return table.Retrieve(number)
- }
- return nil, errUnknownTable
-}
-
-// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
-// It will return
-// - at most 'count' items,
-//   - if maxBytes is specified: at least 1 item (even if exceeding maxBytes),
-//     but will otherwise return as many items as fit into maxBytes.
-// - if maxBytes is not specified, 'count' items will be returned if they are present.
-func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
- if table := f.tables[kind]; table != nil {
- return table.RetrieveItems(start, count, maxBytes)
- }
- return nil, errUnknownTable
-}
-
-// Ancients returns the number of frozen items.
-func (f *Freezer) Ancients() (uint64, error) {
- return f.frozen.Load(), nil
-}
-
-// Tail returns the number of the first stored item in the freezer.
-func (f *Freezer) Tail() (uint64, error) {
- return f.tail.Load(), nil
-}
-
-// AncientSize returns the ancient size of the specified category.
-func (f *Freezer) AncientSize(kind string) (uint64, error) {
- // This needs the write lock to avoid data races on table fields.
- // Speed doesn't matter here, AncientSize is for debugging.
- f.writeLock.RLock()
- defer f.writeLock.RUnlock()
-
- if table := f.tables[kind]; table != nil {
- return table.size()
- }
- return 0, errUnknownTable
-}
-
-// ReadAncients runs the given read operation while ensuring that no writes take place
-// on the underlying freezer.
-func (f *Freezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
- f.writeLock.RLock()
- defer f.writeLock.RUnlock()
-
- return fn(f)
-}
-
-// ModifyAncients runs the given write operation.
-func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
- if f.readonly {
- return 0, errReadOnly
- }
- f.writeLock.Lock()
- defer f.writeLock.Unlock()
-
- // Roll back all tables to the starting position in case of error.
- prevItem := f.frozen.Load()
- defer func() {
- if err != nil {
- // The write operation has failed. Go back to the previous item position.
- for name, table := range f.tables {
- err := table.truncateHead(prevItem)
- if err != nil {
- log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
- }
- }
- }
- }()
-
- f.writeBatch.reset()
- if err := fn(f.writeBatch); err != nil {
- return 0, err
- }
- item, writeSize, err := f.writeBatch.commit()
- if err != nil {
- return 0, err
- }
- f.frozen.Store(item)
- return writeSize, nil
-}
-
-// TruncateHead discards any recent data above the provided threshold number.
-// It returns the previous head number.
-func (f *Freezer) TruncateHead(items uint64) (uint64, error) {
- if f.readonly {
- return 0, errReadOnly
- }
- f.writeLock.Lock()
- defer f.writeLock.Unlock()
-
- oitems := f.frozen.Load()
- if oitems <= items {
- return oitems, nil
- }
- for _, table := range f.tables {
- if err := table.truncateHead(items); err != nil {
- return 0, err
- }
- }
- f.frozen.Store(items)
- return oitems, nil
-}
-
-// TruncateTail discards any old data below the provided threshold number.
-func (f *Freezer) TruncateTail(tail uint64) (uint64, error) {
- if f.readonly {
- return 0, errReadOnly
- }
- f.writeLock.Lock()
- defer f.writeLock.Unlock()
-
- old := f.tail.Load()
- if old >= tail {
- return old, nil
- }
- for _, table := range f.tables {
- if err := table.truncateTail(tail); err != nil {
- return 0, err
- }
- }
- f.tail.Store(tail)
- return old, nil
-}
-
-// Sync flushes all data tables to disk.
-func (f *Freezer) Sync() error {
- var errs []error
- for _, table := range f.tables {
- if err := table.Sync(); err != nil {
- errs = append(errs, err)
- }
- }
- if errs != nil {
- return fmt.Errorf("%v", errs)
- }
- return nil
-}
-
-// validate checks that every table has the same boundary.
-// Used instead of `repair` in readonly mode.
-func (f *Freezer) validate() error {
- if len(f.tables) == 0 {
- return nil
- }
- var (
- head uint64
- tail uint64
- name string
- )
- // Hack to get boundary of any table
- for kind, table := range f.tables {
- head = table.items.Load()
- tail = table.itemHidden.Load()
- name = kind
- break
- }
- // Now check every table against those boundaries.
- for kind, table := range f.tables {
- if head != table.items.Load() {
- return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, table.items.Load(), head)
- }
- if tail != table.itemHidden.Load() {
- return fmt.Errorf("freezer tables %s and %s have differing tail: %d != %d", kind, name, table.itemHidden.Load(), tail)
- }
- }
- f.frozen.Store(head)
- f.tail.Store(tail)
- return nil
-}
-
-// repair truncates all data tables to the same length.
-func (f *Freezer) repair() error {
- var (
- head = uint64(math.MaxUint64)
- tail = uint64(0)
- )
- for _, table := range f.tables {
- items := table.items.Load()
- if head > items {
- head = items
- }
- hidden := table.itemHidden.Load()
- if hidden > tail {
- tail = hidden
- }
- }
- for _, table := range f.tables {
- if err := table.truncateHead(head); err != nil {
- return err
- }
- if err := table.truncateTail(tail); err != nil {
- return err
- }
- }
- f.frozen.Store(head)
- f.tail.Store(tail)
- return nil
-}
-
-// convertLegacyFn takes a raw freezer entry in an older format and
-// returns it in the new format.
-type convertLegacyFn = func([]byte) ([]byte, error)
-
-// MigrateTable processes the entries in a given table in sequence
-// converting them to a new format if they're of an old format.
-func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
- if f.readonly {
- return errReadOnly
- }
- f.writeLock.Lock()
- defer f.writeLock.Unlock()
-
- table, ok := f.tables[kind]
- if !ok {
- return errUnknownTable
- }
- // forEach iterates every entry in the table serially and in order, calling `fn`
- // with the item as argument. If `fn` returns an error the iteration stops
- // and that error will be returned.
- forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
- var (
- items = t.items.Load()
- batchSize = uint64(1024)
- maxBytes = uint64(1024 * 1024)
- )
- for i := offset; i < items; {
- if i+batchSize > items {
- batchSize = items - i
- }
- data, err := t.RetrieveItems(i, batchSize, maxBytes)
- if err != nil {
- return err
- }
- for j, item := range data {
- if err := fn(i+uint64(j), item); err != nil {
- return err
- }
- }
- i += uint64(len(data))
- }
- return nil
- }
- // TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
- // process assumes no deletion at tail and needs to be modified to account for that.
- if table.itemOffset.Load() > 0 || table.itemHidden.Load() > 0 {
- return errors.New("migration not supported for tail-deleted freezers")
- }
- ancientsPath := filepath.Dir(table.index.Name())
- // Set up new dir for the migrated table, the content of which
- // we'll at the end move over to the ancients dir.
- migrationPath := filepath.Join(ancientsPath, "migration")
- newTable, err := newFreezerTable(migrationPath, kind, table.noCompression, false)
- if err != nil {
- return err
- }
- var (
- batch = newTable.newBatch()
- out []byte
- start = time.Now()
- logged = time.Now()
- offset = newTable.items.Load()
- )
- if offset > 0 {
- log.Info("found previous migration attempt", "migrated", offset)
- }
- // Iterate through entries and transform them
- if err := forEach(table, offset, func(i uint64, blob []byte) error {
- if i%10000 == 0 && time.Since(logged) > 16*time.Second {
- log.Info("Processing legacy elements", "count", i, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- out, err = convert(blob)
- if err != nil {
- return err
- }
- if err := batch.AppendRaw(i, out); err != nil {
- return err
- }
- return nil
- }); err != nil {
- return err
- }
- if err := batch.commit(); err != nil {
- return err
- }
- log.Info("Replacing old table files with migrated ones", "elapsed", common.PrettyDuration(time.Since(start)))
- // Release and delete old table files. Note this won't
- // delete the index file.
- table.releaseFilesAfter(0, true)
-
- if err := newTable.Close(); err != nil {
- return err
- }
- files, err := os.ReadDir(migrationPath)
- if err != nil {
- return err
- }
- // Move migrated files to ancients dir.
- for _, f := range files {
- // This will replace the old index file as a side-effect.
- if err := os.Rename(filepath.Join(migrationPath, f.Name()), filepath.Join(ancientsPath, f.Name())); err != nil {
- return err
- }
- }
- // Delete by now empty dir.
- if err := os.Remove(migrationPath); err != nil {
- return err
- }
- return nil
-}
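
One detail of the deleted Freezer worth calling out is the rollback contract of ModifyAncients above: the pre-write item count is captured, and every table is truncated back to it if the batch function or commit fails. The following minimal, self-contained sketch shows the same pattern with a toy table type, not the real freezerTable.

package main

import (
	"errors"
	"fmt"
)

type table struct{ items []string }

func (t *table) truncateHead(n int) { t.items = t.items[:n] }

// modify runs fn and restores the previous head on any error, so a
// half-written batch never becomes visible to readers.
func modify(t *table, fn func(*table) error) (err error) {
	prev := len(t.items)
	defer func() {
		if err != nil {
			t.truncateHead(prev) // roll back partial writes
		}
	}()
	return fn(t)
}

func main() {
	t := &table{items: []string{"block0"}}
	err := modify(t, func(t *table) error {
		t.items = append(t.items, "block1") // partial write...
		return errors.New("commit failed")  // ...then a simulated failure
	})
	fmt.Println(err, len(t.items)) // "commit failed 1": the append was rolled back
}
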
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
deleted file mode 100644
index d3ea615a87..0000000000
--- a/core/rawdb/freezer_batch.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "fmt"
-
- "github.com/ava-labs/libevm/common/math"
- "github.com/ava-labs/libevm/rlp"
- "github.com/golang/snappy"
-)
-
-// This is the maximum amount of data that will be buffered in memory
-// for a single freezer table batch.
-const freezerBatchBufferLimit = 2 * 1024 * 1024
-
-// freezerBatch is a write operation of multiple items on a freezer.
-type freezerBatch struct {
- tables map[string]*freezerTableBatch
-}
-
-func newFreezerBatch(f *Freezer) *freezerBatch {
- batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))}
- for kind, table := range f.tables {
- batch.tables[kind] = table.newBatch()
- }
- return batch
-}
-
-// Append adds an RLP-encoded item of the given kind.
-func (batch *freezerBatch) Append(kind string, num uint64, item interface{}) error {
- return batch.tables[kind].Append(num, item)
-}
-
-// AppendRaw adds an item of the given kind.
-func (batch *freezerBatch) AppendRaw(kind string, num uint64, item []byte) error {
- return batch.tables[kind].AppendRaw(num, item)
-}
-
-// reset initializes the batch.
-func (batch *freezerBatch) reset() {
- for _, tb := range batch.tables {
- tb.reset()
- }
-}
-
-// commit is called at the end of a write operation and
-// writes all remaining data to tables.
-func (batch *freezerBatch) commit() (item uint64, writeSize int64, err error) {
- // Check that count agrees on all batches.
- item = uint64(math.MaxUint64)
- for name, tb := range batch.tables {
- if item < math.MaxUint64 && tb.curItem != item {
- return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, tb.curItem, item)
- }
- item = tb.curItem
- }
-
- // Commit all table batches.
- for _, tb := range batch.tables {
- if err := tb.commit(); err != nil {
- return 0, 0, err
- }
- writeSize += tb.totalBytes
- }
- return item, writeSize, nil
-}
-
-// freezerTableBatch is a batch for a freezer table.
-type freezerTableBatch struct {
- t *freezerTable
-
- sb *snappyBuffer
- encBuffer writeBuffer
- dataBuffer []byte
- indexBuffer []byte
- curItem uint64 // expected index of next append
- totalBytes int64 // counts written bytes since reset
-}
-
-// newBatch creates a new batch for the freezer table.
-func (t *freezerTable) newBatch() *freezerTableBatch {
- batch := &freezerTableBatch{t: t}
- if !t.noCompression {
- batch.sb = new(snappyBuffer)
- }
- batch.reset()
- return batch
-}
-
-// reset clears the batch for reuse.
-func (batch *freezerTableBatch) reset() {
- batch.dataBuffer = batch.dataBuffer[:0]
- batch.indexBuffer = batch.indexBuffer[:0]
- batch.curItem = batch.t.items.Load()
- batch.totalBytes = 0
-}
-
-// Append rlp-encodes and adds data at the end of the freezer table. The item number is a
-// precautionary parameter to ensure data correctness, but the table will reject already
-// existing data.
-func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
- if item != batch.curItem {
- return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
- }
-
- // Encode the item.
- batch.encBuffer.Reset()
- if err := rlp.Encode(&batch.encBuffer, data); err != nil {
- return err
- }
- encItem := batch.encBuffer.data
- if batch.sb != nil {
- encItem = batch.sb.compress(encItem)
- }
- return batch.appendItem(encItem)
-}
-
-// AppendRaw injects a binary blob at the end of the freezer table. The item number is a
-// precautionary parameter to ensure data correctness, but the table will reject already
-// existing data.
-func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
- if item != batch.curItem {
- return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
- }
-
- encItem := blob
- if batch.sb != nil {
- encItem = batch.sb.compress(blob)
- }
- return batch.appendItem(encItem)
-}
-
-func (batch *freezerTableBatch) appendItem(data []byte) error {
- // Check if item fits into current data file.
- itemSize := int64(len(data))
- itemOffset := batch.t.headBytes + int64(len(batch.dataBuffer))
- if itemOffset+itemSize > int64(batch.t.maxFileSize) {
- // It doesn't fit, go to next file first.
- if err := batch.commit(); err != nil {
- return err
- }
- if err := batch.t.advanceHead(); err != nil {
- return err
- }
- itemOffset = 0
- }
-
- // Put data to buffer.
- batch.dataBuffer = append(batch.dataBuffer, data...)
- batch.totalBytes += itemSize
-
- // Put index entry to buffer.
- entry := indexEntry{filenum: batch.t.headId, offset: uint32(itemOffset + itemSize)}
- batch.indexBuffer = entry.append(batch.indexBuffer)
- batch.curItem++
-
- return batch.maybeCommit()
-}
-
-// maybeCommit writes the buffered data if the buffer is full enough.
-func (batch *freezerTableBatch) maybeCommit() error {
- if len(batch.dataBuffer) > freezerBatchBufferLimit {
- return batch.commit()
- }
- return nil
-}
-
-// commit writes the batched items to the backing freezerTable.
-func (batch *freezerTableBatch) commit() error {
- // Write data. The head file is fsync'd after write to ensure the
- // data is truly transferred to disk.
- _, err := batch.t.head.Write(batch.dataBuffer)
- if err != nil {
- return err
- }
- if err := batch.t.head.Sync(); err != nil {
- return err
- }
- dataSize := int64(len(batch.dataBuffer))
- batch.dataBuffer = batch.dataBuffer[:0]
-
- // Write indices. The index file is fsync'd after write to ensure the
- // data indexes are truly transferred to disk.
- _, err = batch.t.index.Write(batch.indexBuffer)
- if err != nil {
- return err
- }
- if err := batch.t.index.Sync(); err != nil {
- return err
- }
- indexSize := int64(len(batch.indexBuffer))
- batch.indexBuffer = batch.indexBuffer[:0]
-
- // Update headBytes of table.
- batch.t.headBytes += dataSize
- batch.t.items.Store(batch.curItem)
-
- // Update metrics.
- batch.t.sizeGauge.Inc(dataSize + indexSize)
- batch.t.writeMeter.Mark(dataSize + indexSize)
- return nil
-}
-
-// snappyBuffer writes snappy in block format, and can be reused across
-// calls to compress, which overwrites the previously returned buffer.
-type snappyBuffer struct {
- dst []byte
-}
-
-// compress snappy-compresses the data.
-func (s *snappyBuffer) compress(data []byte) []byte {
- // The snappy library does not care what the capacity of the buffer is,
- // but only checks the length. If the length is too small, it will
- // allocate a brand new buffer.
- // To avoid that, we check the required size here, and grow the size of the
- // buffer to utilize the full capacity.
- if n := snappy.MaxEncodedLen(len(data)); len(s.dst) < n {
- if cap(s.dst) < n {
- s.dst = make([]byte, n)
- }
- s.dst = s.dst[:n]
- }
-
- s.dst = snappy.Encode(s.dst, data)
- return s.dst
-}
-
-// writeBuffer implements io.Writer for a byte slice.
-type writeBuffer struct {
- data []byte
-}
-
-func (wb *writeBuffer) Write(data []byte) (int, error) {
- wb.data = append(wb.data, data...)
- return len(data), nil
-}
-
-func (wb *writeBuffer) Reset() {
- wb.data = wb.data[:0]
-}
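
The snappyBuffer.compress method above relies on a subtlety of the snappy API: snappy.Encode only reuses dst when len(dst), not merely cap(dst), is at least MaxEncodedLen(len(src)); otherwise it allocates a fresh buffer. The standalone sketch below isolates that trick, reusing the same github.com/golang/snappy package the deleted file imports.

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

type reusableBuffer struct{ dst []byte }

// compress grows the buffer's length (not just its capacity) before
// encoding, so snappy.Encode writes into it instead of allocating.
func (b *reusableBuffer) compress(data []byte) []byte {
	if n := snappy.MaxEncodedLen(len(data)); len(b.dst) < n {
		if cap(b.dst) < n {
			b.dst = make([]byte, n) // allocate once, reuse afterwards
		}
		b.dst = b.dst[:n]
	}
	b.dst = snappy.Encode(b.dst, data)
	return b.dst
}

func main() {
	var buf reusableBuffer
	out := buf.compress([]byte("some freezer payload"))
	fmt.Println(len(out) > 0, cap(buf.dst) > 0) // true true; later calls reuse dst
}
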
diff --git a/core/rawdb/freezer_meta.go b/core/rawdb/freezer_meta.go
deleted file mode 100644
index 7134d6504d..0000000000
--- a/core/rawdb/freezer_meta.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "io"
- "os"
-
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
-)
-
-const freezerVersion = 1 // The initial version tag of freezer table metadata
-
-// freezerTableMeta wraps all the metadata of the freezer table.
-type freezerTableMeta struct {
- // Version is the versioning descriptor of the freezer table.
- Version uint16
-
- // VirtualTail indicates how many items have been marked as deleted.
- // Its value is equal to the number of items removed from the table
- // plus the number of items hidden in the table, so it should never
- // be lower than the "actual tail".
- VirtualTail uint64
-}
-
-// newMetadata initializes the metadata object with the given virtual tail.
-func newMetadata(tail uint64) *freezerTableMeta {
- return &freezerTableMeta{
- Version: freezerVersion,
- VirtualTail: tail,
- }
-}
-
-// readMetadata reads the metadata of the freezer table from the
-// given metadata file.
-func readMetadata(file *os.File) (*freezerTableMeta, error) {
- _, err := file.Seek(0, io.SeekStart)
- if err != nil {
- return nil, err
- }
- var meta freezerTableMeta
- if err := rlp.Decode(file, &meta); err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// writeMetadata writes the metadata of the freezer table into the
-// given metadata file.
-func writeMetadata(file *os.File, meta *freezerTableMeta) error {
- _, err := file.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- return rlp.Encode(file, meta)
-}
-
-// loadMetadata loads the metadata from the given metadata file.
-// Initializes the metadata file with the given "actual tail" if
-// it's empty.
-func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) {
- stat, err := file.Stat()
- if err != nil {
- return nil, err
- }
- // Write the metadata with the given actual tail into metadata file
- // if it's non-existent. There are two possible scenarios here:
- // - the freezer table is empty
- // - the freezer table is legacy
- // In both cases, write the meta into the file with the actual tail
- // as the virtual tail.
- if stat.Size() == 0 {
- m := newMetadata(tail)
- if err := writeMetadata(file, m); err != nil {
- return nil, err
- }
- return m, nil
- }
- m, err := readMetadata(file)
- if err != nil {
- return nil, err
- }
- // Update the virtual tail with the given actual tail if the stored
- // value is even lower. Theoretically this shouldn't happen at all;
- // print a warning if it does.
- if m.VirtualTail < tail {
- log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail)
- m.VirtualTail = tail
- if err := writeMetadata(file, m); err != nil {
- return nil, err
- }
- }
- return m, nil
-}
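
The loadMetadata flow above is a load-or-initialize pattern: an empty file is seeded with fresh metadata, while a non-empty one is decoded and its virtual tail bumped if it lags behind the actual tail. Below is a compact sketch under the same assumptions, reusing the libevm rlp package the deleted file already imports; the meta type is a stand-in for freezerTableMeta.

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/ava-labs/libevm/rlp"
)

type meta struct {
	Version     uint16
	VirtualTail uint64
}

func loadOrInit(f *os.File, actualTail uint64) (*meta, error) {
	stat, err := f.Stat()
	if err != nil {
		return nil, err
	}
	if stat.Size() == 0 {
		m := &meta{Version: 1, VirtualTail: actualTail}
		return m, rlp.Encode(f, m) // empty or legacy table: seed the file
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return nil, err
	}
	var m meta
	if err := rlp.Decode(f, &m); err != nil {
		return nil, err
	}
	if m.VirtualTail < actualTail {
		m.VirtualTail = actualTail // self-heal; should not normally happen
	}
	return &m, nil
}

func main() {
	f, _ := os.CreateTemp("", "meta-*") // error handling elided for brevity
	defer os.Remove(f.Name())
	m, err := loadOrInit(f, 100)
	fmt.Println(m.Version, m.VirtualTail, err) // 1 100 <nil>
}
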
diff --git a/core/rawdb/freezer_meta_test.go b/core/rawdb/freezer_meta_test.go
deleted file mode 100644
index ba1a95e453..0000000000
--- a/core/rawdb/freezer_meta_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "os"
- "testing"
-)
-
-func TestReadWriteFreezerTableMeta(t *testing.T) {
- f, err := os.CreateTemp(os.TempDir(), "*")
- if err != nil {
- t.Fatalf("Failed to create file %v", err)
- }
- err = writeMetadata(f, newMetadata(100))
- if err != nil {
- t.Fatalf("Failed to write metadata %v", err)
- }
- meta, err := readMetadata(f)
- if err != nil {
- t.Fatalf("Failed to read metadata %v", err)
- }
- if meta.Version != freezerVersion {
- t.Fatalf("Unexpected version field")
- }
- if meta.VirtualTail != uint64(100) {
- t.Fatalf("Unexpected virtual tail field")
- }
-}
-
-func TestInitializeFreezerTableMeta(t *testing.T) {
- f, err := os.CreateTemp(os.TempDir(), "*")
- if err != nil {
- t.Fatalf("Failed to create file %v", err)
- }
- meta, err := loadMetadata(f, uint64(100))
- if err != nil {
- t.Fatalf("Failed to read metadata %v", err)
- }
- if meta.Version != freezerVersion {
- t.Fatalf("Unexpected version field")
- }
- if meta.VirtualTail != uint64(100) {
- t.Fatalf("Unexpected virtual tail field")
- }
-}
diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go
deleted file mode 100644
index e0f1f40b93..0000000000
--- a/core/rawdb/freezer_resettable.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "os"
- "path/filepath"
- "sync"
-
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-const tmpSuffix = ".tmp"
-
-// freezerOpenFunc is the function used to open/create a freezer.
-type freezerOpenFunc = func() (*Freezer, error)
-
-// ResettableFreezer is a wrapper of the freezer which makes the
-// freezer resettable.
-type ResettableFreezer struct {
- freezer *Freezer
- opener freezerOpenFunc
- datadir string
- lock sync.RWMutex
-}
-
-// NewResettableFreezer creates a resettable freezer. Note that the freezer is
-// only resettable if the passed file directory is exclusively occupied
-// by the freezer. The user-configurable ancient root directory is **not**
-// supported for reset, since it might be a mount point and a rename would
-// copy hundreds of gigabytes into the local directory; that would require
-// some other file-based solution.
-//
-// The reset function will delete the directory atomically and re-create the
-// freezer from scratch.
-func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*ResettableFreezer, error) {
- if err := cleanup(datadir); err != nil {
- return nil, err
- }
- opener := func() (*Freezer, error) {
- return NewFreezer(datadir, namespace, readonly, maxTableSize, tables)
- }
- freezer, err := opener()
- if err != nil {
- return nil, err
- }
- return &ResettableFreezer{
- freezer: freezer,
- opener: opener,
- datadir: datadir,
- }, nil
-}
-
-// Reset deletes the file directory exclusively occupied by the freezer and
-// recreates the freezer from scratch. The atomicity of directory deletion
-// is guaranteed by the rename operation; the leftover directory will be
-// cleaned up on the next startup in case a crash happens after the rename.
-func (f *ResettableFreezer) Reset() error {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- if err := f.freezer.Close(); err != nil {
- return err
- }
- tmp := tmpName(f.datadir)
- if err := os.Rename(f.datadir, tmp); err != nil {
- return err
- }
- if err := os.RemoveAll(tmp); err != nil {
- return err
- }
- freezer, err := f.opener()
- if err != nil {
- return err
- }
- f.freezer = freezer
- return nil
-}
-
-// Close terminates the chain freezer, unmapping all the data files.
-func (f *ResettableFreezer) Close() error {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.Close()
-}
-
-// HasAncient reports whether the specified ancient data exists in the freezer.
-func (f *ResettableFreezer) HasAncient(kind string, number uint64) (bool, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.HasAncient(kind, number)
-}
-
-// Ancient retrieves an ancient binary blob from the append-only immutable files.
-func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.Ancient(kind, number)
-}
-
-// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
-// It will return
-// - at most 'count' items,
-//   - if maxBytes is specified: at least 1 item (even if exceeding maxBytes),
-//     but will otherwise return as many items as fit into maxBytes.
-// - if maxBytes is not specified, 'count' items will be returned if they are present.
-func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.AncientRange(kind, start, count, maxBytes)
-}
-
-// Ancients returns the number of frozen items.
-func (f *ResettableFreezer) Ancients() (uint64, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.Ancients()
-}
-
-// Tail returns the number of the first stored item in the freezer.
-func (f *ResettableFreezer) Tail() (uint64, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.Tail()
-}
-
-// AncientSize returns the ancient size of the specified category.
-func (f *ResettableFreezer) AncientSize(kind string) (uint64, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.AncientSize(kind)
-}
-
-// ReadAncients runs the given read operation while ensuring that no writes take place
-// on the underlying freezer.
-func (f *ResettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.ReadAncients(fn)
-}
-
-// ModifyAncients runs the given write operation.
-func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.ModifyAncients(fn)
-}
-
-// TruncateHead discards any recent data above the provided threshold number.
-// It returns the previous head number.
-func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.TruncateHead(items)
-}
-
-// TruncateTail discards any old data below the provided threshold number.
-// It returns the previous tail value.
-func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.TruncateTail(tail)
-}
-
-// Sync flushes all data tables to disk.
-func (f *ResettableFreezer) Sync() error {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.Sync()
-}
-
-// MigrateTable processes the entries in a given table in sequence
-// converting them to a new format if they're of an old format.
-func (f *ResettableFreezer) MigrateTable(kind string, convert convertLegacyFn) error {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.MigrateTable(kind, convert)
-}
-
-// cleanup removes the leftover directory, if any, next to the specified
-// path whose name carries the deletion-marker suffix.
-func cleanup(path string) error {
- parent := filepath.Dir(path)
- if _, err := os.Lstat(parent); os.IsNotExist(err) {
- return nil
- }
- dir, err := os.Open(parent)
- if err != nil {
- return err
- }
- names, err := dir.Readdirnames(0)
- if err != nil {
- return err
- }
- if cerr := dir.Close(); cerr != nil {
- return cerr
- }
- for _, name := range names {
- if name == filepath.Base(path)+tmpSuffix {
- log.Info("Removed leftover freezer directory", "name", name)
- return os.RemoveAll(filepath.Join(parent, name))
- }
- }
- return nil
-}
-
-func tmpName(path string) string {
- return filepath.Join(filepath.Dir(path), filepath.Base(path)+tmpSuffix)
-}
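
The Reset/cleanup pair above is a rename-then-delete idiom: renaming the live directory to a ".tmp" sibling is atomic, so a crash during the (non-atomic) removal leaves behind only a marker directory that the next startup sweeps. A self-contained sketch of the same idiom, with the directory names invented for illustration:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

const tmpSuffix = ".tmp"

func tmpName(path string) string {
	return filepath.Join(filepath.Dir(path), filepath.Base(path)+tmpSuffix)
}

// reset atomically retires dir and recreates it empty. If the process
// crashes after the rename, only the ".tmp" sibling is left behind.
func reset(dir string) error {
	tmp := tmpName(dir)
	if err := os.Rename(dir, tmp); err != nil { // atomic on POSIX filesystems
		return err
	}
	if err := os.RemoveAll(tmp); err != nil { // safe to retry after a crash
		return err
	}
	return os.MkdirAll(dir, 0755)
}

func main() {
	dir, _ := os.MkdirTemp("", "freezer-*") // error handling elided for brevity
	_ = os.WriteFile(filepath.Join(dir, "table.cdat"), []byte("data"), 0644)
	fmt.Println(reset(dir)) // <nil>; dir exists again, now empty
}
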
diff --git a/core/rawdb/freezer_resettable_test.go b/core/rawdb/freezer_resettable_test.go
deleted file mode 100644
index 4b6eb11f9c..0000000000
--- a/core/rawdb/freezer_resettable_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "bytes"
- "os"
- "testing"
-
- "github.com/ava-labs/libevm/ethdb"
-)
-
-func TestResetFreezer(t *testing.T) {
- items := []struct {
- id uint64
- blob []byte
- }{
- {0, bytes.Repeat([]byte{0}, 2048)},
- {1, bytes.Repeat([]byte{1}, 2048)},
- {2, bytes.Repeat([]byte{2}, 2048)},
- }
- f, _ := NewResettableFreezer(t.TempDir(), "", false, 2048, freezerTestTableDef)
- defer f.Close()
-
- f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for _, item := range items {
- op.AppendRaw("test", item.id, item.blob)
- }
- return nil
- })
- for _, item := range items {
- blob, _ := f.Ancient("test", item.id)
- if !bytes.Equal(blob, item.blob) {
- t.Fatal("Unexpected blob")
- }
- }
-
- // Reset freezer
- f.Reset()
- count, _ := f.Ancients()
- if count != 0 {
- t.Fatal("Failed to reset freezer")
- }
- for _, item := range items {
- blob, _ := f.Ancient("test", item.id)
- if len(blob) != 0 {
- t.Fatal("Unexpected blob")
- }
- }
-
- // Fill the freezer
- f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for _, item := range items {
- op.AppendRaw("test", item.id, item.blob)
- }
- return nil
- })
- for _, item := range items {
- blob, _ := f.Ancient("test", item.id)
- if !bytes.Equal(blob, item.blob) {
- t.Fatal("Unexpected blob")
- }
- }
-}
-
-func TestFreezerCleanup(t *testing.T) {
- items := []struct {
- id uint64
- blob []byte
- }{
- {0, bytes.Repeat([]byte{0}, 2048)},
- {1, bytes.Repeat([]byte{1}, 2048)},
- {2, bytes.Repeat([]byte{2}, 2048)},
- }
- datadir := t.TempDir()
- f, _ := NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef)
- f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for _, item := range items {
- op.AppendRaw("test", item.id, item.blob)
- }
- return nil
- })
- f.Close()
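- // Simulate an interrupted reset: leave the data directory behind under its
- // deletion-marker name, exactly the leftover cleanup is expected to remove.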
- os.Rename(datadir, tmpName(datadir))
-
- // Open the freezer again, trigger cleanup operation
- f, _ = NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef)
- f.Close()
-
- if _, err := os.Lstat(tmpName(datadir)); !os.IsNotExist(err) {
- t.Fatal("Failed to cleanup leftover directory")
- }
-}
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
deleted file mode 100644
index 9ea8fa3887..0000000000
--- a/core/rawdb/freezer_table.go
+++ /dev/null
@@ -1,990 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
-
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/log"
- "github.com/golang/snappy"
-)
-
-var (
- // errClosed is returned if an operation attempts to read from or write to the
- // freezer table after it has already been closed.
- errClosed = errors.New("closed")
-
- // errOutOfBounds is returned if the item requested is not contained within the
- // freezer table.
- errOutOfBounds = errors.New("out of bounds")
-
- // errNotSupported is returned if the database doesn't support the required operation.
- errNotSupported = errors.New("this operation is not supported")
-)
-
-// indexEntry contains the number/id of the file that the data resides in, as well as the
-// offset within the file to the end of the data.
-// In serialized form, the filenum is stored as uint16.
-type indexEntry struct {
- filenum uint32 // stored as uint16 ( 2 bytes )
- offset uint32 // stored as uint32 ( 4 bytes )
-}
-
-const indexEntrySize = 6
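-
-// On disk each entry occupies exactly indexEntrySize bytes, big-endian:
-// bytes [0:2] hold the file number (uint16) and bytes [2:6] the end offset
-// within that file (uint32), matching unmarshalBinary and append below.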
-
-// unmarshalBinary deserializes binary b into the rawIndex entry.
-func (i *indexEntry) unmarshalBinary(b []byte) {
- i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
- i.offset = binary.BigEndian.Uint32(b[2:6])
-}
-
-// append adds the encoded entry to the end of b.
-func (i *indexEntry) append(b []byte) []byte {
- offset := len(b)
- out := append(b, make([]byte, indexEntrySize)...)
- binary.BigEndian.PutUint16(out[offset:], uint16(i.filenum))
- binary.BigEndian.PutUint32(out[offset+2:], i.offset)
- return out
-}
-
-// bounds returns the start- and end-offsets, and the file number of where to
-// read the data item marked by the two index entries. The two entries are
-// assumed to be sequential.
-func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
- if i.filenum != end.filenum {
- // If a piece of data 'crosses' a data-file,
- // it's actually in one piece on the second data-file.
- // We return a zero-indexEntry for the second file as start
- return 0, end.offset, end.filenum
- }
- return i.offset, end.offset, end.filenum
-}
-
-// freezerTable represents a single chained data table within the freezer (e.g. blocks).
-// It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
-// file (uncompressed 6-byte index entries into the data file).
-type freezerTable struct {
- items atomic.Uint64 // Number of items stored in the table (including items removed from tail)
- itemOffset atomic.Uint64 // Number of items removed from the table
-
- // itemHidden is the number of items marked as deleted. Tail deletion is
- // only supported at file level which means the actual deletion will be
- // delayed until the entire data file is marked as deleted. Before that
- // these items will be hidden to prevent being visited again. The value
- // should never be lower than itemOffset.
- itemHidden atomic.Uint64
-
- noCompression bool // if true, disables snappy compression. Note: does not work retroactively
- readonly bool
- maxFileSize uint32 // Max file size for data-files
- name string
- path string
-
- head *os.File // File descriptor for the data head of the table
- index *os.File // File descriptor for the indexEntry file of the table
- meta *os.File // File descriptor for metadata of the table
- files map[uint32]*os.File // open files
- headId uint32 // number of the currently active head file
- tailId uint32 // number of the earliest file
-
- headBytes int64 // Number of bytes written to the head file
- readMeter metrics.Meter // Meter for measuring the effective amount of data read
- writeMeter metrics.Meter // Meter for measuring the effective amount of data written
- sizeGauge metrics.Gauge // Gauge for tracking the combined size of all freezer tables
-
- logger log.Logger // Logger with database path and table name embedded
- lock sync.RWMutex // Mutex protecting the data file descriptors
-}
-
-// newFreezerTable opens the given path as a freezer table.
-func newFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
- return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
-}
-
-// newTable opens a freezer table, creating the data and index files if they are
-// non-existent. Both files are truncated to the shortest common length to ensure
-// they don't go out of sync.
-func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
- // Ensure the containing directory exists and open the indexEntry file
- if err := os.MkdirAll(path, 0755); err != nil {
- return nil, err
- }
- var idxName string
- if noCompression {
- idxName = fmt.Sprintf("%s.ridx", name) // raw index file
- } else {
- idxName = fmt.Sprintf("%s.cidx", name) // compressed index file
- }
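- // The differing index extension keeps a table written with compression from
- // being opened without it (and vice versa): the other mode sees a missing
- // or empty index and thus an empty table (see TestSnappyDetection).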
- var (
- err error
- index *os.File
- meta *os.File
- )
- if readonly {
- // Will fail if the table index file or meta file does not exist
- index, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
- if err != nil {
- return nil, err
- }
- meta, err = openFreezerFileForReadOnly(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
- if err != nil {
- return nil, err
- }
- } else {
- index, err = openFreezerFileForAppend(filepath.Join(path, idxName))
- if err != nil {
- return nil, err
- }
- meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
- if err != nil {
- return nil, err
- }
- }
- // Create the table and repair any past inconsistency
- tab := &freezerTable{
- index: index,
- meta: meta,
- files: make(map[uint32]*os.File),
- readMeter: readMeter,
- writeMeter: writeMeter,
- sizeGauge: sizeGauge,
- name: name,
- path: path,
- logger: log.New("database", path, "table", name),
- noCompression: noCompression,
- readonly: readonly,
- maxFileSize: maxFilesize,
- }
- if err := tab.repair(); err != nil {
- tab.Close()
- return nil, err
- }
- // Initialize the starting size counter
- size, err := tab.sizeNolock()
- if err != nil {
- tab.Close()
- return nil, err
- }
- tab.sizeGauge.Inc(int64(size))
-
- return tab, nil
-}
-
-// repair cross-checks the head and the index file and truncates them to
-// be in sync with each other after a potential crash / data loss.
-func (t *freezerTable) repair() error {
- // Create a temporary offset buffer to initialize files with and to read index entries into
- buffer := make([]byte, indexEntrySize)
-
- // If we've just created the files, initialize the index with the 0 indexEntry
- stat, err := t.index.Stat()
- if err != nil {
- return err
- }
- if stat.Size() == 0 {
- if _, err := t.index.Write(buffer); err != nil {
- return err
- }
- }
- // Ensure the index is a multiple of indexEntrySize bytes
- if overflow := stat.Size() % indexEntrySize; overflow != 0 {
- if t.readonly {
- return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
- }
- if err := truncateFreezerFile(t.index, stat.Size()-overflow); err != nil {
- return err
- } // New file can't trigger this path
- }
- // Retrieve the file sizes and prepare for truncation
- if stat, err = t.index.Stat(); err != nil {
- return err
- }
- offsetsSize := stat.Size()
-
- // Open the head file
- var (
- firstIndex indexEntry
- lastIndex indexEntry
- contentSize int64
- contentExp int64
- verbose bool
- )
- // Read index zero, determine what file is the earliest
- // and what item offset to use
- t.index.ReadAt(buffer, 0)
- firstIndex.unmarshalBinary(buffer)
-
- // Assign the tail fields with the first stored index.
- // The total number of removed items is represented as a uint32,
- // which is not enough in theory but enough in practice.
- // TODO: use uint64 to represent total removed items.
- t.tailId = firstIndex.filenum
- t.itemOffset.Store(uint64(firstIndex.offset))
-
- // Load metadata from the file
- meta, err := loadMetadata(t.meta, t.itemOffset.Load())
- if err != nil {
- return err
- }
- t.itemHidden.Store(meta.VirtualTail)
-
- // Read the last index, use the default value in case the freezer is empty
- if offsetsSize == indexEntrySize {
- lastIndex = indexEntry{filenum: t.tailId, offset: 0}
- } else {
- t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
- lastIndex.unmarshalBinary(buffer)
- }
- // Print an error log if the index is corrupted due to an incorrect
- // last index item. While it is theoretically possible to have a zero offset
- // by storing all zero-size items, it is highly unlikely to occur in practice.
- if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
- log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize)
- }
- if t.readonly {
- t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
- } else {
- t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
- }
- if err != nil {
- return err
- }
- if stat, err = t.head.Stat(); err != nil {
- return err
- }
- contentSize = stat.Size()
-
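- // contentExp is the size the head data file should have according to the
- // last index entry, while contentSize is its actual size; each pass below
- // trims whichever side is ahead until the two agree.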
- // Keep truncating both files until they come in sync
- contentExp = int64(lastIndex.offset)
- for contentExp != contentSize {
- if t.readonly {
- return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum)
- }
- verbose = true
- // Truncate the head file to the last offset pointer
- if contentExp < contentSize {
- t.logger.Warn("Truncating dangling head", "indexed", contentExp, "stored", contentSize)
- if err := truncateFreezerFile(t.head, contentExp); err != nil {
- return err
- }
- contentSize = contentExp
- }
- // Truncate the index to point within the head file
- if contentExp > contentSize {
- t.logger.Warn("Truncating dangling indexes", "indexes", offsetsSize/indexEntrySize, "indexed", contentExp, "stored", contentSize)
- if err := truncateFreezerFile(t.index, offsetsSize-indexEntrySize); err != nil {
- return err
- }
- offsetsSize -= indexEntrySize
-
- // Read the new head index, use the default value in case
- // the freezer is already empty.
- var newLastIndex indexEntry
- if offsetsSize == indexEntrySize {
- newLastIndex = indexEntry{filenum: t.tailId, offset: 0}
- } else {
- t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
- newLastIndex.unmarshalBinary(buffer)
- }
- // We might have slipped back into an earlier head-file here
- if newLastIndex.filenum != lastIndex.filenum {
- // Release earlier opened file
- t.releaseFile(lastIndex.filenum)
- if t.head, err = t.openFile(newLastIndex.filenum, openFreezerFileForAppend); err != nil {
- return err
- }
- if stat, err = t.head.Stat(); err != nil {
- // TODO, anything more we can do here?
- // A data file has gone missing...
- return err
- }
- contentSize = stat.Size()
- }
- lastIndex = newLastIndex
- contentExp = int64(lastIndex.offset)
- }
- }
- // Sync() fails for read-only files on Windows.
- if !t.readonly {
- // Ensure all repair changes have been written to disk
- if err := t.index.Sync(); err != nil {
- return err
- }
- if err := t.head.Sync(); err != nil {
- return err
- }
- if err := t.meta.Sync(); err != nil {
- return err
- }
- }
- // Update the item and byte counters and return
- t.items.Store(t.itemOffset.Load() + uint64(offsetsSize/indexEntrySize-1)) // last indexEntry points to the end of the data file
- t.headBytes = contentSize
- t.headId = lastIndex.filenum
-
- // Delete the leftover files because of head deletion
- t.releaseFilesAfter(t.headId, true)
-
- // Delete the leftover files because of tail deletion
- t.releaseFilesBefore(t.tailId, true)
-
- // Close opened files and preopen all files
- if err := t.preopen(); err != nil {
- return err
- }
- if verbose {
- t.logger.Info("Chain freezer table opened", "items", t.items.Load(), "deleted", t.itemOffset.Load(), "hidden", t.itemHidden.Load(), "tailId", t.tailId, "headId", t.headId, "size", t.headBytes)
- } else {
- t.logger.Debug("Chain freezer table opened", "items", t.items.Load(), "size", common.StorageSize(t.headBytes))
- }
- return nil
-}
-
-// preopen opens all files that the freezer will need. This method should be called from an init-context,
-// since it assumes that it doesn't have to bother with locking.
-// The rationale for doing preopen is to not have to do it from within Retrieve, thus not needing to ever
-// obtain a write-lock within Retrieve.
-func (t *freezerTable) preopen() (err error) {
- // The repair might have already opened (some) files
- t.releaseFilesAfter(0, false)
-
- // Open all except head in RDONLY
- for i := t.tailId; i < t.headId; i++ {
- if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
- return err
- }
- }
- if t.readonly {
- t.head, err = t.openFile(t.headId, openFreezerFileForReadOnly)
- } else {
- // Open head in read/write
- t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
- }
- return err
-}
-
-// truncateHead discards any recent data above the provided threshold number.
-func (t *freezerTable) truncateHead(items uint64) error {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- // Ensure the given truncate target falls in the correct range
- existing := t.items.Load()
- if existing <= items {
- return nil
- }
- if items < t.itemHidden.Load() {
- return errors.New("truncation below tail")
- }
- // We need to truncate, save the old size for metrics tracking
- oldSize, err := t.sizeNolock()
- if err != nil {
- return err
- }
- // Something's out of sync, truncate the table's offset index
- log := t.logger.Debug
- if existing > items+1 {
- log = t.logger.Warn // Only loud warn if we delete multiple items
- }
- log("Truncating freezer table", "items", existing, "limit", items)
-
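- // The index file always holds one more entry than the number of stored
- // items: entry zero is a sentinel written at creation, so a table of N
- // items keeps N+1 entries. That is where the length+1 below comes from.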
- // Truncate the index file first; the tail position is also considered
- // when calculating the new freezer table length.
- length := items - t.itemOffset.Load()
- if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
- return err
- }
- if err := t.index.Sync(); err != nil {
- return err
- }
- // Calculate the new expected size of the data file and truncate it
- var expected indexEntry
- if length == 0 {
- expected = indexEntry{filenum: t.tailId, offset: 0}
- } else {
- buffer := make([]byte, indexEntrySize)
- if _, err := t.index.ReadAt(buffer, int64(length*indexEntrySize)); err != nil {
- return err
- }
- expected.unmarshalBinary(buffer)
- }
- // We might need to truncate back to older files
- if expected.filenum != t.headId {
- // If already open for reading, force-reopen for writing
- t.releaseFile(expected.filenum)
- newHead, err := t.openFile(expected.filenum, openFreezerFileForAppend)
- if err != nil {
- return err
- }
- // Release any files _after the current head -- both the previous head
- // and any files which may have been opened for reading
- t.releaseFilesAfter(expected.filenum, true)
-
- // Set back the historic head
- t.head = newHead
- t.headId = expected.filenum
- }
- if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
- return err
- }
- if err := t.head.Sync(); err != nil {
- return err
- }
- // All data files truncated, set internal counters and return
- t.headBytes = int64(expected.offset)
- t.items.Store(items)
-
- // Retrieve the new size and update the total size counter
- newSize, err := t.sizeNolock()
- if err != nil {
- return err
- }
- t.sizeGauge.Dec(int64(oldSize - newSize))
- return nil
-}
-
-// sizeHidden returns the total data size of hidden items in the freezer table.
-// This function assumes the lock is already held.
-func (t *freezerTable) sizeHidden() (uint64, error) {
- hidden, offset := t.itemHidden.Load(), t.itemOffset.Load()
- if hidden <= offset {
- return 0, nil
- }
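- // The end offset of the last hidden item equals the number of bytes hidden
- // at the start of the current tail file, since tail deletion only happens
- // at file granularity.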
- indices, err := t.getIndices(hidden-1, 1)
- if err != nil {
- return 0, err
- }
- return uint64(indices[1].offset), nil
-}
-
-// truncateTail discards any old data below the provided threshold number.
-func (t *freezerTable) truncateTail(items uint64) error {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- // Ensure the given truncate target falls in the correct range
- if t.itemHidden.Load() >= items {
- return nil
- }
- if t.items.Load() < items {
- return errors.New("truncation above head")
- }
- // Load the new tail index by the given new tail position
- var (
- newTailId uint32
- buffer = make([]byte, indexEntrySize)
- )
- if t.items.Load() == items {
- newTailId = t.headId
- } else {
- offset := items - t.itemOffset.Load()
- if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
- return err
- }
- var newTail indexEntry
- newTail.unmarshalBinary(buffer)
- newTailId = newTail.filenum
- }
- // Save the old size for metrics tracking. This needs to be done
- // before any updates to either itemHidden or itemOffset.
- oldSize, err := t.sizeNolock()
- if err != nil {
- return err
- }
- // Update the virtual tail marker and hide these entries in the table.
- t.itemHidden.Store(items)
- if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
- return err
- }
- // Hidden items still fall in the current tail file, no data file
- // can be dropped.
- if t.tailId == newTailId {
- return nil
- }
- // Hidden items fall in an invalid range; return an error.
- if t.tailId > newTailId {
- return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId)
- }
- // Count how many items can be deleted from the file.
- var (
- newDeleted = items
- deleted = t.itemOffset.Load()
- )
- // Hidden items exceed the current tail file, drop the relevant data files.
- for current := items - 1; current >= deleted; current -= 1 {
- if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
- return err
- }
- var pre indexEntry
- pre.unmarshalBinary(buffer)
- if pre.filenum != newTailId {
- break
- }
- newDeleted = current
- }
- // Commit the changes to the metadata file first, before manipulating
- // the index file.
- if err := t.meta.Sync(); err != nil {
- return err
- }
- // Close the index file before shortening it.
- if err := t.index.Close(); err != nil {
- return err
- }
- // Truncate the deleted index entries from the index file.
- err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
- tailIndex := indexEntry{
- filenum: newTailId,
- offset: uint32(newDeleted),
- }
- _, err := f.Write(tailIndex.append(nil))
- return err
- })
- if err != nil {
- return err
- }
- // Reopen the modified index file to load the changes
- t.index, err = openFreezerFileForAppend(t.index.Name())
- if err != nil {
- return err
- }
- // Sync the file to ensure changes are flushed to disk
- if err := t.index.Sync(); err != nil {
- return err
- }
- // Release any files before the current tail
- t.tailId = newTailId
- t.itemOffset.Store(newDeleted)
- t.releaseFilesBefore(t.tailId, true)
-
- // Retrieve the new size and update the total size counter
- newSize, err := t.sizeNolock()
- if err != nil {
- return err
- }
- t.sizeGauge.Dec(int64(oldSize - newSize))
- return nil
-}
-
-// Close closes all opened files.
-func (t *freezerTable) Close() error {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- var errs []error
- doClose := func(f *os.File, sync bool, close bool) {
- if sync && !t.readonly {
- if err := f.Sync(); err != nil {
- errs = append(errs, err)
- }
- }
- if close {
- if err := f.Close(); err != nil {
- errs = append(errs, err)
- }
- }
- }
- // Trying to fsync a file opened in rdonly causes "Access denied"
- // error on Windows.
- doClose(t.index, true, true)
- doClose(t.meta, true, true)
-
- // The preopened non-head data-files are all opened in readonly.
- // The head is opened in rw-mode, so we sync it here - but since it's also
- // part of t.files, it will be closed in the loop below.
- doClose(t.head, true, false) // sync but do not close
-
- for _, f := range t.files {
- doClose(f, false, true) // close but do not sync
- }
- t.index = nil
- t.meta = nil
- t.head = nil
-
- if errs != nil {
- return fmt.Errorf("%v", errs)
- }
- return nil
-}
-
-// openFile assumes that the write-lock is held by the caller
-func (t *freezerTable) openFile(num uint32, opener func(string) (*os.File, error)) (f *os.File, err error) {
- var exist bool
- if f, exist = t.files[num]; !exist {
- var name string
- if t.noCompression {
- name = fmt.Sprintf("%s.%04d.rdat", t.name, num)
- } else {
- name = fmt.Sprintf("%s.%04d.cdat", t.name, num)
- }
- f, err = opener(filepath.Join(t.path, name))
- if err != nil {
- return nil, err
- }
- t.files[num] = f
- }
- return f, err
-}
-
-// releaseFile closes a file, and removes it from the open file cache.
-// Assumes that the caller holds the write lock
-func (t *freezerTable) releaseFile(num uint32) {
- if f, exist := t.files[num]; exist {
- delete(t.files, num)
- f.Close()
- }
-}
-
-// releaseFilesAfter closes all open files with a higher number, and optionally also deletes the files
-func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
- for fnum, f := range t.files {
- if fnum > num {
- delete(t.files, fnum)
- f.Close()
- if remove {
- os.Remove(f.Name())
- }
- }
- }
-}
-
-// releaseFilesBefore closes all open files with a lower number, and optionally also deletes the files
-func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
- for fnum, f := range t.files {
- if fnum < num {
- delete(t.files, fnum)
- f.Close()
- if remove {
- os.Remove(f.Name())
- }
- }
- }
-}
-
-// getIndices returns the index entries for the given from-item, covering 'count' items.
-// N.B: The actual number of returned indices for N items will always be N+1 (unless an
-// error is returned).
-// OBS: This method assumes that the caller has already verified (and/or trimmed) the range
-// so that the items are within bounds. If this method is used to read out of bounds,
-// it will return an error.
-func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
- // Apply the table-offset
- from = from - t.itemOffset.Load()
-
- // For reading N items, we need N+1 indices.
- buffer := make([]byte, (count+1)*indexEntrySize)
- if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
- return nil, err
- }
- var (
- indices []*indexEntry
- offset int
- )
- for i := from; i <= from+count; i++ {
- index := new(indexEntry)
- index.unmarshalBinary(buffer[offset:])
- offset += indexEntrySize
- indices = append(indices, index)
- }
- if from == 0 {
- // Special case if we're reading the first item in the freezer. We assume that
- // the first item always starts at offset zero (deletion is only supported
- // at file granularity, so this assumption holds).
- // This means we can use the first item's metadata to carry information about
- // the 'global' offset, for the deletion case.
- indices[0].offset = 0
- indices[0].filenum = indices[1].filenum
- }
- return indices, nil
-}
-
-// Retrieve looks up the data offset of an item with the given number and retrieves
-// the raw binary blob from the data file.
-func (t *freezerTable) Retrieve(item uint64) ([]byte, error) {
- items, err := t.RetrieveItems(item, 1, 0)
- if err != nil {
- return nil, err
- }
- return items[0], nil
-}
-
-// RetrieveItems returns multiple items in sequence, starting from the index 'start'.
-// It will return at most 'count' items, but will abort earlier to respect the
-// 'maxBytes' argument. However, if 'maxBytes' is smaller than the size of one
-// item, it _will_ return one element and possibly exceed maxBytes.
-func (t *freezerTable) RetrieveItems(start, count, maxBytes uint64) ([][]byte, error) {
- // First we read the 'raw' data, which might be compressed.
- diskData, sizes, err := t.retrieveItems(start, count, maxBytes)
- if err != nil {
- return nil, err
- }
- var (
- output = make([][]byte, 0, count)
- offset int // offset for reading
- outputSize int // size of uncompressed data
- )
- // Now slice up the data and decompress.
- for i, diskSize := range sizes {
- item := diskData[offset : offset+diskSize]
- offset += diskSize
- decompressedSize := diskSize
- if !t.noCompression {
- decompressedSize, _ = snappy.DecodedLen(item)
- }
- if i > 0 && maxBytes != 0 && uint64(outputSize+decompressedSize) > maxBytes {
- break
- }
- if !t.noCompression {
- data, err := snappy.Decode(nil, item)
- if err != nil {
- return nil, err
- }
- output = append(output, data)
- } else {
- output = append(output, item)
- }
- outputSize += decompressedSize
- }
- return output, nil
-}
-
-// retrieveItems reads up to 'count' items from the table. It reads at least
-// one item, but otherwise avoids reading more than maxBytes bytes. If maxBytes
-// is 0, the size limitation is ignored and memory is allocated as needed. It
-// returns the (potentially compressed) data, and the sizes.
-func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []int, error) {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- // Ensure the table and the item are accessible
- if t.index == nil || t.head == nil || t.meta == nil {
- return nil, nil, errClosed
- }
- var (
- items = t.items.Load() // the total number of items (head + 1)
- hidden = t.itemHidden.Load() // the number of hidden items
- )
- // Ensure the start is written, not deleted from the tail, and that the
- // caller actually wants something
- if items <= start || hidden > start || count == 0 {
- return nil, nil, errOutOfBounds
- }
- if start+count > items {
- count = items - start
- }
- var output []byte // Buffer to read data into
- if maxBytes != 0 {
- output = make([]byte, 0, maxBytes)
- } else {
- output = make([]byte, 0, 1024) // initial buffer cap
- }
- // readData is a helper method to read a single data item from disk.
- readData := func(fileId, start uint32, length int) error {
- output = grow(output, length)
- dataFile, exist := t.files[fileId]
- if !exist {
- return fmt.Errorf("missing data file %d", fileId)
- }
- if _, err := dataFile.ReadAt(output[len(output)-length:], int64(start)); err != nil {
- return fmt.Errorf("%w, fileid: %d, start: %d, length: %d", err, fileId, start, length)
- }
- return nil
- }
- // Read all the indexes in one go
- indices, err := t.getIndices(start, count)
- if err != nil {
- return nil, nil, err
- }
- var (
- sizes []int // The sizes for each element
- totalSize = 0 // The total size of all data read so far
- readStart = indices[0].offset // Where, in the file, to start reading
- unreadSize = 0 // The size of the as-yet-unread data
- )
-
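- // Consecutive items that live in the same data file are coalesced into a
- // single ReadAt call: item sizes accumulate in unreadSize until a file
- // boundary or the byte limit forces the deferred read to be issued.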
- for i, firstIndex := range indices[:len(indices)-1] {
- secondIndex := indices[i+1]
- // Determine the size of the item.
- offset1, offset2, _ := firstIndex.bounds(secondIndex)
- size := int(offset2 - offset1)
- // Crossing a file boundary?
- if secondIndex.filenum != firstIndex.filenum {
- // If we have unread data in the first file, we need to do that read now.
- if unreadSize > 0 {
- if err := readData(firstIndex.filenum, readStart, unreadSize); err != nil {
- return nil, nil, err
- }
- unreadSize = 0
- }
- readStart = 0
- }
- if i > 0 && uint64(totalSize+size) > maxBytes && maxBytes != 0 {
- // About to break out due to byte limit being exceeded. We don't
- // read this last item, but we need to do the deferred reads now.
- if unreadSize > 0 {
- if err := readData(secondIndex.filenum, readStart, unreadSize); err != nil {
- return nil, nil, err
- }
- }
- break
- }
- // Defer the read for later
- unreadSize += size
- totalSize += size
- sizes = append(sizes, size)
- if i == len(indices)-2 || (uint64(totalSize) > maxBytes && maxBytes != 0) {
- // Last item, need to do the read now
- if err := readData(secondIndex.filenum, readStart, unreadSize); err != nil {
- return nil, nil, err
- }
- break
- }
- }
-
- // Update metrics.
- t.readMeter.Mark(int64(totalSize))
- return output, sizes, nil
-}
-
-// has reports whether the item with the given number is still accessible
-// in the freezer table.
-func (t *freezerTable) has(number uint64) bool {
- return t.items.Load() > number && t.itemHidden.Load() <= number
-}
-
-// size returns the total data size in the freezer table.
-func (t *freezerTable) size() (uint64, error) {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return t.sizeNolock()
-}
-
-// sizeNolock returns the total data size in the freezer table. This function
-// assumes the lock is already held.
-func (t *freezerTable) sizeNolock() (uint64, error) {
- stat, err := t.index.Stat()
- if err != nil {
- return 0, err
- }
- hidden, err := t.sizeHidden()
- if err != nil {
- return 0, err
- }
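- // Every data file between tail and head is full (maxFileSize bytes each),
- // the head file holds headBytes, and the index file contributes its own
- // size; bytes belonging to hidden tail items are subtracted.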
- total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size()) - hidden
- return total, nil
-}
-
-// advanceHead should be called when the current head file would outgrow the file limits,
-// and a new file must be opened. It acquires the write-lock itself, so the
-// caller must not already hold it.
-func (t *freezerTable) advanceHead() error {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- // We open the next file in truncated mode -- if this file already
- // exists, we need to start over from scratch on it.
- nextID := t.headId + 1
- newHead, err := t.openFile(nextID, openFreezerFileTruncated)
- if err != nil {
- return err
- }
- // Commit the contents of the old file to stable storage and
- // tear it down. It will be re-opened in read-only mode.
- if err := t.head.Sync(); err != nil {
- return err
- }
- t.releaseFile(t.headId)
- t.openFile(t.headId, openFreezerFileForReadOnly)
-
- // Swap out the current head.
- t.head = newHead
- t.headBytes = 0
- t.headId = nextID
- return nil
-}
-
-// Sync pushes any pending data from memory out to disk. This is an expensive
-// operation, so use it with care.
-func (t *freezerTable) Sync() error {
- t.lock.Lock()
- defer t.lock.Unlock()
- if t.index == nil || t.head == nil || t.meta == nil {
- return errClosed
- }
- var err error
- trackError := func(e error) {
- if e != nil && err == nil {
- err = e
- }
- }
-
- trackError(t.index.Sync())
- trackError(t.meta.Sync())
- trackError(t.head.Sync())
- return err
-}
-
-func (t *freezerTable) dumpIndexStdout(start, stop int64) {
- t.dumpIndex(os.Stdout, start, stop)
-}
-
-func (t *freezerTable) dumpIndexString(start, stop int64) string {
- var out bytes.Buffer
- out.WriteString("\n")
- t.dumpIndex(&out, start, stop)
- return out.String()
-}
-
-func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
- meta, err := readMetadata(t.meta)
- if err != nil {
- fmt.Fprintf(w, "Failed to decode freezer table %v\n", err)
- return
- }
- fmt.Fprintf(w, "Version %d count %d, deleted %d, hidden %d\n", meta.Version,
- t.items.Load(), t.itemOffset.Load(), t.itemHidden.Load())
-
- buf := make([]byte, indexEntrySize)
-
- fmt.Fprintf(w, "| number | fileno | offset |\n")
- fmt.Fprintf(w, "|--------|--------|--------|\n")
-
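- // Entry zero carries tail information rather than an item boundary, so the
- // dump starts at entry one: entry i+1 marks the end of item i.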
- for i := uint64(start); ; i++ {
- if _, err := t.index.ReadAt(buf, int64((i+1)*indexEntrySize)); err != nil {
- break
- }
- var entry indexEntry
- entry.unmarshalBinary(buf)
- fmt.Fprintf(w, "| %03d | %03d | %03d | \n", i, entry.filenum, entry.offset)
- if stop > 0 && i >= uint64(stop) {
- break
- }
- }
- fmt.Fprintf(w, "|--------------------------|\n")
-}
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
deleted file mode 100644
index 4fdc09a538..0000000000
--- a/core/rawdb/freezer_table_test.go
+++ /dev/null
@@ -1,1369 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "math/rand"
- "os"
- "path/filepath"
- "reflect"
- "testing"
- "testing/quick"
-
- "github.com/ava-labs/coreth/metrics"
- "github.com/davecgh/go-spew/spew"
- "github.com/stretchr/testify/require"
-)
-
-// TestFreezerBasics tests initializing a freezer table from scratch, writing to the table,
-// and reading it back.
-func TestFreezerBasics(t *testing.T) {
- t.Parallel()
- // set cutoff at 50 bytes
- f, err := newTable(os.TempDir(),
- fmt.Sprintf("unittest-%d", rand.Uint64()),
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
-
- // Write 15 bytes 255 times, results in 85 files
- writeChunks(t, f, 255, 15)
-
- //print(t, f, 0)
- //print(t, f, 1)
- //print(t, f, 2)
- //
- //db[0] = 000000000000000000000000000000
- //db[1] = 010101010101010101010101010101
- //db[2] = 020202020202020202020202020202
-
- for y := 0; y < 255; y++ {
- exp := getChunk(15, y)
- got, err := f.Retrieve(uint64(y))
- if err != nil {
- t.Fatalf("reading item %d: %v", y, err)
- }
- if !bytes.Equal(got, exp) {
- t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
- }
- }
- // Check that we cannot read too far
- _, err = f.Retrieve(uint64(255))
- if err != errOutOfBounds {
- t.Fatal(err)
- }
-}
-
-// TestFreezerBasicsClosing tests the same as TestFreezerBasics, but also closes and
-// reopens the freezer between every operation.
-func TestFreezerBasicsClosing(t *testing.T) {
- t.Parallel()
- // set cutoff at 50 bytes
- var (
- fname = fmt.Sprintf("basics-close-%d", rand.Uint64())
- rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- f *freezerTable
- err error
- )
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
-
- // Write 15 bytes 255 times, results in 85 files.
- // In-between writes, the table is closed and re-opened.
- for x := 0; x < 255; x++ {
- data := getChunk(15, x)
- batch := f.newBatch()
- require.NoError(t, batch.AppendRaw(uint64(x), data))
- require.NoError(t, batch.commit())
- f.Close()
-
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- }
- defer f.Close()
-
- for y := 0; y < 255; y++ {
- exp := getChunk(15, y)
- got, err := f.Retrieve(uint64(y))
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(got, exp) {
- t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
- }
- f.Close()
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
-func TestFreezerRepairDanglingHead(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
-
- // Fill table
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // Write 15 bytes 255 times
- writeChunks(t, f, 255, 15)
-
- // The last item should be there
- if _, err = f.Retrieve(0xfe); err != nil {
- t.Fatal(err)
- }
- f.Close()
- }
-
- // open the index
- idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
- if err != nil {
- t.Fatalf("Failed to open index file: %v", err)
- }
- // Remove 4 bytes
- stat, err := idxFile.Stat()
- if err != nil {
- t.Fatalf("Failed to stat index file: %v", err)
- }
- idxFile.Truncate(stat.Size() - 4)
- idxFile.Close()
-
- // Now open it again
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // The last item should be missing
- if _, err = f.Retrieve(0xff); err == nil {
- t.Errorf("Expected error for missing index entry")
- }
- // The one before should still be there
- if _, err = f.Retrieve(0xfd); err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- }
-}
-
-// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
-func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
-
- // Fill a table and close it
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // Write 15 bytes 255 times
- writeChunks(t, f, 255, 15)
-
- // The last item should be there
- if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
- t.Fatal(err)
- }
- f.Close()
- }
-
- // open the index
- idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
- if err != nil {
- t.Fatalf("Failed to open index file: %v", err)
- }
- // Remove everything but the first item, and leave data unaligned
- // 0-indexEntry, 1-indexEntry, corrupt-indexEntry
- idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
- idxFile.Close()
-
- // Now open it again
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // The first item should be there
- if _, err = f.Retrieve(0); err != nil {
- t.Fatal(err)
- }
- // The second item should be missing
- if _, err = f.Retrieve(1); err == nil {
- t.Errorf("Expected error for missing index entry")
- }
- // We should now be able to store items again, from item = 1
- batch := f.newBatch()
- for x := 1; x < 0xff; x++ {
- require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
- }
- require.NoError(t, batch.commit())
- f.Close()
- }
-
- // And if we open it, we should now be able to read all of them (new values)
- {
- f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- for y := 1; y < 255; y++ {
- exp := getChunk(15, ^y)
- got, err := f.Retrieve(uint64(y))
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(got, exp) {
- t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
- }
- }
- }
-}
-
-// TestSnappyDetection tests that a table written with snappy compression cannot be
-// opened with compression disabled, and vice versa.
-func TestSnappyDetection(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
-
- // Open with snappy
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // Write 15 bytes 255 times
- writeChunks(t, f, 255, 15)
- f.Close()
- }
-
- // Open without snappy
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
- if err != nil {
- t.Fatal(err)
- }
- if _, err = f.Retrieve(0); err == nil {
- f.Close()
- t.Fatalf("expected empty table")
- }
- }
-
- // Open with snappy
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // There should be 255 items
- if _, err = f.Retrieve(0xfe); err != nil {
- f.Close()
- t.Fatalf("expected no error, got %v", err)
- }
- }
-}
-
-func assertFileSize(f string, size int64) error {
- stat, err := os.Stat(f)
- if err != nil {
- return err
- }
- if stat.Size() != size {
- return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
- }
- return nil
-}
-
-// TestFreezerRepairDanglingIndex checks that if the index has more entries than
-// there is data, the index is repaired.
-func TestFreezerRepairDanglingIndex(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())
-
- // Fill a table and close it
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // Write 15 bytes 9 times : 150 bytes
- writeChunks(t, f, 9, 15)
-
- // The last item should be there
- if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
- f.Close()
- t.Fatal(err)
- }
- f.Close()
- // File sizes should be 45, 45, 45 : items [3, 3, 3]
- }
-
- // Crop third file
- fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
- // Truncate third file: 45 ,45, 20
- {
- if err := assertFileSize(fileToCrop, 45); err != nil {
- t.Fatal(err)
- }
- file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
- if err != nil {
- t.Fatal(err)
- }
- file.Truncate(20)
- file.Close()
- }
-
- // Open the db again
- // It should restore the file(s) to
- // 45, 45, 15
- // with 3+3+1 items
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- if f.items.Load() != 7 {
- t.Fatalf("expected %d items, got %d", 7, f.items.Load())
- }
- if err := assertFileSize(fileToCrop, 15); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestFreezerTruncate(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("truncation-%d", rand.Uint64())
-
- // Fill table
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // Write 15 bytes 30 times
- writeChunks(t, f, 30, 15)
-
- // The last item should be there
- if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
- t.Fatal(err)
- }
- f.Close()
- }
-
- // Reopen, truncate
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- f.truncateHead(10) // 150 bytes
- if f.items.Load() != 10 {
- t.Fatalf("expected %d items, got %d", 10, f.items.Load())
- }
- // 45, 45, 45, 15 -- bytes should be 15
- if f.headBytes != 15 {
- t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
- }
- }
-}
-
-// TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
-// That will rewind the index, and _should_ truncate the head file
-func TestFreezerRepairFirstFile(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
-
- // Fill table
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // Write 80 bytes, splitting out into two files
- batch := f.newBatch()
- require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
- require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
- require.NoError(t, batch.commit())
-
- // The last item should be there
- if _, err = f.Retrieve(1); err != nil {
- t.Fatal(err)
- }
- f.Close()
- }
-
- // Truncate the file in half
- fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
- {
- if err := assertFileSize(fileToCrop, 40); err != nil {
- t.Fatal(err)
- }
- file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
- if err != nil {
- t.Fatal(err)
- }
- file.Truncate(20)
- file.Close()
- }
-
- // Reopen
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- if f.items.Load() != 1 {
- f.Close()
- t.Fatalf("expected %d items, got %d", 0, f.items.Load())
- }
-
- // Write 40 bytes
- batch := f.newBatch()
- require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
- require.NoError(t, batch.commit())
-
- f.Close()
-
- // Should have been truncated down to zero and then 40 written
- if err := assertFileSize(fileToCrop, 40); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestFreezerReadAndTruncate tests:
-// - we have a table open
-// - do some reads, so files are open in readonly
-// - truncate so those files are 'removed'
-// - check that we did not keep the rdonly file descriptors
-func TestFreezerReadAndTruncate(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
-
- // Fill table
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // Write 15 bytes 30 times
- writeChunks(t, f, 30, 15)
-
- // The last item should be there
- if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
- t.Fatal(err)
- }
- f.Close()
- }
-
- // Reopen and read all files
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- if f.items.Load() != 30 {
- f.Close()
- t.Fatalf("expected %d items, got %d", 0, f.items.Load())
- }
- for y := byte(0); y < 30; y++ {
- f.Retrieve(uint64(y))
- }
-
- // Now, truncate back to zero
- f.truncateHead(0)
-
- // Write the data again
- batch := f.newBatch()
- for x := 0; x < 30; x++ {
- require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
- }
- require.NoError(t, batch.commit())
- f.Close()
- }
-}
-
-func TestFreezerOffset(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("offset-%d", rand.Uint64())
-
- // Fill table
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
- if err != nil {
- t.Fatal(err)
- }
-
- // Write 6 x 20 bytes, splitting out into three files
- batch := f.newBatch()
- require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
- require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
-
- require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
- require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
-
- require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
- require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
- require.NoError(t, batch.commit())
-
- t.Log(f.dumpIndexString(0, 100))
- f.Close()
- }
-
- // Now crop it.
- {
- // delete files 0 and 1
- for i := 0; i < 2; i++ {
- p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
- if err := os.Remove(p); err != nil {
- t.Fatal(err)
- }
- }
- // Read the index file
- p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
- indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
- if err != nil {
- t.Fatal(err)
- }
- indexBuf := make([]byte, 7*indexEntrySize)
- indexFile.Read(indexBuf)
-
- // Update the index file, so that we store
- // [ file = 2, offset = 4 ] at index zero
-
- zeroIndex := indexEntry{
- filenum: uint32(2), // First file is 2
- offset: uint32(4), // We have removed four items
- }
- buf := zeroIndex.append(nil)
-
- // Overwrite index zero
- copy(indexBuf, buf)
-
- // Remove the next four indices by overwriting
- copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
- indexFile.WriteAt(indexBuf, 0)
-
- // Need to truncate the moved index items
- indexFile.Truncate(indexEntrySize * (1 + 2))
- indexFile.Close()
- }
-
- // Now open again
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- t.Log(f.dumpIndexString(0, 100))
-
- // It should allow writing item 6.
- batch := f.newBatch()
- require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
- require.NoError(t, batch.commit())
-
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds,
- 1: errOutOfBounds,
- 2: errOutOfBounds,
- 3: errOutOfBounds,
- })
- checkRetrieve(t, f, map[uint64][]byte{
- 4: getChunk(20, 0xbb),
- 5: getChunk(20, 0xaa),
- 6: getChunk(20, 0x99),
- })
- }
-
- // Edit the index again, with a much larger initial offset of 1M.
- {
- // Read the index file
- p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
- indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
- if err != nil {
- t.Fatal(err)
- }
- indexBuf := make([]byte, 3*indexEntrySize)
- indexFile.Read(indexBuf)
-
- // Update the index file, so that we store
- // [ file = 2, offset = 1M ] at index zero
-
- zeroIndex := indexEntry{
- offset: uint32(1000000), // We have removed 1M items
- filenum: uint32(2), // First file is 2
- }
- buf := zeroIndex.append(nil)
-
- // Overwrite index zero
- copy(indexBuf, buf)
- indexFile.WriteAt(indexBuf, 0)
- indexFile.Close()
- }
-
- // Check that existing items have been moved to index 1M.
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- t.Log(f.dumpIndexString(0, 100))
-
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds,
- 1: errOutOfBounds,
- 2: errOutOfBounds,
- 3: errOutOfBounds,
- 999999: errOutOfBounds,
- })
- checkRetrieve(t, f, map[uint64][]byte{
- 1000000: getChunk(20, 0xbb),
- 1000001: getChunk(20, 0xaa),
- })
- }
-}
-
-func assertTableSize(t *testing.T, f *freezerTable, size int) {
- t.Helper()
- if got, err := f.size(); got != uint64(size) {
- t.Fatalf("expected size of %d bytes, got %d, err: %v", size, got, err)
- }
-}
-
-func TestTruncateTail(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
-
- // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
- if err != nil {
- t.Fatal(err)
- }
-
- // Write 7 x 20 bytes, splitting out into four files
- batch := f.newBatch()
- require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
- require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
- require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
- require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
- require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
- require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
- require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
- require.NoError(t, batch.commit())
-
- // nothing to do, all the items should still be there.
- f.truncateTail(0)
- fmt.Println(f.dumpIndexString(0, 1000))
- checkRetrieve(t, f, map[uint64][]byte{
- 0: getChunk(20, 0xFF),
- 1: getChunk(20, 0xEE),
- 2: getChunk(20, 0xdd),
- 3: getChunk(20, 0xcc),
- 4: getChunk(20, 0xbb),
- 5: getChunk(20, 0xaa),
- 6: getChunk(20, 0x11),
- })
- // maxFileSize*fullFileCount + headBytes + indexFileSize - hiddenBytes
- // = 40*3 + 20 + 8*6 - 0 = 188 (equivalently: 140 data bytes + 48 index bytes)
- expected := 20*7 + 48 - 0
- assertTableSize(t, f, expected)
-
- // truncate single element (item 0); deletion is only supported at file level
- f.truncateTail(1)
- fmt.Println(f.dumpIndexString(0, 1000))
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds,
- })
- checkRetrieve(t, f, map[uint64][]byte{
- 1: getChunk(20, 0xEE),
- 2: getChunk(20, 0xdd),
- 3: getChunk(20, 0xcc),
- 4: getChunk(20, 0xbb),
- 5: getChunk(20, 0xaa),
- 6: getChunk(20, 0x11),
- })
- expected = 20*7 + 48 - 20
- assertTableSize(t, f, expected)
-
- // Reopen the table, the deletion information should be persisted as well
- f.Close()
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
- if err != nil {
- t.Fatal(err)
- }
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds,
- })
- checkRetrieve(t, f, map[uint64][]byte{
- 1: getChunk(20, 0xEE),
- 2: getChunk(20, 0xdd),
- 3: getChunk(20, 0xcc),
- 4: getChunk(20, 0xbb),
- 5: getChunk(20, 0xaa),
- 6: getChunk(20, 0x11),
- })
-
- // truncate two elements (items 0 and 1); file 0 should be deleted
- f.truncateTail(2)
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds,
- 1: errOutOfBounds,
- })
- checkRetrieve(t, f, map[uint64][]byte{
- 2: getChunk(20, 0xdd),
- 3: getChunk(20, 0xcc),
- 4: getChunk(20, 0xbb),
- 5: getChunk(20, 0xaa),
- 6: getChunk(20, 0x11),
- })
- expected = 20*5 + 36 - 0
- assertTableSize(t, f, expected)
-
- // Reopen the table, the above testing should still pass
- f.Close()
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
-
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds,
- 1: errOutOfBounds,
- })
- checkRetrieve(t, f, map[uint64][]byte{
- 2: getChunk(20, 0xdd),
- 3: getChunk(20, 0xcc),
- 4: getChunk(20, 0xbb),
- 5: getChunk(20, 0xaa),
- 6: getChunk(20, 0x11),
- })
-
- // truncate 3 more elements (items 2, 3, 4); file 1 should be deleted
- // file 2 should only contain item 5
- f.truncateTail(5)
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds,
- 1: errOutOfBounds,
- 2: errOutOfBounds,
- 3: errOutOfBounds,
- 4: errOutOfBounds,
- })
- checkRetrieve(t, f, map[uint64][]byte{
- 5: getChunk(20, 0xaa),
- 6: getChunk(20, 0x11),
- })
- expected = 20*3 + 24 - 20
- assertTableSize(t, f, expected)
-
- // truncate all, the entire freezer should be deleted
- f.truncateTail(7)
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds,
- 1: errOutOfBounds,
- 2: errOutOfBounds,
- 3: errOutOfBounds,
- 4: errOutOfBounds,
- 5: errOutOfBounds,
- 6: errOutOfBounds,
- })
- expected = 12
- assertTableSize(t, f, expected)
-}
-
-func TestTruncateHead(t *testing.T) {
- t.Parallel()
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())
-
- // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
- if err != nil {
- t.Fatal(err)
- }
-
- // Write 7 x 20 bytes, splitting out into four files
- batch := f.newBatch()
- require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
- require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
- require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
- require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
- require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
- require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
- require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
- require.NoError(t, batch.commit())
-
- f.truncateTail(4) // Tail = 4
-
- // Truncate the head down to the tail mark; with the tail at 4, no item
- // remains accessible afterwards.
- f.truncateHead(4)
- checkRetrieveError(t, f, map[uint64]error{
- 0: errOutOfBounds, // Deleted by tail
- 1: errOutOfBounds, // Deleted by tail
- 2: errOutOfBounds, // Deleted by tail
- 3: errOutOfBounds, // Deleted by tail
-		4: errOutOfBounds, // Deleted by head
-		5: errOutOfBounds, // Deleted by head
-		6: errOutOfBounds, // Deleted by head
- })
-
- // Append new items
- batch = f.newBatch()
- require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
- require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
- require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
- require.NoError(t, batch.commit())
-
- checkRetrieve(t, f, map[uint64][]byte{
- 4: getChunk(20, 0xbb),
- 5: getChunk(20, 0xaa),
- 6: getChunk(20, 0x11),
- })
-}
-
-func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
- t.Helper()
-
- for item, wantBytes := range items {
- value, err := f.Retrieve(item)
- if err != nil {
- t.Fatalf("can't get expected item %d: %v", item, err)
- }
- if !bytes.Equal(value, wantBytes) {
- t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
- }
- }
-}
-
-func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
- t.Helper()
-
- for item, wantError := range items {
- value, err := f.Retrieve(item)
- if err == nil {
- t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
- }
- if err != wantError {
- t.Fatalf("wrong error for item %d: %v", item, err)
- }
- }
-}
-
-// getChunk returns a chunk of data of the given size, with every byte set to 'b'
-func getChunk(size int, b int) []byte {
- data := make([]byte, size)
- for i := range data {
- data[i] = byte(b)
- }
- return data
-}
-
-// TODO (?)
-// - test that if we remove several head-files, as well as the last data-file,
-// the index is truncated accordingly
-// Right now, the freezer would fail on these conditions:
-// 1. have data files d0, d1, d2, d3
-// 2. remove d2,d3
-//
-// However, all 'normal' failure modes arising due to failing to sync() or save a file
-// should be handled already, and the case described above can only (?) happen if an
-// external process/user deletes files from the filesystem.
-
-func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
- t.Helper()
-
- batch := ft.newBatch()
- for i := 0; i < n; i++ {
- if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
- t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
- }
- }
- if err := batch.commit(); err != nil {
- t.Fatalf("Commit returned error: %v", err)
- }
-}
-
-// TestSequentialRead does some basic tests on RetrieveItems.
-func TestSequentialRead(t *testing.T) {
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("batchread-%d", rand.Uint64())
- { // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- // Write 15 bytes 30 times
- writeChunks(t, f, 30, 15)
- f.dumpIndexStdout(0, 30)
- f.Close()
- }
- { // Open it, iterate, verify iteration
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
- if err != nil {
- t.Fatal(err)
- }
- items, err := f.RetrieveItems(0, 10000, 100000)
- if err != nil {
- t.Fatal(err)
- }
- if have, want := len(items), 30; have != want {
- t.Fatalf("want %d items, have %d ", want, have)
- }
- for i, have := range items {
- want := getChunk(15, i)
- if !bytes.Equal(want, have) {
- t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
- }
- }
- f.Close()
- }
- { // Open it, iterate, verify byte limit. The byte limit is less than item
- // size, so each lookup should only return one item
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
- if err != nil {
- t.Fatal(err)
- }
- items, err := f.RetrieveItems(0, 10000, 10)
- if err != nil {
- t.Fatal(err)
- }
- if have, want := len(items), 1; have != want {
- t.Fatalf("want %d items, have %d ", want, have)
- }
- for i, have := range items {
- want := getChunk(15, i)
- if !bytes.Equal(want, have) {
- t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
- }
- }
- f.Close()
- }
-}
-
-// TestSequentialReadByteLimit does some more advanced tests on batch reads.
-// These tests check that when the byte limit hits, we correctly abort in time,
-// but also properly do all the deferred reads for the previous data, regardless
-// of whether the data crosses a file boundary or not.
-func TestSequentialReadByteLimit(t *testing.T) {
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
- { // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
- if err != nil {
- t.Fatal(err)
- }
-		// Write 10 bytes 30 times,
-		// splitting files at every 100 bytes (10 items)
- writeChunks(t, f, 30, 10)
- f.Close()
- }
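-	// Each item is 10 bytes and reads stop before the item that would exceed
-	// the byte limit, so a limit of 89 bytes yields 8 items, 99 yields 9, etc.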
- for i, tc := range []struct {
- items uint64
- limit uint64
- want int
- }{
- {9, 89, 8},
- {10, 99, 9},
- {11, 109, 10},
- {100, 89, 8},
- {100, 99, 9},
- {100, 109, 10},
- } {
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
- if err != nil {
- t.Fatal(err)
- }
- items, err := f.RetrieveItems(0, tc.items, tc.limit)
- if err != nil {
- t.Fatal(err)
- }
- if have, want := len(items), tc.want; have != want {
- t.Fatalf("test %d: want %d items, have %d ", i, want, have)
- }
- for ii, have := range items {
- want := getChunk(10, ii)
- if !bytes.Equal(want, have) {
- t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
- }
- }
- f.Close()
- }
- }
-}
-
-// TestSequentialReadNoByteLimit tests batch reads when maxBytes is not specified.
-// The freezer should return the requested items regardless of the size limitation.
-func TestSequentialReadNoByteLimit(t *testing.T) {
- rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
- fname := fmt.Sprintf("batchread-3-%d", rand.Uint64())
- { // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
- if err != nil {
- t.Fatal(err)
- }
-		// Write 10 bytes 30 times,
-		// splitting files at every 100 bytes (10 items)
- writeChunks(t, f, 30, 10)
- f.Close()
- }
- for i, tc := range []struct {
- items uint64
- want int
- }{
- {1, 1},
- {30, 30},
- {31, 30},
- } {
- {
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
- if err != nil {
- t.Fatal(err)
- }
- items, err := f.RetrieveItems(0, tc.items, 0)
- if err != nil {
- t.Fatal(err)
- }
- if have, want := len(items), tc.want; have != want {
- t.Fatalf("test %d: want %d items, have %d ", i, want, have)
- }
- for ii, have := range items {
- want := getChunk(10, ii)
- if !bytes.Equal(want, have) {
- t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
- }
- }
- f.Close()
- }
- }
-}
-
-func TestFreezerReadonly(t *testing.T) {
- tmpdir := os.TempDir()
- // Case 1: Check it fails on non-existent file.
- _, err := newTable(tmpdir,
- fmt.Sprintf("readonlytest-%d", rand.Uint64()),
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
- if err == nil {
- t.Fatal("readonly table instantiation should fail for non-existent table")
- }
-
- // Case 2: Check that it fails on invalid index length.
- fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
- idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
- if err != nil {
- t.Errorf("Failed to open index file: %v\n", err)
- }
-	// Write a length (17) that is deliberately not a multiple of indexEntrySize.
- idxFile.Write(make([]byte, 17))
- idxFile.Close()
- _, err = newTable(tmpdir, fname,
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
- if err == nil {
- t.Errorf("readonly table instantiation should fail for invalid index size")
- }
-
- // Case 3: Open table non-readonly table to write some data.
- // Then corrupt the head file and make sure opening the table
- // again in readonly triggers an error.
- fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
- f, err := newTable(tmpdir, fname,
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
- if err != nil {
- t.Fatalf("failed to instantiate table: %v", err)
- }
- writeChunks(t, f, 8, 32)
- // Corrupt table file
- if _, err := f.head.Write([]byte{1, 1}); err != nil {
- t.Fatal(err)
- }
- if err := f.Close(); err != nil {
- t.Fatal(err)
- }
- _, err = newTable(tmpdir, fname,
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
- if err == nil {
- t.Errorf("readonly table instantiation should fail for corrupt table file")
- }
-
- // Case 4: Write some data to a table and later re-open it as readonly.
- // Should be successful.
- fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
- f, err = newTable(tmpdir, fname,
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
- if err != nil {
- t.Fatalf("failed to instantiate table: %v\n", err)
- }
- writeChunks(t, f, 32, 128)
- if err := f.Close(); err != nil {
- t.Fatal(err)
- }
- f, err = newTable(tmpdir, fname,
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
- if err != nil {
- t.Fatal(err)
- }
- v, err := f.Retrieve(10)
- if err != nil {
- t.Fatal(err)
- }
- exp := getChunk(128, 10)
- if !bytes.Equal(v, exp) {
- t.Errorf("retrieved value is incorrect")
- }
-
- // Case 5: Now write some data via a batch.
- // This should fail either during AppendRaw or Commit
- batch := f.newBatch()
- writeErr := batch.AppendRaw(32, make([]byte, 1))
- if writeErr == nil {
- writeErr = batch.commit()
- }
- if writeErr == nil {
- t.Fatalf("Writing to readonly table should fail")
- }
-}
-
-// randTest performs random freezer table operations.
-// Instances of this test are created by Generate.
-type randTest []randTestStep
-
-type randTestStep struct {
- op int
- items []uint64 // for append and retrieve
- blobs [][]byte // for append
- target uint64 // for truncate(head/tail)
- err error // for debugging
-}
-
-const (
- opReload = iota
- opAppend
- opRetrieve
- opTruncateHead
- opTruncateHeadAll
- opTruncateTail
- opTruncateTailAll
- opCheckAll
- opMax // boundary value, not an actual op
-)
-
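-// getVals returns n 8-byte big-endian values encoding first, first+1, ...,
-// so each item's payload identifies its own index.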
-func getVals(first uint64, n int) [][]byte {
- var ret [][]byte
- for i := 0; i < n; i++ {
- val := make([]byte, 8)
- binary.BigEndian.PutUint64(val, first+uint64(i))
- ret = append(ret, val)
- }
- return ret
-}
-
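-// Generate implements the testing/quick Generator interface. quick.Check
-// invokes it (via TestRandom) to build a random but self-consistent sequence
-// of table operations, tracking which items should be present afterwards.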
-func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
- var (
- deleted uint64 // The number of deleted items from tail
- items []uint64 // The index of entries in table
-
-		// getItems returns up to n item indexes currently present in the table.
- getItems = func(n int) []uint64 {
- length := len(items)
- if length == 0 {
- return nil
- }
- var ret []uint64
- index := rand.Intn(length)
- for i := index; len(ret) < n && i < length; i++ {
- ret = append(ret, items[i])
- }
- return ret
- }
-
-		// addItems appends n new items to the table and returns their indexes.
- addItems = func(n int) []uint64 {
- var first = deleted
- if len(items) != 0 {
- first = items[len(items)-1] + 1
- }
- var ret []uint64
- for i := 0; i < n; i++ {
- ret = append(ret, first+uint64(i))
- }
- items = append(items, ret...)
- return ret
- }
- )
-
- var steps randTest
- for i := 0; i < size; i++ {
- step := randTestStep{op: r.Intn(opMax)}
- switch step.op {
- case opReload, opCheckAll:
- case opAppend:
- num := r.Intn(3)
- step.items = addItems(num)
- if len(step.items) == 0 {
- step.blobs = nil
- } else {
- step.blobs = getVals(step.items[0], num)
- }
- case opRetrieve:
- step.items = getItems(r.Intn(3))
- case opTruncateHead:
- if len(items) == 0 {
- step.target = deleted
- } else {
- index := r.Intn(len(items))
- items = items[:index]
- step.target = deleted + uint64(index)
- }
- case opTruncateHeadAll:
- step.target = deleted
- items = items[:0]
- case opTruncateTail:
- if len(items) == 0 {
- step.target = deleted
- } else {
- index := r.Intn(len(items))
- items = items[index:]
- deleted += uint64(index)
- step.target = deleted
- }
- case opTruncateTailAll:
- step.target = deleted + uint64(len(items))
- items = items[:0]
- deleted = step.target
- }
- steps = append(steps, step)
- }
- return reflect.ValueOf(steps)
-}
-
-func runRandTest(rt randTest) bool {
- fname := fmt.Sprintf("randtest-%d", rand.Uint64())
- f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
- if err != nil {
- panic("failed to initialize table")
- }
- var values [][]byte
- for i, step := range rt {
- switch step.op {
- case opReload:
- f.Close()
- f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
- if err != nil {
- rt[i].err = fmt.Errorf("failed to reload table %v", err)
- }
- case opCheckAll:
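-			// Read back everything between tail and head and compare it
-			// against the shadow copy kept in 'values'.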
- tail := f.itemHidden.Load()
- head := f.items.Load()
-
- if tail == head {
- continue
- }
- got, err := f.RetrieveItems(f.itemHidden.Load(), head-tail, 100000)
- if err != nil {
- rt[i].err = err
- } else {
- if !reflect.DeepEqual(got, values) {
- rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
- }
- }
-
- case opAppend:
- batch := f.newBatch()
- for i := 0; i < len(step.items); i++ {
- batch.AppendRaw(step.items[i], step.blobs[i])
- }
- batch.commit()
- values = append(values, step.blobs...)
-
- case opRetrieve:
- var blobs [][]byte
- if len(step.items) == 0 {
- continue
- }
- tail := f.itemHidden.Load()
- for i := 0; i < len(step.items); i++ {
- blobs = append(blobs, values[step.items[i]-tail])
- }
- got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
- if err != nil {
- rt[i].err = err
- } else {
- if !reflect.DeepEqual(got, blobs) {
- rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
- }
- }
-
- case opTruncateHead:
- f.truncateHead(step.target)
-
- length := f.items.Load() - f.itemHidden.Load()
- values = values[:length]
-
- case opTruncateHeadAll:
- f.truncateHead(step.target)
- values = nil
-
- case opTruncateTail:
- prev := f.itemHidden.Load()
- f.truncateTail(step.target)
-
- truncated := f.itemHidden.Load() - prev
- values = values[truncated:]
-
- case opTruncateTailAll:
- f.truncateTail(step.target)
- values = nil
- }
- // Abort the test on error.
- if rt[i].err != nil {
- return false
- }
- }
- f.Close()
- return true
-}
-
-func TestRandom(t *testing.T) {
- if err := quick.Check(runRandTest, nil); err != nil {
- if cerr, ok := err.(*quick.CheckError); ok {
- t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
- }
- t.Fatal(err)
- }
-}
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
deleted file mode 100644
index e6484ab460..0000000000
--- a/core/rawdb/freezer_test.go
+++ /dev/null
@@ -1,482 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math/big"
- "math/rand"
- "os"
- "path"
- "sync"
- "testing"
-
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/rlp"
- "github.com/stretchr/testify/require"
-)
-
-var freezerTestTableDef = map[string]bool{"test": true}
-
-func TestFreezerModify(t *testing.T) {
- t.Parallel()
-
- // Create test data.
- var valuesRaw [][]byte
- var valuesRLP []*big.Int
- for x := 0; x < 100; x++ {
- v := getChunk(256, x)
- valuesRaw = append(valuesRaw, v)
- iv := big.NewInt(int64(x))
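-		// iv = x**x, so the RLP-encoded values vary widely in length.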
- iv = iv.Exp(iv, iv, nil)
- valuesRLP = append(valuesRLP, iv)
- }
-
- tables := map[string]bool{"raw": true, "rlp": false}
- f, _ := newFreezerForTesting(t, tables)
- defer f.Close()
-
- // Commit test data.
- _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for i := range valuesRaw {
- if err := op.AppendRaw("raw", uint64(i), valuesRaw[i]); err != nil {
- return err
- }
- if err := op.Append("rlp", uint64(i), valuesRLP[i]); err != nil {
- return err
- }
- }
- return nil
- })
- if err != nil {
- t.Fatal("ModifyAncients failed:", err)
- }
-
- // Dump indexes.
- for _, table := range f.tables {
- t.Log(table.name, "index:", table.dumpIndexString(0, int64(len(valuesRaw))))
- }
-
- // Read back test data.
- checkAncientCount(t, f, "raw", uint64(len(valuesRaw)))
- checkAncientCount(t, f, "rlp", uint64(len(valuesRLP)))
- for i := range valuesRaw {
- v, _ := f.Ancient("raw", uint64(i))
- if !bytes.Equal(v, valuesRaw[i]) {
- t.Fatalf("wrong raw value at %d: %x", i, v)
- }
- ivEnc, _ := f.Ancient("rlp", uint64(i))
- want, _ := rlp.EncodeToBytes(valuesRLP[i])
- if !bytes.Equal(ivEnc, want) {
- t.Fatalf("wrong RLP value at %d: %x", i, ivEnc)
- }
- }
-}
-
-// This checks that ModifyAncients rolls back freezer updates
-// when the function passed to it returns an error.
-func TestFreezerModifyRollback(t *testing.T) {
- t.Parallel()
-
- f, dir := newFreezerForTesting(t, freezerTestTableDef)
-
- theError := errors.New("oops")
- _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- // Append three items. This creates two files immediately,
- // because the table size limit of the test freezer is 2048.
- require.NoError(t, op.AppendRaw("test", 0, make([]byte, 2048)))
- require.NoError(t, op.AppendRaw("test", 1, make([]byte, 2048)))
- require.NoError(t, op.AppendRaw("test", 2, make([]byte, 2048)))
- return theError
- })
- if err != theError {
- t.Errorf("ModifyAncients returned wrong error %q", err)
- }
- checkAncientCount(t, f, "test", 0)
- f.Close()
-
- // Reopen and check that the rolled-back data doesn't reappear.
- tables := map[string]bool{"test": true}
- f2, err := NewFreezer(dir, "", false, 2049, tables)
- if err != nil {
- t.Fatalf("can't reopen freezer after failed ModifyAncients: %v", err)
- }
- defer f2.Close()
- checkAncientCount(t, f2, "test", 0)
-}
-
-// This test runs ModifyAncients and Ancient concurrently with each other.
-func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
- t.Parallel()
-
- f, _ := newFreezerForTesting(t, freezerTestTableDef)
- defer f.Close()
-
- var (
- numReaders = 5
- writeBatchSize = uint64(50)
- written = make(chan uint64, numReaders*6)
- wg sync.WaitGroup
- )
- wg.Add(numReaders + 1)
-
- // Launch the writer. It appends 10000 items in batches.
- go func() {
- defer wg.Done()
- defer close(written)
- for item := uint64(0); item < 10000; item += writeBatchSize {
- _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for i := uint64(0); i < writeBatchSize; i++ {
- item := item + i
- value := getChunk(32, int(item))
- if err := op.AppendRaw("test", item, value); err != nil {
- return err
- }
- }
- return nil
- })
- if err != nil {
- panic(err)
- }
- for i := 0; i < numReaders; i++ {
- written <- item + writeBatchSize
- }
- }
- }()
-
- // Launch the readers. They read random items from the freezer up to the
- // current frozen item count.
- for i := 0; i < numReaders; i++ {
- go func() {
- defer wg.Done()
- for frozen := range written {
- for rc := 0; rc < 80; rc++ {
- num := uint64(rand.Intn(int(frozen)))
- value, err := f.Ancient("test", num)
- if err != nil {
- panic(fmt.Errorf("error reading %d (frozen %d): %v", num, frozen, err))
- }
- if !bytes.Equal(value, getChunk(32, int(num))) {
- panic(fmt.Errorf("wrong value at %d", num))
- }
- }
- }
- }()
- }
-
- wg.Wait()
-}
-
-// This test runs ModifyAncients and TruncateHead concurrently with each other.
-func TestFreezerConcurrentModifyTruncate(t *testing.T) {
- f, _ := newFreezerForTesting(t, freezerTestTableDef)
- defer f.Close()
-
- var item = make([]byte, 256)
-
- for i := 0; i < 10; i++ {
- // First reset and write 100 items.
- if _, err := f.TruncateHead(0); err != nil {
- t.Fatal("truncate failed:", err)
- }
- _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for i := uint64(0); i < 100; i++ {
- if err := op.AppendRaw("test", i, item); err != nil {
- return err
- }
- }
- return nil
- })
- if err != nil {
- t.Fatal("modify failed:", err)
- }
- checkAncientCount(t, f, "test", 100)
-
- // Now append 100 more items and truncate concurrently.
- var (
- wg sync.WaitGroup
- truncateErr error
- modifyErr error
- )
- wg.Add(3)
- go func() {
- _, modifyErr = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for i := uint64(100); i < 200; i++ {
- if err := op.AppendRaw("test", i, item); err != nil {
- return err
- }
- }
- return nil
- })
- wg.Done()
- }()
- go func() {
- _, truncateErr = f.TruncateHead(10)
- wg.Done()
- }()
- go func() {
- f.AncientSize("test")
- wg.Done()
- }()
- wg.Wait()
-
- // Now check the outcome. If the truncate operation went through first, the append
- // fails, otherwise it succeeds. In either case, the freezer should be positioned
- // at 10 after both operations are done.
- if truncateErr != nil {
- t.Fatal("concurrent truncate failed:", err)
- }
- if !(errors.Is(modifyErr, nil) || errors.Is(modifyErr, errOutOrderInsertion)) {
- t.Fatal("wrong error from concurrent modify:", modifyErr)
- }
- checkAncientCount(t, f, "test", 10)
- }
-}
-
-func TestFreezerReadonlyValidate(t *testing.T) {
- tables := map[string]bool{"a": true, "b": true}
- dir := t.TempDir()
- // Open non-readonly freezer and fill individual tables
- // with different amount of data.
- f, err := NewFreezer(dir, "", false, 2049, tables)
- if err != nil {
- t.Fatal("can't open freezer", err)
- }
- var item = make([]byte, 1024)
- aBatch := f.tables["a"].newBatch()
- require.NoError(t, aBatch.AppendRaw(0, item))
- require.NoError(t, aBatch.AppendRaw(1, item))
- require.NoError(t, aBatch.AppendRaw(2, item))
- require.NoError(t, aBatch.commit())
- bBatch := f.tables["b"].newBatch()
- require.NoError(t, bBatch.AppendRaw(0, item))
- require.NoError(t, bBatch.commit())
- if f.tables["a"].items.Load() != 3 {
- t.Fatalf("unexpected number of items in table")
- }
- if f.tables["b"].items.Load() != 1 {
- t.Fatalf("unexpected number of items in table")
- }
- require.NoError(t, f.Close())
-
-	// Re-opening as readonly should fail when validating
- // table lengths.
- _, err = NewFreezer(dir, "", true, 2049, tables)
- if err == nil {
- t.Fatal("readonly freezer should fail with differing table lengths")
- }
-}
-
-func TestFreezerConcurrentReadonly(t *testing.T) {
- t.Parallel()
-
- tables := map[string]bool{"a": true}
- dir := t.TempDir()
-
- f, err := NewFreezer(dir, "", false, 2049, tables)
- if err != nil {
- t.Fatal("can't open freezer", err)
- }
- var item = make([]byte, 1024)
- batch := f.tables["a"].newBatch()
- items := uint64(10)
- for i := uint64(0); i < items; i++ {
- require.NoError(t, batch.AppendRaw(i, item))
- }
- require.NoError(t, batch.commit())
- if loaded := f.tables["a"].items.Load(); loaded != items {
- t.Fatalf("unexpected number of items in table, want: %d, have: %d", items, loaded)
- }
- require.NoError(t, f.Close())
-
- var (
- wg sync.WaitGroup
- fs = make([]*Freezer, 5)
- errs = make([]error, 5)
- )
- for i := 0; i < 5; i++ {
- wg.Add(1)
- go func(i int) {
- defer wg.Done()
-
- f, err := NewFreezer(dir, "", true, 2049, tables)
- if err == nil {
- fs[i] = f
- } else {
- errs[i] = err
- }
- }(i)
- }
-
- wg.Wait()
-
- for i := range fs {
- if err := errs[i]; err != nil {
- t.Fatal("failed to open freezer", err)
- }
- require.NoError(t, fs[i].Close())
- }
-}
-
-func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) {
- t.Helper()
-
- dir := t.TempDir()
- // note: using low max table size here to ensure the tests actually
- // switch between multiple files.
- f, err := NewFreezer(dir, "", false, 2049, tables)
- if err != nil {
- t.Fatal("can't open freezer", err)
- }
- return f, dir
-}
-
-// checkAncientCount verifies that the freezer contains n items.
-func checkAncientCount(t *testing.T, f *Freezer, kind string, n uint64) {
- t.Helper()
-
- if frozen, _ := f.Ancients(); frozen != n {
- t.Fatalf("Ancients() returned %d, want %d", frozen, n)
- }
-
- // Check at index n-1.
- if n > 0 {
- index := n - 1
- if ok, _ := f.HasAncient(kind, index); !ok {
- t.Errorf("HasAncient(%q, %d) returned false unexpectedly", kind, index)
- }
- if _, err := f.Ancient(kind, index); err != nil {
- t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
- }
- }
-
- // Check at index n.
- index := n
- if ok, _ := f.HasAncient(kind, index); ok {
- t.Errorf("HasAncient(%q, %d) returned true unexpectedly", kind, index)
- }
- if _, err := f.Ancient(kind, index); err == nil {
- t.Errorf("Ancient(%q, %d) didn't return expected error", kind, index)
- } else if err != errOutOfBounds {
- t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
- }
-}
-
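-// TestRenameWindows checks that os.Rename can move files across directories
-// and overwrite existing destinations, behaviour the freezer's file handling
-// relies on (and which has historically been problematic on Windows).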
-func TestRenameWindows(t *testing.T) {
- var (
- fname = "file.bin"
- fname2 = "file2.bin"
- data = []byte{1, 2, 3, 4}
- data2 = []byte{2, 3, 4, 5}
- data3 = []byte{3, 5, 6, 7}
- dataLen = 4
- )
-
- // Create 2 temp dirs
- dir1 := t.TempDir()
- dir2 := t.TempDir()
-
- // Create file in dir1 and fill with data
- f, err := os.Create(path.Join(dir1, fname))
- if err != nil {
- t.Fatal(err)
- }
- f2, err := os.Create(path.Join(dir1, fname2))
- if err != nil {
- t.Fatal(err)
- }
- f3, err := os.Create(path.Join(dir2, fname2))
- if err != nil {
- t.Fatal(err)
- }
- if _, err := f.Write(data); err != nil {
- t.Fatal(err)
- }
- if _, err := f2.Write(data2); err != nil {
- t.Fatal(err)
- }
- if _, err := f3.Write(data3); err != nil {
- t.Fatal(err)
- }
- if err := f.Close(); err != nil {
- t.Fatal(err)
- }
- if err := f2.Close(); err != nil {
- t.Fatal(err)
- }
- if err := f3.Close(); err != nil {
- t.Fatal(err)
- }
- if err := os.Rename(f.Name(), path.Join(dir2, fname)); err != nil {
- t.Fatal(err)
- }
- if err := os.Rename(f2.Name(), path.Join(dir2, fname2)); err != nil {
- t.Fatal(err)
- }
-
- // Check file contents
- f, err = os.Open(path.Join(dir2, fname))
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- defer os.Remove(f.Name())
- buf := make([]byte, dataLen)
- if _, err := f.Read(buf); err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(buf, data) {
- t.Errorf("unexpected file contents. Got %v\n", buf)
- }
-
- f, err = os.Open(path.Join(dir2, fname2))
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- defer os.Remove(f.Name())
- if _, err := f.Read(buf); err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(buf, data2) {
- t.Errorf("unexpected file contents. Got %v\n", buf)
- }
-}
-
-func TestFreezerCloseSync(t *testing.T) {
- t.Parallel()
- f, _ := newFreezerForTesting(t, map[string]bool{"a": true, "b": true})
- defer f.Close()
-
-	// Now, close and sync. This mimics the behaviour when the node is shut
-	// down just as the chain freezer is writing.
-	// 1: thread-1: chain freezer writes, via freezeRange (holds lock)
- // 2: thread-2: Close called, waits for write to finish
- // 3: thread-1: finishes writing, releases lock
- // 4: thread-2: obtains lock, completes Close()
- // 5: thread-1: calls f.Sync()
- if err := f.Close(); err != nil {
- t.Fatal(err)
- }
- if err := f.Sync(); err == nil {
- t.Fatalf("want error, have nil")
- } else if have, want := err.Error(), "[closed closed]"; have != want {
- t.Fatalf("want %v, have %v", have, want)
- }
-}
diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go
deleted file mode 100644
index 752e95ba6a..0000000000
--- a/core/rawdb/freezer_utils.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "io"
- "os"
- "path/filepath"
-)
-
-// copyFrom copies data from 'srcPath' at offset 'offset' into 'destPath'.
-// The 'destPath' is created if it doesn't exist, otherwise it is overwritten.
-// Before the copy is executed, a callback can be registered to
-// manipulate the destination file.
-// It is perfectly valid to have destPath == srcPath.
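-// For example, copyFrom(path, path, 6, nil) drops the first 6 bytes of the
-// file in place.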
-func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) error) error {
- // Create a temp file in the same dir where we want it to wind up
- f, err := os.CreateTemp(filepath.Dir(destPath), "*")
- if err != nil {
- return err
- }
- fname := f.Name()
-
- // Clean up the leftover file
- defer func() {
- if f != nil {
- f.Close()
- }
- os.Remove(fname)
- }()
- // Apply the given function if it's not nil before we copy
- // the content from the src.
- if before != nil {
- if err := before(f); err != nil {
- return err
- }
- }
- // Open the source file
- src, err := os.Open(srcPath)
- if err != nil {
- return err
- }
- if _, err = src.Seek(int64(offset), 0); err != nil {
- src.Close()
- return err
- }
- // io.Copy uses 32K buffer internally.
- _, err = io.Copy(f, src)
- if err != nil {
- src.Close()
- return err
- }
- // Rename the temporary file to the specified dest name.
-	// src may be the same as dest, so it needs to be closed before
- // we do the final move.
- src.Close()
-
- if err := f.Close(); err != nil {
- return err
- }
- f = nil
- return os.Rename(fname, destPath)
-}
-
-// openFreezerFileForAppend opens a freezer table file and seeks to the end
-func openFreezerFileForAppend(filename string) (*os.File, error) {
- // Open the file without the O_APPEND flag
- // because it has differing behaviour during Truncate operations
-	// on different operating systems
- file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
- if err != nil {
- return nil, err
- }
- // Seek to end for append
- if _, err = file.Seek(0, io.SeekEnd); err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// openFreezerFileForReadOnly opens a freezer table file for read only access
-func openFreezerFileForReadOnly(filename string) (*os.File, error) {
- return os.OpenFile(filename, os.O_RDONLY, 0644)
-}
-
-// openFreezerFileTruncated opens a freezer table file, making sure it is truncated
-func openFreezerFileTruncated(filename string) (*os.File, error) {
- return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
-}
-
-// truncateFreezerFile resizes a freezer table file and seeks to the end
-func truncateFreezerFile(file *os.File, size int64) error {
- if err := file.Truncate(size); err != nil {
- return err
- }
- // Seek to end for append
- if _, err := file.Seek(0, io.SeekEnd); err != nil {
- return err
- }
- return nil
-}
-
-// grow extends buf by n bytes, reallocating to double the capacity (or to
-// len+n, whichever is larger) when the spare capacity is insufficient.
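-// For example, growing a buffer of len 10 and cap 16 by n=8 reallocates to
-// cap 32 and returns a slice of len 18.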
-func grow(buf []byte, n int) []byte {
- if cap(buf)-len(buf) < n {
- newcap := 2 * cap(buf)
- if newcap-len(buf) < n {
- newcap = len(buf) + n
- }
- nbuf := make([]byte, len(buf), newcap)
- copy(nbuf, buf)
- buf = nbuf
- }
- buf = buf[:len(buf)+n]
- return buf
-}
diff --git a/core/rawdb/freezer_utils_test.go b/core/rawdb/freezer_utils_test.go
deleted file mode 100644
index 829cbfb4f3..0000000000
--- a/core/rawdb/freezer_utils_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "bytes"
- "os"
- "testing"
-)
-
-func TestCopyFrom(t *testing.T) {
- var (
- content = []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
- prefix = []byte{0x9, 0xa, 0xb, 0xc, 0xd, 0xf}
- )
- var cases = []struct {
- src, dest string
- offset uint64
- writePrefix bool
- }{
- {"foo", "bar", 0, false},
- {"foo", "bar", 1, false},
- {"foo", "bar", 8, false},
- {"foo", "foo", 0, false},
- {"foo", "foo", 1, false},
- {"foo", "foo", 8, false},
- {"foo", "bar", 0, true},
- {"foo", "bar", 1, true},
- {"foo", "bar", 8, true},
- }
- for _, c := range cases {
- os.WriteFile(c.src, content, 0600)
-
- if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
- if !c.writePrefix {
- return nil
- }
- f.Write(prefix)
- return nil
- }); err != nil {
- os.Remove(c.src)
- t.Fatalf("Failed to copy %v", err)
- }
-
- blob, err := os.ReadFile(c.dest)
- if err != nil {
- os.Remove(c.src)
- os.Remove(c.dest)
- t.Fatalf("Failed to read %v", err)
- }
- want := content[c.offset:]
- if c.writePrefix {
- want = append(prefix, want...)
- }
- if !bytes.Equal(blob, want) {
- t.Fatal("Unexpected value")
- }
- os.Remove(c.src)
- os.Remove(c.dest)
- }
-}
diff --git a/core/rawdb/imports.go b/core/rawdb/imports.go
new file mode 100644
index 0000000000..4702fb915e
--- /dev/null
+++ b/core/rawdb/imports.go
@@ -0,0 +1,127 @@
+// (c) 2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
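+// Package rawdb re-exports the low level database accessors from the upstream
+// libevm core/rawdb package that coreth relies on.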
+package rawdb
+
+import (
+ ethrawdb "github.com/ava-labs/libevm/core/rawdb"
+)
+
+// Types used directly as their upstream definition.
+type (
+ LegacyTxLookupEntry = ethrawdb.LegacyTxLookupEntry
+ OpenOptions = ethrawdb.OpenOptions
+)
+
+// Constants used directly as their upstream definition.
+const (
+ PathScheme = ethrawdb.PathScheme
+)
+
+// Variables used directly as their upstream definition.
+var (
+ BloomBitsIndexPrefix = ethrawdb.BloomBitsIndexPrefix
+ CodePrefix = ethrawdb.CodePrefix
+)
+
+// Functions used directly as their upstream definition.
+var (
+ DeleteAccountSnapshot = ethrawdb.DeleteAccountSnapshot
+ DeleteAccountTrieNode = ethrawdb.DeleteAccountTrieNode
+ DeleteBlock = ethrawdb.DeleteBlock
+ DeleteCanonicalHash = ethrawdb.DeleteCanonicalHash
+ DeleteSnapshotRoot = ethrawdb.DeleteSnapshotRoot
+ DeleteStorageSnapshot = ethrawdb.DeleteStorageSnapshot
+ DeleteStorageTrieNode = ethrawdb.DeleteStorageTrieNode
+ DeleteTrieJournal = ethrawdb.DeleteTrieJournal
+ DeleteTrieNode = ethrawdb.DeleteTrieNode
+ ExistsAccountTrieNode = ethrawdb.ExistsAccountTrieNode
+ FindCommonAncestor = ethrawdb.FindCommonAncestor
+ HasBody = ethrawdb.HasBody
+ HasCode = ethrawdb.HasCode
+ HasHeader = ethrawdb.HasHeader
+ HashScheme = ethrawdb.HashScheme
+ HasLegacyTrieNode = ethrawdb.HasLegacyTrieNode
+ HasReceipts = ethrawdb.HasReceipts
+ IsCodeKey = ethrawdb.IsCodeKey
+ IterateStorageSnapshots = ethrawdb.IterateStorageSnapshots
+ NewDatabase = ethrawdb.NewDatabase
+ NewDatabaseWithFreezer = ethrawdb.NewDatabaseWithFreezer
+ NewKeyLengthIterator = ethrawdb.NewKeyLengthIterator
+ NewLevelDBDatabase = ethrawdb.NewLevelDBDatabase
+ NewMemoryDatabase = ethrawdb.NewMemoryDatabase
+ NewStateFreezer = ethrawdb.NewStateFreezer
+ NewTable = ethrawdb.NewTable
+ Open = ethrawdb.Open
+ ParseStateScheme = ethrawdb.ParseStateScheme
+ PopUncleanShutdownMarker = ethrawdb.PopUncleanShutdownMarker
+ PushUncleanShutdownMarker = ethrawdb.PushUncleanShutdownMarker
+ ReadAccountSnapshot = ethrawdb.ReadAccountSnapshot
+ ReadAccountTrieNode = ethrawdb.ReadAccountTrieNode
+ ReadAllHashes = ethrawdb.ReadAllHashes
+ ReadBlock = ethrawdb.ReadBlock
+ ReadBloomBits = ethrawdb.ReadBloomBits
+ ReadBody = ethrawdb.ReadBody
+ ReadCanonicalHash = ethrawdb.ReadCanonicalHash
+ ReadChainConfig = ethrawdb.ReadChainConfig
+ ReadCode = ethrawdb.ReadCode
+ ReadDatabaseVersion = ethrawdb.ReadDatabaseVersion
+ ReadHeadBlock = ethrawdb.ReadHeadBlock
+ ReadHeadBlockHash = ethrawdb.ReadHeadBlockHash
+ ReadHeader = ethrawdb.ReadHeader
+ ReadHeaderNumber = ethrawdb.ReadHeaderNumber
+ ReadHeadFastBlockHash = ethrawdb.ReadHeadFastBlockHash
+ ReadHeadHeaderHash = ethrawdb.ReadHeadHeaderHash
+ ReadLastPivotNumber = ethrawdb.ReadLastPivotNumber
+ ReadLegacyTrieNode = ethrawdb.ReadLegacyTrieNode
+ ReadLogs = ethrawdb.ReadLogs
+ ReadPersistentStateID = ethrawdb.ReadPersistentStateID
+ ReadPreimage = ethrawdb.ReadPreimage
+ ReadRawReceipts = ethrawdb.ReadRawReceipts
+ ReadReceipts = ethrawdb.ReadReceipts
+ ReadSkeletonSyncStatus = ethrawdb.ReadSkeletonSyncStatus
+ ReadSnapshotDisabled = ethrawdb.ReadSnapshotDisabled
+ ReadSnapshotGenerator = ethrawdb.ReadSnapshotGenerator
+ ReadSnapshotJournal = ethrawdb.ReadSnapshotJournal
+ ReadSnapshotRecoveryNumber = ethrawdb.ReadSnapshotRecoveryNumber
+ ReadSnapshotRoot = ethrawdb.ReadSnapshotRoot
+ ReadSnapshotSyncStatus = ethrawdb.ReadSnapshotSyncStatus
+ ReadSnapSyncStatusFlag = ethrawdb.ReadSnapSyncStatusFlag
+ ReadStateID = ethrawdb.ReadStateID
+ ReadStorageSnapshot = ethrawdb.ReadStorageSnapshot
+ ReadStorageTrieNode = ethrawdb.ReadStorageTrieNode
+ ReadTransaction = ethrawdb.ReadTransaction
+ ReadTrieJournal = ethrawdb.ReadTrieJournal
+ ReadTxIndexTail = ethrawdb.ReadTxIndexTail
+ ReadTxLookupEntry = ethrawdb.ReadTxLookupEntry
+ SnapshotAccountPrefix = ethrawdb.SnapshotAccountPrefix
+ SnapshotStoragePrefix = ethrawdb.SnapshotStoragePrefix
+ UnindexTransactions = ethrawdb.UnindexTransactions
+ UpdateUncleanShutdownMarker = ethrawdb.UpdateUncleanShutdownMarker
+ WriteAccountSnapshot = ethrawdb.WriteAccountSnapshot
+ WriteAccountTrieNode = ethrawdb.WriteAccountTrieNode
+ WriteBlock = ethrawdb.WriteBlock
+ WriteBloomBits = ethrawdb.WriteBloomBits
+ WriteBody = ethrawdb.WriteBody
+ WriteCanonicalHash = ethrawdb.WriteCanonicalHash
+ WriteChainConfig = ethrawdb.WriteChainConfig
+ WriteCode = ethrawdb.WriteCode
+ WriteDatabaseVersion = ethrawdb.WriteDatabaseVersion
+ WriteHeadBlockHash = ethrawdb.WriteHeadBlockHash
+ WriteHeader = ethrawdb.WriteHeader
+ WriteHeadHeaderHash = ethrawdb.WriteHeadHeaderHash
+ WriteLegacyTrieNode = ethrawdb.WriteLegacyTrieNode
+ WritePersistentStateID = ethrawdb.WritePersistentStateID
+ WritePreimages = ethrawdb.WritePreimages
+ WriteReceipts = ethrawdb.WriteReceipts
+ WriteSnapshotGenerator = ethrawdb.WriteSnapshotGenerator
+ WriteSnapshotRoot = ethrawdb.WriteSnapshotRoot
+ WriteSnapSyncStatusFlag = ethrawdb.WriteSnapSyncStatusFlag
+ WriteStateID = ethrawdb.WriteStateID
+ WriteStorageSnapshot = ethrawdb.WriteStorageSnapshot
+ WriteStorageTrieNode = ethrawdb.WriteStorageTrieNode
+ WriteTrieJournal = ethrawdb.WriteTrieJournal
+ WriteTrieNode = ethrawdb.WriteTrieNode
+ WriteTxIndexTail = ethrawdb.WriteTxIndexTail
+ WriteTxLookupEntriesByBlock = ethrawdb.WriteTxLookupEntriesByBlock
+)
diff --git a/core/rawdb/key_length_iterator.go b/core/rawdb/key_length_iterator.go
deleted file mode 100644
index 8d1a7d2f54..0000000000
--- a/core/rawdb/key_length_iterator.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// (c) 2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import "github.com/ava-labs/libevm/ethdb"
-
-// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
-// with a specific key length will be returned.
-type KeyLengthIterator struct {
- requiredKeyLength int
- ethdb.Iterator
-}
-
-// NewKeyLengthIterator returns a wrapped version of the iterator that will
-// only return key-value pairs whose keys have the given length.
-func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator {
- return &KeyLengthIterator{
- Iterator: it,
- requiredKeyLength: keyLen,
- }
-}
-
-func (it *KeyLengthIterator) Next() bool {
- // Return true as soon as a key with the required key length is discovered
- for it.Iterator.Next() {
- if len(it.Iterator.Key()) == it.requiredKeyLength {
- return true
- }
- }
-
- // Return false when we exhaust the keys in the underlying iterator.
- return false
-}
diff --git a/core/rawdb/key_length_iterator_test.go b/core/rawdb/key_length_iterator_test.go
deleted file mode 100644
index 654efc5b55..0000000000
--- a/core/rawdb/key_length_iterator_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "encoding/binary"
- "testing"
-)
-
-func TestKeyLengthIterator(t *testing.T) {
- db := NewMemoryDatabase()
-
- keyLen := 8
- expectedKeys := make(map[string]struct{})
- for i := 0; i < 100; i++ {
- key := make([]byte, keyLen)
- binary.BigEndian.PutUint64(key, uint64(i))
- if err := db.Put(key, []byte{0x1}); err != nil {
- t.Fatal(err)
- }
- expectedKeys[string(key)] = struct{}{}
-
- longerKey := make([]byte, keyLen*2)
- binary.BigEndian.PutUint64(longerKey, uint64(i))
- if err := db.Put(longerKey, []byte{0x1}); err != nil {
- t.Fatal(err)
- }
- }
-
- it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen)
- for it.Next() {
- key := it.Key()
- _, exists := expectedKeys[string(key)]
- if !exists {
- t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key))
- }
- delete(expectedKeys, string(key))
- if len(key) != keyLen {
- t.Fatalf("Found unexpected key in key length iterator with length %d", len(key))
- }
- }
-
- if len(expectedKeys) != 0 {
- t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen)
- }
-}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
deleted file mode 100644
index 938338ff71..0000000000
--- a/core/rawdb/schema.go
+++ /dev/null
@@ -1,351 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Package rawdb contains a collection of low level database accessors.
-package rawdb
-
-import (
- "bytes"
- "encoding/binary"
-
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
-)
-
-// The fields below define the low level database schema prefixing.
-var (
- // databaseVersionKey tracks the current database version.
- databaseVersionKey = []byte("DatabaseVersion")
-
- // headHeaderKey tracks the latest known header's hash.
- headHeaderKey = []byte("LastHeader")
-
- // headBlockKey tracks the latest known full block's hash.
- headBlockKey = []byte("LastBlock")
-
- // headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
- headFastBlockKey = []byte("LastFast")
-
- // headFinalizedBlockKey tracks the latest known finalized block hash.
- headFinalizedBlockKey = []byte("LastFinalized")
-
-	// persistentStateIDKey tracks the id of the latest stored state (for path-based scheme only).
- persistentStateIDKey = []byte("LastStateID")
-
- // lastPivotKey tracks the last pivot block used by fast sync (to reenable on sethead).
- lastPivotKey = []byte("LastPivot")
-
- // fastTrieProgressKey tracks the number of trie entries imported during fast sync.
- fastTrieProgressKey = []byte("TrieSync")
-
- // snapshotDisabledKey flags that the snapshot should not be maintained due to initial sync.
- snapshotDisabledKey = []byte("SnapshotDisabled")
-
- // SnapshotRootKey tracks the hash of the last snapshot.
- SnapshotRootKey = []byte("SnapshotRoot")
-
- // snapshotJournalKey tracks the in-memory diff layers across restarts.
- snapshotJournalKey = []byte("SnapshotJournal")
-
- // snapshotGeneratorKey tracks the snapshot generation marker across restarts.
- snapshotGeneratorKey = []byte("SnapshotGenerator")
-
- // snapshotRecoveryKey tracks the snapshot recovery marker across restarts.
- snapshotRecoveryKey = []byte("SnapshotRecovery")
-
- // snapshotSyncStatusKey tracks the snapshot sync status across restarts.
- snapshotSyncStatusKey = []byte("SnapshotSyncStatus")
-
- // skeletonSyncStatusKey tracks the skeleton sync status across restarts.
- skeletonSyncStatusKey = []byte("SkeletonSyncStatus")
-
- // trieJournalKey tracks the in-memory trie node layers across restarts.
- trieJournalKey = []byte("TrieJournal")
-
- // txIndexTailKey tracks the oldest block whose transactions have been indexed.
- txIndexTailKey = []byte("TransactionIndexTail")
-
- // fastTxLookupLimitKey tracks the transaction lookup limit during fast sync.
-	// This flag is deprecated; it's kept to avoid reporting errors when
-	// inspecting the database.
- fastTxLookupLimitKey = []byte("FastTransactionLookupLimit")
-
-	// badBlockKey tracks the list of bad blocks seen by the local node
- badBlockKey = []byte("InvalidBlock")
-
- // uncleanShutdownKey tracks the list of local crashes
- uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db
-
- // transitionStatusKey tracks the eth2 transition status.
- transitionStatusKey = []byte("eth2-transition")
-
- // snapSyncStatusFlagKey flags that status of snap sync.
- snapSyncStatusFlagKey = []byte("SnapSyncStatus")
-
- // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
- headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
- headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
- headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash
- headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian)
-
- blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
- blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
-
- txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
- bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
- SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
- SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
- CodePrefix = []byte("c") // CodePrefix + code hash -> account code
- skeletonHeaderPrefix = []byte("S") // skeletonHeaderPrefix + num (uint64 big endian) -> header
-
- // Path-based storage scheme of merkle patricia trie.
- trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
- trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
- stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id
-
- PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
- configPrefix = []byte("ethereum-config-") // config prefix for the db
- genesisPrefix = []byte("ethereum-genesis-") // genesis state prefix for the db
-
- // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
- BloomBitsIndexPrefix = []byte("iB")
-
- ChtPrefix = []byte("chtRootV2-") // ChtPrefix + chtNum (uint64 big endian) -> trie root hash
- ChtTablePrefix = []byte("cht-")
- ChtIndexTablePrefix = []byte("chtIndexV2-")
-
- BloomTriePrefix = []byte("bltRoot-") // BloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
- BloomTrieTablePrefix = []byte("blt-")
- BloomTrieIndexPrefix = []byte("bltIndex-")
-
- CliqueSnapshotPrefix = []byte("clique-")
-
- BestUpdateKey = []byte("update-") // bigEndian64(syncPeriod) -> RLP(types.LightClientUpdate) (nextCommittee only referenced by root hash)
- FixedCommitteeRootKey = []byte("fixedRoot-") // bigEndian64(syncPeriod) -> committee root hash
- SyncCommitteeKey = []byte("committee-") // bigEndian64(syncPeriod) -> serialized committee
-
- preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
- preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
-)
-
-// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
-// fields.
-type LegacyTxLookupEntry struct {
- BlockHash common.Hash
- BlockIndex uint64
- Index uint64
-}
-
-// encodeBlockNumber encodes a block number as big endian uint64
-func encodeBlockNumber(number uint64) []byte {
- enc := make([]byte, 8)
- binary.BigEndian.PutUint64(enc, number)
- return enc
-}
-
-// headerKeyPrefix = headerPrefix + num (uint64 big endian)
-func headerKeyPrefix(number uint64) []byte {
- return append(headerPrefix, encodeBlockNumber(number)...)
-}
-
-// headerKey = headerPrefix + num (uint64 big endian) + hash
-func headerKey(number uint64, hash common.Hash) []byte {
- return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
-// headerTDKey = headerPrefix + num (uint64 big endian) + hash + headerTDSuffix
-func headerTDKey(number uint64, hash common.Hash) []byte {
- return append(headerKey(number, hash), headerTDSuffix...)
-}
-
-// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix
-func headerHashKey(number uint64) []byte {
- return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)
-}
-
-// headerNumberKey = headerNumberPrefix + hash
-func headerNumberKey(hash common.Hash) []byte {
- return append(headerNumberPrefix, hash.Bytes()...)
-}
-
-// blockBodyKey = blockBodyPrefix + num (uint64 big endian) + hash
-func blockBodyKey(number uint64, hash common.Hash) []byte {
- return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
-// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
-func blockReceiptsKey(number uint64, hash common.Hash) []byte {
- return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
-// txLookupKey = txLookupPrefix + hash
-func txLookupKey(hash common.Hash) []byte {
- return append(txLookupPrefix, hash.Bytes()...)
-}
-
-// accountSnapshotKey = SnapshotAccountPrefix + hash
-func accountSnapshotKey(hash common.Hash) []byte {
- return append(SnapshotAccountPrefix, hash.Bytes()...)
-}
-
-// storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash
-func storageSnapshotKey(accountHash, storageHash common.Hash) []byte {
- buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength)
- n := copy(buf, SnapshotStoragePrefix)
- n += copy(buf[n:], accountHash.Bytes())
- copy(buf[n:], storageHash.Bytes())
- return buf
-}
-
-// storageSnapshotsKey = SnapshotStoragePrefix + account hash
-func storageSnapshotsKey(accountHash common.Hash) []byte {
- return append(SnapshotStoragePrefix, accountHash.Bytes()...)
-}
-
-// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
-func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
- key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
-
- binary.BigEndian.PutUint16(key[1:], uint16(bit))
- binary.BigEndian.PutUint64(key[3:], section)
-
- return key
-}
-
-// skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian)
-func skeletonHeaderKey(number uint64) []byte {
- return append(skeletonHeaderPrefix, encodeBlockNumber(number)...)
-}
-
-// preimageKey = PreimagePrefix + hash
-func preimageKey(hash common.Hash) []byte {
- return append(PreimagePrefix, hash.Bytes()...)
-}
-
-// codeKey = CodePrefix + hash
-func codeKey(hash common.Hash) []byte {
- return append(CodePrefix, hash.Bytes()...)
-}
-
-// IsCodeKey reports whether the given byte slice is the key of contract code;
-// if so, it returns the raw code hash as well.
-func IsCodeKey(key []byte) (bool, []byte) {
- if bytes.HasPrefix(key, CodePrefix) && len(key) == common.HashLength+len(CodePrefix) {
- return true, key[len(CodePrefix):]
- }
- return false, nil
-}
-
-// configKey = configPrefix + hash
-func configKey(hash common.Hash) []byte {
- return append(configPrefix, hash.Bytes()...)
-}
-
-// genesisStateSpecKey = genesisPrefix + hash
-func genesisStateSpecKey(hash common.Hash) []byte {
- return append(genesisPrefix, hash.Bytes()...)
-}
-
-// stateIDKey = stateIDPrefix + root (32 bytes)
-func stateIDKey(root common.Hash) []byte {
- return append(stateIDPrefix, root.Bytes()...)
-}
-
-// accountTrieNodeKey = trieNodeAccountPrefix + nodePath.
-func accountTrieNodeKey(path []byte) []byte {
- return append(trieNodeAccountPrefix, path...)
-}
-
-// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
-func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
- buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path))
- n := copy(buf, trieNodeStoragePrefix)
- n += copy(buf[n:], accountHash.Bytes())
- copy(buf[n:], path)
- return buf
-}
-
-// IsLegacyTrieNode reports whether a provided database entry is a legacy trie
-// node. The characteristics of a legacy trie node are:
-// - the key length is 32 bytes
-// - the key is the hash of val
-func IsLegacyTrieNode(key []byte, val []byte) bool {
- if len(key) != common.HashLength {
- return false
- }
- return bytes.Equal(key, crypto.Keccak256(val))
-}
-
-// ResolveAccountTrieNodeKey reports whether a provided database entry is an
-// account trie node in path-based state scheme, and returns the resolved
-// node path if so.
-func ResolveAccountTrieNodeKey(key []byte) (bool, []byte) {
- if !bytes.HasPrefix(key, trieNodeAccountPrefix) {
- return false, nil
- }
-	// The remaining key should only consist of a hex node path
- // whose length is in the range 0 to 64 (64 is excluded
- // since leaves are always wrapped with shortNode).
- if len(key) >= len(trieNodeAccountPrefix)+common.HashLength*2 {
- return false, nil
- }
- return true, key[len(trieNodeAccountPrefix):]
-}
-
-// IsAccountTrieNode reports whether a provided database entry is an account
-// trie node in path-based state scheme.
-func IsAccountTrieNode(key []byte) bool {
- ok, _ := ResolveAccountTrieNodeKey(key)
- return ok
-}
-
-// ResolveStorageTrieNode reports whether a provided database entry is a storage
-// trie node in path-based state scheme, and returns the resolved account hash
-// and node path if so.
-func ResolveStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
- if !bytes.HasPrefix(key, trieNodeStoragePrefix) {
- return false, common.Hash{}, nil
- }
- // The remaining key consists of 2 parts:
- // - 32 bytes account hash
- // - hex node path whose length is in the range 0 to 64
- if len(key) < len(trieNodeStoragePrefix)+common.HashLength {
- return false, common.Hash{}, nil
- }
- if len(key) >= len(trieNodeStoragePrefix)+common.HashLength+common.HashLength*2 {
- return false, common.Hash{}, nil
- }
- accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength])
- return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:]
-}
-
-// IsStorageTrieNode reports whether a provided database entry is a storage
-// trie node in path-based state scheme.
-func IsStorageTrieNode(key []byte) bool {
- ok, _, _ := ResolveStorageTrieNode(key)
- return ok
-}
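
[review note] For anyone auditing this deletion: the path-based helpers above encode keys as prefix + account hash + hex node path, where the path is strictly shorter than 64 nibbles because leaves are always wrapped in short nodes. A minimal round-trip sketch of that encoding, using hypothetical standalone names rather than the deleted package's exports:

```go
package main

import (
	"bytes"
	"fmt"
)

// Hypothetical stand-ins for the deleted package's prefix and hash size.
var trieNodeStoragePrefix = []byte("O")

const hashLength = 32

// storageTrieNodeKey = prefix + account hash + node path.
func storageTrieNodeKey(accountHash [hashLength]byte, path []byte) []byte {
	buf := make([]byte, 0, len(trieNodeStoragePrefix)+hashLength+len(path))
	buf = append(buf, trieNodeStoragePrefix...)
	buf = append(buf, accountHash[:]...)
	return append(buf, path...)
}

// resolveStorageTrieNode inverts storageTrieNodeKey. The path part must be
// shorter than 64 nibbles: a full-length path never appears as a stored key.
func resolveStorageTrieNode(key []byte) (bool, [hashLength]byte, []byte) {
	var account [hashLength]byte
	if !bytes.HasPrefix(key, trieNodeStoragePrefix) {
		return false, account, nil
	}
	rest := key[len(trieNodeStoragePrefix):]
	if len(rest) < hashLength || len(rest) >= hashLength+hashLength*2 {
		return false, account, nil
	}
	copy(account[:], rest[:hashLength])
	return true, account, rest[hashLength:]
}

func main() {
	var acct [hashLength]byte
	acct[0] = 0xaa
	key := storageTrieNodeKey(acct, []byte{0x01, 0x02, 0x03})
	ok, got, path := resolveStorageTrieNode(key)
	fmt.Println(ok, got == acct, path) // true true [1 2 3]
}
```
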
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
deleted file mode 100644
index cb9156173a..0000000000
--- a/core/rawdb/table.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package rawdb
-
-import (
- "github.com/ava-labs/libevm/ethdb"
-)
-
-// table is a wrapper around a database that prefixes each key access with a pre-
-// configured string.
-type table struct {
- db ethdb.Database
- prefix string
-}
-
-// NewTable returns a database object that prefixes all keys with a given string.
-func NewTable(db ethdb.Database, prefix string) ethdb.Database {
- return &table{
- db: db,
- prefix: prefix,
- }
-}
-
-// Close is a noop to implement the Database interface.
-func (t *table) Close() error {
- return nil
-}
-
-// Has reports whether a prefixed version of a key is present in the database.
-func (t *table) Has(key []byte) (bool, error) {
- return t.db.Has(append([]byte(t.prefix), key...))
-}
-
-// Get retrieves the given prefixed key if it's present in the database.
-func (t *table) Get(key []byte) ([]byte, error) {
- return t.db.Get(append([]byte(t.prefix), key...))
-}
-
-// HasAncient is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) HasAncient(kind string, number uint64) (bool, error) {
- return t.db.HasAncient(kind, number)
-}
-
-// Ancient is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
- return t.db.Ancient(kind, number)
-}
-
-// AncientRange is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
- return t.db.AncientRange(kind, start, count, maxBytes)
-}
-
-// Ancients is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) Ancients() (uint64, error) {
- return t.db.Ancients()
-}
-
-// Tail is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) Tail() (uint64, error) {
- return t.db.Tail()
-}
-
-// AncientSize is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) AncientSize(kind string) (uint64, error) {
- return t.db.AncientSize(kind)
-}
-
-// ModifyAncients runs an ancient write operation on the underlying database.
-func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
- return t.db.ModifyAncients(fn)
-}
-
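-// ReadAncients is a passthrough that runs the given read operation against the
-// underlying database.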
-func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
- return t.db.ReadAncients(fn)
-}
-
-// TruncateHead is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) TruncateHead(items uint64) (uint64, error) {
- return t.db.TruncateHead(items)
-}
-
-// TruncateTail is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) TruncateTail(items uint64) (uint64, error) {
- return t.db.TruncateTail(items)
-}
-
-// Sync is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) Sync() error {
- return t.db.Sync()
-}
-
-// MigrateTable processes the entries in a given table in sequence,
-// converting them to a new format if they're of an old format.
-func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
- return t.db.MigrateTable(kind, convert)
-}
-
-// AncientDatadir returns the ancient datadir of the underlying database.
-func (t *table) AncientDatadir() (string, error) {
- return t.db.AncientDatadir()
-}
-
-// Put inserts the given value into the database at a prefixed version of the
-// provided key.
-func (t *table) Put(key []byte, value []byte) error {
- return t.db.Put(append([]byte(t.prefix), key...), value)
-}
-
-// Delete removes the given prefixed key from the database.
-func (t *table) Delete(key []byte) error {
- return t.db.Delete(append([]byte(t.prefix), key...))
-}
-
-// NewIterator creates a binary-alphabetical iterator over a subset
-// of database content with a particular key prefix, starting at a particular
-// initial key (or after, if it does not exist).
-func (t *table) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
- innerPrefix := append([]byte(t.prefix), prefix...)
- iter := t.db.NewIterator(innerPrefix, start)
- return &tableIterator{
- iter: iter,
- prefix: t.prefix,
- }
-}
-
-// Stat returns a particular internal stat of the database.
-func (t *table) Stat(property string) (string, error) {
- return t.db.Stat(property)
-}
-
-// Compact flattens the underlying data store for the given key range. In essence,
-// deleted and overwritten versions are discarded, and the data is rearranged to
-// reduce the cost of operations needed to access them.
-//
-// A nil start is treated as a key before all keys in the data store; a nil limit
-// is treated as a key after all keys in the data store. If both are nil, the
-// entire data store will be compacted.
-func (t *table) Compact(start []byte, limit []byte) error {
- // If no start was specified, use the table prefix as the first value
- if start == nil {
- start = []byte(t.prefix)
- } else {
- start = append([]byte(t.prefix), start...)
- }
- // If no limit was specified, use the first element not matching the prefix
- // as the limit
- if limit == nil {
- limit = []byte(t.prefix)
- for i := len(limit) - 1; i >= 0; i-- {
- // Bump the current character, stopping if it doesn't overflow
- limit[i]++
- if limit[i] > 0 {
- break
- }
-			// Character overflowed, carry into the previous byte; nil the limit if this was the first
- if i == 0 {
- limit = nil
- }
- }
- } else {
- limit = append([]byte(t.prefix), limit...)
- }
- // Range correctly calculated based on table prefix, delegate down
- return t.db.Compact(start, limit)
-}
-
-// NewBatch creates a write-only database that buffers changes to its host db
-// until a final write is called, each operation prefixing all keys with the
-// pre-configured string.
-func (t *table) NewBatch() ethdb.Batch {
- return &tableBatch{t.db.NewBatch(), t.prefix}
-}
-
-// NewBatchWithSize creates a write-only database batch with a pre-allocated buffer.
-func (t *table) NewBatchWithSize(size int) ethdb.Batch {
- return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
-}
-
-// NewSnapshot creates a database snapshot based on the current state.
-// The created snapshot will not be affected by any mutations that
-// subsequently happen on the database.
-func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
- return t.db.NewSnapshot()
-}
-
-// tableBatch is a wrapper around a database batch that prefixes each key access
-// with a pre-configured string.
-type tableBatch struct {
- batch ethdb.Batch
- prefix string
-}
-
-// Put inserts the given value into the batch for later committing.
-func (b *tableBatch) Put(key, value []byte) error {
- return b.batch.Put(append([]byte(b.prefix), key...), value)
-}
-
-// Delete inserts a key removal into the batch for later committing.
-func (b *tableBatch) Delete(key []byte) error {
- return b.batch.Delete(append([]byte(b.prefix), key...))
-}
-
-// ValueSize retrieves the amount of data queued up for writing.
-func (b *tableBatch) ValueSize() int {
- return b.batch.ValueSize()
-}
-
-// Write flushes any accumulated data to disk.
-func (b *tableBatch) Write() error {
- return b.batch.Write()
-}
-
-// Reset resets the batch for reuse.
-func (b *tableBatch) Reset() {
- b.batch.Reset()
-}
-
-// tableReplayer is a wrapper around a batch replayer which truncates
-// the added prefix.
-type tableReplayer struct {
- w ethdb.KeyValueWriter
- prefix string
-}
-
-// Put implements the interface KeyValueWriter.
-func (r *tableReplayer) Put(key []byte, value []byte) error {
- trimmed := key[len(r.prefix):]
- return r.w.Put(trimmed, value)
-}
-
-// Delete implements the interface KeyValueWriter.
-func (r *tableReplayer) Delete(key []byte) error {
- trimmed := key[len(r.prefix):]
- return r.w.Delete(trimmed)
-}
-
-// Replay replays the batch contents.
-func (b *tableBatch) Replay(w ethdb.KeyValueWriter) error {
- return b.batch.Replay(&tableReplayer{w: w, prefix: b.prefix})
-}
-
-// tableIterator is a wrapper around a database iterator that prefixes each key access
-// with a pre-configured string.
-type tableIterator struct {
- iter ethdb.Iterator
- prefix string
-}
-
-// Next moves the iterator to the next key/value pair. It returns false when
-// the iterator is exhausted.
-func (iter *tableIterator) Next() bool {
- return iter.iter.Next()
-}
-
-// Error returns any accumulated error. Exhausting all the key/value pairs
-// is not considered to be an error.
-func (iter *tableIterator) Error() error {
- return iter.iter.Error()
-}
-
-// Key returns the key of the current key/value pair, or nil if done. The caller
-// should not modify the contents of the returned slice, and its contents may
-// change on the next call to Next.
-func (iter *tableIterator) Key() []byte {
- key := iter.iter.Key()
- if key == nil {
- return nil
- }
- return key[len(iter.prefix):]
-}
-
-// Value returns the value of the current key/value pair, or nil if done. The
-// caller should not modify the contents of the returned slice, and its contents
-// may change on the next call to Next.
-func (iter *tableIterator) Value() []byte {
- return iter.iter.Value()
-}
-
-// Release releases associated resources. Release should always succeed and can
-// be called multiple times without causing error.
-func (iter *tableIterator) Release() {
- iter.iter.Release()
-}
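
[review note] The subtlest piece of the deleted table.go is Compact's derivation of an exclusive upper bound when no limit is given: increment the prefix's last byte, carrying leftwards on overflow, and fall back to nil (end of keyspace) when every byte is 0xff. A hedged standalone sketch of that computation, under the assumption that the standalone name prefixSuccessor stands in for the inline loop:

```go
package main

import "fmt"

// prefixSuccessor mirrors the limit computation in the deleted Compact:
// produce a key greater than every key sharing the prefix by bumping the
// last byte with carry, or nil if the prefix is empty or all 0xff.
func prefixSuccessor(prefix []byte) []byte {
	limit := append([]byte(nil), prefix...) // copy so the caller's slice is untouched
	for i := len(limit) - 1; i >= 0; i-- {
		limit[i]++
		if limit[i] > 0 { // no wraparound: limit now sorts after the whole prefix range
			return limit
		}
		// byte wrapped 0xff -> 0x00: carry into the previous byte
	}
	return nil // every byte was 0xff: no finite upper bound exists
}

func main() {
	fmt.Printf("%x\n", prefixSuccessor([]byte{0x61, 0x62})) // 6163
	fmt.Printf("%x\n", prefixSuccessor([]byte{0x61, 0xff})) // 6200
	fmt.Println(prefixSuccessor([]byte{0xff}) == nil)       // true
}
```
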
diff --git a/core/rawdb/table_test.go b/core/rawdb/table_test.go
deleted file mode 100644
index a6f4b454f6..0000000000
--- a/core/rawdb/table_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rawdb
-
-import (
- "bytes"
- "testing"
-
- "github.com/ava-labs/libevm/ethdb"
-)
-
-func TestTableDatabase(t *testing.T) { testTableDatabase(t, "prefix") }
-func TestEmptyPrefixTableDatabase(t *testing.T) { testTableDatabase(t, "") }
-
-type testReplayer struct {
- puts [][]byte
- dels [][]byte
-}
-
-func (r *testReplayer) Put(key []byte, value []byte) error {
- r.puts = append(r.puts, key)
- return nil
-}
-
-func (r *testReplayer) Delete(key []byte) error {
- r.dels = append(r.dels, key)
- return nil
-}
-
-func testTableDatabase(t *testing.T, prefix string) {
- db := NewTable(NewMemoryDatabase(), prefix)
-
- var entries = []struct {
- key []byte
- value []byte
- }{
- {[]byte{0x01, 0x02}, []byte{0x0a, 0x0b}},
- {[]byte{0x03, 0x04}, []byte{0x0c, 0x0d}},
- {[]byte{0x05, 0x06}, []byte{0x0e, 0x0f}},
-
- {[]byte{0xff, 0xff, 0x01}, []byte{0x1a, 0x1b}},
- {[]byte{0xff, 0xff, 0x02}, []byte{0x1c, 0x1d}},
- {[]byte{0xff, 0xff, 0x03}, []byte{0x1e, 0x1f}},
- }
-
- // Test Put/Get operation
- for _, entry := range entries {
- db.Put(entry.key, entry.value)
- }
- for _, entry := range entries {
- got, err := db.Get(entry.key)
- if err != nil {
- t.Fatalf("Failed to get value: %v", err)
- }
- if !bytes.Equal(got, entry.value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got)
- }
- }
-
- // Test batch operation
- db = NewTable(NewMemoryDatabase(), prefix)
- batch := db.NewBatch()
- for _, entry := range entries {
- batch.Put(entry.key, entry.value)
- }
- batch.Write()
- for _, entry := range entries {
- got, err := db.Get(entry.key)
- if err != nil {
- t.Fatalf("Failed to get value: %v", err)
- }
- if !bytes.Equal(got, entry.value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got)
- }
- }
-
- // Test batch replayer
- r := &testReplayer{}
- batch.Replay(r)
- for index, entry := range entries {
- got := r.puts[index]
- if !bytes.Equal(got, entry.key) {
- t.Fatalf("Key mismatch: want=%v, got=%v", entry.key, got)
- }
- }
-
- check := func(iter ethdb.Iterator, expCount, index int) {
- count := 0
- for iter.Next() {
- key, value := iter.Key(), iter.Value()
- if !bytes.Equal(key, entries[index].key) {
- t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key)
- }
- if !bytes.Equal(value, entries[index].value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value)
- }
- index += 1
- count++
- }
- if count != expCount {
- t.Fatalf("Wrong number of elems, exp %d got %d", expCount, count)
- }
- iter.Release()
- }
- // Test iterators
- check(db.NewIterator(nil, nil), 6, 0)
- // Test iterators with prefix
- check(db.NewIterator([]byte{0xff, 0xff}, nil), 3, 3)
- // Test iterators with start point
- check(db.NewIterator(nil, []byte{0xff, 0xff, 0x02}), 2, 4)
- // Test iterators with prefix and start point
- check(db.NewIterator([]byte{0xee}, nil), 0, 0)
- check(db.NewIterator(nil, []byte{0x00}), 6, 0)
-}
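
[review note] The deleted test pins down the wrapper's core invariant: callers read, write, and iterate with logical keys, while only prefixed keys ever reach the backing store, and Key()/Replay strip the prefix back off. A toy illustration of the invariant with a plain map standing in for ethdb (hypothetical names, not the real API):

```go
package main

import "fmt"

// prefixedStore is a toy stand-in for rawdb's table: the caller uses
// logical keys, the backing map only ever sees prefix+key.
type prefixedStore struct {
	prefix string
	data   map[string][]byte
}

func (s *prefixedStore) Put(key, value []byte) {
	s.data[s.prefix+string(key)] = value
}

func (s *prefixedStore) Get(key []byte) ([]byte, bool) {
	v, ok := s.data[s.prefix+string(key)]
	return v, ok
}

func main() {
	backing := make(map[string][]byte)
	table := &prefixedStore{prefix: "t-", data: backing}

	table.Put([]byte{0x01, 0x02}, []byte{0x0a, 0x0b})
	if v, ok := table.Get([]byte{0x01, 0x02}); ok {
		fmt.Printf("logical read: %x\n", v) // 0a0b
	}
	for k := range backing {
		fmt.Printf("physical key: %x\n", k) // 742d0102 ("t-" + key)
	}
}
```
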
diff --git a/core/types/block_ext.go b/core/types/block_ext.go
index 6e123beb43..545e372250 100644
--- a/core/types/block_ext.go
+++ b/core/types/block_ext.go
@@ -56,6 +56,13 @@ func (b *BlockExtra) Body(block *Block) *Body {
return WithBodyExtra(body, extra)
}
+func (b *BlockExtra) WithExtra(block *Block, body *Body) *Block {
+ bodyExtra := GetBodyExtra(body)
+ const recalc = false
+ block = WithBlockExtra(block, bodyExtra.Version, bodyExtra.ExtData, recalc)
+ return block
+}
+
// blockSerializable defines the block in the Ethereum blockchain,
// as it is to be serialized into RLP.
type blockSerializable struct {
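
[review note] The new WithExtra is the write-direction counterpart of Body(): it copies a body's coreth-specific Version and ExtData onto a block and deliberately passes recalc = false, trusting that the body came from the same block so the extra-data hash need not be recomputed. A toy sketch of that copy pattern with hypothetical standalone types (not coreth's actual API):

```go
package main

import "fmt"

// Toy shapes standing in for coreth's Block/BodyExtra, illustrating the
// WithExtra flow: lift the body's extra payload onto the block without
// recomputing derived fields (recalc = false).
type bodyExtra struct {
	version uint32
	extData *[]byte
}

type block struct {
	version uint32
	extData *[]byte
}

func withBlockExtra(b block, version uint32, extData *[]byte, recalc bool) block {
	b.version = version
	b.extData = extData
	_ = recalc // the real helper would re-derive the extra hash when true
	return b
}

func main() {
	payload := []byte{0xde, 0xad}
	extra := bodyExtra{version: 1, extData: &payload}
	blk := withBlockExtra(block{}, extra.version, extra.extData, false)
	fmt.Println(blk.version, *blk.extData) // 1 [222 173]
}
```
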
diff --git a/go.mod b/go.mod
index 256f100699..def18273d8 100644
--- a/go.mod
+++ b/go.mod
@@ -9,8 +9,6 @@ require (
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.1.0
- github.com/gofrs/flock v0.8.1
- github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/gorilla/rpc v1.2.0
github.com/gorilla/websocket v1.5.0
github.com/hashicorp/go-bexpr v0.1.10
@@ -20,7 +18,6 @@ require (
github.com/holiman/uint256 v1.2.4
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.17
- github.com/olekukonko/tablewriter v0.0.5
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model v0.3.0
github.com/shirou/gopsutil v3.21.11+incompatible
@@ -72,8 +69,10 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
+ github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
+ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
github.com/google/renameio/v2 v2.0.0 // indirect
github.com/google/uuid v1.6.0 // indirect
@@ -93,6 +92,7 @@ require (
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect
+ github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
@@ -136,4 +136,4 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
-replace github.com/ava-labs/libevm => github.com/ava-labs/libevm v0.0.0-20250122094956-11c780f117f8
+replace github.com/ava-labs/libevm => github.com/ava-labs/libevm v0.0.0-20250131144451-c0f677c030ad
diff --git a/go.sum b/go.sum
index a71dc0f262..c09d3346cf 100644
--- a/go.sum
+++ b/go.sum
@@ -58,8 +58,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/ava-labs/avalanchego v1.12.1-0.20250107220127-32f58b4fa9c8 h1:qN3MOBHB//Ynhgt5Vys3iVe42Sr0EWSeN18VL3ecXzE=
github.com/ava-labs/avalanchego v1.12.1-0.20250107220127-32f58b4fa9c8/go.mod h1:2B7+E5neLvkOr2zursGhebjU26d4AfB7RazPxBs8hHg=
-github.com/ava-labs/libevm v0.0.0-20250122094956-11c780f117f8 h1:koH85Ew+1o1oaZotJy6BVJsuigu0Am3dHexS24WMFb0=
-github.com/ava-labs/libevm v0.0.0-20250122094956-11c780f117f8/go.mod h1:M8TCw2g1D5GBB7hu7g1F4aot5bRHGSxnBawNVmHE9Z0=
+github.com/ava-labs/libevm v0.0.0-20250131144451-c0f677c030ad h1:CNNdNhacDkItmu7P/tD4qOO/e6bL6ZvZ7Ao3AwhlK34=
+github.com/ava-labs/libevm v0.0.0-20250131144451-c0f677c030ad/go.mod h1:M8TCw2g1D5GBB7hu7g1F4aot5bRHGSxnBawNVmHE9Z0=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
diff --git a/scripts/tests.e2e.sh b/scripts/tests.e2e.sh
index e68a0f19d0..e54b50a548 100755
--- a/scripts/tests.e2e.sh
+++ b/scripts/tests.e2e.sh
@@ -45,7 +45,7 @@ git checkout -B "test-${AVALANCHE_VERSION}" "${AVALANCHE_VERSION}"
echo "updating coreth dependency to point to ${CORETH_PATH}"
go mod edit -replace "github.com/ava-labs/coreth=${CORETH_PATH}"
-go mod edit -replace "github.com/ava-labs/libevm=github.com/ava-labs/libevm@v0.0.0-20250122094956-11c780f117f8"
+go mod edit -replace "github.com/ava-labs/libevm=github.com/ava-labs/libevm@v0.0.0-20250131144451-c0f677c030ad"
go mod tidy
echo "building avalanchego"