diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go
index 9f9b7a000d..75fbc91ceb 100644
--- a/accounts/abi/bind/util_test.go
+++ b/accounts/abi/bind/util_test.go
@@ -56,14 +56,17 @@ func TestWaitDeployed(t *testing.T) {
for name, test := range waitDeployedTests {
backend := backends.NewSimulatedBackend(
core.GenesisAlloc{
- crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
+ crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
},
10000000,
)
defer backend.Close()
- // Create the transaction.
- tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code))
+ // Create the transaction
+ head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
+ gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
+
+ tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code))
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
// Wait for it to get mined in the background.
@@ -99,15 +102,18 @@ func TestWaitDeployed(t *testing.T) {
func TestWaitDeployedCornerCases(t *testing.T) {
backend := backends.NewSimulatedBackend(
core.GenesisAlloc{
- crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
+ crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
},
10000000,
)
defer backend.Close()
+ head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
+ gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
+
// Create a transaction to an account.
code := "6060604052600a8060106000396000f360606040526008565b00"
- tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code))
+ tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -119,7 +125,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
}
// Create a transaction that is not mined.
- tx = types.NewContractCreation(1, big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code))
+ tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
go func() {
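
The two test fixes above follow a single pattern: with a post-EIP-1559 simulated backend, the funding account needs a far larger genesis balance, and a legacy transaction must price itself at or above the pending base fee. A minimal sketch of that pattern, assuming go-ethereum's simulated backend (the funding value and gas limit are illustrative, mirroring the tests):

```go
package main

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	backend := backends.NewSimulatedBackend(
		core.GenesisAlloc{
			// Fund generously: with a non-zero base fee, the old 10-gwei-scale
			// balance no longer covers gas for the test transactions.
			crypto.PubkeyToAddress(key.PublicKey): {Balance: big.NewInt(10000000000000000)},
		},
		10000000,
	)
	defer backend.Close()

	// The latest header carries the base fee; adding 1 wei keeps the legacy
	// gas price strictly above it, as the tests above do.
	head, _ := backend.HeaderByNumber(context.Background(), nil)
	gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))

	tx := types.NewContractCreation(0, big.NewInt(0), 300000, gasPrice, nil)
	_ = tx // sign with types.SignTx and wait for it to mine, as in the tests
}
```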
diff --git a/accounts/external/backend.go b/accounts/external/backend.go
index dbfdd39d54..fb1fcffa35 100644
--- a/accounts/external/backend.go
+++ b/accounts/external/backend.go
@@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/signer/core"
+ "github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/harmony-one/harmony/accounts"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/eth/rpc"
@@ -218,12 +218,13 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
t := common.NewMixedcaseAddress(*tx.To())
to = &t
}
- args := &core.SendTxArgs{
+ gas := hexutil.Big(*tx.GasPrice())
+ args := &apitypes.SendTxArgs{
Data: &data,
Nonce: hexutil.Uint64(tx.Nonce()),
Value: hexutil.Big(*tx.Value()),
Gas: hexutil.Uint64(tx.GasLimit()),
- GasPrice: hexutil.Big(*tx.GasPrice()),
+ GasPrice: &gas,
To: to,
From: common.NewMixedcaseAddress(account.Address),
}
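
In apitypes.SendTxArgs the GasPrice field is now a *hexutil.Big rather than a value, which is why the diff takes the address of a temporary: a nil pointer lets the signer distinguish "no legacy gas price set" from zero. A stand-alone illustration (sendTxArgs is a local stand-in for the real struct):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// sendTxArgs is a local stand-in for apitypes.SendTxArgs; only the field
// shape matters here.
type sendTxArgs struct {
	GasPrice *hexutil.Big // pointer: nil means "no legacy gas price"
}

func main() {
	price := big.NewInt(30_000_000_000)
	gas := hexutil.Big(*price) // copy into an addressable value, as SignTx does above
	args := sendTxArgs{GasPrice: &gas}
	fmt.Println(args.GasPrice.ToInt()) // 30000000000
}
```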
diff --git a/api/service/blockproposal/service.go b/api/service/blockproposal/service.go
index 1cbb5accf3..82db7c63a7 100644
--- a/api/service/blockproposal/service.go
+++ b/api/service/blockproposal/service.go
@@ -10,17 +10,15 @@ import (
type Service struct {
stopChan chan struct{}
stoppedChan chan struct{}
- readySignal chan consensus.ProposalType
- commitSigsChan chan []byte
+ c *consensus.Consensus
messageChan chan *msg_pb.Message
- waitForConsensusReady func(readySignal chan consensus.ProposalType, commitSigsChan chan []byte, stopChan chan struct{}, stoppedChan chan struct{})
+ waitForConsensusReady func(c *consensus.Consensus, stopChan chan struct{}, stoppedChan chan struct{})
}
// New returns a block proposal service.
-func New(readySignal chan consensus.ProposalType, commitSigsChan chan []byte, waitForConsensusReady func(readySignal chan consensus.ProposalType, commitSigsChan chan []byte, stopChan chan struct{}, stoppedChan chan struct{})) *Service {
+func New(c *consensus.Consensus, waitForConsensusReady func(c *consensus.Consensus, stopChan chan struct{}, stoppedChan chan struct{})) *Service {
return &Service{
- readySignal: readySignal,
- commitSigsChan: commitSigsChan,
+ c: c,
waitForConsensusReady: waitForConsensusReady,
stopChan: make(chan struct{}),
stoppedChan: make(chan struct{}),
@@ -34,7 +32,7 @@ func (s *Service) Start() error {
}
func (s *Service) run() {
- s.waitForConsensusReady(s.readySignal, s.commitSigsChan, s.stopChan, s.stoppedChan)
+ s.waitForConsensusReady(s.c, s.stopChan, s.stoppedChan)
}
// Stop stops block proposal service.
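
The refactor collapses two channel parameters into a single *consensus.Consensus dependency, so construction and shutdown get simpler. A self-contained sketch of the new shape (the Consensus type here is a stand-in; the real one comes from the consensus package):

```go
package main

import "fmt"

// Consensus is a stand-in so the sketch compiles on its own.
type Consensus struct{}

type Service struct {
	c                     *Consensus
	stopChan, stoppedChan chan struct{}
	waitForConsensusReady func(c *Consensus, stopChan, stoppedChan chan struct{})
}

func New(c *Consensus, wait func(*Consensus, chan struct{}, chan struct{})) *Service {
	return &Service{
		c:                     c,
		waitForConsensusReady: wait,
		stopChan:              make(chan struct{}),
		stoppedChan:           make(chan struct{}),
	}
}

func main() {
	s := New(&Consensus{}, func(c *Consensus, stop, stopped chan struct{}) {
		defer close(stopped)
		<-stop // the real loop proposes blocks until stop is closed
	})
	go s.waitForConsensusReady(s.c, s.stopChan, s.stoppedChan)
	close(s.stopChan)
	<-s.stoppedChan
	fmt.Println("service stopped cleanly")
}
```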
diff --git a/api/service/explorer/interface.go b/api/service/explorer/interface.go
index a8e179415e..aa8a9ac856 100644
--- a/api/service/explorer/interface.go
+++ b/api/service/explorer/interface.go
@@ -42,7 +42,7 @@ type explorerDB struct {
// newExplorerLvlDB new explorer storage using leveldb
func newExplorerLvlDB(dbPath string) (database, error) {
- db, err := leveldb.New(dbPath, 16, 500, "explorer_db")
+ db, err := leveldb.New(dbPath, 16, 500, "explorer_db", false)
if err != nil {
return nil, err
}
@@ -80,7 +80,7 @@ func (db *explorerDB) NewBatch() batch {
}
func (db *explorerDB) NewPrefixIterator(prefix []byte) iterator {
- it := db.db.NewIteratorWithPrefix(prefix)
+ it := db.db.NewIterator(prefix, nil)
return it
}
@@ -95,7 +95,7 @@ type sizedIterator struct {
}
func (db *explorerDB) newSizedIterator(start []byte, size int) *sizedIterator {
- it := db.db.NewIteratorWithStart(start)
+ it := db.db.NewIterator(nil, start)
return &sizedIterator{
it: it,
curIndex: 0,
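
Both iterator helpers moved to the unified NewIterator(prefix, start) API of go-ethereum's ethdb/leveldb wrapper, whose constructor also gained a read-only flag. A sketch of the two call styles replaced above (path and keys are illustrative):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/leveldb"
)

func main() {
	// The final bool selects read-only mode, matching the new constructor.
	db, err := leveldb.New("/tmp/explorer_db", 16, 500, "explorer_db", false)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Prefix-only iteration (the old NewIteratorWithPrefix).
	it := db.NewIterator([]byte("tx-"), nil)
	for it.Next() {
		fmt.Printf("%s\n", it.Key())
	}
	it.Release()

	// Start-only iteration (the old NewIteratorWithStart).
	it = db.NewIterator(nil, []byte("addr-0"))
	for it.Next() {
		fmt.Printf("%s\n", it.Key())
	}
	it.Release()
}
```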
diff --git a/cmd/harmony/config_migrations.go b/cmd/harmony/config_migrations.go
index b3da9ec2ba..84607c2d0d 100644
--- a/cmd/harmony/config_migrations.go
+++ b/cmd/harmony/config_migrations.go
@@ -8,7 +8,7 @@ import (
"strings"
goversion "github.com/hashicorp/go-version"
- "github.com/pelletier/go-toml"
+ "github.com/pelletier/go-toml" // TODO support go-toml/v2
"github.com/harmony-one/harmony/api/service/legacysync"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
@@ -357,6 +357,27 @@ func init() {
return confTree
}
+ migrations["2.5.13"] = func(confTree *toml.Tree) *toml.Tree {
+ if confTree.Get("TxPool.AccountQueue") == nil {
+ confTree.Set("TxPool.AccountQueue", defaultConfig.TxPool.AccountQueue)
+ }
+ if confTree.Get("TxPool.GlobalQueue") == nil {
+ confTree.Set("TxPool.GlobalQueue", defaultConfig.TxPool.GlobalQueue)
+ }
+ if confTree.Get("TxPool.Lifetime") == nil {
+ confTree.Set("TxPool.Lifetime", defaultConfig.TxPool.Lifetime.String())
+ }
+ if confTree.Get("TxPool.PriceLimit") == nil {
+ confTree.Set("TxPool.PriceLimit", defaultConfig.TxPool.PriceLimit)
+ }
+ if confTree.Get("TxPool.PriceBump") == nil {
+ confTree.Set("TxPool.PriceBump", defaultConfig.TxPool.PriceBump)
+ }
+
+ confTree.Set("Version", "2.5.14")
+ return confTree
+ }
+
// check that the latest version here is the same as in default.go
largestKey := getNextVersion(migrations)
if largestKey != tomlConfigVersion {
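
The migration map follows a fixed contract: each entry back-fills keys that older config files lack and bumps Version, so the chain advances exactly one step per call until it reaches the latest version. A self-contained sketch of that loop, using the go-toml v1 API imported above (the default value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

var migrations = map[string]func(*toml.Tree) *toml.Tree{}

func init() {
	migrations["2.5.13"] = func(confTree *toml.Tree) *toml.Tree {
		if confTree.Get("TxPool.AccountQueue") == nil {
			confTree.Set("TxPool.AccountQueue", int64(64)) // illustrative default
		}
		confTree.Set("Version", "2.5.14")
		return confTree
	}
}

func main() {
	confTree, _ := toml.Load("Version = \"2.5.13\"\n[TxPool]\n")
	for {
		version := confTree.Get("Version").(string)
		migrate, ok := migrations[version]
		if !ok {
			break // reached the latest version
		}
		confTree = migrate(confTree)
	}
	fmt.Println(confTree.Get("Version")) // 2.5.14
}
```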
diff --git a/cmd/harmony/config_migrations_test.go b/cmd/harmony/config_migrations_test.go
index 72db5d7900..e52c7347b2 100644
--- a/cmd/harmony/config_migrations_test.go
+++ b/cmd/harmony/config_migrations_test.go
@@ -1,10 +1,10 @@
package main
import (
- "reflect"
"testing"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
+ "github.com/stretchr/testify/require"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
)
@@ -72,6 +72,11 @@ Version = "1.0.2"
[TxPool]
BlacklistFile = "./.hmy/blacklist.txt"
LocalAccountsFile = "./.hmy/locals.txt"
+ AccountQueue = 64
+ GlobalQueue = 5120
+ Lifetime = "30m"
+ PriceBump = 1
+ PriceLimit = 100e9
[WS]
Enabled = true
@@ -142,6 +147,11 @@ Version = "1.0.3"
[TxPool]
BlacklistFile = "./.hmy/blacklist.txt"
LocalAccountsFile = "./.hmy/locals.txt"
+ AccountQueue = 64
+ GlobalQueue = 5120
+ Lifetime = "30m"
+ PriceBump = 1
+ PriceLimit = 100e9
[WS]
Enabled = true
@@ -224,6 +234,11 @@ Version = "1.0.4"
[TxPool]
BlacklistFile = "./.hmy/blacklist.txt"
LocalAccountsFile = "./.hmy/locals.txt"
+ AccountQueue = 64
+ GlobalQueue = 5120
+ Lifetime = "30m"
+ PriceBump = 1
+ PriceLimit = 100e9
[WS]
Enabled = true
@@ -314,6 +329,11 @@ Version = "1.0.4"
BlacklistFile = "./.hmy/blacklist.txt"
LocalAccountsFile = "./.hmy/locals.txt"
AllowedTxsFile = "./.hmy/allowedtxs.txt"
+ AccountQueue = 64
+ GlobalQueue = 5120
+ Lifetime = "30m"
+ PriceBump = 1
+ PriceLimit = 100e9
[WS]
Enabled = true
@@ -389,9 +409,7 @@ func Test_migrateConf(t *testing.T) {
t.Errorf("migrateConf() error = %v, wantErr %v", err, tt.wantErr)
return
}
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("migrateConf() = %+v, want %+v", got, tt.want)
- }
+ require.Equal(t, tt.want, got)
})
}
}
diff --git a/cmd/harmony/config_test.go b/cmd/harmony/config_test.go
index 0d58797008..d19f5d9858 100644
--- a/cmd/harmony/config_test.go
+++ b/cmd/harmony/config_test.go
@@ -9,6 +9,7 @@ import (
"testing"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
+ "github.com/stretchr/testify/require"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
)
@@ -86,6 +87,11 @@ Version = "1.0.4"
BlacklistFile = "./.hmy/blacklist.txt"
LocalAccountsFile = "./.hmy/locals.txt"
AllowedTxsFile = "./.hmy/allowedtxs.txt"
+ AccountQueue = 64
+ GlobalQueue = 5120
+ Lifetime = "30m"
+ PriceBump = 1
+ PriceLimit = 100e9
[Sync]
Downloader = false
@@ -136,9 +142,7 @@ Version = "1.0.4"
t.Errorf("Expected config version: 1.0.4, not %v", config.Version)
}
config.Version = defConf.Version // Shortcut for testing, value checked above
- if !reflect.DeepEqual(config, defConf) {
- t.Errorf("Unexpected config \n\t%+v \n\t%+v", config, defaultConfig)
- }
+ require.Equal(t, config, defConf)
}
func TestPersistConfig(t *testing.T) {
diff --git a/cmd/harmony/default.go b/cmd/harmony/default.go
index 95e05b29cc..2c15e123f7 100644
--- a/cmd/harmony/default.go
+++ b/cmd/harmony/default.go
@@ -1,11 +1,12 @@
package main
import (
+ "github.com/harmony-one/harmony/core"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
)
-const tomlConfigVersion = "2.5.13"
+const tomlConfigVersion = "2.5.14"
const (
defNetworkType = nodeconfig.Mainnet
@@ -81,9 +82,14 @@ var defaultConfig = harmonyconfig.HarmonyConfig{
BlacklistFile: "./.hmy/blacklist.txt",
AllowedTxsFile: "./.hmy/allowedtxs.txt",
RosettaFixFile: "",
- AccountSlots: 16,
+ AccountSlots: core.DefaultTxPoolConfig.AccountSlots,
LocalAccountsFile: "./.hmy/locals.txt",
- GlobalSlots: 5120,
+ GlobalSlots: core.DefaultTxPoolConfig.GlobalSlots,
+ AccountQueue: core.DefaultTxPoolConfig.AccountQueue,
+ GlobalQueue: core.DefaultTxPoolConfig.GlobalQueue,
+ Lifetime: core.DefaultTxPoolConfig.Lifetime,
+ PriceLimit: harmonyconfig.PriceLimit(core.DefaultTxPoolConfig.PriceLimit),
+ PriceBump: core.DefaultTxPoolConfig.PriceBump,
},
Sync: getDefaultSyncConfig(defNetworkType),
Pprof: harmonyconfig.PprofConfig{
diff --git a/cmd/harmony/dumpdb.go b/cmd/harmony/dumpdb.go
index e66d3617bb..5803359e5b 100644
--- a/cmd/harmony/dumpdb.go
+++ b/cmd/harmony/dumpdb.go
@@ -12,7 +12,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- ethRawDB "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -176,7 +175,8 @@ func (db *KakashiDB) Close() error {
return db.Database.Close()
}
-func (db *KakashiDB) OnRoot(common.Hash) {}
+func (db *KakashiDB) OnRoot(common.Hash) {}
+func (db *KakashiDB) OnAccount(common.Address, state.DumpAccount) {}
// OnAccount implements DumpCollector interface
func (db *KakashiDB) OnAccountStart(addr common.Address, acc state.DumpAccount) {
@@ -345,7 +345,7 @@ func (db *KakashiDB) stateDataDump(block *types.Block) {
fmt.Println("stateDataDump:", snapdbInfo.LastAccountKey.String(), snapdbInfo.LastAccountStateKey.String())
stateDB0 := state.NewDatabaseWithCache(db, STATEDB_CACHE_SIZE)
rootHash := block.Root()
- stateDB, err := state.New(rootHash, stateDB0)
+ stateDB, err := state.New(rootHash, stateDB0, nil)
if err != nil {
panic(err)
}
@@ -354,8 +354,8 @@ func (db *KakashiDB) stateDataDump(block *types.Block) {
if len(snapdbInfo.LastAccountStateKey) > 0 {
stateKey := new(big.Int).SetBytes(snapdbInfo.LastAccountStateKey)
stateKey.Add(stateKey, big.NewInt(1))
- config.StateStart = stateKey.Bytes()
- if len(config.StateStart) != len(snapdbInfo.LastAccountStateKey) {
+ config.Start = stateKey.Bytes()
+ if len(config.Start) != len(snapdbInfo.LastAccountStateKey) {
panic("statekey overflow")
}
}
@@ -366,12 +366,12 @@ func (db *KakashiDB) stateDataDump(block *types.Block) {
func dumpMain(srcDBDir, destDBDir string, batchLimit int) {
fmt.Println("===dumpMain===")
- srcDB, err := ethRawDB.NewLevelDBDatabase(srcDBDir, LEVELDB_CACHE_SIZE, LEVELDB_HANDLES, "")
+ srcDB, err := rawdb.NewLevelDBDatabase(srcDBDir, LEVELDB_CACHE_SIZE, LEVELDB_HANDLES, "", false)
if err != nil {
fmt.Println("open src db error:", err)
os.Exit(-1)
}
- destDB, err := ethRawDB.NewLevelDBDatabase(destDBDir, LEVELDB_CACHE_SIZE, LEVELDB_HANDLES, "")
+ destDB, err := rawdb.NewLevelDBDatabase(destDBDir, LEVELDB_CACHE_SIZE, LEVELDB_HANDLES, "", false)
if err != nil {
fmt.Println("open dest db error:", err)
os.Exit(-1)
diff --git a/cmd/harmony/flags.go b/cmd/harmony/flags.go
index 8a2799ce28..9760652e03 100644
--- a/cmd/harmony/flags.go
+++ b/cmd/harmony/flags.go
@@ -4,6 +4,7 @@ import (
"fmt"
"strconv"
"strings"
+ "time"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
@@ -139,12 +140,17 @@ var (
txPoolFlags = []cli.Flag{
tpAccountSlotsFlag,
+ tpGlobalSlotsFlag,
+ tpAccountQueueFlag,
+ tpGlobalQueueFlag,
+ tpLifetimeFlag,
rosettaFixFileFlag,
tpBlacklistFileFlag,
legacyTPBlacklistFileFlag,
localAccountsFileFlag,
allowedTxsFileFlag,
- tpGlobalSlotsFlag,
+ tpPriceLimitFlag,
+ tpPriceBumpFlag,
}
pprofFlags = []cli.Flag{
@@ -1186,6 +1192,31 @@ var (
Usage: "maximum global number of non-executable transactions in the pool",
DefValue: int(defaultConfig.TxPool.GlobalSlots),
}
+ tpAccountQueueFlag = cli.IntFlag{
+ Name: "txpool.accountqueue",
+ Usage: "capacity of queued transactions for account in the pool",
+ DefValue: int(defaultConfig.TxPool.AccountQueue),
+ }
+ tpGlobalQueueFlag = cli.IntFlag{
+ Name: "txpool.globalqueue",
+ Usage: "global capacity for queued transactions in the pool",
+ DefValue: int(defaultConfig.TxPool.GlobalQueue),
+ }
+ tpLifetimeFlag = cli.StringFlag{
+ Name: "txpool.lifetime",
+ Usage: "maximum lifetime of transactions in the pool as a golang duration string",
+ DefValue: defaultConfig.TxPool.Lifetime.String(),
+ }
+ tpPriceLimitFlag = cli.IntFlag{
+ Name: "txpool.pricelimit",
+ Usage: "minimum gas price to enforce for acceptance into the pool",
+ DefValue: int(defaultConfig.TxPool.PriceLimit),
+ }
+ tpPriceBumpFlag = cli.IntFlag{
+ Name: "txpool.pricebump",
+ Usage: "minimum price bump to replace an already existing transaction (nonce)",
+ DefValue: int(defaultConfig.TxPool.PriceBump),
+ }
)
func applyTxPoolFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) {
@@ -1206,6 +1237,20 @@ func applyTxPoolFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) {
}
config.TxPool.GlobalSlots = uint64(value)
}
+ if cli.IsFlagChanged(cmd, tpAccountQueueFlag) {
+ value := cli.GetIntFlagValue(cmd, tpAccountQueueFlag)
+ if value <= 0 {
+ panic("Must provide positive value for txpool.accountqueue")
+ }
+ config.TxPool.AccountQueue = uint64(value)
+ }
+ if cli.IsFlagChanged(cmd, tpGlobalQueueFlag) {
+ value := cli.GetIntFlagValue(cmd, tpGlobalQueueFlag)
+ if value <= 0 {
+ panic("Must provide positive value for txpool.globalqueue")
+ }
+ config.TxPool.GlobalQueue = uint64(value)
+ }
if cli.IsFlagChanged(cmd, tpBlacklistFileFlag) {
config.TxPool.BlacklistFile = cli.GetStringFlagValue(cmd, tpBlacklistFileFlag)
} else if cli.IsFlagChanged(cmd, legacyTPBlacklistFileFlag) {
@@ -1217,6 +1262,27 @@ func applyTxPoolFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) {
if cli.IsFlagChanged(cmd, allowedTxsFileFlag) {
config.TxPool.AllowedTxsFile = cli.GetStringFlagValue(cmd, allowedTxsFileFlag)
}
+ if cli.IsFlagChanged(cmd, tpLifetimeFlag) {
+ value, err := time.ParseDuration(cli.GetStringFlagValue(cmd, tpLifetimeFlag))
+ if err != nil {
+ panic(fmt.Sprintf("Invalid value for txpool.lifetime: %v", err))
+ }
+ config.TxPool.Lifetime = value
+ }
+ if cli.IsFlagChanged(cmd, tpPriceLimitFlag) {
+ value := cli.GetIntFlagValue(cmd, tpPriceLimitFlag)
+ if value <= 0 {
+ panic("Must provide positive value for txpool.pricelimit")
+ }
+ config.TxPool.PriceLimit = harmonyconfig.PriceLimit(value)
+ }
+ if cli.IsFlagChanged(cmd, tpPriceBumpFlag) {
+ value := cli.GetIntFlagValue(cmd, tpPriceBumpFlag)
+ if value <= 0 {
+ panic("Must provide positive value for txpool.pricebump")
+ }
+ config.TxPool.PriceBump = uint64(value)
+ }
}
// pprof flags
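
All the new txpool flags above share one validation contract: non-positive numeric values panic, and Lifetime is parsed with time.ParseDuration. A compact illustration (mustPositive is a hypothetical helper that mirrors the in-line checks):

```go
package main

import (
	"fmt"
	"time"
)

// mustPositive mirrors the panic-on-invalid style used by applyTxPoolFlags.
func mustPositive(name string, value int) uint64 {
	if value <= 0 {
		panic(fmt.Sprintf("Must provide positive value for %s", name))
	}
	return uint64(value)
}

func main() {
	accountQueue := mustPositive("txpool.accountqueue", 128)
	lifetime, err := time.ParseDuration("15m") // e.g. --txpool.lifetime 15m
	if err != nil {
		panic(fmt.Sprintf("Invalid value for txpool.lifetime: %v", err))
	}
	fmt.Println(accountQueue, lifetime) // 128 15m0s
}
```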
diff --git a/cmd/harmony/flags_test.go b/cmd/harmony/flags_test.go
index 2015188ed4..5338b9f71e 100644
--- a/cmd/harmony/flags_test.go
+++ b/cmd/harmony/flags_test.go
@@ -5,6 +5,7 @@ import (
"reflect"
"strings"
"testing"
+ "time"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
@@ -118,8 +119,13 @@ func TestHarmonyFlags(t *testing.T) {
AllowedTxsFile: "./.hmy/allowedtxs.txt",
RosettaFixFile: "",
AccountSlots: 16,
- GlobalSlots: 5120,
+ GlobalSlots: 4096,
LocalAccountsFile: "./.hmy/locals.txt",
+ AccountQueue: 64,
+ GlobalQueue: 5120,
+ Lifetime: 30 * time.Minute,
+ PriceLimit: 100e9,
+ PriceBump: 1,
},
Pprof: harmonyconfig.PprofConfig{
Enabled: false,
@@ -1005,6 +1011,11 @@ func TestTxPoolFlags(t *testing.T) {
AccountSlots: defaultConfig.TxPool.AccountSlots,
LocalAccountsFile: defaultConfig.TxPool.LocalAccountsFile,
GlobalSlots: defaultConfig.TxPool.GlobalSlots,
+ AccountQueue: defaultConfig.TxPool.AccountQueue,
+ GlobalQueue: defaultConfig.TxPool.GlobalQueue,
+ Lifetime: defaultConfig.TxPool.Lifetime,
+ PriceLimit: 100e9,
+ PriceBump: 1,
},
},
{
@@ -1015,7 +1026,12 @@ func TestTxPoolFlags(t *testing.T) {
RosettaFixFile: "rosettafix.file",
AccountSlots: defaultConfig.TxPool.AccountSlots,
GlobalSlots: defaultConfig.TxPool.GlobalSlots,
+ AccountQueue: defaultConfig.TxPool.AccountQueue,
+ GlobalQueue: defaultConfig.TxPool.GlobalQueue,
+ Lifetime: defaultConfig.TxPool.Lifetime,
LocalAccountsFile: defaultConfig.TxPool.LocalAccountsFile,
+ PriceLimit: 100e9,
+ PriceBump: 1,
},
},
{
@@ -1026,7 +1042,12 @@ func TestTxPoolFlags(t *testing.T) {
AllowedTxsFile: defaultConfig.TxPool.AllowedTxsFile,
AccountSlots: defaultConfig.TxPool.AccountSlots,
GlobalSlots: defaultConfig.TxPool.GlobalSlots,
+ AccountQueue: defaultConfig.TxPool.AccountQueue,
+ GlobalQueue: defaultConfig.TxPool.GlobalQueue,
+ Lifetime: defaultConfig.TxPool.Lifetime,
LocalAccountsFile: defaultConfig.TxPool.LocalAccountsFile,
+ PriceLimit: 100e9,
+ PriceBump: 1,
},
},
{
@@ -1038,6 +1059,11 @@ func TestTxPoolFlags(t *testing.T) {
RosettaFixFile: "rosettafix.file",
LocalAccountsFile: defaultConfig.TxPool.LocalAccountsFile,
GlobalSlots: defaultConfig.TxPool.GlobalSlots,
+ AccountQueue: defaultConfig.TxPool.AccountQueue,
+ GlobalQueue: defaultConfig.TxPool.GlobalQueue,
+ Lifetime: defaultConfig.TxPool.Lifetime,
+ PriceLimit: 100e9,
+ PriceBump: 1,
},
},
{
@@ -1049,6 +1075,11 @@ func TestTxPoolFlags(t *testing.T) {
AccountSlots: defaultConfig.TxPool.AccountSlots,
LocalAccountsFile: "locals.txt",
GlobalSlots: defaultConfig.TxPool.GlobalSlots,
+ AccountQueue: defaultConfig.TxPool.AccountQueue,
+ GlobalQueue: defaultConfig.TxPool.GlobalQueue,
+ Lifetime: defaultConfig.TxPool.Lifetime,
+ PriceLimit: 100e9,
+ PriceBump: 1,
},
},
{
@@ -1060,6 +1091,27 @@ func TestTxPoolFlags(t *testing.T) {
AccountSlots: defaultConfig.TxPool.AccountSlots,
LocalAccountsFile: defaultConfig.TxPool.LocalAccountsFile,
GlobalSlots: 10240,
+ AccountQueue: defaultConfig.TxPool.AccountQueue,
+ GlobalQueue: defaultConfig.TxPool.GlobalQueue,
+ Lifetime: defaultConfig.TxPool.Lifetime,
+ PriceLimit: 100e9,
+ PriceBump: 1,
+ },
+ },
+ {
+ args: []string{"--txpool.accountqueue", "128", "--txpool.globalqueue", "10240", "--txpool.lifetime", "15m", "--txpool.pricelimit", "100", "--txpool.pricebump", "2"},
+ expConfig: harmonyconfig.TxPoolConfig{
+ BlacklistFile: defaultConfig.TxPool.BlacklistFile,
+ AllowedTxsFile: defaultConfig.TxPool.AllowedTxsFile,
+ RosettaFixFile: defaultConfig.TxPool.RosettaFixFile,
+ AccountSlots: defaultConfig.TxPool.AccountSlots,
+ LocalAccountsFile: defaultConfig.TxPool.LocalAccountsFile,
+ GlobalSlots: defaultConfig.TxPool.GlobalSlots,
+ AccountQueue: 128,
+ GlobalQueue: 10240,
+ Lifetime: 15 * time.Minute,
+ PriceLimit: 100,
+ PriceBump: 2,
},
},
}
diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go
index f01cb758ed..88da5f7513 100644
--- a/cmd/harmony/main.go
+++ b/cmd/harmony/main.go
@@ -495,7 +495,7 @@ func nodeconfigSetShardSchedule(config harmonyconfig.HarmonyConfig) {
}
devnetConfig, err := shardingconfig.NewInstance(
- uint32(dnConfig.NumShards), dnConfig.ShardSize, dnConfig.HmyNodeSize, dnConfig.SlotsLimit, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccounts, shardingconfig.Allowlist{}, ethCommon.Address{}, nil, shardingconfig.VLBPE)
+ uint32(dnConfig.NumShards), dnConfig.ShardSize, dnConfig.HmyNodeSize, dnConfig.SlotsLimit, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccounts, shardingconfig.Allowlist{}, nil, nil, shardingconfig.VLBPE)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ERROR invalid devnet sharding config: %s",
err)
diff --git a/consensus/consensus.go b/consensus/consensus.go
index e0cc591dfe..c5573c972d 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -87,9 +87,9 @@ type Consensus struct {
// ViewChange struct
vc *viewChange
// Signal channel for proposing a new block and start new consensus
- ReadySignal chan ProposalType
+ readySignal chan ProposalType
// Channel to send full commit signatures to finish new block proposal
- CommitSigChannel chan []byte
+ commitSigChannel chan []byte
// The post-consensus job func passed from Node object
// Called when consensus on a new block is done
PostConsensusJob func(*types.Block) error
@@ -139,6 +139,22 @@ func (consensus *Consensus) Blockchain() core.BlockChain {
return consensus.registry.GetBlockchain()
}
+func (consensus *Consensus) ReadySignal(p ProposalType) {
+ consensus.readySignal <- p
+}
+
+func (consensus *Consensus) GetReadySignal() chan ProposalType {
+ return consensus.readySignal
+}
+
+func (consensus *Consensus) CommitSigChannel() chan []byte {
+ return consensus.commitSigChannel
+}
+
+func (consensus *Consensus) GetCommitSigChannel() chan []byte {
+ return consensus.commitSigChannel
+}
+
// VerifyBlock is a function used to verify the block and keep trace of verified blocks.
func (consensus *Consensus) verifyBlock(block *types.Block) error {
if !consensus.FBFTLog.IsBlockVerified(block.Hash()) {
@@ -218,6 +234,8 @@ func (consensus *Consensus) getConsensusLeaderPrivateKey() (*bls.PrivateKeyWrapp
// SetBlockVerifier sets the block verifier
func (consensus *Consensus) SetBlockVerifier(verifier VerifyBlockFunc) {
+ consensus.mutex.Lock()
+ defer consensus.mutex.Unlock()
consensus.BlockVerifier = verifier
consensus.vc.SetVerifyBlock(consensus.verifyBlock)
}
@@ -272,8 +290,8 @@ func New(
consensus.SetCurBlockViewID(0)
consensus.ShardID = shard
consensus.SlashChan = make(chan slash.Record)
- consensus.ReadySignal = make(chan ProposalType)
- consensus.CommitSigChannel = make(chan []byte)
+ consensus.readySignal = make(chan ProposalType)
+ consensus.commitSigChannel = make(chan []byte)
// channel for receiving newly generated VDF
consensus.RndChannel = make(chan [vdfAndSeedSize]byte)
consensus.IgnoreViewIDCheck = abool.NewBool(false)
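
Making readySignal and commitSigChannel private puts every send behind the blocking ReadySignal(p) helper, while consumers receive from GetReadySignal(). A minimal stand-alone sketch of that accessor pattern (types trimmed to the essentials):

```go
package main

import "fmt"

type ProposalType int

const (
	SyncProposal ProposalType = iota
	AsyncProposal
)

type Consensus struct {
	readySignal chan ProposalType
}

// ReadySignal sends a proposal request; callers no longer touch the channel directly.
func (c *Consensus) ReadySignal(p ProposalType) { c.readySignal <- p }

// GetReadySignal exposes the receive side for the block-proposal loop.
func (c *Consensus) GetReadySignal() chan ProposalType { return c.readySignal }

func main() {
	c := &Consensus{readySignal: make(chan ProposalType)}
	go c.ReadySignal(SyncProposal)
	fmt.Println(<-c.GetReadySignal()) // 0 (SyncProposal)
}
```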
diff --git a/consensus/consensus_service.go b/consensus/consensus_service.go
index aac91fbd1b..e8d7e1645c 100644
--- a/consensus/consensus_service.go
+++ b/consensus/consensus_service.go
@@ -458,10 +458,10 @@ func (consensus *Consensus) updateConsensusInformation() Mode {
if (oldLeader != nil && consensus.LeaderPubKey != nil &&
!consensus.LeaderPubKey.Object.IsEqual(oldLeader.Object)) && consensus.isLeader() {
go func() {
- consensus.getLogger().Info().
+ consensus.GetLogger().Info().
Str("myKey", myPubKeys.SerializeToHexStr()).
Msg("[UpdateConsensusInformation] I am the New Leader")
- consensus.ReadySignal <- SyncProposal
+ consensus.ReadySignal(SyncProposal)
}()
}
return Normal
@@ -549,7 +549,7 @@ func (consensus *Consensus) GetFinality() int64 {
return consensus.finality
}
-// switchPhase will switch FBFTPhase to nextPhase if the desirePhase equals the nextPhase
+// switchPhase will switch FBFTPhase to desired phase.
func (consensus *Consensus) switchPhase(subject string, desired FBFTPhase) {
consensus.getLogger().Info().
Str("from:", consensus.phase.String()).
diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go
index 41fe5b127f..d2397a64aa 100644
--- a/consensus/consensus_test.go
+++ b/consensus/consensus_test.go
@@ -66,8 +66,8 @@ func TestConsensusInitialization(t *testing.T) {
assert.IsType(t, make(chan slash.Record), consensus.SlashChan)
assert.NotNil(t, consensus.SlashChan)
- assert.IsType(t, make(chan ProposalType), consensus.ReadySignal)
- assert.NotNil(t, consensus.ReadySignal)
+ assert.IsType(t, make(chan ProposalType), consensus.GetReadySignal())
+ assert.NotNil(t, consensus.GetReadySignal())
assert.IsType(t, make(chan [vdfAndSeedSize]byte), consensus.RndChannel)
assert.NotNil(t, consensus.RndChannel)
diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go
index 99bccf7554..8e72896c3e 100644
--- a/consensus/consensus_v2.go
+++ b/consensus/consensus_v2.go
@@ -13,7 +13,6 @@ import (
"github.com/harmony-one/harmony/consensus/signature"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/utils"
-
"github.com/rs/zerolog"
msg_pb "github.com/harmony-one/harmony/api/proto/message"
@@ -255,13 +254,13 @@ func (consensus *Consensus) finalCommit() {
// No pipelining
go func() {
consensus.getLogger().Info().Msg("[finalCommit] sending block proposal signal")
- consensus.ReadySignal <- SyncProposal
+ consensus.ReadySignal(SyncProposal)
}()
} else {
// pipelining
go func() {
select {
- case consensus.CommitSigChannel <- commitSigAndBitmap:
+ case consensus.GetCommitSigChannel() <- commitSigAndBitmap:
case <-time.After(CommitSigSenderTimeout):
utils.Logger().Error().Err(err).Msg("[finalCommit] channel not received after 6s for commitSigAndBitmap")
}
@@ -335,7 +334,7 @@ func (consensus *Consensus) StartChannel() {
consensus.start = true
consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Send ReadySignal")
consensus.mutex.Unlock()
- consensus.ReadySignal <- SyncProposal
+ consensus.ReadySignal(SyncProposal)
return
}
consensus.mutex.Unlock()
@@ -403,7 +402,7 @@ func (consensus *Consensus) tick() {
continue
}
}
- if !v.CheckExpire() {
+ if !v.Expired(time.Now()) {
continue
}
if k != timeoutViewChange {
@@ -429,7 +428,7 @@ func (consensus *Consensus) BlockChannel(newBlock *types.Block) {
return
}
// Sleep to wait for the full block time
- consensus.getLogger().Info().Msg("[ConsensusMainLoop] Waiting for Block Time")
+ consensus.GetLogger().Info().Msg("[ConsensusMainLoop] Waiting for Block Time")
time.AfterFunc(time.Until(consensus.NextBlockDue), func() {
consensus.StartFinalityCount()
consensus.mutex.Lock()
@@ -588,7 +587,7 @@ func (consensus *Consensus) preCommitAndPropose(blk *types.Block) error {
// Send signal to Node to propose the new block for consensus
consensus.getLogger().Info().Msg("[preCommitAndPropose] sending block proposal signal")
- consensus.ReadySignal <- AsyncProposal
+ consensus.ReadySignal(AsyncProposal)
}()
return nil
@@ -688,40 +687,70 @@ func (consensus *Consensus) commitBlock(blk *types.Block, committedMsg *FBFTMess
// rotateLeader rotates the leader to the next leader in the committee.
// This function must be called with enabled leader rotation.
func (consensus *Consensus) rotateLeader(epoch *big.Int) {
- prev := consensus.getLeaderPubKey()
- bc := consensus.Blockchain()
- curNumber := bc.CurrentHeader().Number().Uint64()
- utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v numblocks:%d", epoch.Uint64(), bc.Config().IsLeaderRotation(epoch), bc.Config().LeaderRotationBlocksCount)
- leader := consensus.getLeaderPubKey()
- for i := 0; i < bc.Config().LeaderRotationBlocksCount; i++ {
- header := bc.GetHeaderByNumber(curNumber - uint64(i))
- if header == nil {
- return
- }
- // Previous epoch, we should not change leader.
- if header.Epoch().Uint64() != epoch.Uint64() {
- return
- }
- // Check if the same leader.
- pub, err := bc.GetLeaderPubKeyFromCoinbase(header)
- if err != nil {
- utils.Logger().Error().Err(err).Msg("Failed to get leader public key from coinbase")
- return
- }
- if !pub.Object.IsEqual(leader.Object) {
- // Another leader.
- return
- }
+ var (
+ bc = consensus.Blockchain()
+ prev = consensus.getLeaderPubKey()
+ leader = consensus.getLeaderPubKey()
+ )
+ utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v external rotation %v", epoch.Uint64(), bc.Config().IsLeaderRotation(epoch), bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch, consensus.ShardID))
+ ss, err := bc.ReadShardState(epoch)
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to read shard state")
+ return
+ }
+ committee, err := ss.FindCommitteeByID(consensus.ShardID)
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to find committee")
+ return
+ }
+ slotsCount := len(committee.Slots)
+ blocksPerEpoch := shard.Schedule.InstanceForEpoch(epoch).BlocksPerEpoch()
+ if blocksPerEpoch == 0 {
+ utils.Logger().Error().Msg("[Rotating leader] blocks per epoch is 0")
+ return
+ }
+ if slotsCount == 0 {
+ utils.Logger().Error().Msg("[Rotating leader] slots count is 0")
+ return
+ }
+ numBlocksProducedByLeader := blocksPerEpoch / uint64(slotsCount)
+ rest := blocksPerEpoch % uint64(slotsCount)
+ const minimumBlocksForLeaderInRow = 3
+ if numBlocksProducedByLeader < minimumBlocksForLeaderInRow {
+ // mine no less than 3 blocks in a row
+ numBlocksProducedByLeader = minimumBlocksForLeaderInRow
+ }
+ type stored struct {
+ pub []byte
+ epoch uint64
+ count uint64
+ shifts uint64 // how many times the leader changed within the epoch
+ }
+ var s stored
+ s.pub, s.epoch, s.count, s.shifts, _ = bc.LeaderRotationMeta()
+ if !bytes.Equal(leader.Bytes[:], s.pub) {
+ // Another leader.
+ return
+ }
+ // the first validator to produce blocks in the epoch should also produce the `rest` blocks.
+ if s.shifts == 0 {
+ numBlocksProducedByLeader += rest
+ }
+ if s.count < numBlocksProducedByLeader {
+ // Not enough blocks produced by the leader.
+ return
}
// Passed all checks, we can change leader.
+ // NthNext will move the leader to the next leader in the committee.
+ // It does not know anything about external or internal validators.
var (
wasFound bool
next *bls.PublicKeyWrapper
)
- if consensus.ShardID == shard.BeaconChainShardID {
- wasFound, next = consensus.Decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, 1)
- } else {
+ if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch, consensus.ShardID) {
wasFound, next = consensus.Decider.NthNext(leader, 1)
+ } else {
+ wasFound, next = consensus.Decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, 1)
}
if !wasFound {
utils.Logger().Error().Msg("Failed to get next leader")
@@ -732,7 +761,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
if consensus.isLeader() && !consensus.getLeaderPubKey().Object.IsEqual(prev.Object) {
// leader changed
go func() {
- consensus.ReadySignal <- SyncProposal
+ consensus.ReadySignal(SyncProposal)
}()
}
}
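
The block-count rule in rotateLeader is easier to see with concrete numbers: with blocksPerEpoch = 32 and 10 slots, each leader produces max(32/10, 3) = 3 blocks, and the epoch's first leader (shifts == 0) also absorbs the remainder 32 % 10 = 2. A sketch of just that arithmetic (values are illustrative, not from any real shard schedule):

```go
package main

import "fmt"

// blocksForLeader reproduces the rotation arithmetic in rotateLeader.
func blocksForLeader(blocksPerEpoch, slotsCount, shifts uint64) uint64 {
	const minimumBlocksForLeaderInRow = 3
	n := blocksPerEpoch / slotsCount
	if n < minimumBlocksForLeaderInRow {
		n = minimumBlocksForLeaderInRow // mine no fewer than 3 blocks in a row
	}
	if shifts == 0 {
		// the epoch's first leader also produces the remainder blocks
		n += blocksPerEpoch % slotsCount
	}
	return n
}

func main() {
	fmt.Println(blocksForLeader(32, 10, 0)) // 5 = max(3, 3) + 32%10
	fmt.Println(blocksForLeader(32, 10, 1)) // 3
}
```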
diff --git a/consensus/quorum/quorom_test.go b/consensus/quorum/quorom_test.go
index b663b58d5b..09ec1779b8 100644
--- a/consensus/quorum/quorom_test.go
+++ b/consensus/quorum/quorom_test.go
@@ -565,7 +565,7 @@ func TestNthNextHmyExt(test *testing.T) {
allLeaders := append(blsKeys[:numHmyNodes], allowlistLeaders...)
decider := NewDecider(SuperMajorityVote, shard.BeaconChainShardID)
- fakeInstance := shardingconfig.MustNewInstance(2, 20, numHmyNodes, 0, numeric.OneDec(), nil, nil, allowlist, common.Address{}, nil, 0)
+ fakeInstance := shardingconfig.MustNewInstance(2, 20, numHmyNodes, 0, numeric.OneDec(), nil, nil, allowlist, nil, nil, 0)
decider.UpdateParticipants(blsKeys, allowlistLeaders)
for i := 0; i < len(allLeaders); i++ {
diff --git a/consensus/validator.go b/consensus/validator.go
index f85cb8e3d8..92008a91e6 100644
--- a/consensus/validator.go
+++ b/consensus/validator.go
@@ -63,9 +63,8 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
go func() {
// Best effort check, no need to error out.
_, err := consensus.ValidateNewBlock(recvMsg)
-
if err == nil {
- consensus.getLogger().Info().
+ consensus.GetLogger().Info().
Msg("[Announce] Block verified")
}
}()
diff --git a/consensus/view_change.go b/consensus/view_change.go
index aafdfd1210..151074817a 100644
--- a/consensus/view_change.go
+++ b/consensus/view_change.go
@@ -203,13 +203,13 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64) *bls.PublicKeyWrappe
var wasFound bool
var next *bls.PublicKeyWrapper
if blockchain != nil && blockchain.Config().IsLeaderRotation(epoch) {
- if consensus.ShardID == shard.BeaconChainShardID {
- wasFound, next = consensus.Decider.NthNextHmy(
- shard.Schedule.InstanceForEpoch(epoch),
+ if blockchain.Config().IsLeaderRotationExternalValidatorsAllowed(epoch, consensus.ShardID) {
+ wasFound, next = consensus.Decider.NthNext(
lastLeaderPubKey,
gap)
} else {
- wasFound, next = consensus.Decider.NthNext(
+ wasFound, next = consensus.Decider.NthNextHmy(
+ shard.Schedule.InstanceForEpoch(epoch),
lastLeaderPubKey,
gap)
}
@@ -422,10 +422,7 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) {
consensus.getLogger().Error().Err(err).Msg("[onViewChange] startNewView failed")
return
}
-
- go func() {
- consensus.ReadySignal <- SyncProposal
- }()
+ go consensus.ReadySignal(SyncProposal)
return
}
diff --git a/contracts/Puzzle.go b/contracts/Puzzle.go
index 916ae81950..de9a0334b2 100644
--- a/contracts/Puzzle.go
+++ b/contracts/Puzzle.go
@@ -20,7 +20,6 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
@@ -154,7 +153,7 @@ func bindPuzzle(address common.Address, caller bind.ContractCaller, transactor b
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
-func (_Puzzle *PuzzleRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
+func (_Puzzle *PuzzleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _Puzzle.Contract.PuzzleCaller.contract.Call(opts, result, method, params...)
}
@@ -173,7 +172,7 @@ func (_Puzzle *PuzzleRaw) Transact(opts *bind.TransactOpts, method string, param
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
-func (_Puzzle *PuzzleCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
+func (_Puzzle *PuzzleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _Puzzle.Contract.contract.Call(opts, result, method, params...)
}
@@ -192,12 +191,13 @@ func (_Puzzle *PuzzleTransactorRaw) Transact(opts *bind.TransactOpts, method str
//
// Solidity: function getPlayers() constant returns(address[])
func (_Puzzle *PuzzleCaller) GetPlayers(opts *bind.CallOpts) ([]common.Address, error) {
- var (
- ret0 = new([]common.Address)
- )
- out := ret0
- err := _Puzzle.contract.Call(opts, out, "getPlayers")
- return *ret0, err
+ var results []interface{}
+ err := _Puzzle.contract.Call(opts, &results, "getPlayers")
+ if err != nil {
+ return *new([]common.Address), err
+ }
+ out := *abi.ConvertType(results[0], new([]common.Address)).(*[]common.Address)
+ return out, err
}
// GetPlayers is a free data retrieval call binding the contract method 0x8b5b9ccc.
@@ -218,12 +218,13 @@ func (_Puzzle *PuzzleCallerSession) GetPlayers() ([]common.Address, error) {
//
// Solidity: function manager() constant returns(address)
func (_Puzzle *PuzzleCaller) Manager(opts *bind.CallOpts) (common.Address, error) {
- var (
- ret0 = new(common.Address)
- )
- out := ret0
- err := _Puzzle.contract.Call(opts, out, "manager")
- return *ret0, err
+ var results []interface{}
+ err := _Puzzle.contract.Call(opts, &results, "manager")
+ if err != nil {
+ return *new(common.Address), err
+ }
+ out := *abi.ConvertType(results[0], new(common.Address)).(*common.Address)
+ return out, err
}
// Manager is a free data retrieval call binding the contract method 0x481c6a75.
@@ -244,12 +245,13 @@ func (_Puzzle *PuzzleCallerSession) Manager() (common.Address, error) {
//
// Solidity: function players(uint256 ) constant returns(address)
func (_Puzzle *PuzzleCaller) Players(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) {
- var (
- ret0 = new(common.Address)
- )
- out := ret0
- err := _Puzzle.contract.Call(opts, out, "players", arg0)
- return *ret0, err
+ var results []interface{}
+ err := _Puzzle.contract.Call(opts, &results, "players", arg0)
+ if err != nil {
+ return *new(common.Address), err
+ }
+ out := *abi.ConvertType(results[0], new(common.Address)).(*common.Address)
+ return out, err
}
// Players is a free data retrieval call binding the contract method 0xf71d96cb.
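
Every regenerated getter above follows the same abigen pattern: Call fills a []interface{}, then abi.ConvertType reshapes the first element into the declared return type. A trimmed sketch of the decode step (decodeAddress is a hypothetical helper):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// decodeAddress shows the generated shape: the call fills results, and
// ConvertType reshapes the first return value into the expected Go type.
func decodeAddress(results []interface{}) (common.Address, error) {
	if len(results) == 0 {
		return common.Address{}, errors.New("empty return data")
	}
	out := *abi.ConvertType(results[0], new(common.Address)).(*common.Address)
	return out, nil
}

func main() {
	addr, _ := decodeAddress([]interface{}{common.HexToAddress("0x01")})
	fmt.Println(addr.Hex())
}
```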
diff --git a/core/blockchain.go b/core/blockchain.go
index fda4831655..8afe622c70 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -107,6 +107,8 @@ type BlockChain interface {
//
// After insertion is done, all accumulated events will be fired.
InsertChain(chain types.Blocks, verifyHeaders bool) (int, error)
+ // LeaderRotationMeta returns the number of consecutive blocks produced by the current leader.
+ LeaderRotationMeta() (publicKeyBytes []byte, epoch, count, shifts uint64, err error)
// BadBlocks returns a list of the last 'bad blocks' that
// the client has seen on the network.
BadBlocks() []BadBlock
diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go
index 8b0683bd65..f1d0968422 100644
--- a/core/blockchain_impl.go
+++ b/core/blockchain_impl.go
@@ -114,6 +114,7 @@ const (
validatorListByDelegatorCacheLimit = 128
pendingCrossLinksCacheLimit = 2
blockAccumulatorCacheLimit = 64
+ leaderPubKeyFromCoinbaseLimit = 8
maxPendingSlashes = 256
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
BlockChainVersion = 3
@@ -135,9 +136,9 @@ type BlockChainImpl struct {
pruneBeaconChainEnable bool // pruneBeaconChainEnable is enable prune BeaconChain feature
shardID uint32 // Shard number
- db ethdb.Database // Low level persistent database to store final content in
- triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
- gcproc time.Duration // Accumulates canonical block processing for trie dumping
+ db ethdb.Database // Low level persistent database to store final content in
+ triegc *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc
+ gcproc time.Duration // Accumulates canonical block processing for trie dumping
// The following two variables are used to clean up the cache of redis in tikv mode.
// This can improve the cache hit rate of redis
@@ -240,13 +241,13 @@ func newBlockChainWithOptions(
validatorListByDelegatorCache, _ := lru.New(validatorListByDelegatorCacheLimit)
pendingCrossLinksCache, _ := lru.New(pendingCrossLinksCacheLimit)
blockAccumulatorCache, _ := lru.New(blockAccumulatorCacheLimit)
- leaderPubKeyFromCoinbase, _ := lru.New(chainConfig.LeaderRotationBlocksCount + 2)
+ leaderPubKeyFromCoinbase, _ := lru.New(leaderPubKeyFromCoinbaseLimit)
bc := &BlockChainImpl{
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
- triegc: prque.New(nil),
+ triegc: prque.New[int64, common.Hash](nil),
stateCache: stateCache,
quit: make(chan struct{}),
bodyCache: bodyCache,
@@ -462,7 +463,7 @@ func (bc *BlockChainImpl) ValidateNewBlock(block *types.Block, beaconChain Block
}
func (bc *BlockChainImpl) validateNewBlock(block *types.Block) error {
- state, err := state.New(bc.CurrentBlock().Root(), bc.stateCache)
+ state, err := state.New(bc.CurrentBlock().Root(), bc.stateCache, nil)
if err != nil {
return err
}
@@ -520,7 +521,7 @@ func (bc *BlockChainImpl) loadLastState() error {
return bc.Reset()
}
// Make sure the state associated with the block is available
- if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
+ if _, err := state.New(currentBlock.Root(), bc.stateCache, nil); err != nil {
// Dangling block without a state associated, init from scratch
utils.Logger().Warn().
Str("number", currentBlock.Number().String()).
@@ -617,7 +618,7 @@ func (bc *BlockChainImpl) SetHead(head uint64) error {
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}
if currentBlock := bc.CurrentBlock(); currentBlock != nil {
- if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
+ if _, err := state.New(currentBlock.Root(), bc.stateCache, nil); err != nil {
// Rewound state missing, rolled back to before pivot, reset to genesis
bc.currentBlock.Store(bc.genesisBlock)
headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
@@ -694,7 +695,7 @@ func (bc *BlockChainImpl) State() (*state.DB, error) {
}
func (bc *BlockChainImpl) StateAt(root common.Hash) (*state.DB, error) {
- return state.New(root, bc.stateCache)
+ return state.New(root, bc.stateCache, nil)
}
func (bc *BlockChainImpl) Reset() error {
@@ -739,7 +740,7 @@ func (bc *BlockChainImpl) repair(head **types.Block) error {
valsToRemove := map[common.Address]struct{}{}
for {
// Abort if we've rewound to a head block that does have associated state
- if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
+ if _, err := state.New((*head).Root(), bc.stateCache, nil); err == nil {
utils.Logger().Info().
Str("number", (*head).Number().String()).
Str("hash", (*head).Hash().Hex()).
@@ -1007,7 +1008,7 @@ func (bc *BlockChainImpl) GetReceiptsByHash(hash common.Hash) types.Receipts {
return nil
}
- receipts := rawdb.ReadReceipts(bc.db, hash, *number)
+ receipts := rawdb.ReadReceipts(bc.db, hash, *number, nil)
bc.receiptsCache.Add(hash, receipts)
return receipts
}
@@ -1091,7 +1092,8 @@ func (bc *BlockChainImpl) Stop() {
}
}
for !bc.triegc.Empty() {
- triedb.Dereference(bc.triegc.PopItem().(common.Hash))
+ triedb.Dereference(bc.triegc.PopItem())
}
if size, _ := triedb.Size(); size != 0 {
utils.Logger().Error().Msg("Dangling trie nodes after full cleanup")
@@ -1400,6 +1402,7 @@ func (bc *BlockChainImpl) WriteBlockWithState(
} else {
// Full but not archive node, do proper garbage collection
triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
bc.triegc.Push(root, -int64(block.NumberU64()))
if current := block.NumberU64(); current > bc.cacheConfig.TriesInMemory {
@@ -1442,7 +1445,7 @@ func (bc *BlockChainImpl) WriteBlockWithState(
if -number > bc.maxGarbCollectedBlkNum {
bc.maxGarbCollectedBlkNum = -number
}
- triedb.Dereference(root.(common.Hash))
+ triedb.Dereference(root)
}
}
}
@@ -1473,7 +1476,7 @@ func (bc *BlockChainImpl) WriteBlockWithState(
if err := rawdb.WriteCxLookupEntries(batch, block); err != nil {
return NonStatTy, err
}
- if err := rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages()); err != nil {
+ if err := rawdb.WritePreimages(batch, state.Preimages()); err != nil {
return NonStatTy, err
}
@@ -1520,6 +1523,18 @@ func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool) (i
n, events, logs, err := bc.insertChain(chain, verifyHeaders)
bc.PostChainEvents(events, logs)
+ if err == nil {
+ // there should be only 1 block.
+ for _, b := range chain {
+ if b.Epoch().Uint64() > 0 {
+ err := bc.saveLeaderRotationMeta(b.Header())
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("save leader continuous blocks count error")
+ return n, err
+ }
+ }
+ }
+ }
if bc.isInitTiKV() && err != nil {
// if has some error, master writer node will release the permission
_, _ = bc.redisPreempt.Unlock()
@@ -1527,6 +1542,47 @@ func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool) (i
return n, err
}
+func (bc *BlockChainImpl) saveLeaderRotationMeta(h *block.Header) error {
+ blockPubKey, err := bc.getLeaderPubKeyFromCoinbase(h)
+ if err != nil {
+ return err
+ }
+ type stored struct {
+ pub []byte
+ epoch uint64
+ count uint64
+ shifts uint64
+ }
+ var s stored
+ // an error is possible only before the first write, so it can be ignored
+ s.pub, s.epoch, s.count, s.shifts, _ = rawdb.ReadLeaderRotationMeta(bc.db)
+
+ // increase counter only if the same leader and epoch
+ if bytes.Equal(s.pub, blockPubKey.Bytes[:]) && s.epoch == h.Epoch().Uint64() {
+ s.count++
+ } else {
+ s.count = 1
+ }
+ // we should increase shifts if the leader is changed.
+ if !bytes.Equal(s.pub, blockPubKey.Bytes[:]) {
+ s.shifts++
+ }
+ // reset shifts to zero at the start of a new epoch
+ if s.epoch != h.Epoch().Uint64() {
+ s.shifts = 0
+ }
+
+ err = rawdb.WriteLeaderRotationMeta(bc.db, blockPubKey.Bytes[:], h.Epoch().Uint64(), s.count, s.shifts)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (bc *BlockChainImpl) LeaderRotationMeta() (publicKeyBytes []byte, epoch, count, shifts uint64, err error) {
+ return rawdb.ReadLeaderRotationMeta(bc.db)
+}
+
// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
@@ -1677,7 +1733,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i
} else {
parent = chain[i-1]
}
- state, err := state.New(parent.Root(), bc.stateCache)
+ state, err := state.New(parent.Root(), bc.stateCache, nil)
if err != nil {
return i, events, coalescedLogs, err
}
@@ -3362,14 +3418,14 @@ func (bc *BlockChainImpl) tikvCleanCache() {
for i := bc.latestCleanCacheNum + 1; i <= to; i++ {
// build previous block statedb
fromBlock := bc.GetBlockByNumber(i)
- fromTrie, err := state.New(fromBlock.Root(), bc.stateCache)
+ fromTrie, err := state.New(fromBlock.Root(), bc.stateCache, nil)
if err != nil {
continue
}
// build current block statedb
toBlock := bc.GetBlockByNumber(i + 1)
- toTrie, err := state.New(toBlock.Root(), bc.stateCache)
+ toTrie, err := state.New(toBlock.Root(), bc.stateCache, nil)
if err != nil {
continue
}
@@ -3469,7 +3525,7 @@ func (bc *BlockChainImpl) InitTiKV(conf *harmonyconfig.TiKVConfig) {
// If redis is empty, the hit rate will be too low and the synchronization block speed will be slow
// set LOAD_PRE_FETCH is yes can significantly improve this.
if os.Getenv("LOAD_PRE_FETCH") == "yes" {
- if trie, err := state.New(bc.CurrentBlock().Root(), bc.stateCache); err == nil {
+ if trie, err := state.New(bc.CurrentBlock().Root(), bc.stateCache, nil); err == nil {
trie.Prefetch(512)
} else {
log.Println("LOAD_PRE_FETCH ERR: ", err)
diff --git a/core/blockchain_pruner.go b/core/blockchain_pruner.go
index 0dbe6fb696..744a7d7061 100644
--- a/core/blockchain_pruner.go
+++ b/core/blockchain_pruner.go
@@ -43,6 +43,11 @@ func newBlockchainPruner(db ethdb.Database) *blockchainPruner {
}
}
+// Put inserts the given value into the key-value data store.
+func (bp *blockchainPruner) Put(key []byte, value []byte) error {
+ return nil
+}
+
func (bp *blockchainPruner) Delete(key []byte) error {
err := bp.batchWriter.Delete(key)
if err != nil {
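
The no-op Put exists only so the pruner satisfies the Put/Delete pair that go-ethereum's key-value writer interface now demands; pruning itself never inserts. A sketch of the interface-compliance idiom (KeyValueWriter here is a local mirror of ethdb.KeyValueWriter):

```go
package main

import "fmt"

// KeyValueWriter mirrors the shape of ethdb.KeyValueWriter.
type KeyValueWriter interface {
	Put(key, value []byte) error
	Delete(key []byte) error
}

type pruner struct{ deleted int }

// Put is a deliberate no-op: pruning never inserts data.
func (p *pruner) Put(key, value []byte) error { return nil }

func (p *pruner) Delete(key []byte) error {
	p.deleted++
	return nil
}

// Compile-time assertion that pruner implements the interface.
var _ KeyValueWriter = (*pruner)(nil)

func main() {
	var w KeyValueWriter = &pruner{}
	_ = w.Put([]byte("k"), []byte("v"))
	_ = w.Delete([]byte("k"))
	fmt.Println("pruner satisfies KeyValueWriter")
}
```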
diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go
index f9e9111eae..cfe72eed6d 100644
--- a/core/blockchain_stub.go
+++ b/core/blockchain_stub.go
@@ -423,3 +423,7 @@ func (a Stub) SyncFromTiKVWriter(newBlkNum uint64, logs []*types.Log) error {
func (a Stub) InitTiKV(conf *harmonyconfig.TiKVConfig) {
return
}
+
+func (a Stub) LeaderRotationMeta() (publicKeyBytes []byte, epoch, count, shifts uint64, err error) {
+ return nil, 0, 0, 0, errors.Errorf("method LeaderRotationMeta not implemented for %s", a.Name)
+}
diff --git a/core/epochchain_test.go b/core/epochchain_test.go
index eec7bd6280..32f0dd530b 100644
--- a/core/epochchain_test.go
+++ b/core/epochchain_test.go
@@ -3,8 +3,8 @@ package core_test
import (
"testing"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/vm"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/stretchr/testify/require"
diff --git a/core/evm.go b/core/evm.go
index f395ab042f..e11726a569 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/block"
- consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/params"
@@ -39,9 +38,6 @@ import (
// ChainContext supports retrieving headers and consensus parameters from the
// current blockchain to be used during transaction processing.
type ChainContext interface {
- // Engine retrieves the chain's consensus engine.
- Engine() consensus_engine.Engine
-
// GetHeader returns the header corresponding to the given hash and number.
GetHeader(common.Hash, uint64) *block.Header
diff --git a/core/evm_test.go b/core/evm_test.go
index 962dedd307..cab3f712cf 100644
--- a/core/evm_test.go
+++ b/core/evm_test.go
@@ -9,13 +9,13 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
bls_core "github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/block"
blockfactory "github.com/harmony-one/harmony/block/factory"
"github.com/harmony-one/harmony/common/denominations"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm"
diff --git a/core/genesis.go b/core/genesis.go
index dec247a766..3e230cf9e6 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -244,10 +244,10 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
utils.Logger().Error().Msg("db should be initialized")
os.Exit(1)
}
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(db), nil)
for addr, account := range g.Alloc {
statedb.AddBalance(addr, account.Balance)
- statedb.SetCode(addr, account.Code)
+ statedb.SetCode(addr, account.Code, false)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 106a6ff0a6..72ce358e29 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -19,13 +19,20 @@ package rawdb
import (
"bytes"
"encoding/binary"
+ "errors"
+ "fmt"
"math/big"
+ "sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
+
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
)
// MsgNoShardStateFromDB error message for shard state reading failure
@@ -39,7 +46,7 @@ const (
)
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
-func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash {
+func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
data, _ := db.Get(headerHashKey(number))
if len(data) == 0 {
return common.Hash{}
@@ -48,7 +55,7 @@ func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash {
}
// WriteCanonicalHash stores the hash assigned to a canonical block number.
-func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) error {
+func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error {
if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
utils.Logger().Error().Msg("Failed to store number to hash mapping")
return err
@@ -57,7 +64,7 @@ func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) erro
}
// DeleteCanonicalHash removes the number to hash canonical mapping.
-func DeleteCanonicalHash(db DatabaseDeleter, number uint64) error {
+func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) error {
if err := db.Delete(headerHashKey(number)); err != nil {
utils.Logger().Error().Msg("Failed to delete number to hash mapping")
return err
@@ -66,7 +73,7 @@ func DeleteCanonicalHash(db DatabaseDeleter, number uint64) error {
}
// ReadHeaderNumber returns the header number assigned to a hash.
-func ReadHeaderNumber(db DatabaseReader, hash common.Hash) *uint64 {
+func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
data, _ := db.Get(headerNumberKey(hash))
if len(data) != 8 {
return nil
@@ -76,7 +83,7 @@ func ReadHeaderNumber(db DatabaseReader, hash common.Hash) *uint64 {
}
// WriteHeaderNumber stores reference from hash to number.
-func WriteHeaderNumber(db DatabaseWriter, hash common.Hash, number uint64) error {
+func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error {
var (
key = headerNumberKey(hash)
encoded = encodeBlockNumber(number)
@@ -86,7 +93,7 @@ func WriteHeaderNumber(db DatabaseWriter, hash common.Hash, number uint64) error
}
// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
-func ReadHeadHeaderHash(db DatabaseReader) common.Hash {
+func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
data, _ := db.Get(headHeaderKey)
if len(data) == 0 {
return common.Hash{}
@@ -95,7 +102,7 @@ func ReadHeadHeaderHash(db DatabaseReader) common.Hash {
}
// WriteHeadHeaderHash stores the hash of the current canonical head header.
-func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) error {
+func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
utils.Logger().Error().Msg("Failed to store last header's hash")
return err
@@ -104,7 +111,7 @@ func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) error {
}
// ReadHeadBlockHash retrieves the hash of the current canonical head block.
-func ReadHeadBlockHash(db DatabaseReader) common.Hash {
+func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
data, _ := db.Get(headBlockKey)
if len(data) == 0 {
return common.Hash{}
@@ -113,7 +120,7 @@ func ReadHeadBlockHash(db DatabaseReader) common.Hash {
}
// WriteHeadBlockHash stores the head block's hash.
-func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) error {
+func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
utils.Logger().Error().Msg("Failed to store last block's hash")
return err
@@ -122,7 +129,7 @@ func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) error {
}
// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
-func ReadHeadFastBlockHash(db DatabaseReader) common.Hash {
+func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
data, _ := db.Get(headFastBlockKey)
if len(data) == 0 {
return common.Hash{}
@@ -131,7 +138,7 @@ func ReadHeadFastBlockHash(db DatabaseReader) common.Hash {
}
// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
-func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) error {
+func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) error {
if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
utils.Logger().Error().Msg("Failed to store last fast block's hash")
return err
@@ -140,13 +147,13 @@ func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) error {
}
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
-func ReadHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
+func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(headerKey(number, hash))
return data
}
// HasHeader verifies the existence of a block header corresponding to the hash.
-func HasHeader(db DatabaseReader, hash common.Hash, number uint64) bool {
+func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
return false
}
@@ -154,7 +161,7 @@ func HasHeader(db DatabaseReader, hash common.Hash, number uint64) bool {
}
// ReadHeader retrieves the block header corresponding to the hash.
-func ReadHeader(db DatabaseReader, hash common.Hash, number uint64) *block.Header {
+func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *block.Header {
data := ReadHeaderRLP(db, hash, number)
if len(data) == 0 {
return nil
@@ -169,7 +176,7 @@ func ReadHeader(db DatabaseReader, hash common.Hash, number uint64) *block.Heade
// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
-func WriteHeader(db DatabaseWriter, header *block.Header) error {
+func WriteHeader(db ethdb.KeyValueWriter, header *block.Header) error {
// Write the hash -> number mapping
var (
hash = header.Hash()
@@ -196,7 +203,7 @@ func WriteHeader(db DatabaseWriter, header *block.Header) error {
}
// DeleteHeader removes all block header data associated with a hash.
-func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) error {
+func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error {
if err := db.Delete(headerKey(number, hash)); err != nil {
utils.Logger().Error().Msg("Failed to delete header")
return err
@@ -209,13 +216,13 @@ func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) error {
}
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
-func ReadBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
+func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(blockBodyKey(number, hash))
return data
}
// WriteBodyRLP stores an RLP encoded block body into the database.
-func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
+func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
utils.Logger().Error().Msg("Failed to store block body")
return err
@@ -224,7 +231,7 @@ func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.Ra
}
// HasBody verifies the existence of a block body corresponding to the hash.
-func HasBody(db DatabaseReader, hash common.Hash, number uint64) bool {
+func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
return false
}
@@ -232,7 +239,7 @@ func HasBody(db DatabaseReader, hash common.Hash, number uint64) bool {
}
// ReadBody retrieves the block body corresponding to the hash.
-func ReadBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
+func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
data := ReadBodyRLP(db, hash, number)
if len(data) == 0 {
return nil
@@ -246,7 +253,7 @@ func ReadBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
}
// WriteBody stores a block body into the database.
-func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.Body) error {
+func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
utils.Logger().Error().Msg("Failed to RLP encode body")
@@ -256,7 +263,7 @@ func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.B
}
// DeleteBody removes all block body data associated with a hash.
-func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) error {
+func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error {
if err := db.Delete(blockBodyKey(number, hash)); err != nil {
utils.Logger().Error().Msg("Failed to delete block body")
return err
@@ -265,7 +272,7 @@ func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) error {
}
// ReadTd retrieves a block's total difficulty corresponding to the hash.
-func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
+func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
data, _ := db.Get(headerTDKey(number, hash))
if len(data) == 0 {
return nil
@@ -279,7 +286,7 @@ func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
}
// WriteTd stores the total difficulty of a block into the database.
-func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) error {
+func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
utils.Logger().Error().Msg("Failed to RLP encode block total difficulty")
@@ -293,7 +300,7 @@ func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) er
}
// DeleteTd removes all block total difficulty data associated with a hash.
-func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) error {
+func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error {
if err := db.Delete(headerTDKey(number, hash)); err != nil {
utils.Logger().Error().Msg("Failed to delete block total difficulty")
return err
@@ -302,7 +309,7 @@ func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) error {
}
// ReadReceipts retrieves all the transaction receipts belonging to a block.
-func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
+func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
// Retrieve the flattened receipt slice
data, _ := db.Get(blockReceiptsKey(number, hash))
if len(data) == 0 {
@@ -321,8 +328,24 @@ func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Rece
return receipts
}
+// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
+func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ var data []byte
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
+ // Check if the data is in ancients
+ if isCanon(reader, number, hash) {
+ data, _ = reader.Ancient(ChainFreezerReceiptTable, number)
+ return nil
+ }
+ // If not, try reading from leveldb
+ data, _ = db.Get(blockReceiptsKey(number, hash))
+ return nil
+ })
+ return data
+}
+
// WriteReceipts stores all the transaction receipts belonging to a block.
-func WriteReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts types.Receipts) error {
+func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) error {
// Convert the receipts into their storage form and serialize them
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
@@ -342,7 +365,7 @@ func WriteReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts
}
// DeleteReceipts removes all receipt data associated with a block hash.
-func DeleteReceipts(db DatabaseDeleter, hash common.Hash, number uint64) error {
+func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error {
if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
utils.Logger().Error().Msg("Failed to delete block receipts")
return err
@@ -356,7 +379,7 @@ func DeleteReceipts(db DatabaseDeleter, hash common.Hash, number uint64) error {
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
-func ReadBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block {
+func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
header := ReadHeader(db, hash, number)
if header == nil {
return nil
@@ -369,7 +392,7 @@ func ReadBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block
}
// WriteBlock serializes a block into the database, header and body separately.
-func WriteBlock(db DatabaseWriter, block *types.Block) error {
+func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) error {
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
}
@@ -387,7 +410,7 @@ func WriteBlock(db DatabaseWriter, block *types.Block) error {
}
// DeleteBlock removes all block data associated with a hash.
-func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) error {
+func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error {
if err := DeleteReceipts(db, hash, number); err != nil {
return err
}
@@ -404,7 +427,7 @@ func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) error {
}
// FindCommonAncestor returns the last common ancestor of two block headers
-func FindCommonAncestor(db DatabaseReader, a, b *block.Header) *block.Header {
+func FindCommonAncestor(db ethdb.Reader, a, b *block.Header) *block.Header {
for bn := b.Number().Uint64(); a.Number().Uint64() > bn; {
a = ReadHeader(db, a.ParentHash(), a.Number().Uint64()-1)
if a == nil {
@@ -431,7 +454,7 @@ func FindCommonAncestor(db DatabaseReader, a, b *block.Header) *block.Header {
}
func IteratorBlocks(iterator DatabaseIterator, cb func(blockNum uint64, hash common.Hash) bool) (minKey []byte, maxKey []byte) {
- iter := iterator.NewIteratorWithPrefix(headerPrefix)
+ iter := iterator.NewIterator(headerPrefix, nil)
defer iter.Release()
minKey = headerPrefix
@@ -453,3 +476,443 @@ func IteratorBlocks(iterator DatabaseIterator, cb func(blockNum uint64, hash com
return
}
+
+// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
+// both canonical and reorged forks included.
+func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
+ prefix := headerKeyPrefix(number)
+
+ hashes := make([]common.Hash, 0, 1)
+ it := db.NewIterator(prefix, nil)
+ defer it.Release()
+
+ for it.Next() {
+ if key := it.Key(); len(key) == len(prefix)+32 {
+ hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
+ }
+ }
+ return hashes
+}
+
+type NumberHash struct {
+ Number uint64
+ Hash common.Hash
+}
+
+// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
+// heights, both canonical and reorged forks included.
+// This method considers both limits to be _inclusive_.
+func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
+ var (
+ start = encodeBlockNumber(first)
+ keyLength = len(headerPrefix) + 8 + 32
+ hashes = make([]*NumberHash, 0, 1+last-first)
+ it = db.NewIterator(headerPrefix, start)
+ )
+ defer it.Release()
+ for it.Next() {
+ key := it.Key()
+ if len(key) != keyLength {
+ continue
+ }
+ num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
+ if num > last {
+ break
+ }
+ hash := common.BytesToHash(key[len(key)-32:])
+ hashes = append(hashes, &NumberHash{num, hash})
+ }
+ return hashes
+}
+
+// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in
+// the given chain range. If the accumulated entries reach the given threshold,
+// the iteration is aborted and the partial result is returned.
+func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
+ // Short circuit if the limit is 0.
+ if limit == 0 {
+ return nil, nil
+ }
+ var (
+ numbers []uint64
+ hashes []common.Hash
+ )
+ // Construct the key prefix of start point.
+ start, end := headerHashKey(from), headerHashKey(to)
+ it := db.NewIterator(nil, start)
+ defer it.Release()
+
+ for it.Next() {
+ if bytes.Compare(it.Key(), end) >= 0 {
+ break
+ }
+ if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
+ numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
+ hashes = append(hashes, common.BytesToHash(it.Value()))
+ // If the accumulated entries reach the limit threshold, return.
+ if len(numbers) >= limit {
+ break
+ }
+ }
+ }
+ return numbers, hashes
+}
+
+// DeleteHeaderNumber removes the hash->number mapping.
+func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(headerNumberKey(hash)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete hash to number mapping")
+ }
+}
+
+// ReadFinalizedBlockHash retrieves the hash of the finalized block.
+func ReadFinalizedBlockHash(db ethdb.KeyValueReader) common.Hash {
+ data, _ := db.Get(headFinalizedBlockKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// WriteFinalizedBlockHash stores the hash of the finalized block.
+func WriteFinalizedBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Put(headFinalizedBlockKey, hash.Bytes()); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store last finalized block's hash")
+ }
+}
+
+// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
+// is fully synced, the last pivot will always be nil.
+func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(lastPivotKey)
+ if len(data) == 0 {
+ return nil
+ }
+ var pivot uint64
+ if err := rlp.DecodeBytes(data, &pivot); err != nil {
+ utils.Logger().Error().Err(err).Msg("Invalid pivot block number in database")
+ return nil
+ }
+ return &pivot
+}
+
+// WriteLastPivotNumber stores the number of the last pivot block.
+func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
+ enc, err := rlp.EncodeToBytes(pivot)
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to encode pivot block number")
+ }
+ if err := db.Put(lastPivotKey, enc); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store pivot block number")
+ }
+}
+
+// ReadTxIndexTail retrieves the number of the oldest indexed block
+// whose transaction indices have been indexed.
+func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(txIndexTailKey)
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteTxIndexTail stores the number of the oldest indexed block
+// into the database.
+func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store the transaction index tail")
+ }
+}
+
+// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
+func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(fastTxLookupLimitKey)
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteFastTxLookupLimit stores the txlookup limit used in fast sync into database.
+func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store transaction lookup limit for fast sync")
+ }
+}
+
+// deleteHeaderWithoutNumber removes only the block header but does not remove
+// the hash to number mapping.
+func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ if err := db.Delete(headerKey(number, hash)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete header")
+ }
+}
+
+// isCanon is an internal utility method to check whether the given number/hash
+// is part of the ancient (canon) set.
+func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
+ h, err := reader.Ancient(ChainFreezerHashTable, number)
+ if err != nil {
+ return false
+ }
+ return bytes.Equal(h, hash[:])
+}
+
+// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
+// block at number, in RLP encoding.
+func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
+ var data []byte
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
+ data, _ = reader.Ancient(ChainFreezerBodiesTable, number)
+ if len(data) > 0 {
+ return nil
+ }
+ // Block is not in ancients, read from leveldb by hash and number.
+ // Note: ReadCanonicalHash cannot be used here because it also
+ // calls ReadAncients internally.
+ hash, _ := db.Get(headerHashKey(number))
+ data, _ = db.Get(blockBodyKey(number, common.BytesToHash(hash)))
+ return nil
+ })
+ return data
+}
+
+// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
+func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ var data []byte
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
+ // Check if the data is in ancients
+ if isCanon(reader, number, hash) {
+ data, _ = reader.Ancient(ChainFreezerDifficultyTable, number)
+ return nil
+ }
+ // If not, try reading from leveldb
+ data, _ = db.Get(headerTDKey(number, hash))
+ return nil
+ })
+ return data
+}
+
+// HasReceipts verifies the existence of all the transaction receipts belonging
+// to a block.
+func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
+ if isCanon(db, number, hash) {
+ return true
+ }
+ if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
+ return false
+ }
+ return true
+}
+
+// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
+// The receipt metadata fields are not guaranteed to be populated, so they
+// should not be used. Use ReadReceipts instead if the metadata is needed.
+func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
+ // Retrieve the flattened receipt slice
+ data := ReadReceiptsRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ // Convert the receipts from their storage form to their internal representation
+ storageReceipts := []*types.ReceiptForStorage{}
+ if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
+ utils.Logger().Error().Err(err).Interface("hash", hash).Msg("Invalid receipt array RLP")
+ return nil
+ }
+ receipts := make(types.Receipts, len(storageReceipts))
+ for i, storageReceipt := range storageReceipts {
+ receipts[i] = (*types.Receipt)(storageReceipt)
+ }
+ return receipts
+}
+
+// storedReceiptRLP is the storage encoding of a receipt.
+// It duplicates the definition in core/types/receipt.go.
+// TODO: Re-use the existing definition.
+type storedReceiptRLP struct {
+ PostStateOrStatus []byte
+ CumulativeGasUsed uint64
+ Logs []*types.Log
+}
+
+// receiptLogs is a barebones version of ReceiptForStorage which only keeps
+// the list of logs. When decoding a stored receipt into this object we
+// avoid creating the bloom filter.
+type receiptLogs struct {
+ Logs []*types.Log
+}
+
+// DecodeRLP implements rlp.Decoder.
+func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
+ var stored storedReceiptRLP
+ if err := s.Decode(&stored); err != nil {
+ return err
+ }
+ r.Logs = stored.Logs
+ return nil
+}
+
+// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
+func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
+ logIndex := uint(0)
+ if len(txs) != len(receipts) {
+ return errors.New("transaction and receipt count mismatch")
+ }
+ for i := 0; i < len(receipts); i++ {
+ txHash := txs[i].Hash()
+ // The derived log fields can simply be set from the block and transaction
+ for j := 0; j < len(receipts[i].Logs); j++ {
+ receipts[i].Logs[j].BlockNumber = number
+ receipts[i].Logs[j].BlockHash = hash
+ receipts[i].Logs[j].TxHash = txHash
+ receipts[i].Logs[j].TxIndex = uint(i)
+ receipts[i].Logs[j].Index = logIndex
+ logIndex++
+ }
+ }
+ return nil
+}
+
+// ReadLogs retrieves the logs for all transactions in a block. If the
+// receipts are not found, nil is returned.
+// Note: ReadLogs does not derive unstored log fields.
+func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
+ // Retrieve the flattened receipt slice
+ data := ReadReceiptsRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ receipts := []*receiptLogs{}
+ if err := rlp.DecodeBytes(data, &receipts); err != nil {
+ utils.Logger().Error().Err(err).Interface("hash", hash).Msg("Invalid receipt array RLP")
+ return nil
+ }
+
+ logs := make([][]*types.Log, len(receipts))
+ for i, receipt := range receipts {
+ logs[i] = receipt.Logs
+ }
+ return logs
+}
+
+// This function is NOT used, just ported over from the Ethereum codebase.
+func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *block.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
+ num := block.NumberU64()
+ if err := op.AppendRaw(ChainFreezerHashTable, num, block.Hash().Bytes()); err != nil {
+ return fmt.Errorf("can't add block %d hash: %v", num, err)
+ }
+ if err := op.Append(ChainFreezerHeaderTable, num, header); err != nil {
+ return fmt.Errorf("can't append block header %d: %v", num, err)
+ }
+ if err := op.Append(ChainFreezerBodiesTable, num, block.Body()); err != nil {
+ return fmt.Errorf("can't append block body %d: %v", num, err)
+ }
+ if err := op.Append(ChainFreezerReceiptTable, num, receipts); err != nil {
+ return fmt.Errorf("can't append block %d receipts: %v", num, err)
+ }
+ if err := op.Append(ChainFreezerDifficultyTable, num, td); err != nil {
+ return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
+ }
+ return nil
+}
+
+// DeleteBlockWithoutNumber removes all block data associated with a hash, except
+// the hash to number mapping.
+func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ DeleteReceipts(db, hash, number)
+ deleteHeaderWithoutNumber(db, hash, number)
+ DeleteBody(db, hash, number)
+ DeleteTd(db, hash, number)
+}
+
+const badBlockToKeep = 10
+
+type badBlock struct {
+ Header *block.Header
+ Body *types.Body
+}
+
+// badBlockList implements the sort interface to allow sorting a list of
+// bad blocks by their number in the reverse order.
+type badBlockList []*badBlock
+
+func (s badBlockList) Len() int { return len(s) }
+func (s badBlockList) Less(i, j int) bool {
+ return s[i].Header.Number().Uint64() < s[j].Header.Number().Uint64()
+}
+func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// WriteBadBlock serializes the bad block into the database. If the number of
+// accumulated bad blocks exceeds the limit, the oldest will be dropped.
+// This function is NOT used, just ported over from the Ethereum codebase.
+func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
+ blob, err := db.Get(badBlockKey)
+ if err != nil {
+ utils.Logger().Warn().Err(err).Msg("Failed to load old bad blocks")
+ }
+ var badBlocks badBlockList
+ if len(blob) > 0 {
+ if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to decode old bad blocks")
+ }
+ }
+ for _, b := range badBlocks {
+ if b.Header.Number().Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
+ log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
+ return
+ }
+ }
+ badBlocks = append(badBlocks, &badBlock{
+ Header: block.Header(),
+ Body: block.Body(),
+ })
+ sort.Sort(sort.Reverse(badBlocks))
+ if len(badBlocks) > badBlockToKeep {
+ badBlocks = badBlocks[:badBlockToKeep]
+ }
+ data, err := rlp.EncodeToBytes(badBlocks)
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to encode bad blocks")
+ }
+ if err := db.Put(badBlockKey, data); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to write bad blocks")
+ }
+}
+
+// DeleteBadBlocks deletes all the bad blocks from the database.
+// This function is NOT used, just ported over from the Ethereum codebase.
+func DeleteBadBlocks(db ethdb.KeyValueWriter) {
+ if err := db.Delete(badBlockKey); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete bad blocks")
+ }
+}
+
+// ReadHeadHeader returns the current canonical head header.
+func ReadHeadHeader(db ethdb.Reader) *block.Header {
+ headHeaderHash := ReadHeadHeaderHash(db)
+ if headHeaderHash == (common.Hash{}) {
+ return nil
+ }
+ headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
+ if headHeaderNumber == nil {
+ return nil
+ }
+ return ReadHeader(db, headHeaderHash, *headHeaderNumber)
+}
+
+// ReadHeadBlock returns the current canonical head block.
+func ReadHeadBlock(db ethdb.Reader) *types.Block {
+ headBlockHash := ReadHeadBlockHash(db)
+ if headBlockHash == (common.Hash{}) {
+ return nil
+ }
+ headBlockNumber := ReadHeaderNumber(db, headBlockHash)
+ if headBlockNumber == nil {
+ return nil
+ }
+ return ReadBlock(db, headBlockHash, *headBlockNumber)
+}
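
One pattern worth spelling out, since it recurs across the new accessors above (ReadReceiptsRLP, ReadTdRLP, ReadCanonicalBodyRLP): reads now consult the immutable ancient (freezer) tables first, gated on the isCanon check, and only fall back to the key-value store on a miss. A minimal sketch of the shared shape, using only calls that appear in this diff; readCanonicalRLP is a hypothetical helper name, not part of the change:

func readCanonicalRLP(db ethdb.Reader, table string, number uint64, hash common.Hash, kvKey []byte) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Canonical, frozen data lives in the ancient table.
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(table, number)
			return nil
		}
		// Otherwise (recent or non-canonical), read the key-value store.
		data, _ = db.Get(kvKey)
		return nil
	})
	return data
}
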
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 9824367d21..6edf9f9ec7 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -21,8 +21,6 @@ import (
"math/big"
"testing"
- "github.com/ethereum/go-ethereum/core/rawdb"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/block"
@@ -34,7 +32,7 @@ import (
// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
// Create a test header to move around the database and make sure it's really new
header := blockfactory.NewTestHeader().With().Number(big.NewInt(42)).Extra([]byte("test header")).Header()
@@ -67,7 +65,7 @@ func TestHeaderStorage(t *testing.T) {
// Tests block body storage and retrieval operations.
func TestBodyStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
// Create a test body to move around the database and make sure it's really new
body := types.NewTestBody().With().Uncles([]*block.Header{blockfactory.NewTestHeader().With().Extra([]byte("test header")).Header()}).Body()
@@ -105,7 +103,7 @@ func TestBodyStorage(t *testing.T) {
// Tests block storage and retrieval operations.
func TestBlockStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
// Create a test block to move around the database and make sure it's really new
block := types.NewBlockWithHeader(blockfactory.NewTestHeader().With().
@@ -167,7 +165,7 @@ func TestBlockStorage(t *testing.T) {
// Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
block := types.NewBlockWithHeader(blockfactory.NewTestHeader().With().
Extra([]byte("test block")).
TxHash(types.EmptyRootHash).
@@ -200,7 +198,7 @@ func TestPartialBlockStorage(t *testing.T) {
// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
// Create a test TD to move around the database and make sure it's really new
hash, td := common.Hash{}, big.NewInt(314)
@@ -223,7 +221,7 @@ func TestTdStorage(t *testing.T) {
// Tests that canonical numbers can be mapped to hashes and retrieved.
func TestCanonicalMappingStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
// Create a test canonical number and assigned hash to move around
hash, number := common.Hash{0: 0xff}, uint64(314)
@@ -246,7 +244,7 @@ func TestCanonicalMappingStorage(t *testing.T) {
// Tests that head headers and head blocks can be assigned, individually.
func TestHeadStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
blockHead := types.NewBlockWithHeader(blockfactory.NewTestHeader().With().Extra([]byte("test block header")).Header())
blockFull := types.NewBlockWithHeader(blockfactory.NewTestHeader().With().Extra([]byte("test block full")).Header())
@@ -281,7 +279,7 @@ func TestHeadStorage(t *testing.T) {
// Tests that receipts associated with a single block can be stored and retrieved.
func TestBlockReceiptStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
receipt1 := &types.Receipt{
Status: types.ReceiptStatusFailed,
@@ -309,14 +307,14 @@ func TestBlockReceiptStorage(t *testing.T) {
// Check that no receipt entries are in a pristine database
hash := common.BytesToHash([]byte{0x03, 0x14})
- if rs := ReadReceipts(db, hash, 0); len(rs) != 0 {
+ if rs := ReadReceipts(db, hash, 0, nil); len(rs) != 0 {
t.Fatalf("non existent receipts returned: %v", rs)
}
// Insert the receipt slice into the database and check presence
if err := WriteReceipts(db, hash, 0, receipts); err != nil {
t.Fatalf("write receipts")
}
- if rs := ReadReceipts(db, hash, 0); len(rs) == 0 {
+ if rs := ReadReceipts(db, hash, 0, nil); len(rs) == 0 {
t.Fatalf("no receipts returned")
} else {
for i := 0; i < len(receipts); i++ {
@@ -330,7 +328,7 @@ func TestBlockReceiptStorage(t *testing.T) {
}
// Delete the receipt slice and check purge
DeleteReceipts(db, hash, 0)
- if rs := ReadReceipts(db, hash, 0); len(rs) != 0 {
+ if rs := ReadReceipts(db, hash, 0, nil); len(rs) != 0 {
t.Fatalf("deleted receipts returned: %v", rs)
}
}
diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go
index 44b2961ac9..2fcb61deb7 100644
--- a/core/rawdb/accessors_indexes.go
+++ b/core/rawdb/accessors_indexes.go
@@ -18,6 +18,7 @@ package rawdb
import (
"bytes"
+ "math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
@@ -25,11 +26,14 @@ import (
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
staking "github.com/harmony-one/harmony/staking/types"
+
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/params"
)
// ReadTxLookupEntry retrieves the positional metadata associated with a transaction
// hash to allow retrieving the transaction or receipt by hash.
-func ReadTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) {
+func ReadTxLookupEntry(db ethdb.Reader, hash common.Hash) (common.Hash, uint64, uint64) {
data, _ := db.Get(txLookupKey(hash))
if len(data) == 0 {
return common.Hash{}, 0, 0
@@ -88,13 +92,13 @@ func WriteBlockStxLookUpEntries(db DatabaseWriter, block *types.Block) error {
}
// DeleteTxLookupEntry removes all transaction data associated with a hash.
-func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) error {
+func DeleteTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash) error {
return db.Delete(txLookupKey(hash))
}
// ReadTransaction retrieves a specific transaction from the database, along with
// its added positional metadata.
-func ReadTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
+func ReadTransaction(db ethdb.Reader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
blockHash, blockNumber, txIndex := ReadTxLookupEntry(db, hash)
if blockHash == (common.Hash{}) {
return nil, common.Hash{}, 0, 0
@@ -135,7 +139,7 @@ func ReadTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, c
// ReadStakingTransaction retrieves a specific staking transaction from the database, along with
// its added positional metadata.
-func ReadStakingTransaction(db DatabaseReader, hash common.Hash) (*staking.StakingTransaction, common.Hash, uint64, uint64) {
+func ReadStakingTransaction(db ethdb.Reader, hash common.Hash) (*staking.StakingTransaction, common.Hash, uint64, uint64) {
blockHash, blockNumber, txIndex := ReadTxLookupEntry(db, hash)
if blockHash == (common.Hash{}) {
return nil, common.Hash{}, 0, 0
@@ -163,13 +167,13 @@ func ReadStakingTransaction(db DatabaseReader, hash common.Hash) (*staking.Staki
// ReadReceipt retrieves a specific transaction receipt from the database, along with
// its added positional metadata.
-func ReadReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
+func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig) (*types.Receipt, common.Hash, uint64, uint64) {
blockHash, blockNumber, receiptIndex := ReadTxLookupEntry(db, hash)
if blockHash == (common.Hash{}) {
return nil, common.Hash{}, 0, 0
}
- receipts := ReadReceipts(db, blockHash, blockNumber)
+ receipts := ReadReceipts(db, blockHash, blockNumber, nil)
if len(receipts) <= int(receiptIndex) {
utils.Logger().Error().
Uint64("number", blockNumber).
@@ -183,13 +187,13 @@ func ReadReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Ha
// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
// section and bit index from the database.
-func ReadBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
+func ReadBloomBits(db ethdb.KeyValueReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
return db.Get(bloomBitsKey(bit, section, head))
}
// WriteBloomBits stores the compressed bloom bits vector belonging to the given
// section and bit index.
-func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) error {
+func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) error {
if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
utils.Logger().Error().Err(err).Msg("Failed to store bloom bits")
return err
@@ -247,7 +251,7 @@ func DeleteCxLookupEntry(db DatabaseDeleter, hash common.Hash) error {
// ReadCXReceipt retrieves a specific transaction from the database, along with
// its added positional metadata.
-func ReadCXReceipt(db DatabaseReader, hash common.Hash) (*types.CXReceipt, common.Hash, uint64, uint64) {
+func ReadCXReceipt(db ethdb.Reader, hash common.Hash) (*types.CXReceipt, common.Hash, uint64, uint64) {
blockHash, blockNumber, cxIndex := ReadCxLookupEntry(db, hash)
if blockHash == (common.Hash{}) {
return nil, common.Hash{}, 0, 0
@@ -272,3 +276,57 @@ func ReadCXReceipt(db DatabaseReader, hash common.Hash) (*types.CXReceipt, commo
}
return cx, blockHash, blockNumber, cxIndex
}
+
+// writeTxLookupEntry stores positional metadata for a transaction,
+// enabling hash based transaction and receipt lookups.
+func writeTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash, numberBytes []byte) {
+ if err := db.Put(txLookupKey(hash), numberBytes); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store transaction lookup entry")
+ }
+}
+
+// WriteTxLookupEntries is identical to writeTxLookupEntry, but it works on
+// a list of hashes.
+func WriteTxLookupEntries(db ethdb.KeyValueWriter, number uint64, hashes []common.Hash) {
+ numberBytes := new(big.Int).SetUint64(number).Bytes()
+ for _, hash := range hashes {
+ writeTxLookupEntry(db, hash, numberBytes)
+ }
+}
+
+// WriteTxLookupEntriesByBlock stores positional metadata for every transaction from
+// a block, enabling hash based transaction and receipt lookups.
+func WriteTxLookupEntriesByBlock(db ethdb.KeyValueWriter, block *types.Block) {
+ numberBytes := block.Number().Bytes()
+ for _, tx := range block.Transactions() {
+ writeTxLookupEntry(db, tx.Hash(), numberBytes)
+ }
+}
+
+// DeleteTxLookupEntries removes all transaction lookups for a given block.
+func DeleteTxLookupEntries(db ethdb.KeyValueWriter, hashes []common.Hash) {
+ for _, hash := range hashes {
+ DeleteTxLookupEntry(db, hash)
+ }
+}
+
+// DeleteBloombits removes all compressed bloom bits vectors belonging to the
+// given section range and bit index.
+func DeleteBloombits(db ethdb.Database, bit uint, from uint64, to uint64) {
+ start, end := bloomBitsKey(bit, from, common.Hash{}), bloomBitsKey(bit, to, common.Hash{})
+ it := db.NewIterator(nil, start)
+ defer it.Release()
+
+ for it.Next() {
+ if bytes.Compare(it.Key(), end) >= 0 {
+ break
+ }
+ if len(it.Key()) != len(bloomBitsPrefix)+2+8+32 {
+ continue
+ }
+ db.Delete(it.Key())
+ }
+ if it.Error() != nil {
+ utils.Logger().Error().Err(it.Error()).Msg("Failed to delete bloom bits")
+ }
+}
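
Since the new lookup writers only require an ethdb.KeyValueWriter, they compose naturally with write batches. A hedged usage sketch, assuming db is an ethdb.Database and block is a *types.Block from the surrounding import pipeline (neither is shown in this diff):

// Index every transaction of a freshly imported block so it can later
// be resolved by hash; flush the entries atomically with the batch.
batch := db.NewBatch()
WriteTxLookupEntriesByBlock(batch, block)
if err := batch.Write(); err != nil {
	utils.Logger().Error().Err(err).Msg("Failed to flush tx lookup entries")
}
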
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index 77a66f63e0..41f54ac98a 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -20,8 +20,6 @@ import (
"math/big"
"testing"
- "github.com/ethereum/go-ethereum/core/rawdb"
-
"github.com/harmony-one/harmony/crypto/bls"
"github.com/ethereum/go-ethereum/common"
@@ -42,7 +40,7 @@ var (
// Tests that positional lookup metadata can be stored and retrieved.
func TestLookupStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), 0, big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), 0, big.NewInt(222), 2222, big.NewInt(22222), []byte{0x22, 0x22, 0x22})
@@ -123,7 +121,7 @@ func TestLookupStorage(t *testing.T) {
// Test that staking tx hash does not find a plain tx hash (and vice versa) within the same block
func TestMixedLookupStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
+ db := NewMemoryDatabase()
tx := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), 0, big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
stx := sampleCreateValidatorStakingTxn()
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index 9001b7894c..69a7522f6c 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -17,82 +17,212 @@
package rawdb
import (
+ "encoding/binary"
"encoding/json"
- "errors"
+ "time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/internal/utils"
+ "github.com/pkg/errors"
)
// ReadDatabaseVersion retrieves the version number of the database.
-func ReadDatabaseVersion(db DatabaseReader) int {
- var version int
+func ReadDatabaseVersion(db ethdb.KeyValueReader) *uint64 {
+ var version uint64
- enc, _ := db.Get(databaseVerisionKey)
- rlp.DecodeBytes(enc, &version)
+ enc, _ := db.Get(databaseVersionKey)
+ if len(enc) == 0 {
+ return nil
+ }
+ if err := rlp.DecodeBytes(enc, &version); err != nil {
+ return nil
+ }
- return version
+ return &version
}
// WriteDatabaseVersion stores the version number of the database
-func WriteDatabaseVersion(db DatabaseWriter, version int) error {
- enc, _ := rlp.EncodeToBytes(version)
- if err := db.Put(databaseVerisionKey, enc); err != nil {
+func WriteDatabaseVersion(db ethdb.KeyValueWriter, version uint64) {
+ enc, err := rlp.EncodeToBytes(version)
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to encode database version")
+ }
+ if err = db.Put(databaseVersionKey, enc); err != nil {
utils.Logger().Error().Err(err).Msg("Failed to store the database version")
- return err
}
- return nil
}
// ReadChainConfig retrieves the consensus settings based on the given genesis hash.
-func ReadChainConfig(db DatabaseReader, hash common.Hash) *params.ChainConfig {
+func ReadChainConfig(db ethdb.KeyValueReader, hash common.Hash) *params.ChainConfig {
data, _ := db.Get(configKey(hash))
if len(data) == 0 {
return nil
}
var config params.ChainConfig
if err := json.Unmarshal(data, &config); err != nil {
- utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid chain config JSON")
+ utils.Logger().Error().Err(err).Interface("hash", hash).Msg("Invalid chain config JSON")
return nil
}
return &config
}
// WriteChainConfig writes the chain config settings to the database.
-func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConfig) error {
+func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.ChainConfig) {
if cfg == nil {
- return errors.New("nil config")
+ return
}
data, err := json.Marshal(cfg)
if err != nil {
utils.Logger().Error().Err(err).Msg("Failed to JSON encode chain config")
- return err
}
if err := db.Put(configKey(hash), data); err != nil {
utils.Logger().Error().Err(err).Msg("Failed to store chain config")
- return err
}
- return nil
}
-// ReadPreimage retrieves a single preimage of the provided hash.
-func ReadPreimage(db DatabaseReader, hash common.Hash) []byte {
- data, _ := db.Get(preimageKey(hash))
+// ReadGenesisStateSpec retrieves the genesis state specification based on the
+// given genesis (block-)hash.
+func ReadGenesisStateSpec(db ethdb.KeyValueReader, blockhash common.Hash) []byte {
+ data, _ := db.Get(genesisStateSpecKey(blockhash))
return data
}
-// WritePreimages writes the provided set of preimages to the database. `number` is the
-// current block number, and is used for debug messages only.
-func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) error {
- for hash, preimage := range preimages {
- if err := db.Put(preimageKey(hash), preimage); err != nil {
- utils.Logger().Error().Err(err).Msg("Failed to store trie preimage")
- return err
- }
- }
- preimageCounter.Inc(int64(len(preimages)))
- preimageHitCounter.Inc(int64(len(preimages)))
+// WriteGenesisStateSpec writes the genesis state specification into the disk.
+func WriteGenesisStateSpec(db ethdb.KeyValueWriter, blockhash common.Hash, data []byte) {
+ if err := db.Put(genesisStateSpecKey(blockhash), data); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store genesis state")
+ }
+}
+
+// crashList is a list of unclean-shutdown markers, RLP-encoded for storage in
+// the database.
+type crashList struct {
+ Discarded uint64 // how many unclean shutdowns have been discarded
+ Recent []uint64 // unix timestamps of 10 latest unclean shutdowns
+}
+
+const crashesToKeep = 10
+
+// PushUncleanShutdownMarker appends a new unclean shutdown marker and returns
+// the previous data:
+// - a list of timestamps
+// - a count of how many old unclean-shutdowns have been discarded
+// This function is NOT used, just ported over from the Ethereum codebase.
+func PushUncleanShutdownMarker(db ethdb.KeyValueStore) ([]uint64, uint64, error) {
+ var uncleanShutdowns crashList
+ // Read old data
+ if data, err := db.Get(uncleanShutdownKey); err != nil {
+ utils.Logger().Warn().Err(err).Msg("Error reading unclean shutdown markers")
+ } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
+ return nil, 0, err
+ }
+ var discarded = uncleanShutdowns.Discarded
+ var previous = make([]uint64, len(uncleanShutdowns.Recent))
+ copy(previous, uncleanShutdowns.Recent)
+ // Add a new marker (but cap the list)
+ uncleanShutdowns.Recent = append(uncleanShutdowns.Recent, uint64(time.Now().Unix()))
+ if count := len(uncleanShutdowns.Recent); count > crashesToKeep+1 {
+ numDel := count - (crashesToKeep + 1)
+ uncleanShutdowns.Recent = uncleanShutdowns.Recent[numDel:]
+ uncleanShutdowns.Discarded += uint64(numDel)
+ }
+ // And save it again
+ data, _ := rlp.EncodeToBytes(uncleanShutdowns)
+ if err := db.Put(uncleanShutdownKey, data); err != nil {
+ utils.Logger().Warn().Err(err).Msg("Failed to write unclean-shutdown marker")
+ return nil, 0, err
+ }
+ return previous, discarded, nil
+}
+
+// PopUncleanShutdownMarker removes the last unclean shutdown marker.
+// This function is NOT used, just ported over from the Ethereum codebase.
+func PopUncleanShutdownMarker(db ethdb.KeyValueStore) {
+ var uncleanShutdowns crashList
+ // Read old data
+ if data, err := db.Get(uncleanShutdownKey); err != nil {
+ utils.Logger().Warn().Err(err).Msg("Error reading unclean shutdown markers")
+ } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
+ utils.Logger().Error().Err(err).Msg("Error decoding unclean shutdown markers") // Should mos def _not_ happen
+ }
+ if l := len(uncleanShutdowns.Recent); l > 0 {
+ uncleanShutdowns.Recent = uncleanShutdowns.Recent[:l-1]
+ }
+ data, _ := rlp.EncodeToBytes(uncleanShutdowns)
+ if err := db.Put(uncleanShutdownKey, data); err != nil {
+ utils.Logger().Warn().Err(err).Msg("Failed to clear unclean-shutdown marker")
+ }
+}
+
+// UpdateUncleanShutdownMarker updates the last marker's timestamp to now.
+// This function is NOT used, just ported over from the Ethereum codebase.
+func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) {
+ var uncleanShutdowns crashList
+ // Read old data
+ if data, err := db.Get(uncleanShutdownKey); err != nil {
+ utils.Logger().Warn().Err(err).Msg("Error reading unclean shutdown markers")
+ } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
+ utils.Logger().Warn().Err(err).Msg("Error decoding unclean shutdown markers")
+ }
+ // This shouldn't happen because we push a marker on Backend instantiation
+ count := len(uncleanShutdowns.Recent)
+ if count == 0 {
+ utils.Logger().Warn().Msg("No unclean shutdown marker to update")
+ return
+ }
+ uncleanShutdowns.Recent[count-1] = uint64(time.Now().Unix())
+ data, _ := rlp.EncodeToBytes(uncleanShutdowns)
+ if err := db.Put(uncleanShutdownKey, data); err != nil {
+ utils.Logger().Warn().Err(err).Msg("Failed to write unclean-shutdown marker")
+ }
+}
+
+// ReadTransitionStatus retrieves the eth2 transition status from the database.
+// This function is NOT used, just ported over from the Ethereum codebase.
+func ReadTransitionStatus(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(transitionStatusKey)
+ return data
+}
+
+// WriteTransitionStatus stores the eth2 transition status to the database.
+// This function is NOT used, just ported over from the Ethereum codebase.
+func WriteTransitionStatus(db ethdb.KeyValueWriter, data []byte) {
+ if err := db.Put(transitionStatusKey, data); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store the eth2 transition status")
+ }
+}
+
+// WriteLeaderRotationMeta writes the leader continuous blocks count to the database.
+func WriteLeaderRotationMeta(db DatabaseWriter, leader []byte, epoch uint64, count, shifts uint64) error {
+ if len(leader) != bls.PublicKeySizeInBytes {
+ return errors.New("invalid leader public key size")
+ }
+ value := make([]byte, bls.PublicKeySizeInBytes+8*3)
+ copy(value, leader)
+ binary.LittleEndian.PutUint64(value[len(leader)+8*0:], epoch)
+ binary.LittleEndian.PutUint64(value[len(leader)+8*1:], count)
+ binary.LittleEndian.PutUint64(value[len(leader)+8*2:], shifts)
+ if err := db.Put(leaderContinuousBlocksCountKey(), value); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store leader continuous blocks count")
+ return err
+ }
return nil
}
+
+// ReadLeaderRotationMeta retrieves the leader rotation metadata from the database.
+func ReadLeaderRotationMeta(db DatabaseReader) (pubKeyBytes []byte, epoch, count, shifts uint64, err error) {
+ data, _ := db.Get(leaderContinuousBlocksCountKey())
+ if len(data) != bls.PublicKeySizeInBytes+24 {
+ return nil, 0, 0, 0, errors.New("invalid leader continuous blocks count")
+ }
+
+ pubKeyBytes = data[:bls.PublicKeySizeInBytes]
+ epoch = binary.LittleEndian.Uint64(data[bls.PublicKeySizeInBytes:])
+ count = binary.LittleEndian.Uint64(data[bls.PublicKeySizeInBytes+8:])
+ shifts = binary.LittleEndian.Uint64(data[bls.PublicKeySizeInBytes+16:])
+ return pubKeyBytes, epoch, count, shifts, nil
+}
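
The unclean-shutdown helpers are marked unused, but their intended lifecycle (inherited from upstream go-ethereum) is: push a marker at startup, refresh it periodically while running, pop it on a clean exit. A hedged sketch, assuming db is an ethdb.KeyValueStore wired up at node start:

previous, discarded, err := PushUncleanShutdownMarker(db)
if err == nil && len(previous) > 0 {
	// previous holds unix timestamps of earlier unclean shutdowns;
	// discarded counts markers already rotated out of the kept list.
	utils.Logger().Warn().
		Int("crashes", len(previous)).
		Uint64("discarded", discarded).
		Msg("Node did not shut down cleanly last time")
}
defer PopUncleanShutdownMarker(db) // a clean shutdown removes the marker
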
diff --git a/core/rawdb/accessors_metadata_test.go b/core/rawdb/accessors_metadata_test.go
new file mode 100644
index 0000000000..62cfac062c
--- /dev/null
+++ b/core/rawdb/accessors_metadata_test.go
@@ -0,0 +1,32 @@
+package rawdb
+
+import (
+ "testing"
+
+ ethRawDB "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/crypto/bls"
+)
+
+func TestLeaderRotationMeta(t *testing.T) {
+ db := ethRawDB.NewMemoryDatabase()
+ err := WriteLeaderRotationMeta(db, make([]byte, bls.PublicKeySizeInBytes), 1, 2, 3)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pub, epoch, count, shifts, err := ReadLeaderRotationMeta(db)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(pub) != bls.PublicKeySizeInBytes {
+ t.Fatal("invalid leader public key size")
+ }
+ if epoch != 1 {
+ t.Fatal("invalid epoch")
+ }
+ if count != 2 {
+ t.Fatal("invalid count")
+ }
+ if shifts != 3 {
+ t.Fatal("invalid shifts")
+ }
+}
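
For reference, the fixed byte layout exercised by this test: a BLS public key followed by three little-endian uint64s. A hypothetical standalone decoder, assuming bls.PublicKeySizeInBytes == 48 (72 bytes in total) and an encoding/binary import; it restates ReadLeaderRotationMeta rather than replacing it:

func decodeLeaderRotationMeta(data []byte) (pub []byte, epoch, count, shifts uint64, ok bool) {
	const keyLen = 48 // assumed value of bls.PublicKeySizeInBytes
	if len(data) != keyLen+24 {
		return nil, 0, 0, 0, false
	}
	pub = data[:keyLen]                                   // bytes [0:48): leader BLS public key
	epoch = binary.LittleEndian.Uint64(data[keyLen:])     // bytes [48:56)
	count = binary.LittleEndian.Uint64(data[keyLen+8:])   // bytes [56:64)
	shifts = binary.LittleEndian.Uint64(data[keyLen+16:]) // bytes [64:72)
	return pub, epoch, count, shifts, true
}
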
diff --git a/core/rawdb/accessors_offchain.go b/core/rawdb/accessors_offchain.go
index 5324aa0e50..dd43299034 100644
--- a/core/rawdb/accessors_offchain.go
+++ b/core/rawdb/accessors_offchain.go
@@ -189,7 +189,7 @@ func DeleteValidatorSnapshot(db DatabaseDeleter, addr common.Address, epoch *big
}
func IteratorValidatorSnapshot(iterator DatabaseIterator, cb func(addr common.Address, epoch *big.Int) bool) (minKey []byte, maxKey []byte) {
- iter := iterator.NewIteratorWithPrefix(validatorSnapshotPrefix)
+ iter := iterator.NewIterator(validatorSnapshotPrefix, nil)
defer iter.Release()
minKey = validatorSnapshotPrefix
@@ -211,7 +211,7 @@ func IteratorValidatorSnapshot(iterator DatabaseIterator, cb func(addr common.Ad
func IteratorCXReceipt(iterator DatabaseIterator, cb func(it ethdb.Iterator, shardID uint32, number uint64, hash common.Hash) bool) {
preifxKey := cxReceiptPrefix
- iter := iterator.NewIteratorWithPrefix(preifxKey)
+ iter := iterator.NewIterator(preifxKey, nil)
defer iter.Release()
shardOffset := len(preifxKey)
numberOffset := shardOffset + 4
@@ -231,7 +231,7 @@ func IteratorCXReceipt(iterator DatabaseIterator, cb func(it ethdb.Iterator, sha
func IteratorCXReceiptsProofSpent(iterator DatabaseIterator, cb func(it ethdb.Iterator, shardID uint32, number uint64) bool) {
preifxKey := cxReceiptSpentPrefix
- iter := iterator.NewIteratorWithPrefix(preifxKey)
+ iter := iterator.NewIterator(preifxKey, nil)
defer iter.Release()
shardOffset := len(preifxKey)
numberOffset := shardOffset + 4
@@ -248,7 +248,7 @@ func IteratorCXReceiptsProofSpent(iterator DatabaseIterator, cb func(it ethdb.It
}
func IteratorValidatorStats(iterator DatabaseIterator, cb func(it ethdb.Iterator, addr common.Address) bool) {
preifxKey := validatorStatsPrefix
- iter := iterator.NewIteratorWithPrefix(preifxKey)
+ iter := iterator.NewIterator(preifxKey, nil)
defer iter.Release()
addrOffset := len(preifxKey)
@@ -263,7 +263,7 @@ func IteratorValidatorStats(iterator DatabaseIterator, cb func(it ethdb.Iterator
}
func IteratorDelegatorDelegations(iterator DatabaseIterator, cb func(it ethdb.Iterator, delegator common.Address) bool) {
preifxKey := delegatorValidatorListPrefix
- iter := iterator.NewIteratorWithPrefix(preifxKey)
+ iter := iterator.NewIterator(preifxKey, nil)
defer iter.Release()
addrOffset := len(preifxKey)
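
The NewIteratorWithPrefix to NewIterator rewrites in this file are mechanical: the two-argument form takes an optional start key (which ReadAllHashesInRange above uses to begin mid-prefix), and with a nil start the behavior is unchanged. A minimal sketch of the new call shape, assuming db implements the DatabaseIterator interface used here:

// Iterate all validator snapshot entries; a nil start means "from the
// top of the prefix", exactly what NewIteratorWithPrefix used to do.
iter := db.NewIterator(validatorSnapshotPrefix, nil)
defer iter.Release()
for iter.Next() {
	_ = iter.Key() // prefixed key; per-accessor decoding follows
}
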
diff --git a/core/rawdb/accessors_snapdb_test.go b/core/rawdb/accessors_snapdb_test.go
index e81a856b26..915a28e8ca 100644
--- a/core/rawdb/accessors_snapdb_test.go
+++ b/core/rawdb/accessors_snapdb_test.go
@@ -5,7 +5,6 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
- ethRawDB "github.com/ethereum/go-ethereum/core/rawdb"
blockfactory "github.com/harmony-one/harmony/block/factory"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
)
@@ -22,7 +21,7 @@ func TestSnapdbInfo(t *testing.T) {
LastAccountKey: hexutil.MustDecode("0x1339383fd90ed804e28464763a13fafad66dd0f88434b8e5d8b410eb75a10331"),
LastAccountStateKey: hexutil.MustDecode("0xa940a0bb9eca4f9d5eee3f4059f458bd4a05bb1d680c5f7c781b06bc43c20df6"),
}
- db := ethRawDB.NewMemoryDatabase()
+ db := NewMemoryDatabase()
if err := WriteSnapdbInfo(db, src); err != nil {
t.Fatal(err)
}
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
new file mode 100644
index 0000000000..dd2119f8d8
--- /dev/null
+++ b/core/rawdb/accessors_snapshot.go
@@ -0,0 +1,210 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "encoding/binary"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+// ReadSnapshotDisabled retrieves whether snapshot maintenance is disabled.
+func ReadSnapshotDisabled(db ethdb.KeyValueReader) bool {
+ disabled, _ := db.Has(snapshotDisabledKey)
+ return disabled
+}
+
+// WriteSnapshotDisabled stores the snapshot pause flag.
+func WriteSnapshotDisabled(db ethdb.KeyValueWriter) {
+ if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store snapshot disabled flag")
+ }
+}
+
+// DeleteSnapshotDisabled deletes the flag keeping the snapshot maintenance disabled.
+func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) {
+ if err := db.Delete(snapshotDisabledKey); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to remove snapshot disabled flag")
+ }
+}
+
+// ReadSnapshotRoot retrieves the root of the block whose state is contained in
+// the persisted snapshot.
+func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
+ data, _ := db.Get(SnapshotRootKey)
+ if len(data) != common.HashLength {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// WriteSnapshotRoot stores the root of the block whose state is contained in
+// the persisted snapshot.
+func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
+ if err := db.Put(SnapshotRootKey, root[:]); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store snapshot root")
+ }
+}
+
+// DeleteSnapshotRoot deletes the hash of the block whose state is contained in
+// the persisted snapshot. Since snapshots are not immutable, this method can
+// be used during updates, so a crash or failure will mark the entire snapshot
+// invalid.
+func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
+ if err := db.Delete(SnapshotRootKey); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to remove snapshot root")
+ }
+}
+
+// ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
+func ReadAccountSnapshot(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(accountSnapshotKey(hash))
+ return data
+}
+
+// WriteAccountSnapshot stores the snapshot entry of an account trie leaf.
+func WriteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash, entry []byte) {
+ if err := db.Put(accountSnapshotKey(hash), entry); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store account snapshot")
+ }
+}
+
+// DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
+func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(accountSnapshotKey(hash)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete account snapshot")
+ }
+}
+
+// ReadStorageSnapshot retrieves the snapshot entry of a storage trie leaf.
+func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash common.Hash) []byte {
+ data, _ := db.Get(storageSnapshotKey(accountHash, storageHash))
+ return data
+}
+
+// WriteStorageSnapshot stores the snapshot entry of a storage trie leaf.
+func WriteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) {
+ if err := db.Put(storageSnapshotKey(accountHash, storageHash), entry); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store storage snapshot")
+ }
+}
+
+// DeleteStorageSnapshot removes the snapshot entry of a storage trie leaf.
+func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash) {
+ if err := db.Delete(storageSnapshotKey(accountHash, storageHash)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete storage snapshot")
+ }
+}
+
+// IterateStorageSnapshots returns an iterator for walking the entire storage
+// space of a specific account.
+func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator {
+ return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength)
+}
+
+// ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
+// the last shutdown. The blob is expected to be at most a few tens of megabytes.
+func ReadSnapshotJournal(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(snapshotJournalKey)
+ return data
+}
+
+// WriteSnapshotJournal stores the serialized in-memory diff layers to save at
+// shutdown. The blob is expected to be at most a few tens of megabytes.
+func WriteSnapshotJournal(db ethdb.KeyValueWriter, journal []byte) {
+ if err := db.Put(snapshotJournalKey, journal); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store snapshot journal")
+ }
+}
+
+// DeleteSnapshotJournal deletes the serialized in-memory diff layers saved at
+// the last shutdown.
+func DeleteSnapshotJournal(db ethdb.KeyValueWriter) {
+ if err := db.Delete(snapshotJournalKey); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to remove snapshot journal")
+ }
+}
+
+// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
+// the last shutdown.
+func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(snapshotGeneratorKey)
+ return data
+}
+
+// WriteSnapshotGenerator stores the serialized snapshot generator to save at
+// shutdown.
+func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) {
+ if err := db.Put(snapshotGeneratorKey, generator); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store snapshot generator")
+ }
+}
+
+// DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
+// the last shutdown.
+func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) {
+ if err := db.Delete(snapshotGeneratorKey); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to remove snapshot generator")
+ }
+}
+
+// ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
+// snapshot layer.
+func ReadSnapshotRecoveryNumber(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(snapshotRecoveryKey)
+	if len(data) != 8 {
+		return nil
+	}
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteSnapshotRecoveryNumber stores the block number of the last persisted
+// snapshot layer.
+func WriteSnapshotRecoveryNumber(db ethdb.KeyValueWriter, number uint64) {
+ var buf [8]byte
+ binary.BigEndian.PutUint64(buf[:], number)
+ if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store snapshot recovery number")
+ }
+}
+
+// DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
+// snapshot layer.
+func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) {
+ if err := db.Delete(snapshotRecoveryKey); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to remove snapshot recovery number")
+ }
+}
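+
+// exampleRecoveryRoundTrip is an illustrative sketch added by the editor and
+// NOT part of the ported code; the function name is hypothetical. It shows
+// that the recovery number is persisted as a fixed 8-byte big-endian value,
+// so a read after a write yields the original number.
+func exampleRecoveryRoundTrip(db ethdb.KeyValueStore, number uint64) bool {
+	WriteSnapshotRecoveryNumber(db, number)
+	stored := ReadSnapshotRecoveryNumber(db)
+	return stored != nil && *stored == number
+}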
+
+// ReadSnapshotSyncStatus retrieves the serialized sync status saved at shutdown.
+func ReadSnapshotSyncStatus(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(snapshotSyncStatusKey)
+ return data
+}
+
+// WriteSnapshotSyncStatus stores the serialized sync status to save at shutdown.
+func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) {
+ if err := db.Put(snapshotSyncStatusKey, status); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store snapshot sync status")
+ }
+}
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
new file mode 100644
index 0000000000..72dbe94fb6
--- /dev/null
+++ b/core/rawdb/accessors_state.go
@@ -0,0 +1,149 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+// ReadPreimage retrieves a single preimage of the provided hash.
+func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(preimageKey(hash))
+ return data
+}
+
+// WritePreimages writes the provided set of preimages to the database.
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) error {
+ for hash, preimage := range preimages {
+ if err := db.Put(preimageKey(hash), preimage); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store trie preimage")
+ }
+ }
+ preimageCounter.Inc(int64(len(preimages)))
+ preimageHitCounter.Inc(int64(len(preimages)))
+ return nil
+}
+
+// ReadCode retrieves the contract code of the provided code hash.
+func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+	// Try with the prefixed code scheme first; if not found, try the legacy
+	// scheme.
+ data := ReadCodeWithPrefix(db, hash)
+ if len(data) != 0 {
+ return data
+ }
+ data, _ = db.Get(hash.Bytes())
+ return data
+}
+
+// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
+// The main difference between this function and ReadCode is that this function
+// will only check for existence with the latest scheme (with prefix).
+func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(codeKey(hash))
+ return data
+}
+
+// HasCode checks if the contract code corresponding to the
+// provided code hash is present in the db.
+func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
+	// Try with the prefixed code scheme first; if not found, try the legacy
+	// scheme.
+ if ok := HasCodeWithPrefix(db, hash); ok {
+ return true
+ }
+ ok, _ := db.Has(hash.Bytes())
+ return ok
+}
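+
+// exampleLegacyCodeFallback is an illustrative sketch added by the editor and
+// NOT part of the ported code; the function name is hypothetical. It shows
+// the dual-scheme behaviour described above: code stored by an old database
+// under the bare code hash is still found by ReadCode, even though WriteCode
+// now stores it under the prefixed key.
+func exampleLegacyCodeFallback(db ethdb.KeyValueStore, hash common.Hash, code []byte) bool {
+	// Simulate a pre-migration entry keyed by the bare code hash.
+	if err := db.Put(hash.Bytes(), code); err != nil {
+		return false
+	}
+	// ReadCode misses the prefixed key and falls back to the legacy key.
+	return len(ReadCode(db, hash)) != 0
+}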
+
+// HasCodeWithPrefix checks if the contract code corresponding to the
+// provided code hash is present in the db. This function will only check
+// presence using the prefix-scheme.
+func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(codeKey(hash))
+ return ok
+}
+
+// WriteCode writes the provided contract code to the database.
+func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
+ if err := db.Put(codeKey(hash), code); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store contract code")
+ }
+}
+
+// DeleteCode deletes the specified contract code from the database.
+func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(codeKey(hash)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete contract code")
+ }
+}
+
+// ReadValidatorCode retrieves the validator code of the provided code hash.
+func ReadValidatorCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+	// Try with the prefixed code scheme first; if not found, try the legacy
+	// scheme.
+ data := ReadValidatorCodeWithPrefix(db, hash)
+ if len(data) != 0 {
+ return data
+ }
+ data, _ = db.Get(hash.Bytes())
+ return data
+}
+
+// ReadValidatorCodeWithPrefix retrieves the validator code of the provided code hash.
+// The main difference between this function and ReadValidatorCode is that this
+// function will only check for existence with the latest scheme (with prefix).
+func ReadValidatorCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(validatorCodeKey(hash))
+ return data
+}
+
+// HasValidatorCode checks if the validator code corresponding to the
+// provided code hash is present in the db.
+func HasValidatorCode(db ethdb.KeyValueReader, hash common.Hash) bool {
+	// Try with the prefixed code scheme first; if not found, try the legacy
+	// scheme.
+ if ok := HasValidatorCodeWithPrefix(db, hash); ok {
+ return true
+ }
+ ok, _ := db.Has(hash.Bytes())
+ return ok
+}
+
+// HasValidatorCodeWithPrefix checks if the validator code corresponding to the
+// provided code hash is present in the db. This function will only check
+// presence using the prefix-scheme.
+func HasValidatorCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(validatorCodeKey(hash))
+ return ok
+}
+
+// WriteValidatorCode writes the provided validator code to the database.
+func WriteValidatorCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
+ if err := db.Put(validatorCodeKey(hash), code); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store validator code")
+ }
+}
+
+// DeleteValidatorCode deletes the specified validator code from the database.
+func DeleteValidatorCode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(validatorCodeKey(hash)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete validator code")
+ }
+}
diff --git a/core/rawdb/accessors_sync.go b/core/rawdb/accessors_sync.go
new file mode 100644
index 0000000000..d5f4f34e4e
--- /dev/null
+++ b/core/rawdb/accessors_sync.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
+func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(skeletonSyncStatusKey)
+ return data
+}
+
+// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
+func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) {
+ if err := db.Put(skeletonSyncStatusKey, status); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store skeleton sync status")
+ }
+}
+
+// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
+// shutdown.
+func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) {
+ if err := db.Delete(skeletonSyncStatusKey); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to remove skeleton sync status")
+ }
+}
+
+// ReadSkeletonHeader retrieves a block header from the skeleton sync store.
+func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header {
+ data, _ := db.Get(skeletonHeaderKey(number))
+ if len(data) == 0 {
+ return nil
+ }
+ header := new(types.Header)
+ if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
+ utils.Logger().Error().Err(err).Uint64("number", number).Msg("Invalid skeleton header RLP")
+ return nil
+ }
+ return header
+}
+
+// WriteSkeletonHeader stores a block header into the skeleton sync store.
+func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) {
+ data, err := rlp.EncodeToBytes(header)
+	if err != nil {
+		utils.Logger().Error().Err(err).Msg("Failed to RLP encode header")
+		return
+	}
+ key := skeletonHeaderKey(header.Number.Uint64())
+ if err := db.Put(key, data); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store skeleton header")
+ }
+}
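+
+// exampleSkeletonRoundTrip is an illustrative sketch added by the editor and
+// NOT part of the ported code; the function name is hypothetical. Skeleton
+// headers are keyed by block number, so a reread by that number decodes back
+// to an equivalent header.
+func exampleSkeletonRoundTrip(db ethdb.KeyValueStore, header *types.Header) bool {
+	WriteSkeletonHeader(db, header)
+	stored := ReadSkeletonHeader(db, header.Number.Uint64())
+	return stored != nil && stored.Hash() == header.Hash()
+}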
+
+// DeleteSkeletonHeader removes all block header data associated with a block number.
+func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Delete(skeletonHeaderKey(number)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete skeleton header")
+ }
+}
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
new file mode 100644
index 0000000000..b551cfc452
--- /dev/null
+++ b/core/rawdb/accessors_trie.go
@@ -0,0 +1,263 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/harmony-one/harmony/internal/utils"
+ "golang.org/x/crypto/sha3"
+)
+
+// HashScheme is the legacy hash-based state scheme in which trie nodes are
+// stored on disk with the node hash as the database key. The advantage of this
+// scheme is that different versions of trie nodes can coexist on disk, which
+// is very beneficial for constructing archive nodes. The drawback is that trie
+// nodes sharing the same path end up at unrelated locations on disk with no
+// data locality, which makes state pruning hard to design.
+//
+// This scheme is kept for backward compatibility, and is still used for
+// archive nodes and some other tries (e.g. the light trie).
+const HashScheme = "hashScheme"
+
+// PathScheme is the new path-based state scheme in which trie nodes are stored
+// on disk with the node path as the database key. This scheme only stores one
+// version of the state data on disk, which makes state pruning native. At the
+// same time, it places adjacent trie nodes in the same area of the disk for
+// good data locality. However, it needs to rely on extra state diffs to
+// survive deep reorgs.
+const PathScheme = "pathScheme"
+
+// nodeHasher is used to derive the hash of a trie node.
+type nodeHasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) }
+func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) }
+
+func (h *nodeHasher) hashData(data []byte) (n common.Hash) {
+ h.sha.Reset()
+ h.sha.Write(data)
+ h.sha.Read(n[:])
+ return n
+}
+
+// ReadAccountTrieNode retrieves the account trie node and the associated node
+// hash with the specified node path.
+func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) {
+ data, err := db.Get(accountTrieNodeKey(path))
+ if err != nil {
+ return nil, common.Hash{}
+ }
+ hasher := newNodeHasher()
+ defer returnHasherToPool(hasher)
+ return data, hasher.hashData(data)
+}
+
+// HasAccountTrieNode checks the account trie node presence with the specified
+// node path and the associated node hash.
+func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool {
+ data, err := db.Get(accountTrieNodeKey(path))
+ if err != nil {
+ return false
+ }
+ hasher := newNodeHasher()
+ defer returnHasherToPool(hasher)
+ return hasher.hashData(data) == hash
+}
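+
+// exampleVerifiedRead is an illustrative sketch added by the editor and NOT
+// part of the ported code; the function name is hypothetical. Path-scheme
+// keys carry no hash, so integrity is checked by re-hashing the stored blob
+// and comparing against the hash the caller expects.
+func exampleVerifiedRead(db ethdb.KeyValueReader, path []byte, want common.Hash) []byte {
+	blob, got := ReadAccountTrieNode(db, path)
+	if got != want {
+		return nil // stale or corrupted node; treat it as missing
+	}
+	return blob
+}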
+
+// WriteAccountTrieNode writes the provided account trie node into the database.
+func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
+ if err := db.Put(accountTrieNodeKey(path), node); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store account trie node")
+ }
+}
+
+// DeleteAccountTrieNode deletes the specified account trie node from the database.
+func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) {
+ if err := db.Delete(accountTrieNodeKey(path)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete account trie node")
+ }
+}
+
+// ReadStorageTrieNode retrieves the storage trie node and the associated node
+// hash with the specified node path.
+func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) {
+ data, err := db.Get(storageTrieNodeKey(accountHash, path))
+ if err != nil {
+ return nil, common.Hash{}
+ }
+ hasher := newNodeHasher()
+ defer returnHasherToPool(hasher)
+ return data, hasher.hashData(data)
+}
+
+// HasStorageTrieNode checks the storage trie node presence with the provided
+// node path and the associated node hash.
+func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool {
+ data, err := db.Get(storageTrieNodeKey(accountHash, path))
+ if err != nil {
+ return false
+ }
+ hasher := newNodeHasher()
+ defer returnHasherToPool(hasher)
+ return hasher.hashData(data) == hash
+}
+
+// WriteStorageTrieNode writes the provided storage trie node into the database.
+func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) {
+ if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store storage trie node")
+ }
+}
+
+// DeleteStorageTrieNode deletes the specified storage trie node from the database.
+func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) {
+ if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete storage trie node")
+ }
+}
+
+// ReadLegacyTrieNode retrieves the legacy trie node with the given
+// associated node hash.
+func ReadLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, err := db.Get(hash.Bytes())
+ if err != nil {
+ return nil
+ }
+ return data
+}
+
+// HasLegacyTrieNode checks if the trie node with the provided hash is present in db.
+func HasLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(hash.Bytes())
+ return ok
+}
+
+// WriteLegacyTrieNode writes the provided legacy trie node to the database.
+func WriteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
+ if err := db.Put(hash.Bytes(), node); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to store legacy trie node")
+ }
+}
+
+// DeleteLegacyTrieNode deletes the specified legacy trie node from the database.
+func DeleteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(hash.Bytes()); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to delete legacy trie node")
+ }
+}
+
+// HasTrieNode checks the trie node presence with the provided node info and
+// the associated node hash.
+func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) bool {
+ switch scheme {
+ case HashScheme:
+ return HasLegacyTrieNode(db, hash)
+ case PathScheme:
+ if owner == (common.Hash{}) {
+ return HasAccountTrieNode(db, path, hash)
+ }
+ return HasStorageTrieNode(db, owner, path, hash)
+ default:
+ panic(fmt.Sprintf("Unknown scheme %v", scheme))
+ }
+}
+
+// ReadTrieNode retrieves the trie node from the database with the provided node info
+// and associated node hash.
+// hashScheme-based lookup requires the following:
+// - hash
+//
+// pathScheme-based lookup requires the following:
+// - owner
+// - path
+func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte {
+ switch scheme {
+ case HashScheme:
+ return ReadLegacyTrieNode(db, hash)
+ case PathScheme:
+ var (
+ blob []byte
+ nHash common.Hash
+ )
+ if owner == (common.Hash{}) {
+ blob, nHash = ReadAccountTrieNode(db, path)
+ } else {
+ blob, nHash = ReadStorageTrieNode(db, owner, path)
+ }
+ if nHash != hash {
+ return nil
+ }
+ return blob
+ default:
+ panic(fmt.Sprintf("Unknown scheme %v", scheme))
+ }
+}
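+
+// exampleSchemeAgnosticRead is an illustrative sketch added by the editor and
+// NOT part of the ported code; the function name is hypothetical. Callers
+// pass all the identifiers they have and the scheme constant decides which
+// ones are used: only the hash under HashScheme, owner and path under
+// PathScheme.
+func exampleSchemeAgnosticRead(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) []byte {
+	if blob := ReadTrieNode(db, owner, path, hash, PathScheme); len(blob) != 0 {
+		return blob
+	}
+	return ReadTrieNode(db, owner, path, hash, HashScheme)
+}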
+
+// WriteTrieNode writes the trie node into the database with the provided node info
+// and associated node hash.
+// hashScheme-based lookup requires the following:
+// - hash
+//
+// pathScheme-based lookup requires the following:
+// - owner
+// - path
+func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) {
+ switch scheme {
+ case HashScheme:
+ WriteLegacyTrieNode(db, hash, node)
+ case PathScheme:
+ if owner == (common.Hash{}) {
+ WriteAccountTrieNode(db, path, node)
+ } else {
+ WriteStorageTrieNode(db, owner, path, node)
+ }
+ default:
+ panic(fmt.Sprintf("Unknown scheme %v", scheme))
+ }
+}
+
+// DeleteTrieNode deletes the trie node from the database with the provided node info
+// and associated node hash.
+// hashScheme-based lookup requires the following:
+// - hash
+//
+// pathScheme-based lookup requires the following:
+// - owner
+// - path
+func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) {
+ switch scheme {
+ case HashScheme:
+ DeleteLegacyTrieNode(db, hash)
+ case PathScheme:
+ if owner == (common.Hash{}) {
+ DeleteAccountTrieNode(db, path)
+ } else {
+ DeleteStorageTrieNode(db, owner, path)
+ }
+ default:
+ panic(fmt.Sprintf("Unknown scheme %v", scheme))
+ }
+}
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
new file mode 100644
index 0000000000..8b109c5a52
--- /dev/null
+++ b/core/rawdb/ancient_scheme.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+// The list of table names of the chain freezer.
+// These constants are NOT used; they are just ported over from Ethereum.
+const (
+ // ChainFreezerHeaderTable indicates the name of the freezer header table.
+ ChainFreezerHeaderTable = "headers"
+
+ // ChainFreezerHashTable indicates the name of the freezer canonical hash table.
+ ChainFreezerHashTable = "hashes"
+
+ // ChainFreezerBodiesTable indicates the name of the freezer block body table.
+ ChainFreezerBodiesTable = "bodies"
+
+ // ChainFreezerReceiptTable indicates the name of the freezer receipts table.
+ ChainFreezerReceiptTable = "receipts"
+
+ // ChainFreezerDifficultyTable indicates the name of the freezer total difficulty table.
+ ChainFreezerDifficultyTable = "diffs"
+)
+
+// chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables.
+// Hashes and difficulties don't compress well.
+// This variable is NOT used; it is just ported over from Ethereum.
+var chainFreezerNoSnappy = map[string]bool{
+ ChainFreezerHeaderTable: false,
+ ChainFreezerHashTable: true,
+ ChainFreezerBodiesTable: false,
+ ChainFreezerReceiptTable: false,
+ ChainFreezerDifficultyTable: true,
+}
+
+// The list of identifiers of ancient stores.
+var (
+ chainFreezerName = "chain" // the folder name of chain segment ancient store.
+)
+
+// freezers is the collection of all builtin freezers.
+var freezers = []string{chainFreezerName}
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
new file mode 100644
index 0000000000..e4eaf559c2
--- /dev/null
+++ b/core/rawdb/ancient_utils.go
@@ -0,0 +1,91 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+type tableSize struct {
+ name string
+ size common.StorageSize
+}
+
+// freezerInfo contains the basic information of the freezer.
+type freezerInfo struct {
+ name string // The identifier of freezer
+ head uint64 // The number of last stored item in the freezer
+ tail uint64 // The number of first stored item in the freezer
+ sizes []tableSize // The storage size per table
+}
+
+// count returns the number of stored items in the freezer.
+func (info *freezerInfo) count() uint64 {
+ return info.head - info.tail + 1
+}
+
+// size returns the storage size of the entire freezer.
+func (info *freezerInfo) size() common.StorageSize {
+ var total common.StorageSize
+ for _, table := range info.sizes {
+ total += table.size
+ }
+ return total
+}
+
+// inspectFreezers inspects all freezers registered in the system.
+// This function is NOT used; it is just ported over from Ethereum.
+func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
+ var infos []freezerInfo
+ for _, freezer := range freezers {
+ switch freezer {
+ case chainFreezerName:
+			// The chain ancient store is a bit special. It's always opened along
+			// with the key-value store, so inspect the chain store directly.
+ info := freezerInfo{name: freezer}
+ // Retrieve storage size of every contained table.
+ for table := range chainFreezerNoSnappy {
+ size, err := db.AncientSize(table)
+ if err != nil {
+ return nil, err
+ }
+ info.sizes = append(info.sizes, tableSize{name: table, size: common.StorageSize(size)})
+ }
+ // Retrieve the number of last stored item
+ ancients, err := db.Ancients()
+ if err != nil {
+ return nil, err
+ }
+ info.head = ancients - 1
+
+ // Retrieve the number of first stored item
+ tail, err := db.Tail()
+ if err != nil {
+ return nil, err
+ }
+ info.tail = tail
+ infos = append(infos, info)
+
+ default:
+ return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
+ }
+ }
+ return infos, nil
+}
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
new file mode 100644
index 0000000000..048c1f948d
--- /dev/null
+++ b/core/rawdb/chain_iterator.go
@@ -0,0 +1,358 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
+// of frozen ancient blocks. The method iterates over all the frozen blocks and
+// injects into the database the block hash->number mappings.
+// This function is NOT used; it is just ported over from Ethereum.
+func InitDatabaseFromFreezer(db ethdb.Database) {
+ // If we can't access the freezer or it's empty, abort
+ frozen, err := db.Ancients()
+ if err != nil || frozen == 0 {
+ return
+ }
+ var (
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
+ hash common.Hash
+ )
+ for i := uint64(0); i < frozen; {
+		// Read the hashes in batches of up to 100K at a time
+ count := uint64(100_000)
+ if i+count > frozen {
+ count = frozen - i
+ }
+ data, err := db.AncientRange(ChainFreezerHashTable, i, count, 32*count)
+		if err != nil {
+			utils.Logger().Error().Err(err).Msg("Failed to init database from freezer")
+			return
+		}
+ for j, h := range data {
+ number := i + uint64(j)
+ hash = common.BytesToHash(h)
+ WriteHeaderNumber(batch, hash, number)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to write data to db")
+ }
+ batch.Reset()
+ }
+ }
+ i += uint64(len(data))
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ if err := batch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to write data to db")
+ }
+ batch.Reset()
+
+ WriteHeadHeaderHash(db, hash)
+ WriteHeadFastBlockHash(db, hash)
+ log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+type blockTxHashes struct {
+ number uint64
+ hashes []common.Hash
+}
+
+// iterateTransactions iterates over all transactions in the given range of
+// canonical blocks and yields the hashes on a channel. If a signal is received
+// from the interrupt channel, the iteration is aborted and the result channel
+// is closed.
+func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool, interrupt chan struct{}) chan *blockTxHashes {
+ // One thread sequentially reads data from db
+ type numberRlp struct {
+ number uint64
+ rlp rlp.RawValue
+ }
+ if to == from {
+ return nil
+ }
+ threads := to - from
+ if cpus := runtime.NumCPU(); threads > uint64(cpus) {
+ threads = uint64(cpus)
+ }
+ var (
+ rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel
+ hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
+ )
+ // lookup runs in one instance
+ lookup := func() {
+ n, end := from, to
+ if reverse {
+ n, end = to-1, from-1
+ }
+ defer close(rlpCh)
+ for n != end {
+ data := ReadCanonicalBodyRLP(db, n)
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case rlpCh <- &numberRlp{n, data}:
+ case <-interrupt:
+ return
+ }
+ if reverse {
+ n--
+ } else {
+ n++
+ }
+ }
+ }
+ // process runs in parallel
+ nThreadsAlive := int32(threads)
+ process := func() {
+ defer func() {
+ // Last processor closes the result channel
+ if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
+ close(hashesCh)
+ }
+ }()
+ for data := range rlpCh {
+ var body types.Body
+ if err := rlp.DecodeBytes(data.rlp, &body); err != nil {
+ utils.Logger().Warn().Err(err).Uint64("block", data.number).Msg("Failed to decode block body")
+ return
+ }
+ var hashes []common.Hash
+ for _, tx := range body.Transactions {
+ hashes = append(hashes, tx.Hash())
+ }
+ result := &blockTxHashes{
+ hashes: hashes,
+ number: data.number,
+ }
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case hashesCh <- result:
+ case <-interrupt:
+ return
+ }
+ }
+ }
+ go lookup() // start the sequential db accessor
+ for i := 0; i < int(threads); i++ {
+ go process()
+ }
+ return hashesCh
+}
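+
+// exampleDrainHashes is an illustrative sketch added by the editor and NOT
+// part of the ported code; the function name is hypothetical. It consumes
+// the iterator until the result channel closes; closing the interrupt
+// channel instead would abort the producers early.
+func exampleDrainHashes(db ethdb.Database, from, to uint64) int {
+	if from >= to {
+		return 0 // iterateTransactions returns a nil channel when to == from
+	}
+	interrupt := make(chan struct{})
+	defer close(interrupt)
+	total := 0
+	for delivery := range iterateTransactions(db, from, to, false, interrupt) {
+		total += len(delivery.hashes)
+	}
+	return total
+}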
+
+// indexTransactions creates txlookup indices of the specified block range.
+//
+// This function iterates the canonical chain in reverse order, which has one
+// main advantage: the tx index tail flag can be written periodically even
+// before the whole indexing procedure finishes, so indexing can be resumed
+// quickly next time.
+//
+// The whole procedure will be interrupted if any signal is received on the
+// passed channel.
+// This function is NOT used; it is just ported over from Ethereum.
+func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ var (
+ hashesCh = iterateTransactions(db, from, to, true, interrupt)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ // Since we iterate in reverse, we expect the first number to come
+		// in to be [to-1]. Therefore, setting lastNum to [to] means that the
+		// prqueue gap-evaluation will work correctly.
+ lastNum = to
+ queue = prque.New[int64, *blockTxHashes](nil)
+ // for stats reporting
+ blocks, txs = 0, 0
+ )
+ for chanDelivery := range hashesCh {
+ // Push the delivery into the queue and process contiguous ranges.
+		// Since we iterate in reverse, lower numbers have lower priority, and
+		// we can use the number directly as the priority marker.
+ queue.Push(chanDelivery, int64(chanDelivery.number))
+ for !queue.Empty() {
+ // If the next available item is gapped, return
+ if _, priority := queue.Peek(); priority != int64(lastNum-1) {
+ break
+ }
+ // For testing
+ if hook != nil && !hook(lastNum-1) {
+ break
+ }
+ // Next block available, pop it off and index it
+ delivery := queue.PopItem()
+ lastNum = delivery.number
+ WriteTxLookupEntries(batch, delivery.number, delivery.hashes)
+ blocks++
+ txs += len(delivery.hashes)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ WriteTxIndexTail(batch, lastNum) // Also write the tail here
+ if err := batch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
+	// Flush the new indexing tail and the last committed data. It can also happen
+	// that the last batch is empty because there was nothing to index, but the
+	// tail has to be flushed anyway.
+ WriteTxIndexTail(batch, lastNum)
+ if err := batch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
+ return
+ }
+ select {
+ case <-interrupt:
+ log.Debug("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+ default:
+ log.Debug("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+ }
+}
+
+// IndexTransactions creates txlookup indices of the specified block range.
+// The from is included while to is excluded.
+//
+// This function iterates the canonical chain in reverse order, which has one
+// main advantage: the tx index tail flag can be written periodically even
+// before the whole indexing procedure finishes, so indexing can be resumed
+// quickly next time.
+//
+// The whole procedure will be interrupted if any signal is received on the
+// passed channel.
+func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
+ indexTransactions(db, from, to, interrupt, nil)
+}
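+
+// exampleIndexWithTimeout is an illustrative sketch added by the editor and
+// NOT part of the ported code; the function name is hypothetical. Closing
+// the interrupt channel aborts indexing, while the periodically written tail
+// flag lets a later call resume where this one stopped.
+func exampleIndexWithTimeout(db ethdb.Database, from, to uint64, limit time.Duration) {
+	interrupt := make(chan struct{})
+	timer := time.AfterFunc(limit, func() { close(interrupt) })
+	defer timer.Stop()
+	IndexTransactions(db, from, to, interrupt)
+}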
+
+// indexTransactionsForTesting is the internal debug version with an additional hook.
+func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
+ indexTransactions(db, from, to, interrupt, hook)
+}
+
+// unindexTransactions removes txlookup indices of the specified block range.
+//
+// The whole procedure will be interrupted if any signal is received on the
+// passed channel.
+// This function is NOT used; it is just ported over from Ethereum.
+func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ var (
+ hashesCh = iterateTransactions(db, from, to, false, interrupt)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ // we expect the first number to come in to be [from]. Therefore, setting
+		// nextNum to [from] means that the prqueue gap-evaluation will work correctly.
+ nextNum = from
+ queue = prque.New[int64, *blockTxHashes](nil)
+ // for stats reporting
+ blocks, txs = 0, 0
+ )
+	// Spin up the concurrent iterator and unindexer
+ for delivery := range hashesCh {
+ // Push the delivery into the queue and process contiguous ranges.
+ queue.Push(delivery, -int64(delivery.number))
+ for !queue.Empty() {
+ // If the next available item is gapped, return
+ if _, priority := queue.Peek(); -priority != int64(nextNum) {
+ break
+ }
+ // For testing
+ if hook != nil && !hook(nextNum) {
+ break
+ }
+ delivery := queue.PopItem()
+ nextNum = delivery.number + 1
+ DeleteTxLookupEntries(batch, delivery.hashes)
+ txs += len(delivery.hashes)
+ blocks++
+
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ // A batch counts the size of deletion as '1', so we need to flush more
+ // often than that.
+ if blocks%1000 == 0 {
+ WriteTxIndexTail(batch, nextNum)
+ if err := batch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
+	// Flush the new indexing tail and the last committed data. It can also happen
+	// that the last batch is empty because there was nothing to unindex, but the
+	// tail has to be flushed anyway.
+ WriteTxIndexTail(batch, nextNum)
+ if err := batch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
+ return
+ }
+ select {
+ case <-interrupt:
+ log.Debug("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+ default:
+ log.Debug("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+ }
+}
+
+// UnindexTransactions removes txlookup indices of the specified block range.
+// The from is included while to is excluded.
+//
+// The whole procedure will be interrupted if any signal is received on the
+// passed channel.
+func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
+ unindexTransactions(db, from, to, interrupt, nil)
+}
+
+// unindexTransactionsForTesting is the internal debug version with an additional hook.
+func unindexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
+ unindexTransactions(db, from, to, interrupt, hook)
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
new file mode 100644
index 0000000000..c1421b5073
--- /dev/null
+++ b/core/rawdb/database.go
@@ -0,0 +1,468 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/ethdb/leveldb"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/harmony-one/harmony/internal/utils"
+ "github.com/olekukonko/tablewriter"
+)
+
+var errNotSupported = errors.New("not supported")
+
+// convertLegacyFn takes a raw freezer entry in an older format and
+// returns it in the new format.
+type convertLegacyFn = func([]byte) ([]byte, error)
+
+// freezerdb is a database wrapper that enables freezer data retrievals.
+type freezerdb struct {
+ ancientRoot string
+ ethdb.KeyValueStore
+ ethdb.AncientStore
+}
+
+// AncientDatadir returns the path of root ancient directory.
+func (frdb *freezerdb) AncientDatadir() (string, error) {
+ return frdb.ancientRoot, nil
+}
+
+// Close implements io.Closer, closing both the fast key-value store as well as
+// the slow ancient tables.
+func (frdb *freezerdb) Close() error {
+ var errs []error
+ if err := frdb.AncientStore.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := frdb.KeyValueStore.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if len(errs) != 0 {
+ return fmt.Errorf("%v", errs)
+ }
+ return nil
+}
+
+// nofreezedb is a database wrapper that disables freezer data retrievals.
+type nofreezedb struct {
+ ethdb.KeyValueStore
+}
+
+// HasAncient returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
+ return false, errNotSupported
+}
+
+// Ancient returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
+ return nil, errNotSupported
+}
+
+// AncientRange returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
+ return nil, errNotSupported
+}
+
+// Ancients returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Ancients() (uint64, error) {
+ return 0, errNotSupported
+}
+
+// Tail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Tail() (uint64, error) {
+ return 0, errNotSupported
+}
+
+// AncientSize returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
+ return 0, errNotSupported
+}
+
+// ModifyAncients is not supported.
+func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
+ return 0, errNotSupported
+}
+
+// TruncateHead returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateHead(items uint64) error {
+ return errNotSupported
+}
+
+// TruncateTail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateTail(items uint64) error {
+ return errNotSupported
+}
+
+// Sync returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Sync() error {
+ return errNotSupported
+}
+
+func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
+ // Unlike other ancient-related methods, this method does not return
+ // errNotSupported when invoked.
+ // The reason for this is that the caller might want to do several things:
+ // 1. Check if something is in freezer,
+ // 2. If not, check leveldb.
+ //
+ // This will work, since the ancient-checks inside 'fn' will return errors,
+ // and the leveldb work will continue.
+ //
+ // If we instead were to return errNotSupported here, then the caller would
+ // have to explicitly check for that, having an extra clause to do the
+ // non-ancient operations.
+ return fn(db)
+}
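+
+// exampleReadWithFallback is an illustrative sketch added by the editor and
+// NOT part of the ported code; the function name and key argument are
+// hypothetical. It shows the caller pattern described above: the freezer
+// read errors out inside the closure, the error is ignored, and the
+// key-value store is consulted instead.
+func exampleReadWithFallback(db ethdb.Database, key []byte, number uint64) []byte {
+	var data []byte
+	_ = db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
+		data, _ = reader.Ancient(ChainFreezerHashTable, number)
+		if len(data) == 0 {
+			data, _ = db.Get(key) // fall back to the key-value store
+		}
+		return nil
+	})
+	return data
+}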
+
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
+ return errNotSupported
+}
+
+// AncientDatadir returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientDatadir() (string, error) {
+ return "", errNotSupported
+}
+
+// NewDatabase creates a high level database on top of a given key-value data
+// store without a freezer moving immutable chain segments into cold storage.
+func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
+ return &nofreezedb{KeyValueStore: db}
+}
+
+// resolveChainFreezerDir is a helper function which resolves the absolute path
+// of the chain freezer, taking backward compatibility into account.
+// This function is NOT used; it is just ported over from Ethereum.
+func resolveChainFreezerDir(ancient string) string {
+ // Check if the chain freezer is already present in the specified
+	// sub folder; if not, there are two possibilities:
+ // - chain freezer is not initialized
+ // - chain freezer exists in legacy location (root ancient folder)
+ freezer := path.Join(ancient, chainFreezerName)
+ if !common.FileExist(freezer) {
+ if !common.FileExist(ancient) {
+ // The entire ancient store is not initialized, still use the sub
+ // folder for initialization.
+ } else {
+ // Ancient root is already initialized, then we hold the assumption
+ // that chain freezer is also initialized and located in root folder.
+ // In this case fallback to legacy location.
+ freezer = ancient
+ log.Info("Found legacy ancient chain path", "location", ancient)
+ }
+ }
+ return freezer
+}
+
+// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
+// freezer moving immutable chain segments into cold storage.
+func NewMemoryDatabase() ethdb.Database {
+ return NewDatabase(memorydb.New())
+}
+
+// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
+// with an initial starting capacity, but without a freezer moving immutable
+// chain segments into cold storage.
+func NewMemoryDatabaseWithCap(size int) ethdb.Database {
+ return NewDatabase(memorydb.NewWithCap(size))
+}
+
+// NewLevelDBDatabase creates a persistent key-value database without a freezer
+// moving immutable chain segments into cold storage.
+func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
+ db, err := leveldb.New(file, cache, handles, namespace, readonly)
+ if err != nil {
+ return nil, err
+ }
+ log.Info("Using LevelDB as the backing database")
+ return NewDatabase(db), nil
+}
+
+const (
+ dbPebble = "pebble"
+ dbLeveldb = "leveldb"
+)
+
+// hasPreexistingDb checks whether a database is already instantiated in the
+// given data directory and, if so, returns its type (or the empty string).
+func hasPreexistingDb(path string) string {
+ if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
+ return "" // No pre-existing db
+ }
+ if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil {
+ if err != nil {
+ panic(err) // only possible if the pattern is malformed
+ }
+ return dbPebble
+ }
+ return dbLeveldb
+}
+
+// OpenOptions contains the options to apply when opening a database.
+// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used.
+type OpenOptions struct {
+ Type string // "leveldb" | "pebble"
+ Directory string // the datadir
+ AncientsDirectory string // the ancients-dir
+ Namespace string // the namespace for database relevant metrics
+	Cache             int    // the capacity (in megabytes) of the data caching
+ Handles int // number of files to be open simultaneously
+ ReadOnly bool
+}
+
+// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
+//
+//	                      type == null          type != null
+//	                   +----------------------------------------
+//	db is non-existent |  leveldb default  |  specified type
+//	db is existent     |  from db          |  specified type (if compatible)
+func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
+ existingDb := hasPreexistingDb(o.Directory)
+ if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb {
+ return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
+ }
+ if o.Type == dbPebble || existingDb == dbPebble {
+ if PebbleEnabled {
+ log.Info("Using pebble as the backing database")
+ return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
+ } else {
+ return nil, errors.New("db.engine 'pebble' not supported on this platform")
+ }
+ }
+ if len(o.Type) != 0 && o.Type != dbLeveldb {
+ return nil, fmt.Errorf("unknown db.engine %v", o.Type)
+ }
+ log.Info("Using leveldb as the backing database")
+ // Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly
+ return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
+}
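+
+// exampleOpen is an illustrative sketch added by the editor and NOT part of
+// the ported code; the function name and parameter values are hypothetical.
+// It exercises the resolution table above: an empty Type defers to whatever
+// database already exists in the directory, defaulting to leveldb.
+func exampleOpen(datadir string) (ethdb.Database, error) {
+	return openKeyValueDatabase(OpenOptions{
+		Type:      "", // auto-detect: pre-existing type if any, else leveldb
+		Directory: datadir,
+		Cache:     512, // megabytes of data caching
+		Handles:   256, // simultaneously open files
+	})
+}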
+
+type counter uint64
+
+func (c counter) String() string {
+ return fmt.Sprintf("%d", c)
+}
+
+func (c counter) Percentage(current uint64) string {
+ return fmt.Sprintf("%d", current*100/uint64(c))
+}
+
+// stat stores sizes and count for a parameter
+type stat struct {
+ size common.StorageSize
+ count counter
+}
+
+// Add adds the given size to the stat and increases the counter by one.
+func (s *stat) Add(size common.StorageSize) {
+ s.size += size
+ s.count++
+}
+
+func (s *stat) Size() string {
+ return s.size.String()
+}
+
+func (s *stat) Count() string {
+ return s.count.String()
+}
+
+// InspectDatabase traverses the entire database and checks the size
+// of all different categories of data.
+// This function is NOT used; it is just ported over from Ethereum.
+func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
+ it := db.NewIterator(keyPrefix, keyStart)
+ defer it.Release()
+
+ var (
+ count int64
+ start = time.Now()
+ logged = time.Now()
+
+ // Key-value store statistics
+ headers stat
+ bodies stat
+ receipts stat
+ tds stat
+ numHashPairings stat
+ hashNumPairings stat
+ tries stat
+ codes stat
+ validatorCodes stat
+ txLookups stat
+ accountSnaps stat
+ storageSnaps stat
+ preimages stat
+ bloomBits stat
+ beaconHeaders stat
+ cliqueSnaps stat
+
+ // Les statistic
+ chtTrieNodes stat
+ bloomTrieNodes stat
+
+ // Meta- and unaccounted data
+ metadata stat
+ unaccounted stat
+
+ // Totals
+ total common.StorageSize
+ )
+ // Inspect key-value database first.
+ for it.Next() {
+ var (
+ key = it.Key()
+ size = common.StorageSize(len(key) + len(it.Value()))
+ )
+ total += size
+ switch {
+ case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
+ headers.Add(size)
+ case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
+ bodies.Add(size)
+ case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
+ receipts.Add(size)
+ case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
+ tds.Add(size)
+ case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
+ numHashPairings.Add(size)
+ case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
+ hashNumPairings.Add(size)
+ case len(key) == common.HashLength:
+ tries.Add(size)
+ case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
+ codes.Add(size)
+ case bytes.HasPrefix(key, ValidatorCodePrefix) && len(key) == len(ValidatorCodePrefix)+common.HashLength:
+ validatorCodes.Add(size)
+ case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
+ txLookups.Add(size)
+ case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
+ accountSnaps.Add(size)
+ case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
+ storageSnaps.Add(size)
+ case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
+ preimages.Add(size)
+ case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
+ metadata.Add(size)
+ case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
+ metadata.Add(size)
+ case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
+ bloomBits.Add(size)
+ case bytes.HasPrefix(key, BloomBitsIndexPrefix):
+ bloomBits.Add(size)
+ case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
+ beaconHeaders.Add(size)
+ case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
+ cliqueSnaps.Add(size)
+ case bytes.HasPrefix(key, ChtTablePrefix) ||
+ bytes.HasPrefix(key, ChtIndexTablePrefix) ||
+ bytes.HasPrefix(key, ChtPrefix): // Canonical hash trie
+ chtTrieNodes.Add(size)
+ case bytes.HasPrefix(key, BloomTrieTablePrefix) ||
+ bytes.HasPrefix(key, BloomTrieIndexPrefix) ||
+ bytes.HasPrefix(key, BloomTriePrefix): // Bloomtrie sub
+ bloomTrieNodes.Add(size)
+ default:
+ var accounted bool
+ for _, meta := range [][]byte{
+ databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
+ lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
+ snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
+ uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
+ } {
+ if bytes.Equal(key, meta) {
+ metadata.Add(size)
+ accounted = true
+ break
+ }
+ }
+ if !accounted {
+ unaccounted.Add(size)
+ }
+ }
+ count++
+ if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+ log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ // Display the database statistic of key-value store.
+ stats := [][]string{
+ {"Key-Value store", "Headers", headers.Size(), headers.Count()},
+ {"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
+ {"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
+ {"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
+ {"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
+ {"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
+ {"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
+ {"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
+ {"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
+ {"Key-Value store", "Validator codes", validatorCodes.Size(), validatorCodes.Count()},
+ {"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
+ {"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
+ {"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
+ {"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
+ {"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
+ {"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
+ {"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
+ {"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
+ {"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
+ }
+	// Then inspect all registered append-only file stores.
+ ancients, err := inspectFreezers(db)
+ if err != nil {
+ return err
+ }
+ for _, ancient := range ancients {
+ for _, table := range ancient.sizes {
+ stats = append(stats, []string{
+ fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
+ strings.Title(table.name),
+ table.size.String(),
+ fmt.Sprintf("%d", ancient.count()),
+ })
+ }
+ total += ancient.size()
+ }
+ table := tablewriter.NewWriter(os.Stdout)
+ table.SetHeader([]string{"Database", "Category", "Size", "Items"})
+ table.SetFooter([]string{"", "Total", total.String(), " "})
+ table.AppendBulk(stats)
+ table.Render()
+
+ if unaccounted.size > 0 {
+ utils.Logger().Error().
+ Interface("size", unaccounted.size).
+ Interface("count", unaccounted.count).
+ Msg("Database contains unaccounted data")
+ }
+ return nil
+}
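Reviewer note: every bucket above is keyed by a (prefix, exact key length) pair, so a key only counts toward a category when both match; anything else falls through to the metadata scan and finally to unaccounted. A minimal, self-contained sketch of that matching rule, with hypothetical prefixes standing in for the real schema constants:

package main

import (
	"bytes"
	"fmt"
)

// classify mirrors the (prefix, exact length) test used by the inspector
// above. The prefixes and lengths here are illustrative, not the schema's.
func classify(key []byte) string {
	var (
		headerPrefix = []byte("h") // hypothetical: prefix + 8-byte number + 32-byte hash
		codePrefix   = []byte("c") // hypothetical: prefix + 32-byte code hash
		hashLength   = 32
	)
	switch {
	case bytes.HasPrefix(key, headerPrefix) && len(key) == len(headerPrefix)+8+hashLength:
		return "headers"
	case bytes.HasPrefix(key, codePrefix) && len(key) == len(codePrefix)+hashLength:
		return "codes"
	default:
		return "unaccounted"
	}
}

func main() {
	fmt.Println(classify(append([]byte("c"), make([]byte, 32)...))) // codes
	fmt.Println(classify([]byte("c-too-short")))                    // unaccounted
}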
diff --git a/core/rawdb/database_test.go b/core/rawdb/database_test.go
new file mode 100644
index 0000000000..a0d7b5ec66
--- /dev/null
+++ b/core/rawdb/database_test.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go
new file mode 100644
index 0000000000..139ce7d347
--- /dev/null
+++ b/core/rawdb/databases_64bit.go
@@ -0,0 +1,37 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build arm64 || amd64
+
+package rawdb
+
+import (
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/ethdb/pebble"
+)
+
+// Pebble is supported on 64bit architectures.
+const PebbleEnabled = true
+
+// NewPebbleDBDatabase creates a persistent key-value database without a freezer
+// to move immutable chain segments into cold storage.
+func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
+ db, err := pebble.New(file, cache, handles, namespace, readonly)
+ if err != nil {
+ return nil, err
+ }
+ return NewDatabase(db), nil
+}
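A hedged usage sketch for the two files above: PebbleEnabled is resolved at compile time via the build tags, so callers can gate on it before opening a Pebble-backed store. The import path and parameter values below are assumptions for illustration:

package main

import (
	"log"

	"github.com/harmony-one/harmony/core/rawdb"
)

func main() {
	// On 32-bit builds the non64bit variant is compiled in and this is false.
	if !rawdb.PebbleEnabled {
		log.Fatal("pebble is unavailable on this platform")
	}
	// Hypothetical parameters: data dir, cache size, file handles, metrics
	// namespace, and read-only flag, matching the signature above.
	db, err := rawdb.NewPebbleDBDatabase("./chaindata", 512, 1024, "chaindata", false)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}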
diff --git a/core/rawdb/databases_non64bit.go b/core/rawdb/databases_non64bit.go
new file mode 100644
index 0000000000..b8ab2ecada
--- /dev/null
+++ b/core/rawdb/databases_non64bit.go
@@ -0,0 +1,34 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build !(arm64 || amd64)
+
+package rawdb
+
+import (
+ "errors"
+
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// Pebble is unsupported on 32bit architectures.
+const PebbleEnabled = false
+
+// NewPebbleDBDatabase creates a persistent key-value database without a freezer
+// to move immutable chain segments into cold storage.
+func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
+ return nil, errors.New("pebble is not supported on this platform")
+}
diff --git a/core/rawdb/interfaces.go b/core/rawdb/interfaces.go
index ac91ae55ae..76971edc60 100644
--- a/core/rawdb/interfaces.go
+++ b/core/rawdb/interfaces.go
@@ -27,13 +27,15 @@ type DatabaseReader interface {
// DatabaseWriter wraps the Put method of a backing data store.
type DatabaseWriter interface {
Put(key []byte, value []byte) error
+ Delete(key []byte) error
}
// DatabaseDeleter wraps the Delete method of a backing data store.
type DatabaseDeleter interface {
+ Put(key []byte, value []byte) error
Delete(key []byte) error
}
type DatabaseIterator interface {
- NewIteratorWithPrefix(prefix []byte) ethdb.Iterator
+ NewIterator(prefix []byte, start []byte) ethdb.Iterator
}
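The DatabaseIterator change above folds the old prefix-only constructor into the two-argument form: a prefix scan now passes a nil start key, and a non-nil start narrows the scan to keys at or after prefix+start. A small sketch against the in-memory database (which satisfies the updated interface):

package main

import (
	"fmt"

	"github.com/harmony-one/harmony/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	db.Put([]byte("p1"), []byte("a"))
	db.Put([]byte("p2"), []byte("b"))
	db.Put([]byte("q1"), []byte("c"))

	// Previously: db.NewIteratorWithPrefix([]byte("p")).
	// Now a nil start key yields the whole prefix range.
	it := db.NewIterator([]byte("p"), nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value()) // p1=a, p2=b
	}
}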
diff --git a/core/rawdb/key_length_iterator.go b/core/rawdb/key_length_iterator.go
new file mode 100644
index 0000000000..d1c5af269a
--- /dev/null
+++ b/core/rawdb/key_length_iterator.go
@@ -0,0 +1,47 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import "github.com/ethereum/go-ethereum/ethdb"
+
+// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
+// with a specific key length will be returned.
+type KeyLengthIterator struct {
+ requiredKeyLength int
+ ethdb.Iterator
+}
+
+// NewKeyLengthIterator returns a wrapped version of the iterator that only
+// returns key-value pairs whose keys have the required length.
+func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator {
+ return &KeyLengthIterator{
+ Iterator: it,
+ requiredKeyLength: keyLen,
+ }
+}
+
+func (it *KeyLengthIterator) Next() bool {
+ // Return true as soon as a key with the required key length is discovered
+ for it.Iterator.Next() {
+ if len(it.Iterator.Key()) == it.requiredKeyLength {
+ return true
+ }
+ }
+
+ // Return false when we exhaust the keys in the underlying iterator.
+ return false
+}
diff --git a/core/rawdb/key_length_iterator_test.go b/core/rawdb/key_length_iterator_test.go
new file mode 100644
index 0000000000..654efc5b55
--- /dev/null
+++ b/core/rawdb/key_length_iterator_test.go
@@ -0,0 +1,60 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "encoding/binary"
+ "testing"
+)
+
+func TestKeyLengthIterator(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ keyLen := 8
+ expectedKeys := make(map[string]struct{})
+ for i := 0; i < 100; i++ {
+ key := make([]byte, keyLen)
+ binary.BigEndian.PutUint64(key, uint64(i))
+ if err := db.Put(key, []byte{0x1}); err != nil {
+ t.Fatal(err)
+ }
+ expectedKeys[string(key)] = struct{}{}
+
+ longerKey := make([]byte, keyLen*2)
+ binary.BigEndian.PutUint64(longerKey, uint64(i))
+ if err := db.Put(longerKey, []byte{0x1}); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen)
+ for it.Next() {
+ key := it.Key()
+ _, exists := expectedKeys[string(key)]
+ if !exists {
+ t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key))
+ }
+ delete(expectedKeys, string(key))
+ if len(key) != keyLen {
+ t.Fatalf("Found unexpected key in key length iterator with length %d", len(key))
+ }
+ }
+
+ if len(expectedKeys) != 0 {
+ t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen)
+ }
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index a95e568f61..56147b51d3 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -18,6 +18,7 @@
package rawdb
import (
+ "bytes"
"encoding/binary"
"math/big"
@@ -27,15 +28,63 @@ import (
// The fields below define the low level database schema prefixing.
var (
- // databaseVerisionKey tracks the current database version.
- databaseVerisionKey = []byte("DatabaseVersion")
- // headHeaderKey tracks the latest know header's hash.
+ // databaseVersionKey tracks the current database version.
+ databaseVersionKey = []byte("DatabaseVersion")
+
+ // headHeaderKey tracks the latest known header's hash.
headHeaderKey = []byte("LastHeader")
- // headBlockKey tracks the latest know full block's hash.
+
+ // headBlockKey tracks the latest known full block's hash.
headBlockKey = []byte("LastBlock")
- // headFastBlockKey tracks the latest known incomplete block's hash duirng fast sync.
+
+ // headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
headFastBlockKey = []byte("LastFast")
- // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
+
+ // headFinalizedBlockKey tracks the latest known finalized block hash.
+ headFinalizedBlockKey = []byte("LastFinalized")
+
+ // lastPivotKey tracks the last pivot block used by fast sync (to reenable on sethead).
+ lastPivotKey = []byte("LastPivot")
+
+ // fastTrieProgressKey tracks the number of trie entries imported during fast sync.
+ fastTrieProgressKey = []byte("TrieSync")
+
+ // snapshotDisabledKey flags that the snapshot should not be maintained due to initial sync.
+ snapshotDisabledKey = []byte("SnapshotDisabled")
+
+ // SnapshotRootKey tracks the hash of the last snapshot.
+ SnapshotRootKey = []byte("SnapshotRoot")
+
+ // snapshotJournalKey tracks the in-memory diff layers across restarts.
+ snapshotJournalKey = []byte("SnapshotJournal")
+
+ // snapshotGeneratorKey tracks the snapshot generation marker across restarts.
+ snapshotGeneratorKey = []byte("SnapshotGenerator")
+
+ // snapshotRecoveryKey tracks the snapshot recovery marker across restarts.
+ snapshotRecoveryKey = []byte("SnapshotRecovery")
+
+ // snapshotSyncStatusKey tracks the snapshot sync status across restarts.
+ snapshotSyncStatusKey = []byte("SnapshotSyncStatus")
+
+ // skeletonSyncStatusKey tracks the skeleton sync status across restarts.
+ skeletonSyncStatusKey = []byte("SkeletonSyncStatus")
+
+ // txIndexTailKey tracks the oldest block whose transactions have been indexed.
+ txIndexTailKey = []byte("TransactionIndexTail")
+
+ // fastTxLookupLimitKey tracks the transaction lookup limit during fast sync.
+ fastTxLookupLimitKey = []byte("FastTransactionLookupLimit")
+
+ // badBlockKey tracks the list of bad blocks seen by the local node
+ badBlockKey = []byte("InvalidBlock")
+
+ // uncleanShutdownKey tracks the list of local crashes
+ uncleanShutdownKey = []byte("unclean-shutdown")
+
+ // transitionStatusKey tracks the eth2 transition status.
+ transitionStatusKey = []byte("eth2-transition")
+
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash
@@ -51,6 +100,7 @@ var (
pendingCrosslinkKey = []byte("pendingCL") // prefix for shard last pending crosslink
pendingSlashingKey = []byte("pendingSC") // prefix for shard last pending slashing record
preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
+ continuousBlocksCountKey = []byte("continuous") // key for continuous blocks count
configPrefix = []byte("ethereum-config-") // config prefix for the db
crosslinkPrefix = []byte("cl") // prefix for crosslink
delegatorValidatorListPrefix = []byte("dvl") // prefix for delegator's validator list
@@ -74,8 +124,108 @@ var (
currentRewardGivenOutPrefix = []byte("blk-rwd-")
// key of SnapdbInfo
snapdbInfoKey = []byte("SnapdbInfo")
+
+ // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
+ SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
+ SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
+ CodePrefix = []byte("c") // CodePrefix + code hash -> account code
+ ValidatorCodePrefix = []byte("vc") // ValidatorCodePrefix + code hash -> validator code
+ skeletonHeaderPrefix = []byte("S") // skeletonHeaderPrefix + num (uint64 big endian) -> header
+
+ // Path-based trie node scheme.
+ trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
+ trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
+
+ PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
+ genesisPrefix = []byte("ethereum-genesis-") // genesis state prefix for the db
+
+ ChtPrefix = []byte("chtRootV2-") // ChtPrefix + chtNum (uint64 big endian) -> trie root hash
+ ChtTablePrefix = []byte("cht-")
+ ChtIndexTablePrefix = []byte("chtIndexV2-")
+
+ BloomTriePrefix = []byte("bltRoot-") // BloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
+ BloomTrieTablePrefix = []byte("blt-")
+ BloomTrieIndexPrefix = []byte("bltIndex-")
+
+ CliqueSnapshotPrefix = []byte("clique-")
)
+// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
+// fields.
+type LegacyTxLookupEntry struct {
+ BlockHash common.Hash
+ BlockIndex uint64
+ Index uint64
+}
+
+// headerKeyPrefix = headerPrefix + num (uint64 big endian)
+func headerKeyPrefix(number uint64) []byte {
+ return append(headerPrefix, encodeBlockNumber(number)...)
+}
+
+// accountSnapshotKey = SnapshotAccountPrefix + hash
+func accountSnapshotKey(hash common.Hash) []byte {
+ return append(SnapshotAccountPrefix, hash.Bytes()...)
+}
+
+// storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash
+func storageSnapshotKey(accountHash, storageHash common.Hash) []byte {
+ return append(append(SnapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...)
+}
+
+// storageSnapshotsKey = SnapshotStoragePrefix + account hash
+func storageSnapshotsKey(accountHash common.Hash) []byte {
+ return append(SnapshotStoragePrefix, accountHash.Bytes()...)
+}
+
+// skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian)
+func skeletonHeaderKey(number uint64) []byte {
+ return append(skeletonHeaderPrefix, encodeBlockNumber(number)...)
+}
+
+// codeKey = CodePrefix + hash
+func codeKey(hash common.Hash) []byte {
+ return append(CodePrefix, hash.Bytes()...)
+}
+
+// IsCodeKey reports whether the given byte slice is the key of contract code,
+// and if so, returns the raw code hash as well.
+func IsCodeKey(key []byte) (bool, []byte) {
+ if bytes.HasPrefix(key, CodePrefix) && len(key) == common.HashLength+len(CodePrefix) {
+ return true, key[len(CodePrefix):]
+ }
+ return false, nil
+}
+
+// validatorCodeKey = ValidatorCodePrefix + hash
+func validatorCodeKey(hash common.Hash) []byte {
+ return append(ValidatorCodePrefix, hash.Bytes()...)
+}
+
+// IsValidatorCodeKey reports whether the given byte slice is the key of validator code,
+// and if so, returns the raw code hash as well.
+func IsValidatorCodeKey(key []byte) (bool, []byte) {
+ if bytes.HasPrefix(key, ValidatorCodePrefix) && len(key) == common.HashLength+len(ValidatorCodePrefix) {
+ return true, key[len(ValidatorCodePrefix):]
+ }
+ return false, nil
+}
+
+// genesisStateSpecKey = genesisPrefix + hash
+func genesisStateSpecKey(hash common.Hash) []byte {
+ return append(genesisPrefix, hash.Bytes()...)
+}
+
+// accountTrieNodeKey = trieNodeAccountPrefix + nodePath.
+func accountTrieNodeKey(path []byte) []byte {
+ return append(trieNodeAccountPrefix, path...)
+}
+
+// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
+func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
+ return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...)
+}
+
// TxLookupEntry is a positional metadata to help looking up the data content of
// a transaction or receipt given only its hash.
type TxLookupEntry struct {
@@ -151,6 +301,10 @@ func preimageKey(hash common.Hash) []byte {
return append(preimagePrefix, hash.Bytes()...)
}
+func leaderContinuousBlocksCountKey() []byte {
+ return continuousBlocksCountKey
+}
+
// configKey = configPrefix + hash
func configKey(hash common.Hash) []byte {
return append(configPrefix, hash.Bytes()...)
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
new file mode 100644
index 0000000000..35cd11d55d
--- /dev/null
+++ b/core/rawdb/table.go
@@ -0,0 +1,313 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// table is a wrapper around a database that prefixes each key access with a pre-
+// configured string.
+type table struct {
+ db ethdb.Database
+ prefix string
+}
+
+// NewTable returns a database object that prefixes all keys with a given string.
+func NewTable(db ethdb.Database, prefix string) ethdb.Database {
+ return &table{
+ db: db,
+ prefix: prefix,
+ }
+}
+
+// Close is a noop to implement the Database interface.
+func (t *table) Close() error {
+ return nil
+}
+
+// Has retrieves if a prefixed version of a key is present in the database.
+func (t *table) Has(key []byte) (bool, error) {
+ return t.db.Has(append([]byte(t.prefix), key...))
+}
+
+// Get retrieves the given prefixed key if it's present in the database.
+func (t *table) Get(key []byte) ([]byte, error) {
+ return t.db.Get(append([]byte(t.prefix), key...))
+}
+
+// HasAncient is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) HasAncient(kind string, number uint64) (bool, error) {
+ return t.db.HasAncient(kind, number)
+}
+
+// Ancient is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
+ return t.db.Ancient(kind, number)
+}
+
+// AncientRange is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+ return t.db.AncientRange(kind, start, count, maxBytes)
+}
+
+// Ancients is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Ancients() (uint64, error) {
+ return t.db.Ancients()
+}
+
+// Tail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Tail() (uint64, error) {
+ return t.db.Tail()
+}
+
+// AncientSize is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) AncientSize(kind string) (uint64, error) {
+ return t.db.AncientSize(kind)
+}
+
+// ModifyAncients runs an ancient write operation on the underlying database.
+func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
+ return t.db.ModifyAncients(fn)
+}
+
+func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
+ return t.db.ReadAncients(fn)
+}
+
+// TruncateHead is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) TruncateHead(items uint64) error {
+ return t.db.TruncateHead(items)
+}
+
+// TruncateTail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) TruncateTail(items uint64) error {
+ return t.db.TruncateTail(items)
+}
+
+// Sync is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Sync() error {
+ return t.db.Sync()
+}
+
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
+ return t.db.MigrateTable(kind, convert)
+}
+
+// AncientDatadir returns the ancient datadir of the underlying database.
+func (t *table) AncientDatadir() (string, error) {
+ return t.db.AncientDatadir()
+}
+
+// Put inserts the given value into the database at a prefixed version of the
+// provided key.
+func (t *table) Put(key []byte, value []byte) error {
+ return t.db.Put(append([]byte(t.prefix), key...), value)
+}
+
+// Delete removes the given prefixed key from the database.
+func (t *table) Delete(key []byte) error {
+ return t.db.Delete(append([]byte(t.prefix), key...))
+}
+
+// NewIterator creates a binary-alphabetical iterator over a subset
+// of database content with a particular key prefix, starting at a particular
+// initial key (or after, if it does not exist).
+func (t *table) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
+ innerPrefix := append([]byte(t.prefix), prefix...)
+ iter := t.db.NewIterator(innerPrefix, start)
+ return &tableIterator{
+ iter: iter,
+ prefix: t.prefix,
+ }
+}
+
+// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
+// of database content with a particular key prefix.
+func (t *table) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
+ return t.NewIterator(prefix, nil)
+}
+
+// Stat returns a particular internal stat of the database.
+func (t *table) Stat(property string) (string, error) {
+ return t.db.Stat(property)
+}
+
+// Compact flattens the underlying data store for the given key range. In essence,
+// deleted and overwritten versions are discarded, and the data is rearranged to
+// reduce the cost of operations needed to access them.
+//
+// A nil start is treated as a key before all keys in the data store; a nil limit
+// is treated as a key after all keys in the data store. If both are nil, the
+// entire data store will be compacted.
+func (t *table) Compact(start []byte, limit []byte) error {
+ // If no start was specified, use the table prefix as the first value
+ if start == nil {
+ start = []byte(t.prefix)
+ } else {
+ start = append([]byte(t.prefix), start...)
+ }
+ // If no limit was specified, use the first element not matching the prefix
+ // as the limit
+ if limit == nil {
+ limit = []byte(t.prefix)
+ for i := len(limit) - 1; i >= 0; i-- {
+ // Bump the current character, stopping if it doesn't overflow
+ limit[i]++
+ if limit[i] > 0 {
+ break
+ }
+ // Character overflowed; move to the previous byte, or nil the limit if this was the first
+ if i == 0 {
+ limit = nil
+ }
+ }
+ } else {
+ limit = append([]byte(t.prefix), limit...)
+ }
+ // Range correctly calculated based on table prefix, delegate down
+ return t.db.Compact(start, limit)
+}
+
+// NewBatch creates a write-only database that buffers changes to its host db
+// until a final write is called, each operation prefixing all keys with the
+// pre-configured string.
+func (t *table) NewBatch() ethdb.Batch {
+ return &tableBatch{t.db.NewBatch(), t.prefix}
+}
+
+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+func (t *table) NewBatchWithSize(size int) ethdb.Batch {
+ return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
+}
+
+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot will not be affected by any mutations that happen
+// on the database afterwards.
+func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
+ return t.db.NewSnapshot()
+}
+
+// tableBatch is a wrapper around a database batch that prefixes each key access
+// with a pre-configured string.
+type tableBatch struct {
+ batch ethdb.Batch
+ prefix string
+}
+
+// Put inserts the given value into the batch for later committing.
+func (b *tableBatch) Put(key, value []byte) error {
+ return b.batch.Put(append([]byte(b.prefix), key...), value)
+}
+
+// Delete inserts a key removal into the batch for later committing.
+func (b *tableBatch) Delete(key []byte) error {
+ return b.batch.Delete(append([]byte(b.prefix), key...))
+}
+
+// ValueSize retrieves the amount of data queued up for writing.
+func (b *tableBatch) ValueSize() int {
+ return b.batch.ValueSize()
+}
+
+// Write flushes any accumulated data to disk.
+func (b *tableBatch) Write() error {
+ return b.batch.Write()
+}
+
+// Reset resets the batch for reuse.
+func (b *tableBatch) Reset() {
+ b.batch.Reset()
+}
+
+// tableReplayer is a wrapper around a batch replayer which truncates
+// the added prefix.
+type tableReplayer struct {
+ w ethdb.KeyValueWriter
+ prefix string
+}
+
+// Put implements the interface KeyValueWriter.
+func (r *tableReplayer) Put(key []byte, value []byte) error {
+ trimmed := key[len(r.prefix):]
+ return r.w.Put(trimmed, value)
+}
+
+// Delete implements the interface KeyValueWriter.
+func (r *tableReplayer) Delete(key []byte) error {
+ trimmed := key[len(r.prefix):]
+ return r.w.Delete(trimmed)
+}
+
+// Replay replays the batch contents.
+func (b *tableBatch) Replay(w ethdb.KeyValueWriter) error {
+ return b.batch.Replay(&tableReplayer{w: w, prefix: b.prefix})
+}
+
+// tableIterator is a wrapper around a database iterator that prefixes each key access
+// with a pre-configured string.
+type tableIterator struct {
+ iter ethdb.Iterator
+ prefix string
+}
+
+// Next moves the iterator to the next key/value pair. It returns whether the
+// iterator is exhausted.
+func (iter *tableIterator) Next() bool {
+ return iter.iter.Next()
+}
+
+// Error returns any accumulated error. Exhausting all the key/value pairs
+// is not considered to be an error.
+func (iter *tableIterator) Error() error {
+ return iter.iter.Error()
+}
+
+// Key returns the key of the current key/value pair, or nil if done. The caller
+// should not modify the contents of the returned slice, and its contents may
+// change on the next call to Next.
+func (iter *tableIterator) Key() []byte {
+ key := iter.iter.Key()
+ if key == nil {
+ return nil
+ }
+ return key[len(iter.prefix):]
+}
+
+// Value returns the value of the current key/value pair, or nil if done. The
+// caller should not modify the contents of the returned slice, and its contents
+// may change on the next call to Next.
+func (iter *tableIterator) Value() []byte {
+ return iter.iter.Value()
+}
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing error.
+func (iter *tableIterator) Release() {
+ iter.iter.Release()
+}
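The subtlest piece of table.go above is Compact's limit computation: to bound a prefix range from above, it bumps the last byte of the prefix that does not overflow, and gives up (compact to the end) when every byte is 0xff. Isolated below as a standalone sketch with the same in-place semantics:

package main

import "fmt"

// prefixUpperBound mirrors the limit computation in table.Compact above:
// bump the last byte that doesn't overflow; nil means "no upper bound"
// (the prefix was all 0xff bytes). Overflowed bytes are left as zero, so
// for "a\xff" the result is "b\x00", a valid (if slightly loose) bound.
func prefixUpperBound(prefix []byte) []byte {
	limit := append([]byte{}, prefix...)
	for i := len(limit) - 1; i >= 0; i-- {
		limit[i]++
		if limit[i] > 0 {
			break
		}
		if i == 0 {
			return nil
		}
	}
	return limit
}

func main() {
	fmt.Printf("%q\n", prefixUpperBound([]byte("abc")))     // "abd"
	fmt.Printf("%q\n", prefixUpperBound([]byte{'a', 0xff})) // "b\x00"
	fmt.Println(prefixUpperBound([]byte{0xff}))             // [] (nil: no bound)
}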
diff --git a/core/rawdb/table_test.go b/core/rawdb/table_test.go
new file mode 100644
index 0000000000..aa6adf3e72
--- /dev/null
+++ b/core/rawdb/table_test.go
@@ -0,0 +1,128 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+func TestTableDatabase(t *testing.T) { testTableDatabase(t, "prefix") }
+func TestEmptyPrefixTableDatabase(t *testing.T) { testTableDatabase(t, "") }
+
+type testReplayer struct {
+ puts [][]byte
+ dels [][]byte
+}
+
+func (r *testReplayer) Put(key []byte, value []byte) error {
+ r.puts = append(r.puts, key)
+ return nil
+}
+
+func (r *testReplayer) Delete(key []byte) error {
+ r.dels = append(r.dels, key)
+ return nil
+}
+
+func testTableDatabase(t *testing.T, prefix string) {
+ db := NewTable(NewMemoryDatabase(), prefix)
+
+ var entries = []struct {
+ key []byte
+ value []byte
+ }{
+ {[]byte{0x01, 0x02}, []byte{0x0a, 0x0b}},
+ {[]byte{0x03, 0x04}, []byte{0x0c, 0x0d}},
+ {[]byte{0x05, 0x06}, []byte{0x0e, 0x0f}},
+
+ {[]byte{0xff, 0xff, 0x01}, []byte{0x1a, 0x1b}},
+ {[]byte{0xff, 0xff, 0x02}, []byte{0x1c, 0x1d}},
+ {[]byte{0xff, 0xff, 0x03}, []byte{0x1e, 0x1f}},
+ }
+
+ // Test Put/Get operation
+ for _, entry := range entries {
+ db.Put(entry.key, entry.value)
+ }
+ for _, entry := range entries {
+ got, err := db.Get(entry.key)
+ if err != nil {
+ t.Fatalf("Failed to get value: %v", err)
+ }
+ if !bytes.Equal(got, entry.value) {
+ t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got)
+ }
+ }
+
+ // Test batch operation
+ db = NewTable(NewMemoryDatabase(), prefix)
+ batch := db.NewBatch()
+ for _, entry := range entries {
+ batch.Put(entry.key, entry.value)
+ }
+ batch.Write()
+ for _, entry := range entries {
+ got, err := db.Get(entry.key)
+ if err != nil {
+ t.Fatalf("Failed to get value: %v", err)
+ }
+ if !bytes.Equal(got, entry.value) {
+ t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got)
+ }
+ }
+
+ // Test batch replayer
+ r := &testReplayer{}
+ batch.Replay(r)
+ for index, entry := range entries {
+ got := r.puts[index]
+ if !bytes.Equal(got, entry.key) {
+ t.Fatalf("Key mismatch: want=%v, got=%v", entry.key, got)
+ }
+ }
+
+ check := func(iter ethdb.Iterator, expCount, index int) {
+ count := 0
+ for iter.Next() {
+ key, value := iter.Key(), iter.Value()
+ if !bytes.Equal(key, entries[index].key) {
+ t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key)
+ }
+ if !bytes.Equal(value, entries[index].value) {
+ t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value)
+ }
+ index += 1
+ count++
+ }
+ if count != expCount {
+ t.Fatalf("Wrong number of elems, exp %d got %d", expCount, count)
+ }
+ iter.Release()
+ }
+ // Test iterators
+ check(db.NewIterator(nil, nil), 6, 0)
+ // Test iterators with prefix
+ check(db.NewIterator([]byte{0xff, 0xff}, nil), 3, 3)
+ // Test iterators with start point
+ check(db.NewIterator(nil, []byte{0xff, 0xff, 0x02}), 2, 4)
+ // Test iterators with prefix and start point
+ check(db.NewIterator([]byte{0xee}, nil), 0, 0)
+ check(db.NewIterator(nil, []byte{0x00}), 6, 0)
+}
diff --git a/core/rawdb/testdata/stored_receipts.bin b/core/rawdb/testdata/stored_receipts.bin
new file mode 100644
index 0000000000..8204fae09b
Binary files /dev/null and b/core/rawdb/testdata/stored_receipts.bin differ
diff --git a/core/staking_verifier_test.go b/core/staking_verifier_test.go
index f829a00500..01d24cc8d4 100644
--- a/core/staking_verifier_test.go
+++ b/core/staking_verifier_test.go
@@ -9,7 +9,7 @@ import (
"github.com/harmony-one/harmony/internal/params"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/crypto/bls"
@@ -1651,7 +1651,7 @@ func makeVWrapperByIndex(index int) staking.ValidatorWrapper {
}
func newTestStateDB() (*state.DB, error) {
- return state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ return state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
}
// makeVWrappersForStake makes the default staking.ValidatorWrappers for
diff --git a/core/state/access_list.go b/core/state/access_list.go
new file mode 100644
index 0000000000..4194691345
--- /dev/null
+++ b/core/state/access_list.go
@@ -0,0 +1,136 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+)
+
+type accessList struct {
+ addresses map[common.Address]int
+ slots []map[common.Hash]struct{}
+}
+
+// ContainsAddress returns true if the address is in the access list.
+func (al *accessList) ContainsAddress(address common.Address) bool {
+ _, ok := al.addresses[address]
+ return ok
+}
+
+// Contains checks if a slot within an account is present in the access list, returning
+// separate flags for the presence of the account and the slot respectively.
+func (al *accessList) Contains(address common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
+ idx, ok := al.addresses[address]
+ if !ok {
+ // no such address (and hence zero slots)
+ return false, false
+ }
+ if idx == -1 {
+ // address yes, but no slots
+ return true, false
+ }
+ _, slotPresent = al.slots[idx][slot]
+ return true, slotPresent
+}
+
+// newAccessList creates a new accessList.
+func newAccessList() *accessList {
+ return &accessList{
+ addresses: make(map[common.Address]int),
+ }
+}
+
+// Copy creates an independent copy of an accessList.
+func (a *accessList) Copy() *accessList {
+ cp := newAccessList()
+ for k, v := range a.addresses {
+ cp.addresses[k] = v
+ }
+ cp.slots = make([]map[common.Hash]struct{}, len(a.slots))
+ for i, slotMap := range a.slots {
+ newSlotmap := make(map[common.Hash]struct{}, len(slotMap))
+ for k := range slotMap {
+ newSlotmap[k] = struct{}{}
+ }
+ cp.slots[i] = newSlotmap
+ }
+ return cp
+}
+
+// AddAddress adds an address to the access list, and returns 'true' if the operation
+// caused a change (addr was not previously in the list).
+func (al *accessList) AddAddress(address common.Address) bool {
+ if _, present := al.addresses[address]; present {
+ return false
+ }
+ al.addresses[address] = -1
+ return true
+}
+
+// AddSlot adds the specified (addr, slot) combo to the access list.
+// Return values are:
+// - address added
+// - slot added
+// For any 'true' value returned, a corresponding journal entry must be made.
+func (al *accessList) AddSlot(address common.Address, slot common.Hash) (addrChange bool, slotChange bool) {
+ idx, addrPresent := al.addresses[address]
+ if !addrPresent || idx == -1 {
+ // Address not present, or addr present but no slots there
+ al.addresses[address] = len(al.slots)
+ slotmap := map[common.Hash]struct{}{slot: {}}
+ al.slots = append(al.slots, slotmap)
+ return !addrPresent, true
+ }
+ // There is already an (address,slot) mapping
+ slotmap := al.slots[idx]
+ if _, ok := slotmap[slot]; !ok {
+ slotmap[slot] = struct{}{}
+ // Journal add slot change
+ return false, true
+ }
+ // No changes required
+ return false, false
+}
+
+// DeleteSlot removes an (address, slot)-tuple from the access list.
+// This operation needs to be performed in the same order as the addition happened.
+// This method is meant to be used by the journal, which maintains ordering of
+// operations.
+func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) {
+ idx, addrOk := al.addresses[address]
+ // There are two ways this can fail
+ if !addrOk {
+ panic("reverting slot change, address not present in list")
+ }
+ slotmap := al.slots[idx]
+ delete(slotmap, slot)
+ // If that was the last (first) slot, remove it
+ // Since additions and rollbacks are always performed in order,
+ // we can delete the item without worrying about screwing up later indices
+ if len(slotmap) == 0 {
+ al.slots = al.slots[:idx]
+ al.addresses[address] = -1
+ }
+}
+
+// DeleteAddress removes an address from the access list. This operation
+// needs to be performed in the same order as the addition happened.
+// This method is meant to be used by the journal, which maintains ordering of
+// operations.
+func (al *accessList) DeleteAddress(address common.Address) {
+ delete(al.addresses, address)
+}
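Because accessList and its constructor are unexported, an illustration has to live in package state; a hypothetical test-style sketch of the add/contains/rollback contract described in the comments above:

package state

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

// TestAccessListSketch is a hypothetical test illustrating the semantics
// above: additions report changes, and rollbacks must mirror additions in
// reverse order (the journal's discipline).
func TestAccessListSketch(t *testing.T) {
	al := newAccessList()
	addr := common.HexToAddress("0x01")
	slot := common.HexToHash("0x02")

	if !al.AddAddress(addr) {
		t.Fatal("first AddAddress should report a change")
	}
	addrChange, slotChange := al.AddSlot(addr, slot)
	if addrChange || !slotChange {
		t.Fatal("address was already present; only the slot is new")
	}
	if addrOk, slotOk := al.Contains(addr, slot); !addrOk || !slotOk {
		t.Fatal("expected both address and slot to be present")
	}
	// Undo in reverse order, as the journal would.
	al.DeleteSlot(addr, slot)
	al.DeleteAddress(addr)
}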
diff --git a/core/state/database.go b/core/state/database.go
index 9144b45da8..c1e375ccde 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -17,24 +17,23 @@
package state
import (
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/lru"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
- lru "github.com/hashicorp/golang-lru"
+ "github.com/harmony-one/harmony/core/rawdb"
)
-// MaxTrieCacheGen limit after which to evict trie nodes from memory.
-var MaxTrieCacheGen = uint16(120)
-
const (
- // Number of past tries to keep. This value is chosen such that
- // reasonable chain reorg depths will hit an existing trie.
- maxPastTries = 12
-
// Number of codehash->size associations to keep.
codeSizeCacheSize = 100000
+
+ // Cache size granted for caching clean code.
+ codeCacheSize = 64 * 1024 * 1024
)
// Database wraps access to tries and contract code.
@@ -43,7 +42,7 @@ type Database interface {
OpenTrie(root common.Hash) (Trie, error)
// OpenStorageTrie opens the storage trie of an account.
- OpenStorageTrie(addrHash, root common.Hash) (Trie, error)
+ OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
// CopyTrie returns an independent copy of the given trie.
CopyTrie(Trie) Trie
@@ -54,6 +53,15 @@ type Database interface {
// ContractCodeSize retrieves a particular contracts code's size.
ContractCodeSize(addrHash, codeHash common.Hash) (int, error)
+ // ValidatorCode retrieves a particular validator's code.
+ ValidatorCode(addrHash, codeHash common.Hash) ([]byte, error)
+
+ // ValidatorCodeSize retrieves a particular validator code's size.
+ ValidatorCodeSize(addrHash, codeHash common.Hash) (int, error)
+
+ // DiskDB returns the underlying key-value disk database.
+ DiskDB() ethdb.KeyValueStore
+
// TrieDB retrieves the low level trie database used for data storage.
TrieDB() *trie.Database
}
@@ -63,7 +71,7 @@ type Trie interface {
// GetKey returns the sha3 preimage of a hashed key that was previously used
// to store a value.
//
- // TODO(fjl): remove this when SecureTrie is removed
+ // TODO(fjl): remove this when StateTrie is removed
GetKey([]byte) []byte
// TryGet returns the value for key stored in the trie. The value bytes must
@@ -71,23 +79,43 @@ type Trie interface {
// trie.MissingNodeError is returned.
TryGet(key []byte) ([]byte, error)
+ // TryGetAccount abstracts an account read from the trie. It retrieves the
+ // account blob from the trie with provided account address and decodes it
+ // with associated decoding algorithm. If the specified account is not in
+ // the trie, nil will be returned. If the trie is corrupted (e.g. some nodes
+ // are missing or the account blob is incorrect for decoding), an error will
+ // be returned.
+ TryGetAccount(address common.Address) (*types.StateAccount, error)
+
// TryUpdate associates key with value in the trie. If value has length zero, any
// existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
TryUpdate(key, value []byte) error
+ // TryUpdateAccount abstracts an account write to the trie. It encodes the
+ // provided account object with associated algorithm and then updates it
+ // in the trie with provided address.
+ TryUpdateAccount(address common.Address, account *types.StateAccount) error
+
// TryDelete removes any existing value for key from the trie. If a node was not
// found in the database, a trie.MissingNodeError is returned.
TryDelete(key []byte) error
+ // TryDeleteAccount abstracts an account deletion from the trie.
+ TryDeleteAccount(address common.Address) error
+
// Hash returns the root hash of the trie. It does not write to the database and
// can be used even if the trie doesn't have one.
Hash() common.Hash
- // Commit writes all nodes to the trie's memory database, tracking the internal
- // and external (for account tries) references.
- Commit(onleaf trie.LeafCallback) (common.Hash, error)
+ // Commit collects all dirty nodes in the trie and replaces them with the
+ // corresponding node hashes. All collected nodes (including dirty leaves if
+ // collectLeaf is true) will be encapsulated into a nodeset for return.
+ // The returned nodeset can be nil if the trie is clean (nothing to commit).
+ // Once the trie is committed, it's not usable anymore. A new trie must be
+ // created with a new root and updated trie database for subsequent use.
+ Commit(collectLeaf bool) (common.Hash, *trie.NodeSet)
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key.
@@ -105,41 +133,66 @@ type Trie interface {
// NewDatabase creates a backing store for state. The returned database is safe for
// concurrent use, but does not retain any recent trie nodes in memory. To keep some
-// historical state in memory, use the NewDatabaseWithCache constructor.
+// historical state in memory, use the NewDatabaseWithConfig constructor.
func NewDatabase(db ethdb.Database) Database {
- return NewDatabaseWithCache(db, 0)
+ return NewDatabaseWithConfig(db, nil)
}
-// NewDatabaseWithCache creates a backing store for state. The returned database
+func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
+ return NewDatabaseWithConfig(db, nil)
+}
+
+// NewDatabaseWithConfig creates a backing store for state. The returned database
// is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
// large memory cache.
-func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
- csc, _ := lru.New(codeSizeCacheSize)
+func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
+ return &cachingDB{
+ disk: db,
+ codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+ codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
+ triedb: trie.NewDatabaseWithConfig(db, config),
+ }
+}
+
+// NewDatabaseWithNodeDB creates a state database with an already initialized node database.
+func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database {
return &cachingDB{
- db: trie.NewDatabaseWithCache(db, cache),
- codeSizeCache: csc,
+ disk: db,
+ codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+ codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
+ triedb: triedb,
}
}
type cachingDB struct {
- db *trie.Database
- codeSizeCache *lru.Cache
+ disk ethdb.KeyValueStore
+ codeSizeCache *lru.Cache[common.Hash, int]
+ codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
+ triedb *trie.Database
}
// OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
- return trie.NewSecure(root, db.db)
+ tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
+ if err != nil {
+ return nil, err
+ }
+ return tr, nil
}
// OpenStorageTrie opens the storage trie of an account.
-func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
- return trie.NewSecure(root, db.db)
+func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) {
+ tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, addrHash, root), db.triedb)
+ if err != nil {
+ return nil, err
+ }
+ return tr, nil
}
// CopyTrie returns an independent copy of the given trie.
func (db *cachingDB) CopyTrie(t Trie) Trie {
switch t := t.(type) {
- case *trie.SecureTrie:
+ case *trie.StateTrie:
return t.Copy()
default:
panic(fmt.Errorf("unknown trie type %T", t))
@@ -148,23 +201,92 @@ func (db *cachingDB) CopyTrie(t Trie) Trie {
// ContractCode retrieves a particular contract's code.
func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
- code, err := db.db.Node(codeHash)
- if err == nil {
+ code, _ := db.codeCache.Get(codeHash)
+ if len(code) > 0 {
+ return code, nil
+ }
+ code = rawdb.ReadCode(db.disk, codeHash)
+ if len(code) > 0 {
+ db.codeCache.Add(codeHash, code)
+ db.codeSizeCache.Add(codeHash, len(code))
+ return code, nil
+ }
+ return nil, errors.New("not found")
+}
+
+// ContractCodeWithPrefix retrieves a particular contract's code. If the
+// code can't be found in the cache, then check the existence with **new**
+// db scheme.
+func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) {
+ code, _ := db.codeCache.Get(codeHash)
+ if len(code) > 0 {
+ return code, nil
+ }
+ code = rawdb.ReadCodeWithPrefix(db.disk, codeHash)
+ if len(code) > 0 {
+ db.codeCache.Add(codeHash, code)
db.codeSizeCache.Add(codeHash, len(code))
+ return code, nil
}
- return code, err
+ return nil, errors.New("not found")
}
// ContractCodeSize retrieves a particular contracts code's size.
func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
if cached, ok := db.codeSizeCache.Get(codeHash); ok {
- return cached.(int), nil
+ return cached, nil
}
code, err := db.ContractCode(addrHash, codeHash)
return len(code), err
}
+// ValidatorCodeSize retrieves a particular validators code's size.
+func (db *cachingDB) ValidatorCodeSize(addrHash, codeHash common.Hash) (int, error) {
+ if cached, ok := db.codeSizeCache.Get(codeHash); ok {
+ return cached, nil
+ }
+ code, err := db.ValidatorCode(addrHash, codeHash)
+ return len(code), err
+}
+
+// ValidatorCode retrieves a particular validator's code.
+func (db *cachingDB) ValidatorCode(addrHash, codeHash common.Hash) ([]byte, error) {
+ code, _ := db.codeCache.Get(codeHash)
+ if len(code) > 0 {
+ return code, nil
+ }
+ code = rawdb.ReadValidatorCode(db.disk, codeHash)
+ if len(code) > 0 {
+ db.codeCache.Add(codeHash, code)
+ db.codeSizeCache.Add(codeHash, len(code))
+ return code, nil
+ }
+ return nil, errors.New("not found")
+}
+
+// ValidatorCodeWithPrefix retrieves a particular validator's code. If the
+// code can't be found in the cache, then check the existence with **new**
+// db scheme.
+func (db *cachingDB) ValidatorCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) {
+ code, _ := db.codeCache.Get(codeHash)
+ if len(code) > 0 {
+ return code, nil
+ }
+ code = rawdb.ReadValidatorCodeWithPrefix(db.disk, codeHash)
+ if len(code) > 0 {
+ db.codeCache.Add(codeHash, code)
+ db.codeSizeCache.Add(codeHash, len(code))
+ return code, nil
+ }
+ return nil, errors.New("not found")
+}
+
+// DiskDB returns the underlying key-value disk database.
+func (db *cachingDB) DiskDB() ethdb.KeyValueStore {
+ return db.disk
+}
+
// TrieDB retrieves any intermediate trie-node caching layer.
func (db *cachingDB) TrieDB() *trie.Database {
- return db.db
+ return db.triedb
}
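A hedged caller-side sketch of the signature changes above: OpenStorageTrie now takes the owning state root first. The snippet runs against a fresh in-memory database, assuming (as in upstream go-ethereum) that the zero root opens an empty trie; import paths are this repository's:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/rawdb"
	"github.com/harmony-one/harmony/core/state"
)

func main() {
	sdb := state.NewDatabase(rawdb.NewMemoryDatabase())

	// The account trie is still addressed by its root alone.
	accTrie, err := sdb.OpenTrie(common.Hash{})
	fmt.Println(accTrie != nil, err)

	// A storage trie now also needs the state root it hangs off, followed
	// by the owning account's hash and the storage root itself.
	stTrie, err := sdb.OpenStorageTrie(common.Hash{}, common.Hash{}, common.Hash{})
	fmt.Println(stTrie != nil, err)
}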
diff --git a/core/state/dump.go b/core/state/dump.go
index f429ae1077..c4d70f6e00 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -17,29 +17,27 @@
package state
import (
- "bytes"
"encoding/json"
"fmt"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/internal/utils"
)
-// DumpConfig is a set of options to control what portions of the statewill be
+// DumpConfig is a set of options to control what portions of the state will be
// iterated and collected.
type DumpConfig struct {
SkipCode bool
SkipStorage bool
OnlyWithAddresses bool
- HoldStorage bool
Start []byte
End []byte
- StateStart []byte
- StateEnd []byte
Max uint64
}
@@ -48,10 +46,7 @@ type DumpCollector interface {
// OnRoot is called with the state root
OnRoot(common.Hash)
// OnAccount is called once for each account in the trie
- OnAccountStart(common.Address, DumpAccount)
- // OnAccount is called once for each account in the trie
- OnAccountState(_ common.Address, StateSecureKey hexutil.Bytes, key, value []byte)
- OnAccountEnd(common.Address, DumpAccount)
+ OnAccount(common.Address, DumpAccount)
}
// DumpAccount represents an account in the state.
@@ -78,15 +73,7 @@ func (d *Dump) OnRoot(root common.Hash) {
}
// OnAccount implements DumpCollector interface
-func (d *Dump) OnAccountStart(addr common.Address, account DumpAccount) {
-}
-
-// OnAccount implements DumpCollector interface
-func (d *Dump) OnAccountState(_ common.Address, StateSecureKey hexutil.Bytes, key, value []byte) {
-}
-
-// OnAccount implements DumpCollector interface
-func (d *Dump) OnAccountEnd(addr common.Address, account DumpAccount) {
+func (d *Dump) OnAccount(addr common.Address, account DumpAccount) {
d.Accounts[addr] = account
}
@@ -103,15 +90,7 @@ func (d *IteratorDump) OnRoot(root common.Hash) {
}
// OnAccount implements DumpCollector interface
-func (d *IteratorDump) OnAccountStart(addr common.Address, account DumpAccount) {
-}
-
-// OnAccount implements DumpCollector interface
-func (d *IteratorDump) OnAccountState(_ common.Address, StateSecureKey hexutil.Bytes, key, value []byte) {
-}
-
-// OnAccount implements DumpCollector interface
-func (d *IteratorDump) OnAccountEnd(addr common.Address, account DumpAccount) {
+func (d *IteratorDump) OnAccount(addr common.Address, account DumpAccount) {
d.Accounts[addr] = account
}
@@ -121,15 +100,7 @@ type iterativeDump struct {
}
// OnAccount implements DumpCollector interface
-func (d iterativeDump) OnAccountStart(addr common.Address, account DumpAccount) {
-}
-
-// OnAccount implements DumpCollector interface
-func (d iterativeDump) OnAccountState(_ common.Address, StateSecureKey hexutil.Bytes, key, value []byte) {
-}
-
-// OnAccount implements DumpCollector interface
-func (d iterativeDump) OnAccountEnd(addr common.Address, account DumpAccount) {
+func (d iterativeDump) OnAccount(addr common.Address, account DumpAccount) {
dumpAccount := &DumpAccount{
Balance: account.Balance,
Nonce: account.Nonce,
@@ -169,15 +140,9 @@ func (s *DB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte)
log.Info("Trie dumping started", "root", s.trie.Hash())
c.OnRoot(s.trie.Hash())
- hasEnd := len(conf.End) > 0
- stateStart := conf.StateStart
- hasStateEnd := len(conf.StateEnd) > 0
it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
for it.Next() {
- if hasEnd && bytes.Compare(it.Key, conf.End) >= 0 {
- break
- }
- var data Account
+ var data types.StateAccount
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
panic(err)
}
@@ -200,31 +165,26 @@ func (s *DB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte)
addr := common.BytesToAddress(addrBytes)
obj := newObject(s, addr, data)
if !conf.SkipCode {
- account.Code = obj.Code(s.db)
+ account.Code = obj.Code(s.db, false)
}
- c.OnAccountStart(addr, account)
if !conf.SkipStorage {
account.Storage = make(map[common.Hash]string)
- storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(stateStart))
+ tr, err := obj.getTrie(s.db)
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to load storage trie")
+ continue
+ }
+ storageIt := trie.NewIterator(tr.NodeIterator(nil))
for storageIt.Next() {
- if hasStateEnd && bytes.Compare(storageIt.Key, conf.StateEnd) >= 0 {
- break
- }
- key := s.trie.GetKey(storageIt.Key)
- c.OnAccountState(addr, storageIt.Key, key, storageIt.Value)
- if conf.HoldStorage {
- _, content, _, err := rlp.Split(storageIt.Value)
- if err != nil {
- log.Error("Failed to decode the value returned by iterator", "error", err)
- continue
- }
- account.Storage[common.BytesToHash(s.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content)
+ _, content, _, err := rlp.Split(storageIt.Value)
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to decode the value returned by iterator")
+ continue
}
+ account.Storage[common.BytesToHash(s.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content)
}
- stateStart = nil
- hasStateEnd = false
}
- c.OnAccountEnd(addr, account)
+ c.OnAccount(addr, account)
accounts++
if time.Since(logged) > 8*time.Second {
log.Info("Trie dumping in progress", "at", it.Key, "accounts", accounts,
@@ -239,7 +199,7 @@ func (s *DB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte)
}
}
if missingPreimages > 0 {
- log.Warn("Dump incomplete due to missing preimages", "missing", missingPreimages)
+ utils.Logger().Warn().Int("missing", missingPreimages).Msg("Dump incomplete due to missing preimages")
}
log.Info("Trie dumping complete", "accounts", accounts,
"elapsed", common.PrettyDuration(time.Since(start)))
diff --git a/core/state/iterator.go b/core/state/iterator.go
new file mode 100644
index 0000000000..8bc26332dd
--- /dev/null
+++ b/core/state/iterator.go
@@ -0,0 +1,161 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+// NodeIterator is an iterator to traverse the entire state trie post-order,
+// including all of the contract code and contract state tries.
+type NodeIterator struct {
+ state *DB // State being iterated
+
+ stateIt trie.NodeIterator // Primary iterator for the global state trie
+ dataIt trie.NodeIterator // Secondary iterator for the data trie of a contract
+
+ accountHash common.Hash // Hash of the node containing the account
+ codeHash common.Hash // Hash of the contract source code
+ code []byte // Source code associated with a contract
+
+ Hash common.Hash // Hash of the current entry being iterated (nil if not standalone)
+ Parent common.Hash // Hash of the first full ancestor node (nil if current is the root)
+
+ Error error // Failure set in case of an internal error in the iterator
+}
+
+// NewNodeIterator creates a post-order state node iterator.
+func NewNodeIterator(state *DB) *NodeIterator {
+ return &NodeIterator{
+ state: state,
+ }
+}
+
+// Next moves the iterator to the next node, returning whether there are any
+// further nodes. In case of an internal error this method returns false and
+// sets the Error field to the encountered failure.
+func (it *NodeIterator) Next() bool {
+ // If the iterator failed previously, don't do anything
+ if it.Error != nil {
+ return false
+ }
+ // Otherwise step forward with the iterator and report any errors
+ if err := it.step(); err != nil {
+ it.Error = err
+ return false
+ }
+ return it.retrieve()
+}
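+
+// A minimal usage sketch (illustrative, not part of the patch; it assumes
+// `stateDB` is an open *DB and `hashes` is a map[common.Hash]struct{},
+// mirroring TestNodeIteratorCoverage further down):
+//
+//	it := NewNodeIterator(stateDB)
+//	for it.Next() {
+//		if it.Hash != (common.Hash{}) {
+//			hashes[it.Hash] = struct{}{}
+//		}
+//	}
+//	if it.Error != nil {
+//		return it.Error
+//	}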
+
+// step moves the iterator to the next entry of the state trie.
+func (it *NodeIterator) step() error {
+ // Abort if we reached the end of the iteration
+ if it.state == nil {
+ return nil
+ }
+ // Initialize the iterator if we've just started
+ if it.stateIt == nil {
+ it.stateIt = it.state.trie.NodeIterator(nil)
+ }
+ // If we had data nodes previously, we surely have at least state nodes
+ if it.dataIt != nil {
+ if cont := it.dataIt.Next(true); !cont {
+ if it.dataIt.Error() != nil {
+ return it.dataIt.Error()
+ }
+ it.dataIt = nil
+ }
+ return nil
+ }
+ // If we had source code previously, discard that
+ if it.code != nil {
+ it.code = nil
+ return nil
+ }
+ // Step to the next state trie node, terminating if we're out of nodes
+ if cont := it.stateIt.Next(true); !cont {
+ if it.stateIt.Error() != nil {
+ return it.stateIt.Error()
+ }
+ it.state, it.stateIt = nil, nil
+ return nil
+ }
+ // If the state trie node is an internal entry, leave as is
+ if !it.stateIt.Leaf() {
+ return nil
+ }
+ // Otherwise we've reached an account node, initiate data iteration
+ var account Account
+ if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
+ return err
+ }
+ dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root)
+ if err != nil {
+ return err
+ }
+ it.dataIt = dataTrie.NodeIterator(nil)
+ if !it.dataIt.Next(true) {
+ it.dataIt = nil
+ }
+ if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
+ it.codeHash = common.BytesToHash(account.CodeHash)
+ addrHash := common.BytesToHash(it.stateIt.LeafKey())
+ it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash))
+ if err != nil {
+ return fmt.Errorf("code %x: %v", account.CodeHash, err)
+ }
+ if len(it.code) == 0 {
+ it.code, err = it.state.db.ValidatorCode(addrHash, common.BytesToHash(account.CodeHash))
+ if err != nil {
+ return fmt.Errorf("code %x: %v", account.CodeHash, err)
+ }
+ }
+ }
+ it.accountHash = it.stateIt.Parent()
+ return nil
+}
+
+// retrieve pulls and caches the current state entry the iterator is traversing.
+// The method returns whether there are any more data left for inspection.
+func (it *NodeIterator) retrieve() bool {
+ // Clear out any previously set values
+ it.Hash = common.Hash{}
+
+ // If the iteration's done, return no available data
+ if it.state == nil {
+ return false
+ }
+ // Otherwise retrieve the current entry
+ switch {
+ case it.dataIt != nil:
+ it.Hash, it.Parent = it.dataIt.Hash(), it.dataIt.Parent()
+ if it.Parent == (common.Hash{}) {
+ it.Parent = it.accountHash
+ }
+ case it.code != nil:
+ it.Hash, it.Parent = it.codeHash, it.accountHash
+ case it.stateIt != nil:
+ it.Hash, it.Parent = it.stateIt.Hash(), it.stateIt.Parent()
+ }
+ return true
+}
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
new file mode 100644
index 0000000000..8333dc8bce
--- /dev/null
+++ b/core/state/iterator_test.go
@@ -0,0 +1,142 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/harmony-one/harmony/core/rawdb"
+)
+
+// testAccount is the data associated with an account used by the state tests.
+type testAccount struct {
+ address common.Address
+ balance *big.Int
+ nonce uint64
+ code []byte
+}
+
+// makeTestState creates a sample test state to test node-wise reconstruction.
+func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
+ // Create an empty state
+ db := rawdb.NewMemoryDatabase()
+ sdb := NewDatabase(db)
+ state, _ := New(common.Hash{}, sdb, nil)
+
+ // Fill it with some arbitrary data
+ var accounts []*testAccount
+ for i := byte(0); i < 96; i++ {
+ obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
+ acc := &testAccount{address: common.BytesToAddress([]byte{i})}
+
+ obj.AddBalance(big.NewInt(int64(11 * i)))
+ acc.balance = big.NewInt(int64(11 * i))
+
+ obj.SetNonce(uint64(42 * i))
+ acc.nonce = uint64(42 * i)
+
+ if i%3 == 0 {
+ obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i}, false)
+ acc.code = []byte{i, i, i, i, i}
+ }
+ if i%5 == 0 {
+ for j := byte(0); j < 5; j++ {
+ hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
+ obj.SetState(sdb, hash, hash)
+ }
+ }
+ state.updateStateObject(obj)
+ accounts = append(accounts, acc)
+ }
+ root, _ := state.Commit(false)
+
+ // Return the generated state
+ return db, sdb, root, accounts
+}
+
+// Tests that the node iterator indeed walks over the entire database contents.
+func TestNodeIteratorCoverage(t *testing.T) {
+ // Create some arbitrary test state to iterate
+ db, sdb, root, _ := makeTestState()
+ sdb.TrieDB().Commit(root, false)
+
+ state, err := New(root, sdb, nil)
+ if err != nil {
+ t.Fatalf("failed to create state trie at %x: %v", root, err)
+ }
+ // Gather all the node hashes found by the iterator
+ hashes := make(map[common.Hash]struct{})
+ for it := NewNodeIterator(state); it.Next(); {
+ if it.Hash != (common.Hash{}) {
+ hashes[it.Hash] = struct{}{}
+ }
+ }
+ // Check in-disk nodes
+ var (
+ seenNodes = make(map[common.Hash]struct{})
+ seenCodes = make(map[common.Hash]struct{})
+ )
+ it := db.NewIterator(nil, nil)
+ for it.Next() {
+ ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value())
+ if !ok {
+ continue
+ }
+ seenNodes[hash] = struct{}{}
+ }
+ it.Release()
+
+ // Check in-disk codes
+ it = db.NewIterator(nil, nil)
+ for it.Next() {
+ ok, hash := rawdb.IsCodeKey(it.Key())
+ if !ok {
+ continue
+ }
+ if _, ok := hashes[common.BytesToHash(hash)]; !ok {
+ t.Errorf("state entry not reported %x", it.Key())
+ }
+ seenCodes[common.BytesToHash(hash)] = struct{}{}
+ }
+ it.Release()
+
+ // Cross check the iterated hashes and the database/nodepool content
+ for hash := range hashes {
+ _, ok := seenNodes[hash]
+ if !ok {
+ _, ok = seenCodes[hash]
+ }
+ if !ok {
+ t.Errorf("failed to retrieve reported node %x", hash)
+ }
+ }
+}
+
+// isTrieNode is a helper which reports whether the provided
+// database entry belongs to a trie node.
+func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) {
+ if scheme == rawdb.HashScheme {
+ if len(key) == common.HashLength {
+ return true, common.BytesToHash(key)
+ }
+ }
+ return false, common.Hash{}
+}
diff --git a/core/state/journal.go b/core/state/journal.go
index 35198216da..210f38808f 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -34,14 +34,14 @@ type journalEntry interface {
}
// journal contains the list of state modifications applied since the last state
-// commit. These are tracked to be able to be reverted in case of an execution
-// exception or revertal request.
+// commit. These are tracked to be able to be reverted in the case of an execution
+// exception or request for reversal.
type journal struct {
entries []journalEntry // Current changes tracked by the journal
dirties map[common.Address]int // Dirty accounts and the number of changes
}
-// newJournal create a new initialized journal.
+// newJournal creates a new initialized journal.
func newJournal() *journal {
return &journal{
dirties: make(map[common.Address]int),
@@ -91,7 +91,8 @@ type (
account *common.Address
}
resetObjectChange struct {
- prev *Object
+ prev *Object
+ prevdestruct bool
}
suicideChange struct {
account *common.Address
@@ -134,8 +135,30 @@ type (
touchChange struct {
account *common.Address
}
+ // Changes to the access list
+ accessListAddAccountChange struct {
+ address *common.Address
+ }
+ accessListAddSlotChange struct {
+ address *common.Address
+ slot *common.Hash
+ }
+
+ transientStorageChange struct {
+ account *common.Address
+ key, prevalue common.Hash
+ }
)
+// dirtied returns the Ethereum address modified by this journal entry.
+func (v validatorWrapperChange) dirtied() *common.Address {
+ return v.address
+}
+
+// revert undoes the changes introduced by this journal entry.
+func (v validatorWrapperChange) revert(*DB) {
+}
+
func (ch createObjectChange) revert(s *DB) {
delete(s.stateObjects, *ch.account)
delete(s.stateObjectsDirty, *ch.account)
@@ -147,6 +170,9 @@ func (ch createObjectChange) dirtied() *common.Address {
func (ch resetObjectChange) revert(s *DB) {
s.setStateObject(ch.prev)
+ if !ch.prevdestruct {
+ delete(s.stateObjectsDestruct, ch.prev.address)
+ }
}
func (ch resetObjectChange) dirtied() *common.Address {
@@ -191,21 +217,13 @@ func (ch nonceChange) dirtied() *common.Address {
}
func (ch codeChange) revert(s *DB) {
- s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode)
+ s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode, false)
}
func (ch codeChange) dirtied() *common.Address {
return ch.account
}
-func (ch validatorWrapperChange) revert(s *DB) {
- s.stateValidators[*(ch.address)] = ch.prev
-}
-
-func (ch validatorWrapperChange) dirtied() *common.Address {
- return ch.address
-}
-
func (ch storageChange) revert(s *DB) {
s.getStateObject(*ch.account).setState(ch.key, ch.prevalue)
}
@@ -214,6 +232,14 @@ func (ch storageChange) dirtied() *common.Address {
return ch.account
}
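+// Transient storage (EIP-1153-style transient state) is scoped to a single
+// transaction and is never persisted, so revert only restores the in-memory
+// value; dirtied returns nil because reverting transient state must not mark
+// the account as modified.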
+func (ch transientStorageChange) revert(s *DB) {
+ s.setTransientState(*ch.account, ch.key, ch.prevalue)
+}
+
+func (ch transientStorageChange) dirtied() *common.Address {
+ return nil
+}
+
func (ch refundChange) revert(s *DB) {
s.refund = ch.prev
}
@@ -243,3 +269,28 @@ func (ch addPreimageChange) revert(s *DB) {
func (ch addPreimageChange) dirtied() *common.Address {
return nil
}
+
+func (ch accessListAddAccountChange) revert(s *DB) {
+ /*
+ One important invariant here is that whenever an (addr, slot) is added, if the
+ addr is not already present, the add causes two journal entries:
+ - one for the address,
+ - one for the (address,slot)
+ Therefore, when unrolling the change, we can always blindly delete the
+ (addr) at this point, since no storage adds can remain when we come upon
+ a single (addr) change.
+ */
+ s.accessList.DeleteAddress(*ch.address)
+}
+
+func (ch accessListAddAccountChange) dirtied() *common.Address {
+ return nil
+}
+
+func (ch accessListAddSlotChange) revert(s *DB) {
+ s.accessList.DeleteSlot(*ch.address, *ch.slot)
+}
+
+func (ch accessListAddSlotChange) dirtied() *common.Address {
+ return nil
+}
diff --git a/core/state/managed_state_test.go b/core/state/managed_state_test.go
index 2fc7ca5ad8..81d316c3ab 100644
--- a/core/state/managed_state_test.go
+++ b/core/state/managed_state_test.go
@@ -19,7 +19,7 @@ package state
import (
"testing"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/ethereum/go-ethereum/common"
)
@@ -27,7 +27,7 @@ import (
var addr = common.BytesToAddress([]byte("test"))
func create() (*ManagedState, *account) {
- statedb, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
ms := ManageState(statedb)
ms.DB.SetNonce(addr, 100)
ms.accounts[addr] = newAccount(ms.DB.getStateObject(addr))
diff --git a/core/state/metrics.go b/core/state/metrics.go
new file mode 100644
index 0000000000..e702ef3a81
--- /dev/null
+++ b/core/state/metrics.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+ accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
+ storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
+ accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
+ storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
+ accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil)
+ storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
+ accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
+ storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
+)
diff --git a/core/state/prefeth.go b/core/state/prefeth.go
index 50094fa63d..0b19f80d74 100644
--- a/core/state/prefeth.go
+++ b/core/state/prefeth.go
@@ -8,6 +8,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/internal/utils"
@@ -15,7 +16,7 @@ import (
type prefetchJob struct {
accountAddr []byte
- account *Account
+ account *types.StateAccount
start, end []byte
}
@@ -91,7 +92,7 @@ func (s *DB) prefetchWorker(job *prefetchJob, jobs chan *prefetchJob) {
}
// build account data from main trie tree
- var data Account
+ var data types.StateAccount
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
panic(err)
}
@@ -99,11 +100,12 @@ func (s *DB) prefetchWorker(job *prefetchJob, jobs chan *prefetchJob) {
addr := common.BytesToAddress(addrBytes)
obj := newObject(s, addr, data)
if data.CodeHash != nil {
- obj.Code(s.db)
+ obj.Code(s.db, false)
}
// build account trie tree
- storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(nil))
+ tr, _ := obj.getTrie(s.db)
+ storageIt := trie.NewIterator(tr.NodeIterator(nil))
storageJob := &prefetchJob{
accountAddr: addrBytes,
account: &data,
@@ -115,7 +117,8 @@ func (s *DB) prefetchWorker(job *prefetchJob, jobs chan *prefetchJob) {
} else {
// scan main trie tree
obj := newObject(s, common.BytesToAddress(job.accountAddr), *job.account)
- storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(job.start))
+ tr, _ := obj.getTrie(s.db)
+ storageIt := trie.NewIterator(tr.NodeIterator(job.start))
// fetch data
s.prefetchAccountStorage(jobs, job, storageIt)
diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go
new file mode 100644
index 0000000000..b5634972ad
--- /dev/null
+++ b/core/state/snapshot/account.go
@@ -0,0 +1,87 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// Account is a modified version of a state.Account, where the root is replaced
+// with a byte slice. This format can be used to represent the full consensus
+// format or the slim snapshot format, which replaces an empty root and code
+// hash with a nil byte slice.
+type Account struct {
+ Nonce uint64
+ Balance *big.Int
+ Root []byte
+ CodeHash []byte
+}
+
+// SlimAccount converts a state.Account content into a slim snapshot account
+func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account {
+ slim := Account{
+ Nonce: nonce,
+ Balance: balance,
+ }
+ if root != types.EmptyRootHash {
+ slim.Root = root[:]
+ }
+ if !bytes.Equal(codehash, types.EmptyCodeHash[:]) {
+ slim.CodeHash = codehash
+ }
+ return slim
+}
+
+// SlimAccountRLP converts a state.Account content into a slim snapshot
+// version RLP encoded.
+func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte {
+ data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash))
+ if err != nil {
+ panic(err)
+ }
+ return data
+}
+
+// FullAccount decodes data in the 'slim RLP' format and returns the
+// consensus-format account.
+func FullAccount(data []byte) (Account, error) {
+ var account Account
+ if err := rlp.DecodeBytes(data, &account); err != nil {
+ return Account{}, err
+ }
+ if len(account.Root) == 0 {
+ account.Root = types.EmptyRootHash[:]
+ }
+ if len(account.CodeHash) == 0 {
+ account.CodeHash = types.EmptyCodeHash[:]
+ }
+ return account, nil
+}
+
+// FullAccountRLP converts data in the 'slim RLP' format into the full RLP format.
+func FullAccountRLP(data []byte) ([]byte, error) {
+ account, err := FullAccount(data)
+ if err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(account)
+}
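+
+// Round-trip sketch (illustrative only): an account whose storage root and
+// code hash are the empty values drops both fields in slim form, and
+// FullAccount restores them:
+//
+//	slim := SlimAccountRLP(1, big.NewInt(10), types.EmptyRootHash, types.EmptyCodeHash[:])
+//	full, _ := FullAccount(slim)
+//	// full.Root and full.CodeHash are the empty root/code hash again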
diff --git a/core/state/snapshot/context.go b/core/state/snapshot/context.go
new file mode 100644
index 0000000000..16a52c81f5
--- /dev/null
+++ b/core/state/snapshot/context.go
@@ -0,0 +1,241 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+const (
+ snapAccount = "account" // Identifier of account snapshot generation
+ snapStorage = "storage" // Identifier of storage snapshot generation
+)
+
+// generatorStats is a collection of statistics gathered by the snapshot generator
+// for logging purposes.
+type generatorStats struct {
+ origin uint64 // Origin prefix where generation started
+ start time.Time // Timestamp when generation started
+ accounts uint64 // Number of accounts indexed (generated or recovered)
+ slots uint64 // Number of storage slots indexed (generated or recovered)
+ dangling uint64 // Number of dangling storage slots
+ storage common.StorageSize // Total account and storage slot size (generation or recovery)
+}
+
+// Log creates a contextual log with the given message and the context pulled
+// from the internally maintained statistics.
+func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) {
+ var ctx []interface{}
+ if root != (common.Hash{}) {
+ ctx = append(ctx, []interface{}{"root", root}...)
+ }
+ // Figure out whether we're after or within an account
+ switch len(marker) {
+ case common.HashLength:
+ ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
+ case 2 * common.HashLength:
+ ctx = append(ctx, []interface{}{
+ "in", common.BytesToHash(marker[:common.HashLength]),
+ "at", common.BytesToHash(marker[common.HashLength:]),
+ }...)
+ }
+ // Add the usual measurements
+ ctx = append(ctx, []interface{}{
+ "accounts", gs.accounts,
+ "slots", gs.slots,
+ "storage", gs.storage,
+ "dangling", gs.dangling,
+ "elapsed", common.PrettyDuration(time.Since(gs.start)),
+ }...)
+ // Calculate the estimated indexing time based on current stats
+ if len(marker) > 0 {
+ if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
+ left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])
+
+ speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
+ ctx = append(ctx, []interface{}{
+ "eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
+ }...)
+ }
+ }
+ utils.Logger().Info().Msg(msg)
+}
+
+// generatorContext carries a few global values to be shared by all generation functions.
+type generatorContext struct {
+ stats *generatorStats // Generation statistic collection
+ db ethdb.KeyValueStore // Key-value store containing the snapshot data
+ account *holdableIterator // Iterator of account snapshot data
+ storage *holdableIterator // Iterator of storage snapshot data
+ batch ethdb.Batch // Database batch for writing batch data atomically
+ logged time.Time // The timestamp when last generation progress was displayed
+}
+
+// newGeneratorContext initializes the context for generation.
+func newGeneratorContext(stats *generatorStats, db ethdb.KeyValueStore, accMarker []byte, storageMarker []byte) *generatorContext {
+ ctx := &generatorContext{
+ stats: stats,
+ db: db,
+ batch: db.NewBatch(),
+ logged: time.Now(),
+ }
+ ctx.openIterator(snapAccount, accMarker)
+ ctx.openIterator(snapStorage, storageMarker)
+ return ctx
+}
+
+// openIterator constructs global account and storage snapshot iterators
+// at the interrupted position. These iterators should be reopened from time
+// to time to avoid blocking leveldb compaction for a long time.
+func (ctx *generatorContext) openIterator(kind string, start []byte) {
+ if kind == snapAccount {
+ iter := ctx.db.NewIterator(rawdb.SnapshotAccountPrefix, start)
+ ctx.account = newHoldableIterator(rawdb.NewKeyLengthIterator(iter, 1+common.HashLength))
+ return
+ }
+ iter := ctx.db.NewIterator(rawdb.SnapshotStoragePrefix, start)
+ ctx.storage = newHoldableIterator(rawdb.NewKeyLengthIterator(iter, 1+2*common.HashLength))
+}
+
+// reopenIterator releases the specified snapshot iterator and re-opens it at
+// the next position, so that long-running generation does not block leveldb
+// compaction for too long.
+func (ctx *generatorContext) reopenIterator(kind string) {
+ // Shift iterator one more step, so that we can reopen
+ // the iterator at the right position.
+ var iter = ctx.account
+ if kind == snapStorage {
+ iter = ctx.storage
+ }
+ hasNext := iter.Next()
+ if !hasNext {
+ // Iterator exhausted, release forever and create an already exhausted virtual iterator
+ iter.Release()
+ if kind == snapAccount {
+ ctx.account = newHoldableIterator(memorydb.New().NewIterator(nil, nil))
+ return
+ }
+ ctx.storage = newHoldableIterator(memorydb.New().NewIterator(nil, nil))
+ return
+ }
+ next := iter.Key()
+ iter.Release()
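+ // next still carries the one-byte table prefix that openIterator prepends
+ // itself, so only the hash portion (next[1:]) is used as the new start.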
+ ctx.openIterator(kind, next[1:])
+}
+
+// close releases all the held resources.
+func (ctx *generatorContext) close() {
+ ctx.account.Release()
+ ctx.storage.Release()
+}
+
+// iterator returns the corresponding iterator specified by the kind.
+func (ctx *generatorContext) iterator(kind string) *holdableIterator {
+ if kind == snapAccount {
+ return ctx.account
+ }
+ return ctx.storage
+}
+
+// removeStorageBefore deletes all storage entries which are located before
+// the specified account. When the iterator touches the storage entry which
+// is located in or outside the given account, it stops and holds the current
+// iterated element locally.
+func (ctx *generatorContext) removeStorageBefore(account common.Hash) {
+ var (
+ count uint64
+ start = time.Now()
+ iter = ctx.storage
+ )
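+ // Storage snapshot keys are laid out as prefix(1) || accountHash(32) ||
+ // slotHash(32), so key[1:1+common.HashLength] below is the entry's account hash.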
+ for iter.Next() {
+ key := iter.Key()
+ if bytes.Compare(key[1:1+common.HashLength], account.Bytes()) >= 0 {
+ iter.Hold()
+ break
+ }
+ count++
+ ctx.batch.Delete(key)
+ if ctx.batch.ValueSize() > ethdb.IdealBatchSize {
+ ctx.batch.Write()
+ ctx.batch.Reset()
+ }
+ }
+ ctx.stats.dangling += count
+ snapStorageCleanCounter.Inc(time.Since(start).Nanoseconds())
+}
+
+// removeStorageAt deletes all storage entries which are located in the specified
+// account. When the iterator touches the storage entry which is outside the given
+// account, it stops and holds the current iterated element locally. An error will
+// be returned if the initial position of the iterator is not in the given account.
+func (ctx *generatorContext) removeStorageAt(account common.Hash) error {
+ var (
+ count int64
+ start = time.Now()
+ iter = ctx.storage
+ )
+ for iter.Next() {
+ key := iter.Key()
+ cmp := bytes.Compare(key[1:1+common.HashLength], account.Bytes())
+ if cmp < 0 {
+ return errors.New("invalid iterator position")
+ }
+ if cmp > 0 {
+ iter.Hold()
+ break
+ }
+ count++
+ ctx.batch.Delete(key)
+ if ctx.batch.ValueSize() > ethdb.IdealBatchSize {
+ ctx.batch.Write()
+ ctx.batch.Reset()
+ }
+ }
+ snapWipedStorageMeter.Mark(count)
+ snapStorageCleanCounter.Inc(time.Since(start).Nanoseconds())
+ return nil
+}
+
+// removeStorageLeft deletes all storage entries which are located after
+// the current iterator position.
+func (ctx *generatorContext) removeStorageLeft() {
+ var (
+ count uint64
+ start = time.Now()
+ iter = ctx.storage
+ )
+ for iter.Next() {
+ count++
+ ctx.batch.Delete(iter.Key())
+ if ctx.batch.ValueSize() > ethdb.IdealBatchSize {
+ ctx.batch.Write()
+ ctx.batch.Reset()
+ }
+ }
+ ctx.stats.dangling += count
+ snapDanglingStorageMeter.Mark(int64(count))
+ snapStorageCleanCounter.Inc(time.Since(start).Nanoseconds())
+}
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
new file mode 100644
index 0000000000..fd42167376
--- /dev/null
+++ b/core/state/snapshot/conversion.go
@@ -0,0 +1,383 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+// trieKV represents a trie key-value pair
+type trieKV struct {
+ key common.Hash
+ value []byte
+}
+
+type (
+ // trieGeneratorFn is the interface of trie generation, which can
+ // be implemented by different trie algorithms.
+ trieGeneratorFn func(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan (trieKV), out chan (common.Hash))
+
+ // leafCallbackFn is the callback invoked at the leaves of the trie; it
+ // returns the subtrie root for the specified subtrie identifier.
+)
+
+// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
+func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
+ return generateTrieRoot(nil, "", it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
+}
+
+// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
+func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
+ return generateTrieRoot(nil, "", it, account, stackTrieGenerate, nil, newGenerateStats(), true)
+}
+
+// GenerateTrie takes the whole snapshot tree as the input, traverses all the
+// accounts as well as the corresponding storages, and regenerates the whole
+// state (account trie + all storage tries).
+func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethdb.KeyValueWriter) error {
+ // Traverse all state by snapshot, re-generate the whole state trie
+ acctIt, err := snaptree.AccountIterator(root, common.Hash{})
+ if err != nil {
+ return err // The required snapshot might not exist.
+ }
+ defer acctIt.Release()
+
+ scheme := snaptree.triedb.Scheme()
+ got, err := generateTrieRoot(dst, scheme, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+ // Migrate the code first, commit the contract code into the tmp db.
+ if codeHash != types.EmptyCodeHash {
+ code := rawdb.ReadCode(src, codeHash)
+ if len(code) == 0 {
+ return common.Hash{}, errors.New("failed to read code")
+ }
+ rawdb.WriteCode(dst, codeHash, code)
+ }
+ // Then migrate all storage trie nodes into the tmp db.
+ storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{})
+ if err != nil {
+ return common.Hash{}, err
+ }
+ defer storageIt.Release()
+
+ hash, err := generateTrieRoot(dst, scheme, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ return hash, nil
+ }, newGenerateStats(), true)
+
+ if err != nil {
+ return err
+ }
+ if got != root {
+ return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
+ }
+ return nil
+}
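+
+// Usage sketch (illustrative only): cross-checking that a snapshot can
+// reproduce its claimed state root, writing the regenerated nodes into a
+// scratch in-memory database:
+//
+//	if err := GenerateTrie(snaptree, root, srcDb, rawdb.NewMemoryDatabase()); err != nil {
+//		// the snapshot and the trie disagree
+//	}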
+
+// generateStats is a collection of statistics gathered by the trie generator
+// for logging purposes.
+type generateStats struct {
+ head common.Hash
+ start time.Time
+
+ accounts uint64 // Number of accounts done (including those being crawled)
+ slots uint64 // Number of storage slots done (including those being crawled)
+
+ slotsStart map[common.Hash]time.Time // Start time for account slot crawling
+ slotsHead map[common.Hash]common.Hash // Slot head for accounts being crawled
+
+ lock sync.RWMutex
+}
+
+// newGenerateStats creates a new generator stats.
+func newGenerateStats() *generateStats {
+ return &generateStats{
+ slotsStart: make(map[common.Hash]time.Time),
+ slotsHead: make(map[common.Hash]common.Hash),
+ start: time.Now(),
+ }
+}
+
+// progressAccounts updates the generator stats for the account range.
+func (stat *generateStats) progressAccounts(account common.Hash, done uint64) {
+ stat.lock.Lock()
+ defer stat.lock.Unlock()
+
+ stat.accounts += done
+ stat.head = account
+}
+
+// finishAccounts updates the generator stats for the finished account range.
+func (stat *generateStats) finishAccounts(done uint64) {
+ stat.lock.Lock()
+ defer stat.lock.Unlock()
+
+ stat.accounts += done
+}
+
+// progressContract updates the generator stats for a specific in-progress contract.
+func (stat *generateStats) progressContract(account common.Hash, slot common.Hash, done uint64) {
+ stat.lock.Lock()
+ defer stat.lock.Unlock()
+
+ stat.slots += done
+ stat.slotsHead[account] = slot
+ if _, ok := stat.slotsStart[account]; !ok {
+ stat.slotsStart[account] = time.Now()
+ }
+}
+
+// finishContract updates the generator stats for a specific just-finished contract.
+func (stat *generateStats) finishContract(account common.Hash, done uint64) {
+ stat.lock.Lock()
+ defer stat.lock.Unlock()
+
+ stat.slots += done
+ delete(stat.slotsHead, account)
+ delete(stat.slotsStart, account)
+}
+
+// report prints the cumulative progress statistics, estimating an ETA when possible.
+func (stat *generateStats) report() {
+ stat.lock.RLock()
+ defer stat.lock.RUnlock()
+
+ ctx := []interface{}{
+ "accounts", stat.accounts,
+ "slots", stat.slots,
+ "elapsed", common.PrettyDuration(time.Since(stat.start)),
+ }
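+ // ETA heuristic: account hashes are uniformly distributed, so the big-endian
+ // value of the head hash's first 8 bytes measures progress through the 2^64
+ // keyspace, from which the remaining account count is extrapolated.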
+ if stat.accounts > 0 {
+ // If there's progress on the account trie, estimate the time to finish crawling it
+ if done := binary.BigEndian.Uint64(stat.head[:8]) / stat.accounts; done > 0 {
+ var (
+ left = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts
+ speed = done/uint64(time.Since(stat.start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
+ eta = time.Duration(left/speed) * time.Millisecond
+ )
+ // If there are large contract crawls in progress, estimate their finish time
+ for acc, head := range stat.slotsHead {
+ start := stat.slotsStart[acc]
+ if done := binary.BigEndian.Uint64(head[:8]); done > 0 {
+ var (
+ left = math.MaxUint64 - binary.BigEndian.Uint64(head[:8])
+ speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
+ )
+ // Override the ETA if larger than the largest until now
+ if slotETA := time.Duration(left/speed) * time.Millisecond; eta < slotETA {
+ eta = slotETA
+ }
+ }
+ }
+ ctx = append(ctx, []interface{}{
+ "eta", common.PrettyDuration(eta),
+ }...)
+ }
+ }
+ utils.Logger().Info().Msg("Iterating state snapshot")
+}
+
+// reportDone prints the last log when the whole generation is finished.
+func (stat *generateStats) reportDone() {
+ stat.lock.RLock()
+ defer stat.lock.RUnlock()
+
+ var ctx []interface{}
+ ctx = append(ctx, []interface{}{"accounts", stat.accounts}...)
+ if stat.slots != 0 {
+ ctx = append(ctx, []interface{}{"slots", stat.slots}...)
+ }
+ ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...)
+ utils.Logger().Info().Msg("Iterated snapshot")
+}
+
+// runReport periodically prints the progress information.
+func runReport(stats *generateStats, stop chan bool) {
+ timer := time.NewTimer(0)
+ defer timer.Stop()
+
+ for {
+ select {
+ case <-timer.C:
+ stats.report()
+ timer.Reset(time.Second * 8)
+ case success := <-stop:
+ if success {
+ stats.reportDone()
+ }
+ return
+ }
+ }
+}
+
+// generateTrieRoot generates the trie hash based on the snapshot iterator.
+// It can be used for generating the account trie, a storage trie, or even the
+// whole state, which connects the accounts with their corresponding storages.
+func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
+ var (
+ in = make(chan trieKV) // chan to pass leaves
+ out = make(chan common.Hash, 1) // chan to collect result
+ stoplog = make(chan bool, 1) // 1-size buffer, works when logging is not enabled
+ wg sync.WaitGroup
+ )
+ // Spin up a go-routine for trie hash re-generation
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ generatorFn(db, scheme, account, in, out)
+ }()
+ // Spin up a go-routine for progress logging
+ if report && stats != nil {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ runReport(stats, stoplog)
+ }()
+ }
+ // Create a semaphore to assign tasks and collect results through. We'll pre-
+ // fill it with nils, thus using the same channel for both limiting concurrent
+ // processing and gathering results.
+ threads := runtime.NumCPU()
+ results := make(chan error, threads)
+ for i := 0; i < threads; i++ {
+ results <- nil // fill the semaphore
+ }
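+ // Channel-as-semaphore: the feeding loop consumes a token (<-results) before
+ // spawning each leaf callback, and every callback sends exactly one value
+ // (nil or an error) back; stop() later drains all `threads` slots, joining
+ // every outstanding worker exactly once.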
+ // stop is a helper function to shutdown the background threads
+ // and return the re-generated trie hash.
+ stop := func(fail error) (common.Hash, error) {
+ close(in)
+ result := <-out
+ for i := 0; i < threads; i++ {
+ if err := <-results; err != nil && fail == nil {
+ fail = err
+ }
+ }
+ stoplog <- fail == nil
+
+ wg.Wait()
+ return result, fail
+ }
+ var (
+ logged = time.Now()
+ processed = uint64(0)
+ leaf trieKV
+ )
+ // Start to feed leaves
+ for it.Next() {
+ if account == (common.Hash{}) {
+ var (
+ err error
+ fullData []byte
+ )
+ if leafCallback == nil {
+ fullData, err = FullAccountRLP(it.(AccountIterator).Account())
+ if err != nil {
+ return stop(err)
+ }
+ } else {
+ // Wait until the semaphore allows us to continue, aborting if
+ // a sub-task failed
+ if err := <-results; err != nil {
+ results <- nil // stop will drain the results, add a noop back for this error we just consumed
+ return stop(err)
+ }
+ // Fetch the next account and process it concurrently
+ account, err := FullAccount(it.(AccountIterator).Account())
+ if err != nil {
+ return stop(err)
+ }
+ go func(hash common.Hash) {
+ subroot, err := leafCallback(db, hash, common.BytesToHash(account.CodeHash), stats)
+ if err != nil {
+ results <- err
+ return
+ }
+ if !bytes.Equal(account.Root, subroot.Bytes()) {
+ results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot)
+ return
+ }
+ results <- nil
+ }(it.Hash())
+ fullData, err = rlp.EncodeToBytes(account)
+ if err != nil {
+ return stop(err)
+ }
+ }
+ leaf = trieKV{it.Hash(), fullData}
+ } else {
+ leaf = trieKV{it.Hash(), common.CopyBytes(it.(StorageIterator).Slot())}
+ }
+ in <- leaf
+
+ // Accumulate the generation statistic if it's required.
+ processed++
+ if time.Since(logged) > 3*time.Second && stats != nil {
+ if account == (common.Hash{}) {
+ stats.progressAccounts(it.Hash(), processed)
+ } else {
+ stats.progressContract(account, it.Hash(), processed)
+ }
+ logged, processed = time.Now(), 0
+ }
+ }
+ // Commit the last part statistic.
+ if processed > 0 && stats != nil {
+ if account == (common.Hash{}) {
+ stats.finishAccounts(processed)
+ } else {
+ stats.finishContract(account, processed)
+ }
+ }
+ return stop(nil)
+}
+
+func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
+ var nodeWriter trie.NodeWriteFunc
+ if db != nil {
+ nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
+ }
+ }
+ t := trie.NewStackTrieWithOwner(nodeWriter, owner)
+ for leaf := range in {
+ t.TryUpdate(leaf.key[:], leaf.value)
+ }
+ var root common.Hash
+ if db == nil {
+ root = t.Hash()
+ } else {
+ root, _ = t.Commit()
+ }
+ out <- root
+}
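+
+// Usage sketch (illustrative only): hashing a couple of pre-sorted leaves in
+// memory; with a nil db no nodes are persisted and only the root is computed:
+//
+//	in, out := make(chan trieKV), make(chan common.Hash, 1)
+//	go stackTrieGenerate(nil, rawdb.HashScheme, common.Hash{}, in, out)
+//	in <- trieKV{key: common.HexToHash("0x01"), value: []byte{0x01}}
+//	close(in)
+//	root := <-out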
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
new file mode 100644
index 0000000000..f916a020e7
--- /dev/null
+++ b/core/state/snapshot/difflayer.go
@@ -0,0 +1,559 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
+ bloomfilter "github.com/holiman/bloomfilter/v2"
+)
+
+var (
+ // aggregatorMemoryLimit is the maximum size of the bottom-most diff layer
+ // that aggregates the writes from above until it's flushed into the disk
+ // layer.
+ //
+ // Note, bumping this up might drastically increase the size of the bloom
+ // filters that's stored in every diff layer. Don't do that without fully
+ // understanding all the implications.
+ aggregatorMemoryLimit = uint64(4 * 1024 * 1024)
+
+ // aggregatorItemLimit is an approximate number of items that will end up
+ // in the aggregator layer before it's flushed out to disk. A plain account
+ // weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
+ // 0B (+hash). Slots are mostly set/unset in lockstep, so they average at
+ // 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
+ // smaller number to be on the safe side.
+ aggregatorItemLimit = aggregatorMemoryLimit / 42
+
+ // bloomTargetError is the target false positive rate when the aggregator
+ // layer is at its fullest. The actual value will probably move around up
+ // and down from this number; it's mostly a ballpark figure.
+ //
+ // Note, dropping this down might drastically increase the size of the bloom
+ // filters that's stored in every diff layer. Don't do that without fully
+ // understanding all the implications.
+ bloomTargetError = 0.02
+
+ // bloomSize is the ideal bloom filter size given the maximum number of items
+ // it's expected to hold and the target false positive error rate.
+ bloomSize = math.Ceil(float64(aggregatorItemLimit) * math.Log(bloomTargetError) / math.Log(1/math.Pow(2, math.Log(2))))
+
+ // bloomFuncs is the ideal number of bits a single entry should set in the
+ // bloom filter to keep its size to a minimum (given its size and maximum
+ // entry count).
+ bloomFuncs = math.Round((bloomSize / float64(aggregatorItemLimit)) * math.Log(2))
+
+ // the bloom offsets are runtime constants which determine which part of the
+ // account/storage hash the hasher functions look at, to determine the
+ // bloom key for an account/slot. This is randomized at init(), so that the
+ // global population of nodes does not all display the exact same behaviour
+ // with regard to bloom content
+ bloomDestructHasherOffset = 0
+ bloomAccountHasherOffset = 0
+ bloomStorageHasherOffset = 0
+)
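+
+// The two formulas above are the standard Bloom filter identities: for n items
+// at a target false-positive rate p, the optimal bit count is
+// m = -n*ln(p)/(ln 2)^2 (bloomSize) and the optimal hash count is
+// k = (m/n)*ln 2 (bloomFuncs).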
+
+func init() {
+ // Init the bloom offsets in the range [0:24] (requires 8 bytes)
+ bloomDestructHasherOffset = rand.Intn(25)
+ bloomAccountHasherOffset = rand.Intn(25)
+ bloomStorageHasherOffset = rand.Intn(25)
+
+ // The destruct and account blooms must be different, as the storage slots
+ // will check for destruction too for every bloom miss. It should not collide
+ // with modified accounts.
+ for bloomAccountHasherOffset == bloomDestructHasherOffset {
+ bloomAccountHasherOffset = rand.Intn(25)
+ }
+}
+
+// diffLayer represents a collection of modifications made to a state snapshot
+// after running a block on top. It contains one sorted list for the account trie
+// and one sorted list for each storage trie.
+//
+// The goal of a diff layer is to act as a journal, tracking recent modifications
+// made to the state, that have not yet graduated into a semi-immutable state.
+type diffLayer struct {
+ origin *diskLayer // Base disk layer to directly use on bloom misses
+ parent snapshot // Parent snapshot modified by this one, never nil
+ memory uint64 // Approximate guess as to how much memory we use
+
+ root common.Hash // Root hash to which this snapshot diff belongs to
+ stale uint32 // Signals that the layer became stale (state progressed)
+
+ // destructSet is a very special helper marker. If an account is marked as
+ // deleted, then it's recorded in this set. However it's allowed that an account
+ // is included here but still available in other sets (e.g. storageData). The
+ // reason is the diff layer includes all the changes in a *block*. It can
+ // happen that in tx_1 account A is self-destructed while in tx_2
+ // it's recreated. But we still need this marker to indicate the "old" A is
+ // deleted; all data in the other sets belongs to the "new" A.
+ destructSet map[common.Hash]struct{} // Keyed markers for deleted (and potentially) recreated accounts
+ accountList []common.Hash // List of accounts for iteration. If it exists, it's sorted, otherwise it's nil
+ accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted)
+ storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
+ storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. One per account (nil means deleted)
+
+ diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer
+
+ lock sync.RWMutex
+}
+
+// destructBloomHasher is a wrapper around a common.Hash to satisfy the interface
+// API requirements of the bloom library used. It's used to convert a destruct
+// event into a 64 bit mini hash.
+type destructBloomHasher common.Hash
+
+func (h destructBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
+func (h destructBloomHasher) Sum(b []byte) []byte { panic("not implemented") }
+func (h destructBloomHasher) Reset() { panic("not implemented") }
+func (h destructBloomHasher) BlockSize() int { panic("not implemented") }
+func (h destructBloomHasher) Size() int { return 8 }
+func (h destructBloomHasher) Sum64() uint64 {
+ return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8])
+}
+
+// accountBloomHasher is a wrapper around a common.Hash to satisfy the interface
+// API requirements of the bloom library used. It's used to convert an account
+// hash into a 64 bit mini hash.
+type accountBloomHasher common.Hash
+
+func (h accountBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
+func (h accountBloomHasher) Sum(b []byte) []byte { panic("not implemented") }
+func (h accountBloomHasher) Reset() { panic("not implemented") }
+func (h accountBloomHasher) BlockSize() int { panic("not implemented") }
+func (h accountBloomHasher) Size() int { return 8 }
+func (h accountBloomHasher) Sum64() uint64 {
+ return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8])
+}
+
+// storageBloomHasher is a wrapper around a [2]common.Hash to satisfy the interface
+// API requirements of the bloom library used. It's used to convert an account
+// and storage hash pair into a 64 bit mini hash.
+type storageBloomHasher [2]common.Hash
+
+func (h storageBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
+func (h storageBloomHasher) Sum(b []byte) []byte { panic("not implemented") }
+func (h storageBloomHasher) Reset() { panic("not implemented") }
+func (h storageBloomHasher) BlockSize() int { panic("not implemented") }
+func (h storageBloomHasher) Size() int { return 8 }
+func (h storageBloomHasher) Sum64() uint64 {
+ return binary.BigEndian.Uint64(h[0][bloomStorageHasherOffset:bloomStorageHasherOffset+8]) ^
+ binary.BigEndian.Uint64(h[1][bloomStorageHasherOffset:bloomStorageHasherOffset+8])
+}
+
+// newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
+// level persistent database or a hierarchical diff already.
+func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+ // Create the new layer with some pre-allocated data segments
+ dl := &diffLayer{
+ parent: parent,
+ root: root,
+ destructSet: destructs,
+ accountData: accounts,
+ storageData: storage,
+ storageList: make(map[common.Hash][]common.Hash),
+ }
+ switch parent := parent.(type) {
+ case *diskLayer:
+ dl.rebloom(parent)
+ case *diffLayer:
+ dl.rebloom(parent.origin)
+ default:
+ panic("unknown parent type")
+ }
+ // Sanity check that accounts or storage slots are never nil
+ for accountHash, blob := range accounts {
+ if blob == nil {
+ panic(fmt.Sprintf("account %#x nil", accountHash))
+ }
+ // Determine memory size and track the dirty writes
+ dl.memory += uint64(common.HashLength + len(blob))
+ snapshotDirtyAccountWriteMeter.Mark(int64(len(blob)))
+ }
+ for accountHash, slots := range storage {
+ if slots == nil {
+ panic(fmt.Sprintf("storage %#x nil", accountHash))
+ }
+ // Determine memory size and track the dirty writes
+ for _, data := range slots {
+ dl.memory += uint64(common.HashLength + len(data))
+ snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
+ }
+ }
+ dl.memory += uint64(len(destructs) * common.HashLength)
+ return dl
+}
+
+// rebloom discards the layer's current bloom and rebuilds it from scratch based
+// on the parent's and the local diffs.
+func (dl *diffLayer) rebloom(origin *diskLayer) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ defer func(start time.Time) {
+ snapshotBloomIndexTimer.Update(time.Since(start))
+ }(time.Now())
+
+ // Inject the new origin that triggered the rebloom
+ dl.origin = origin
+
+ // Retrieve the parent bloom or create a fresh empty one
+ if parent, ok := dl.parent.(*diffLayer); ok {
+ parent.lock.RLock()
+ dl.diffed, _ = parent.diffed.Copy()
+ parent.lock.RUnlock()
+ } else {
+ dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs))
+ }
+ // Iterate over all the accounts and storage slots and index them
+ for hash := range dl.destructSet {
+ dl.diffed.Add(destructBloomHasher(hash))
+ }
+ for hash := range dl.accountData {
+ dl.diffed.Add(accountBloomHasher(hash))
+ }
+ for accountHash, slots := range dl.storageData {
+ for storageHash := range slots {
+ dl.diffed.Add(storageBloomHasher{accountHash, storageHash})
+ }
+ }
+ // Calculate the current false positive rate and update the error rate meter.
+ // This is cheating a bit because subsequent layers will overwrite it, but it
+ // should be fine, we're only interested in ballpark figures.
+ k := float64(dl.diffed.K())
+ n := float64(dl.diffed.N())
+ m := float64(dl.diffed.M())
+ snapshotBloomErrorGauge.Update(math.Pow(1.0-math.Exp((-k)*(n+0.5)/(m-1)), k))
+}
+
+// Root returns the root hash for which this snapshot was made.
+func (dl *diffLayer) Root() common.Hash {
+ return dl.root
+}
+
+// Parent returns the subsequent layer of a diff layer.
+func (dl *diffLayer) Parent() snapshot {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.parent
+}
+
+// Stale returns whether this layer has become stale (was flattened across) or
+// is still live.
+func (dl *diffLayer) Stale() bool {
+ return atomic.LoadUint32(&dl.stale) != 0
+}
+
+// Account directly retrieves the account associated with a particular hash in
+// the snapshot slim data format.
+func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
+ data, err := dl.AccountRLP(hash)
+ if err != nil {
+ return nil, err
+ }
+ if len(data) == 0 { // can be both nil and []byte{}
+ return nil, nil
+ }
+ account := new(Account)
+ if err := rlp.DecodeBytes(data, account); err != nil {
+ panic(err)
+ }
+ return account, nil
+}
+
+// AccountRLP directly retrieves the account RLP associated with a particular
+// hash in the snapshot slim data format.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
+ // Check the bloom filter first whether there's even a point in reaching into
+ // all the maps in all the layers below
+ dl.lock.RLock()
+ hit := dl.diffed.Contains(accountBloomHasher(hash))
+ if !hit {
+ hit = dl.diffed.Contains(destructBloomHasher(hash))
+ }
+ var origin *diskLayer
+ if !hit {
+ origin = dl.origin // extract origin while holding the lock
+ }
+ dl.lock.RUnlock()
+
+ // If the bloom filter misses, don't even bother with traversing the memory
+ // diff layers, reach straight into the bottom persistent disk layer
+ if origin != nil {
+ snapshotBloomAccountMissMeter.Mark(1)
+ return origin.AccountRLP(hash)
+ }
+ // The bloom filter hit, start poking in the internal maps
+ return dl.accountRLP(hash, 0)
+}
+
+// accountRLP is an internal version of AccountRLP that skips the bloom filter
+// checks and uses the internal maps to try and retrieve the data. It's meant
+// to be used if a higher layer's bloom filter hit already.
+func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // If the layer was flattened into, consider it invalid (any live reference to
+ // the original should be marked as unusable).
+ if dl.Stale() {
+ return nil, ErrSnapshotStale
+ }
+ // If the account is known locally, return it
+ if data, ok := dl.accountData[hash]; ok {
+ snapshotDirtyAccountHitMeter.Mark(1)
+ snapshotDirtyAccountHitDepthHist.Update(int64(depth))
+ snapshotDirtyAccountReadMeter.Mark(int64(len(data)))
+ snapshotBloomAccountTrueHitMeter.Mark(1)
+ return data, nil
+ }
+ // If the account is known locally, but deleted, return it
+ if _, ok := dl.destructSet[hash]; ok {
+ snapshotDirtyAccountHitMeter.Mark(1)
+ snapshotDirtyAccountHitDepthHist.Update(int64(depth))
+ snapshotDirtyAccountInexMeter.Mark(1)
+ snapshotBloomAccountTrueHitMeter.Mark(1)
+ return nil, nil
+ }
+ // Account unknown to this diff, resolve from parent
+ if diff, ok := dl.parent.(*diffLayer); ok {
+ return diff.accountRLP(hash, depth+1)
+ }
+ // Failed to resolve through diff layers, mark a bloom error and use the disk
+ snapshotBloomAccountFalseHitMeter.Mark(1)
+ return dl.parent.AccountRLP(hash)
+}
+
+// Storage directly retrieves the storage data associated with a particular hash,
+// within a particular account. If the slot is unknown to this diff, its parent
+// is consulted.
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
+ // Check the bloom filter first whether there's even a point in reaching into
+ // all the maps in all the layers below
+ dl.lock.RLock()
+ hit := dl.diffed.Contains(storageBloomHasher{accountHash, storageHash})
+ if !hit {
+ hit = dl.diffed.Contains(destructBloomHasher(accountHash))
+ }
+ var origin *diskLayer
+ if !hit {
+ origin = dl.origin // extract origin while holding the lock
+ }
+ dl.lock.RUnlock()
+
+ // If the bloom filter misses, don't even bother with traversing the memory
+ // diff layers, reach straight into the bottom persistent disk layer
+ if origin != nil {
+ snapshotBloomStorageMissMeter.Mark(1)
+ return origin.Storage(accountHash, storageHash)
+ }
+ // The bloom filter hit, start poking in the internal maps
+ return dl.storage(accountHash, storageHash, 0)
+}
+
+// storage is an internal version of Storage that skips the bloom filter checks
+// and uses the internal maps to try and retrieve the data. It's meant to be
+// used if a higher layer's bloom filter hit already.
+func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // If the layer was flattened into, consider it invalid (any live reference to
+ // the original should be marked as unusable).
+ if dl.Stale() {
+ return nil, ErrSnapshotStale
+ }
+ // If the account is known locally, try to resolve the slot locally
+ if storage, ok := dl.storageData[accountHash]; ok {
+ if data, ok := storage[storageHash]; ok {
+ snapshotDirtyStorageHitMeter.Mark(1)
+ snapshotDirtyStorageHitDepthHist.Update(int64(depth))
+ if n := len(data); n > 0 {
+ snapshotDirtyStorageReadMeter.Mark(int64(n))
+ } else {
+ snapshotDirtyStorageInexMeter.Mark(1)
+ }
+ snapshotBloomStorageTrueHitMeter.Mark(1)
+ return data, nil
+ }
+ }
+ // If the account is known locally, but deleted, return an empty slot
+ if _, ok := dl.destructSet[accountHash]; ok {
+ snapshotDirtyStorageHitMeter.Mark(1)
+ snapshotDirtyStorageHitDepthHist.Update(int64(depth))
+ snapshotDirtyStorageInexMeter.Mark(1)
+ snapshotBloomStorageTrueHitMeter.Mark(1)
+ return nil, nil
+ }
+ // Storage slot unknown to this diff, resolve from parent
+ if diff, ok := dl.parent.(*diffLayer); ok {
+ return diff.storage(accountHash, storageHash, depth+1)
+ }
+ // Failed to resolve through diff layers, mark a bloom error and use the disk
+ snapshotBloomStorageFalseHitMeter.Mark(1)
+ return dl.parent.Storage(accountHash, storageHash)
+}
+
+// Update creates a new layer on top of the existing snapshot diff tree with
+// the specified data items.
+func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+ return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
+}
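+
+// A minimal usage sketch (illustrative): stacking diff layers with Update and
+// collapsing them with flatten, mirroring the tests. Flatten marks the merged
+// parents stale, so any live reference to them becomes unusable:
+//
+//	child := parent.Update(rootA, destructs, accounts, storage)
+//	child = child.Update(rootB, destructs, accounts, storage)
+//	merged := child.flatten().(*diffLayer) // merged parents now report Stale()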
+
+// flatten pushes all data from this point downwards, flattening everything into
+// a single diff at the bottom. Since usually the lowermost diff is the largest,
+// the flattening builds up from there in reverse.
+func (dl *diffLayer) flatten() snapshot {
+ // If the parent is not diff, we're the first in line, return unmodified
+ parent, ok := dl.parent.(*diffLayer)
+ if !ok {
+ return dl
+ }
+	// Parent is a diff, flatten it first (note, apart from weird corner cases,
+ // flatten will realistically only ever merge 1 layer, so there's no need to
+ // be smarter about grouping flattens together).
+ parent = parent.flatten().(*diffLayer)
+
+ parent.lock.Lock()
+ defer parent.lock.Unlock()
+
+ // Before actually writing all our data to the parent, first ensure that the
+ // parent hasn't been 'corrupted' by someone else already flattening into it
+ if atomic.SwapUint32(&parent.stale, 1) != 0 {
+ panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
+ }
+ // Overwrite all the updated accounts blindly, merge the sorted list
+ for hash := range dl.destructSet {
+ parent.destructSet[hash] = struct{}{}
+ delete(parent.accountData, hash)
+ delete(parent.storageData, hash)
+ }
+ for hash, data := range dl.accountData {
+ parent.accountData[hash] = data
+ }
+ // Overwrite all the updated storage slots (individually)
+ for accountHash, storage := range dl.storageData {
+ // If storage didn't exist (or was deleted) in the parent, overwrite blindly
+ if _, ok := parent.storageData[accountHash]; !ok {
+ parent.storageData[accountHash] = storage
+ continue
+ }
+ // Storage exists in both parent and child, merge the slots
+ comboData := parent.storageData[accountHash]
+ for storageHash, data := range storage {
+ comboData[storageHash] = data
+ }
+ }
+ // Return the combo parent
+ return &diffLayer{
+ parent: parent.parent,
+ origin: parent.origin,
+ root: dl.root,
+ destructSet: parent.destructSet,
+ accountData: parent.accountData,
+ storageData: parent.storageData,
+ storageList: make(map[common.Hash][]common.Hash),
+ diffed: dl.diffed,
+ memory: parent.memory + dl.memory,
+ }
+}
+
+// AccountList returns a sorted list of all accounts in this diffLayer, including
+// the deleted ones.
+//
+// Note, the returned slice is not a copy, so do not modify it.
+func (dl *diffLayer) AccountList() []common.Hash {
+ // If an old list already exists, return it
+ dl.lock.RLock()
+ list := dl.accountList
+ dl.lock.RUnlock()
+
+ if list != nil {
+ return list
+ }
+ // No old sorted account list exists, generate a new one
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ dl.accountList = make([]common.Hash, 0, len(dl.destructSet)+len(dl.accountData))
+ for hash := range dl.accountData {
+ dl.accountList = append(dl.accountList, hash)
+ }
+ for hash := range dl.destructSet {
+ if _, ok := dl.accountData[hash]; !ok {
+ dl.accountList = append(dl.accountList, hash)
+ }
+ }
+ sort.Sort(hashes(dl.accountList))
+ dl.memory += uint64(len(dl.accountList) * common.HashLength)
+ return dl.accountList
+}
+
+// StorageList returns a sorted list of all storage slot hashes in this diffLayer
+// for the given account. If the whole storage is destructed in this layer, an
+// additional flag *destructed = true* is returned, otherwise the flag is false.
+// In addition, the returned list includes the hashes of deleted storage slots.
+// Note a special case: if an account is deleted in a prior tx but recreated in
+// a following tx with some storage slots set, the returned list is not empty
+// but the flag is true.
+//
+// Note, the returned slice is not a copy, so do not modify it.
+func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) {
+ dl.lock.RLock()
+ _, destructed := dl.destructSet[accountHash]
+ if _, ok := dl.storageData[accountHash]; !ok {
+ // Account not tracked by this layer
+ dl.lock.RUnlock()
+ return nil, destructed
+ }
+ // If an old list already exists, return it
+ if list, exist := dl.storageList[accountHash]; exist {
+ dl.lock.RUnlock()
+ return list, destructed // the cached list can't be nil
+ }
+ dl.lock.RUnlock()
+
+	// No old sorted storage list exists, generate a new one
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ storageMap := dl.storageData[accountHash]
+ storageList := make([]common.Hash, 0, len(storageMap))
+ for k := range storageMap {
+ storageList = append(storageList, k)
+ }
+ sort.Sort(hashes(storageList))
+ dl.storageList[accountHash] = storageList
+	dl.memory += uint64(len(storageList)*common.HashLength + common.HashLength)
+ return storageList, destructed
+}
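+
+// A short sketch of the destructed flag semantics (illustrative):
+//
+//	list, destructed := dl.StorageList(accountHash)
+//	// destructed == false: list holds the slots modified in this layer.
+//	// destructed == true, len(list) == 0: the whole storage was wiped here.
+//	// destructed == true, len(list) > 0: the account was wiped and then
+//	// recreated with fresh slots within this same layer.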
diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go
new file mode 100644
index 0000000000..674a031b16
--- /dev/null
+++ b/core/state/snapshot/difflayer_test.go
@@ -0,0 +1,399 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ crand "crypto/rand"
+ "math/rand"
+ "testing"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+)
+
+func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
+ copy := make(map[common.Hash]struct{})
+ for hash := range destructs {
+ copy[hash] = struct{}{}
+ }
+ return copy
+}
+
+func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
+ copy := make(map[common.Hash][]byte)
+ for hash, blob := range accounts {
+ copy[hash] = blob
+ }
+ return copy
+}
+
+func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
+ copy := make(map[common.Hash]map[common.Hash][]byte)
+ for accHash, slots := range storage {
+ copy[accHash] = make(map[common.Hash][]byte)
+ for slotHash, blob := range slots {
+ copy[accHash][slotHash] = blob
+ }
+ }
+ return copy
+}
+
+// TestMergeBasics tests some simple merges
+func TestMergeBasics(t *testing.T) {
+ var (
+ destructs = make(map[common.Hash]struct{})
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ // Fill up a parent
+ for i := 0; i < 100; i++ {
+ h := randomHash()
+ data := randomAccount()
+
+ accounts[h] = data
+ if rand.Intn(4) == 0 {
+ destructs[h] = struct{}{}
+ }
+ if rand.Intn(2) == 0 {
+ accStorage := make(map[common.Hash][]byte)
+ value := make([]byte, 32)
+ crand.Read(value)
+ accStorage[randomHash()] = value
+ storage[h] = accStorage
+ }
+ }
+ // Add some (identical) layers on top
+ parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+ child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+ child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+ child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+ child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+ // And flatten
+ merged := (child.flatten()).(*diffLayer)
+
+ { // Check account lists
+ if have, want := len(merged.accountList), 0; have != want {
+ t.Errorf("accountList wrong: have %v, want %v", have, want)
+ }
+ if have, want := len(merged.AccountList()), len(accounts); have != want {
+ t.Errorf("AccountList() wrong: have %v, want %v", have, want)
+ }
+ if have, want := len(merged.accountList), len(accounts); have != want {
+ t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
+ }
+ }
+ { // Check account drops
+ if have, want := len(merged.destructSet), len(destructs); have != want {
+ t.Errorf("accountDrop wrong: have %v, want %v", have, want)
+ }
+ }
+ { // Check storage lists
+ i := 0
+ for aHash, sMap := range storage {
+ if have, want := len(merged.storageList), i; have != want {
+ t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
+ }
+ list, _ := merged.StorageList(aHash)
+ if have, want := len(list), len(sMap); have != want {
+ t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
+ }
+ if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
+ t.Errorf("storageList wrong: have %v, want %v", have, want)
+ }
+ i++
+ }
+ }
+}
+
+// TestMergeDelete tests some deletion
+func TestMergeDelete(t *testing.T) {
+ var (
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ // Fill up a parent
+ h1 := common.HexToHash("0x01")
+ h2 := common.HexToHash("0x02")
+
+ flipDrops := func() map[common.Hash]struct{} {
+ return map[common.Hash]struct{}{
+ h2: {},
+ }
+ }
+ flipAccs := func() map[common.Hash][]byte {
+ return map[common.Hash][]byte{
+ h1: randomAccount(),
+ }
+ }
+ flopDrops := func() map[common.Hash]struct{} {
+ return map[common.Hash]struct{}{
+ h1: {},
+ }
+ }
+ flopAccs := func() map[common.Hash][]byte {
+ return map[common.Hash][]byte{
+ h2: randomAccount(),
+ }
+ }
+ // Add some flipAccs-flopping layers on top
+ parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
+ child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+ child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+ child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+ child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+ child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+ child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+
+ if data, _ := child.Account(h1); data == nil {
+ t.Errorf("last diff layer: expected %x account to be non-nil", h1)
+ }
+ if data, _ := child.Account(h2); data != nil {
+ t.Errorf("last diff layer: expected %x account to be nil", h2)
+ }
+ if _, ok := child.destructSet[h1]; ok {
+ t.Errorf("last diff layer: expected %x drop to be missing", h1)
+ }
+ if _, ok := child.destructSet[h2]; !ok {
+		t.Errorf("last diff layer: expected %x drop to be present", h2)
+ }
+ // And flatten
+ merged := (child.flatten()).(*diffLayer)
+
+ if data, _ := merged.Account(h1); data == nil {
+ t.Errorf("merged layer: expected %x account to be non-nil", h1)
+ }
+ if data, _ := merged.Account(h2); data != nil {
+ t.Errorf("merged layer: expected %x account to be nil", h2)
+ }
+ if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
+ t.Errorf("merged diff layer: expected %x drop to be present", h1)
+ }
+ if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
+		t.Errorf("merged diff layer: expected %x drop to be present", h2)
+ }
+ // If we add more granular metering of memory, we can enable this again,
+ // but it's not implemented for now
+ //if have, want := merged.memory, child.memory; have != want {
+ // t.Errorf("mem wrong: have %d, want %d", have, want)
+ //}
+}
+
+// This tests that if we create a new account, and set a slot, and then merge
+// it, the lists will be correct.
+func TestInsertAndMerge(t *testing.T) {
+ // Fill up a parent
+ var (
+ acc = common.HexToHash("0x01")
+ slot = common.HexToHash("0x02")
+ parent *diffLayer
+ child *diffLayer
+ )
+ {
+ var (
+ destructs = make(map[common.Hash]struct{})
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
+ }
+ {
+ var (
+ destructs = make(map[common.Hash]struct{})
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ accounts[acc] = randomAccount()
+ storage[acc] = make(map[common.Hash][]byte)
+ storage[acc][slot] = []byte{0x01}
+ child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+ }
+ // And flatten
+ merged := (child.flatten()).(*diffLayer)
+ { // Check that slot value is present
+ have, _ := merged.Storage(acc, slot)
+ if want := []byte{0x01}; !bytes.Equal(have, want) {
+ t.Errorf("merged slot value wrong: have %x, want %x", have, want)
+ }
+ }
+}
+
+func emptyLayer() *diskLayer {
+ return &diskLayer{
+ diskdb: memorydb.New(),
+ cache: fastcache.New(500 * 1024),
+ }
+}
+
+// BenchmarkSearch checks how long it takes to find a non-existing key
+// BenchmarkSearch-6 200000 10481 ns/op (1K per layer)
+// BenchmarkSearch-6 200000 10760 ns/op (10K per layer)
+// BenchmarkSearch-6 100000 17866 ns/op
+//
+// BenchmarkSearch-6 500000 3723 ns/op (10K per layer, only top-level RLock())
+func BenchmarkSearch(b *testing.B) {
+	// First, we set up 128 diff layers, with 10K items each
+ fill := func(parent snapshot) *diffLayer {
+ var (
+ destructs = make(map[common.Hash]struct{})
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ for i := 0; i < 10000; i++ {
+ accounts[randomHash()] = randomAccount()
+ }
+ return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+ }
+ var layer snapshot
+ layer = emptyLayer()
+ for i := 0; i < 128; i++ {
+ layer = fill(layer)
+ }
+ key := crypto.Keccak256Hash([]byte{0x13, 0x38})
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ layer.AccountRLP(key)
+ }
+}
+
+// BenchmarkSearchSlot checks how long it takes to find a non-existing key
+// - Number of layers: 128
+// - Each layer contains the account, with a couple of storage slots
+// BenchmarkSearchSlot-6 100000 14554 ns/op
+// BenchmarkSearchSlot-6 100000 22254 ns/op (when checking parent root using mutex)
+// BenchmarkSearchSlot-6 100000 14551 ns/op (when checking parent number using atomic)
+// With bloom filter:
+// BenchmarkSearchSlot-6 3467835 351 ns/op
+func BenchmarkSearchSlot(b *testing.B) {
+	// First, we set up 128 diff layers, each containing the account with a few storage slots
+ accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
+ storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
+ accountRLP := randomAccount()
+ fill := func(parent snapshot) *diffLayer {
+ var (
+ destructs = make(map[common.Hash]struct{})
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ accounts[accountKey] = accountRLP
+
+ accStorage := make(map[common.Hash][]byte)
+ for i := 0; i < 5; i++ {
+ value := make([]byte, 32)
+ crand.Read(value)
+ accStorage[randomHash()] = value
+ storage[accountKey] = accStorage
+ }
+ return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+ }
+ var layer snapshot
+ layer = emptyLayer()
+ for i := 0; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ layer.Storage(accountKey, storageKey)
+ }
+}
+
+// With accountList and sorting
+// BenchmarkFlatten-6 50 29890856 ns/op
+//
+// Without sorting and tracking accountList
+// BenchmarkFlatten-6 300 5511511 ns/op
+func BenchmarkFlatten(b *testing.B) {
+ fill := func(parent snapshot) *diffLayer {
+ var (
+ destructs = make(map[common.Hash]struct{})
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ for i := 0; i < 100; i++ {
+ accountKey := randomHash()
+ accounts[accountKey] = randomAccount()
+
+ accStorage := make(map[common.Hash][]byte)
+ for i := 0; i < 20; i++ {
+ value := make([]byte, 32)
+ crand.Read(value)
+ accStorage[randomHash()] = value
+ }
+ storage[accountKey] = accStorage
+ }
+ return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ var layer snapshot
+ layer = emptyLayer()
+ for i := 1; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.StartTimer()
+
+ for i := 1; i < 128; i++ {
+ dl, ok := layer.(*diffLayer)
+ if !ok {
+ break
+ }
+ layer = dl.flatten()
+ }
+ b.StopTimer()
+ }
+}
+
+// This benchmark writes ~324M of diff layers to disk, spread over
+// - 128 individual layers,
+// - each with 200 accounts,
+// - each containing 200 slots
+//
+// BenchmarkJournal-6 1 1471373923 ns/op
+// BenchmarkJournal-6 1 1208083335 ns/op // bufio writer
+func BenchmarkJournal(b *testing.B) {
+ fill := func(parent snapshot) *diffLayer {
+ var (
+ destructs = make(map[common.Hash]struct{})
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ for i := 0; i < 200; i++ {
+ accountKey := randomHash()
+ accounts[accountKey] = randomAccount()
+
+ accStorage := make(map[common.Hash][]byte)
+ for i := 0; i < 200; i++ {
+ value := make([]byte, 32)
+ crand.Read(value)
+ accStorage[randomHash()] = value
+ }
+ storage[accountKey] = accStorage
+ }
+ return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+ }
+ layer := snapshot(emptyLayer())
+ for i := 1; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ layer.Journal(new(bytes.Buffer))
+ }
+}
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
new file mode 100644
index 0000000000..39879e1433
--- /dev/null
+++ b/core/state/snapshot/disklayer.go
@@ -0,0 +1,166 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "sync"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/core/rawdb"
+)
+
+// diskLayer is a low level persistent snapshot built on top of a key-value store.
+type diskLayer struct {
+ diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
+ triedb *trie.Database // Trie node cache for reconstruction purposes
+ cache *fastcache.Cache // Cache to avoid hitting the disk for direct access
+
+ root common.Hash // Root hash of the base snapshot
+ stale bool // Signals that the layer became stale (state progressed)
+
+ genMarker []byte // Marker for the state that's indexed during initial layer generation
+ genPending chan struct{} // Notification channel when generation is done (test synchronicity)
+ genAbort chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer
+
+ lock sync.RWMutex
+}
+
+// Root returns root hash for which this snapshot was made.
+func (dl *diskLayer) Root() common.Hash {
+ return dl.root
+}
+
+// Parent always returns nil as there's no layer below the disk.
+func (dl *diskLayer) Parent() snapshot {
+ return nil
+}
+
+// Stale returns whether this layer has become stale (was flattened across) or if
+// it's still live.
+func (dl *diskLayer) Stale() bool {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.stale
+}
+
+// Account directly retrieves the account associated with a particular hash in
+// the snapshot slim data format.
+func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
+ data, err := dl.AccountRLP(hash)
+ if err != nil {
+ return nil, err
+ }
+ if len(data) == 0 { // can be both nil and []byte{}
+ return nil, nil
+ }
+ account := new(Account)
+ if err := rlp.DecodeBytes(data, account); err != nil {
+ panic(err)
+ }
+ return account, nil
+}
+
+// AccountRLP directly retrieves the account RLP associated with a particular
+// hash in the snapshot slim data format.
+func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // If the layer was flattened into, consider it invalid (any live reference to
+ // the original should be marked as unusable).
+ if dl.stale {
+ return nil, ErrSnapshotStale
+ }
+ // If the layer is being generated, ensure the requested hash has already been
+ // covered by the generator.
+ if dl.genMarker != nil && bytes.Compare(hash[:], dl.genMarker) > 0 {
+ return nil, ErrNotCoveredYet
+ }
+ // If we're in the disk layer, all diff layers missed
+ snapshotDirtyAccountMissMeter.Mark(1)
+
+ // Try to retrieve the account from the memory cache
+ if blob, found := dl.cache.HasGet(nil, hash[:]); found {
+ snapshotCleanAccountHitMeter.Mark(1)
+ snapshotCleanAccountReadMeter.Mark(int64(len(blob)))
+ return blob, nil
+ }
+ // Cache doesn't contain account, pull from disk and cache for later
+ blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash)
+ dl.cache.Set(hash[:], blob)
+
+ snapshotCleanAccountMissMeter.Mark(1)
+ if n := len(blob); n > 0 {
+ snapshotCleanAccountWriteMeter.Mark(int64(n))
+ } else {
+ snapshotCleanAccountInexMeter.Mark(1)
+ }
+ return blob, nil
+}
+
+// Storage directly retrieves the storage data associated with a particular hash,
+// within a particular account.
+func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // If the layer was flattened into, consider it invalid (any live reference to
+ // the original should be marked as unusable).
+ if dl.stale {
+ return nil, ErrSnapshotStale
+ }
+ key := append(accountHash[:], storageHash[:]...)
+
+ // If the layer is being generated, ensure the requested hash has already been
+ // covered by the generator.
+ if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 {
+ return nil, ErrNotCoveredYet
+ }
+ // If we're in the disk layer, all diff layers missed
+ snapshotDirtyStorageMissMeter.Mark(1)
+
+ // Try to retrieve the storage slot from the memory cache
+ if blob, found := dl.cache.HasGet(nil, key); found {
+ snapshotCleanStorageHitMeter.Mark(1)
+ snapshotCleanStorageReadMeter.Mark(int64(len(blob)))
+ return blob, nil
+ }
+ // Cache doesn't contain storage slot, pull from disk and cache for later
+ blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash)
+ dl.cache.Set(key, blob)
+
+ snapshotCleanStorageMissMeter.Mark(1)
+ if n := len(blob); n > 0 {
+ snapshotCleanStorageWriteMeter.Mark(int64(n))
+ } else {
+ snapshotCleanStorageInexMeter.Mark(1)
+ }
+ return blob, nil
+}
+
+// Update creates a new layer on top of the existing snapshot diff tree with
+// the specified data items. Note, the maps are retained by the method to avoid
+// copying everything.
+func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+ return newDiffLayer(dl, blockHash, destructs, accounts, storage)
+}
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
new file mode 100644
index 0000000000..80070d619d
--- /dev/null
+++ b/core/state/snapshot/disklayer_test.go
@@ -0,0 +1,574 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/harmony-one/harmony/core/rawdb"
+)
+
+// reverse reverses the contents of a byte slice. It's used to update random accounts
+// with deterministic changes.
+func reverse(blob []byte) []byte {
+ res := make([]byte, len(blob))
+ for i, b := range blob {
+ res[len(blob)-1-i] = b
+ }
+ return res
+}
+
+// Tests that merging something into a disk layer persists it into the database
+// and invalidates any previously written and cached values.
+func TestDiskMerge(t *testing.T) {
+ // Create some accounts in the disk layer
+ db := memorydb.New()
+
+ var (
+ accNoModNoCache = common.Hash{0x1}
+ accNoModCache = common.Hash{0x2}
+ accModNoCache = common.Hash{0x3}
+ accModCache = common.Hash{0x4}
+ accDelNoCache = common.Hash{0x5}
+ accDelCache = common.Hash{0x6}
+ conNoModNoCache = common.Hash{0x7}
+ conNoModNoCacheSlot = common.Hash{0x70}
+ conNoModCache = common.Hash{0x8}
+ conNoModCacheSlot = common.Hash{0x80}
+ conModNoCache = common.Hash{0x9}
+ conModNoCacheSlot = common.Hash{0x90}
+ conModCache = common.Hash{0xa}
+ conModCacheSlot = common.Hash{0xa0}
+ conDelNoCache = common.Hash{0xb}
+ conDelNoCacheSlot = common.Hash{0xb0}
+ conDelCache = common.Hash{0xc}
+ conDelCacheSlot = common.Hash{0xc0}
+ conNukeNoCache = common.Hash{0xd}
+ conNukeNoCacheSlot = common.Hash{0xd0}
+ conNukeCache = common.Hash{0xe}
+ conNukeCacheSlot = common.Hash{0xe0}
+ baseRoot = randomHash()
+ diffRoot = randomHash()
+ )
+
+ rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
+ rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
+ rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
+ rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
+ rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
+ rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])
+
+ rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
+ rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
+ rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
+ rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
+ rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
+ rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
+ rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
+ rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
+ rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
+ rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
+ rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
+ rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])
+
+ rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
+ rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
+ rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
+ rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
+
+ rawdb.WriteSnapshotRoot(db, baseRoot)
+
+ // Create a disk layer based on the above and cache in some data
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ baseRoot: &diskLayer{
+ diskdb: db,
+ cache: fastcache.New(500 * 1024),
+ root: baseRoot,
+ },
+ },
+ }
+ base := snaps.Snapshot(baseRoot)
+ base.AccountRLP(accNoModCache)
+ base.AccountRLP(accModCache)
+ base.AccountRLP(accDelCache)
+ base.Storage(conNoModCache, conNoModCacheSlot)
+ base.Storage(conModCache, conModCacheSlot)
+ base.Storage(conDelCache, conDelCacheSlot)
+ base.Storage(conNukeCache, conNukeCacheSlot)
+
+ // Modify or delete some accounts, flatten everything onto disk
+ if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
+ accDelNoCache: {},
+ accDelCache: {},
+ conNukeNoCache: {},
+ conNukeCache: {},
+ }, map[common.Hash][]byte{
+ accModNoCache: reverse(accModNoCache[:]),
+ accModCache: reverse(accModCache[:]),
+ }, map[common.Hash]map[common.Hash][]byte{
+ conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
+ conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
+ conDelNoCache: {conDelNoCacheSlot: nil},
+ conDelCache: {conDelCacheSlot: nil},
+ }); err != nil {
+ t.Fatalf("failed to update snapshot tree: %v", err)
+ }
+ if err := snaps.Cap(diffRoot, 0); err != nil {
+ t.Fatalf("failed to flatten snapshot tree: %v", err)
+ }
+ // Retrieve all the data through the disk layer and validate it
+ base = snaps.Snapshot(diffRoot)
+ if _, ok := base.(*diskLayer); !ok {
+		t.Fatalf("update not flattened into the disk layer")
+ }
+
+ // assertAccount ensures that an account matches the given blob.
+ assertAccount := func(account common.Hash, data []byte) {
+ t.Helper()
+ blob, err := base.AccountRLP(account)
+ if err != nil {
+ t.Errorf("account access (%x) failed: %v", account, err)
+ } else if !bytes.Equal(blob, data) {
+ t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
+ }
+ }
+ assertAccount(accNoModNoCache, accNoModNoCache[:])
+ assertAccount(accNoModCache, accNoModCache[:])
+ assertAccount(accModNoCache, reverse(accModNoCache[:]))
+ assertAccount(accModCache, reverse(accModCache[:]))
+ assertAccount(accDelNoCache, nil)
+ assertAccount(accDelCache, nil)
+
+ // assertStorage ensures that a storage slot matches the given blob.
+ assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
+ t.Helper()
+ blob, err := base.Storage(account, slot)
+ if err != nil {
+ t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
+ } else if !bytes.Equal(blob, data) {
+ t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
+ }
+ }
+ assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
+ assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
+ assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
+ assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
+ assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
+ assertStorage(conDelCache, conDelCacheSlot, nil)
+ assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
+ assertStorage(conNukeCache, conNukeCacheSlot, nil)
+
+ // Retrieve all the data directly from the database and validate it
+
+ // assertDatabaseAccount ensures that an account from the database matches the given blob.
+ assertDatabaseAccount := func(account common.Hash, data []byte) {
+ t.Helper()
+ if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
+ t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
+ }
+ }
+ assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
+ assertDatabaseAccount(accNoModCache, accNoModCache[:])
+ assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
+ assertDatabaseAccount(accModCache, reverse(accModCache[:]))
+ assertDatabaseAccount(accDelNoCache, nil)
+ assertDatabaseAccount(accDelCache, nil)
+
+ // assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
+ assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
+ t.Helper()
+ if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
+ t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
+ }
+ }
+ assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
+ assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
+ assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
+ assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
+ assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
+ assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
+ assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
+ assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
+}
+
+// Tests that merging something into a disk layer persists it into the database
+// and invalidates any previously written and cached values, discarding anything
+// after the in-progress generation marker.
+func TestDiskPartialMerge(t *testing.T) {
+ // Iterate the test a few times to ensure we pick various internal orderings
+ // for the data slots as well as the progress marker.
+ for i := 0; i < 1024; i++ {
+ // Create some accounts in the disk layer
+ db := memorydb.New()
+
+ var (
+ accNoModNoCache = randomHash()
+ accNoModCache = randomHash()
+ accModNoCache = randomHash()
+ accModCache = randomHash()
+ accDelNoCache = randomHash()
+ accDelCache = randomHash()
+ conNoModNoCache = randomHash()
+ conNoModNoCacheSlot = randomHash()
+ conNoModCache = randomHash()
+ conNoModCacheSlot = randomHash()
+ conModNoCache = randomHash()
+ conModNoCacheSlot = randomHash()
+ conModCache = randomHash()
+ conModCacheSlot = randomHash()
+ conDelNoCache = randomHash()
+ conDelNoCacheSlot = randomHash()
+ conDelCache = randomHash()
+ conDelCacheSlot = randomHash()
+ conNukeNoCache = randomHash()
+ conNukeNoCacheSlot = randomHash()
+ conNukeCache = randomHash()
+ conNukeCacheSlot = randomHash()
+ baseRoot = randomHash()
+ diffRoot = randomHash()
+ genMarker = append(randomHash().Bytes(), randomHash().Bytes()...)
+ )
+
+		// insertAccount injects an account into the database if it's already
+		// covered by the generator marker, drops the op otherwise. This is
+		// needed to seed the database with a valid starting snapshot.
+ insertAccount := func(account common.Hash, data []byte) {
+ if bytes.Compare(account[:], genMarker) <= 0 {
+ rawdb.WriteAccountSnapshot(db, account, data[:])
+ }
+ }
+ insertAccount(accNoModNoCache, accNoModNoCache[:])
+ insertAccount(accNoModCache, accNoModCache[:])
+ insertAccount(accModNoCache, accModNoCache[:])
+ insertAccount(accModCache, accModCache[:])
+ insertAccount(accDelNoCache, accDelNoCache[:])
+ insertAccount(accDelCache, accDelCache[:])
+
+		// insertStorage injects a storage slot into the database if it's
+		// already covered by the generator marker, drops the op otherwise.
+		// This is needed to seed the database with a valid starting snapshot.
+ insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
+ if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
+ rawdb.WriteStorageSnapshot(db, account, slot, data[:])
+ }
+ }
+ insertAccount(conNoModNoCache, conNoModNoCache[:])
+ insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
+ insertAccount(conNoModCache, conNoModCache[:])
+ insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
+ insertAccount(conModNoCache, conModNoCache[:])
+ insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
+ insertAccount(conModCache, conModCache[:])
+ insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
+ insertAccount(conDelNoCache, conDelNoCache[:])
+ insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
+ insertAccount(conDelCache, conDelCache[:])
+ insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
+
+ insertAccount(conNukeNoCache, conNukeNoCache[:])
+ insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
+ insertAccount(conNukeCache, conNukeCache[:])
+ insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
+
+ rawdb.WriteSnapshotRoot(db, baseRoot)
+
+ // Create a disk layer based on the above using a random progress marker
+ // and cache in some data.
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ baseRoot: &diskLayer{
+ diskdb: db,
+ cache: fastcache.New(500 * 1024),
+ root: baseRoot,
+ },
+ },
+ }
+ snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker
+ base := snaps.Snapshot(baseRoot)
+
+ // assertAccount ensures that an account matches the given blob if it's
+ // already covered by the disk snapshot, and errors out otherwise.
+ assertAccount := func(account common.Hash, data []byte) {
+ t.Helper()
+ blob, err := base.AccountRLP(account)
+ if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
+ t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
+ }
+ if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
+ t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
+ }
+ }
+ assertAccount(accNoModCache, accNoModCache[:])
+ assertAccount(accModCache, accModCache[:])
+ assertAccount(accDelCache, accDelCache[:])
+
+ // assertStorage ensures that a storage slot matches the given blob if
+ // it's already covered by the disk snapshot, and errors out otherwise.
+ assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
+ t.Helper()
+ blob, err := base.Storage(account, slot)
+ if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
+ t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
+ }
+ if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
+ t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
+ }
+ }
+ assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
+ assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
+ assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
+ assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
+
+ // Modify or delete some accounts, flatten everything onto disk
+ if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
+ accDelNoCache: {},
+ accDelCache: {},
+ conNukeNoCache: {},
+ conNukeCache: {},
+ }, map[common.Hash][]byte{
+ accModNoCache: reverse(accModNoCache[:]),
+ accModCache: reverse(accModCache[:]),
+ }, map[common.Hash]map[common.Hash][]byte{
+ conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
+ conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
+ conDelNoCache: {conDelNoCacheSlot: nil},
+ conDelCache: {conDelCacheSlot: nil},
+ }); err != nil {
+ t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
+ }
+ if err := snaps.Cap(diffRoot, 0); err != nil {
+ t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
+ }
+ // Retrieve all the data through the disk layer and validate it
+ base = snaps.Snapshot(diffRoot)
+ if _, ok := base.(*diskLayer); !ok {
+			t.Fatalf("test %d: update not flattened into the disk layer", i)
+ }
+ assertAccount(accNoModNoCache, accNoModNoCache[:])
+ assertAccount(accNoModCache, accNoModCache[:])
+ assertAccount(accModNoCache, reverse(accModNoCache[:]))
+ assertAccount(accModCache, reverse(accModCache[:]))
+ assertAccount(accDelNoCache, nil)
+ assertAccount(accDelCache, nil)
+
+ assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
+ assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
+ assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
+ assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
+ assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
+ assertStorage(conDelCache, conDelCacheSlot, nil)
+ assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
+ assertStorage(conNukeCache, conNukeCacheSlot, nil)
+
+ // Retrieve all the data directly from the database and validate it
+
+ // assertDatabaseAccount ensures that an account inside the database matches
+ // the given blob if it's already covered by the disk snapshot, and does not
+ // exist otherwise.
+ assertDatabaseAccount := func(account common.Hash, data []byte) {
+ t.Helper()
+ blob := rawdb.ReadAccountSnapshot(db, account)
+ if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
+ t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
+ }
+ if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
+ t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
+ }
+ }
+ assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
+ assertDatabaseAccount(accNoModCache, accNoModCache[:])
+ assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
+ assertDatabaseAccount(accModCache, reverse(accModCache[:]))
+ assertDatabaseAccount(accDelNoCache, nil)
+ assertDatabaseAccount(accDelCache, nil)
+
+ // assertDatabaseStorage ensures that a storage slot inside the database
+ // matches the given blob if it's already covered by the disk snapshot,
+ // and does not exist otherwise.
+ assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
+ t.Helper()
+ blob := rawdb.ReadStorageSnapshot(db, account, slot)
+ if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
+ t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
+ }
+ if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
+ t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
+ }
+ }
+ assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
+ assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
+ assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
+ assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
+ assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
+ assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
+ assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
+ assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
+ }
+}
+
+// Tests that when the bottom-most diff layer is merged into the disk layer,
+// the corresponding generator is persisted correctly.
+func TestDiskGeneratorPersistence(t *testing.T) {
+ var (
+ accOne = randomHash()
+ accTwo = randomHash()
+ accOneSlotOne = randomHash()
+ accOneSlotTwo = randomHash()
+
+ accThree = randomHash()
+ accThreeSlot = randomHash()
+ baseRoot = randomHash()
+ diffRoot = randomHash()
+ diffTwoRoot = randomHash()
+ genMarker = append(randomHash().Bytes(), randomHash().Bytes()...)
+ )
+	// Test scenario 1: the disk layer is still under construction.
+ db := rawdb.NewMemoryDatabase()
+
+ rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
+ rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
+ rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
+ rawdb.WriteSnapshotRoot(db, baseRoot)
+
+ // Create a disk layer based on all above updates
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ baseRoot: &diskLayer{
+ diskdb: db,
+ cache: fastcache.New(500 * 1024),
+ root: baseRoot,
+ genMarker: genMarker,
+ },
+ },
+ }
+ // Modify or delete some accounts, flatten everything onto disk
+ if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
+ accTwo: accTwo[:],
+ }, nil); err != nil {
+ t.Fatalf("failed to update snapshot tree: %v", err)
+ }
+ if err := snaps.Cap(diffRoot, 0); err != nil {
+ t.Fatalf("failed to flatten snapshot tree: %v", err)
+ }
+ blob := rawdb.ReadSnapshotGenerator(db)
+ var generator journalGenerator
+ if err := rlp.DecodeBytes(blob, &generator); err != nil {
+ t.Fatalf("Failed to decode snapshot generator %v", err)
+ }
+ if !bytes.Equal(generator.Marker, genMarker) {
+ t.Fatalf("Generator marker is not matched")
+ }
+	// Test scenario 2: the disk layer is fully generated.
+ // Modify or delete some accounts, flatten everything onto disk
+ if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
+ accThree: accThree.Bytes(),
+ }, map[common.Hash]map[common.Hash][]byte{
+ accThree: {accThreeSlot: accThreeSlot.Bytes()},
+ }); err != nil {
+ t.Fatalf("failed to update snapshot tree: %v", err)
+ }
+ diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
+ diskLayer.genMarker = nil // Construction finished
+ if err := snaps.Cap(diffTwoRoot, 0); err != nil {
+ t.Fatalf("failed to flatten snapshot tree: %v", err)
+ }
+ blob = rawdb.ReadSnapshotGenerator(db)
+ if err := rlp.DecodeBytes(blob, &generator); err != nil {
+ t.Fatalf("Failed to decode snapshot generator %v", err)
+ }
+ if len(generator.Marker) != 0 {
+ t.Fatalf("Failed to update snapshot generator")
+ }
+}
+
+// Tests that merging something into a disk layer persists it into the database
+// and invalidates any previously written and cached values, discarding anything
+// after the in-progress generation marker.
+//
+// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
+// some very specific corner cases that random tests won't ever trigger.
+func TestDiskMidAccountPartialMerge(t *testing.T) {
+ // TODO(@karalabe) ?
+}
+
+// TestDiskSeek tests that seek-operations work on the disk layer
+func TestDiskSeek(t *testing.T) {
+ // Create some accounts in the disk layer
+ db := rawdb.NewMemoryDatabase()
+ defer db.Close()
+
+ // Fill even keys [0,2,4...]
+ for i := 0; i < 0xff; i += 2 {
+ acc := common.Hash{byte(i)}
+ rawdb.WriteAccountSnapshot(db, acc, acc[:])
+ }
+	// Add a 'higher' key with an incorrect (higher) prefix
+ highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
+ db.Put(highKey, []byte{0xff, 0xff})
+
+ baseRoot := randomHash()
+ rawdb.WriteSnapshotRoot(db, baseRoot)
+
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ baseRoot: &diskLayer{
+ diskdb: db,
+ cache: fastcache.New(500 * 1024),
+ root: baseRoot,
+ },
+ },
+ }
+ // Test some different seek positions
+ type testcase struct {
+ pos byte
+ expkey byte
+ }
+ var cases = []testcase{
+ {0xff, 0x55}, // this should exit immediately without checking key
+ {0x01, 0x02},
+ {0xfe, 0xfe},
+ {0xfd, 0xfe},
+ {0x00, 0x00},
+ }
+ for i, tc := range cases {
+ it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos})
+ if err != nil {
+ t.Fatalf("case %d, error: %v", i, err)
+ }
+ count := 0
+ for it.Next() {
+ k, v, err := it.Hash()[0], it.Account()[0], it.Error()
+ if err != nil {
+ t.Fatalf("test %d, item %d, error: %v", i, count, err)
+ }
+ // First item in iterator should have the expected key
+ if count == 0 && k != tc.expkey {
+ t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
+ }
+ count++
+ if v != k {
+ t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
+ }
+ }
+ }
+}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
new file mode 100644
index 0000000000..e5376429bc
--- /dev/null
+++ b/core/state/snapshot/generate.go
@@ -0,0 +1,756 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+var (
+ // accountCheckRange is the upper limit of the number of accounts involved in
+ // each range check. This is a value estimated based on experience. If this
+ // range is too large, the failure rate of range proof will increase. Otherwise,
+ // if the range is too small, the efficiency of the state recovery will decrease.
+ accountCheckRange = 128
+
+ // storageCheckRange is the upper limit of the number of storage slots involved
+ // in each range check. This is a value estimated based on experience. If this
+ // range is too large, the failure rate of range proof will increase. Otherwise,
+ // if the range is too small, the efficiency of the state recovery will decrease.
+ storageCheckRange = 1024
+
+ // errMissingTrie is returned if the target trie is missing while the generation
+	// is running. In this case the generation is aborted and waits for a new signal.
+ errMissingTrie = errors.New("missing trie")
+)
+
+// generateSnapshot regenerates a brand new snapshot based on an existing state
+// database and head block asynchronously. The snapshot is returned immediately
+// and generation is continued in the background until done.
+func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
+ // Create a new disk layer with an initialized state marker at zero
+ var (
+ stats = &generatorStats{start: time.Now()}
+ batch = diskdb.NewBatch()
+ genMarker = []byte{} // Initialized but empty!
+ )
+ rawdb.WriteSnapshotRoot(batch, root)
+ journalProgress(batch, genMarker, stats)
+ if err := batch.Write(); err != nil {
+ utils.Logger().Fatal().Err(err).Msg("Failed to write initialized state marker")
+ }
+ base := &diskLayer{
+ diskdb: diskdb,
+ triedb: triedb,
+ root: root,
+ cache: fastcache.New(cache * 1024 * 1024),
+ genMarker: genMarker,
+ genPending: make(chan struct{}),
+ genAbort: make(chan chan *generatorStats),
+ }
+ go base.generate(stats)
+ utils.Logger().Debug().Interface("root", root).Msg("Start snapshot generation")
+ return base
+}
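+
+// A minimal abort sketch (illustrative): background generation is stopped by
+// handing the layer a reply channel through genAbort and waiting for the
+// in-flight stats to be returned:
+//
+//	abort := make(chan *generatorStats)
+//	base.genAbort <- abort
+//	stats := <-abort // generation halted; stats describe the progress so far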
+
+// journalProgress persists the generator stats into the database to resume later.
+func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
+ // Write out the generator marker. Note it's a standalone disk layer generator
+ // which is not mixed with journal. It's ok if the generator is persisted while
+ // journal is not.
+ entry := journalGenerator{
+ Done: marker == nil,
+ Marker: marker,
+ }
+ if stats != nil {
+ entry.Accounts = stats.accounts
+ entry.Slots = stats.slots
+ entry.Storage = uint64(stats.storage)
+ }
+ blob, err := rlp.EncodeToBytes(entry)
+ if err != nil {
+ panic(err) // Cannot happen, here to catch dev errors
+ }
+ var logstr string
+ switch {
+ case marker == nil:
+ logstr = "done"
+ case bytes.Equal(marker, []byte{}):
+ logstr = "empty"
+ case len(marker) == common.HashLength:
+ logstr = fmt.Sprintf("%#x", marker)
+ default:
+ logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
+ }
+ utils.Logger().Debug().Err(err).Str("progress", logstr).Msg("Journalled generator progress")
+ rawdb.WriteSnapshotGenerator(db, blob)
+}
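+
+// A minimal read-back sketch (illustrative), mirroring the tests: the
+// journalled progress can be decoded from the database to resume generation:
+//
+//	blob := rawdb.ReadSnapshotGenerator(db)
+//	var generator journalGenerator
+//	if err := rlp.DecodeBytes(blob, &generator); err != nil {
+//		// no valid progress journalled, regenerate from scratch
+//	}
+//	// generator.Done reports completion; generator.Marker is the resume point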
+
+// proofResult contains the output of range proving which can be used
+// for further processing regardless if it is successful or not.
+type proofResult struct {
+	keys     [][]byte   // The key set of all elements being iterated, even if proving failed
+	vals     [][]byte   // The val set of all elements being iterated, even if proving failed
+	diskMore bool       // Set when the database has extra snapshot states since last iteration
+	trieMore bool       // Set when the trie has extra snapshot states (only meaningful for successful proving)
+	proofErr error      // Indicator whether the given state range is valid or not
+	tr       *trie.Trie // The trie, in case the trie was resolved by the prover (may be nil)
+}
+
+// valid returns whether the range proof was successful.
+func (result *proofResult) valid() bool {
+ return result.proofErr == nil
+}
+
+// last returns the last verified element key, regardless of whether the range
+// proof succeeded. Nil is returned if nothing was involved in the proving.
+func (result *proofResult) last() []byte {
+ var last []byte
+ if len(result.keys) > 0 {
+ last = result.keys[len(result.keys)-1]
+ }
+ return last
+}
+
+// forEach iterates all the visited elements and applies the given callback on them.
+// The iteration is aborted if the callback returns a non-nil error.
+func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
+ for i := 0; i < len(result.keys); i++ {
+ key, val := result.keys[i], result.vals[i]
+ if err := callback(key, val); err != nil {
+ return err
+ }
+ }
+ return nil
+}
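+
+// A minimal consumption sketch (illustrative): callers of proveRange (below)
+// first check validity, then either trust the iterated range or regenerate:
+//
+//	result, err := dl.proveRange(ctx, trieId, prefix, kind, origin, max, nil)
+//	if err != nil {
+//		return err // iteration aborted, e.g. errMissingTrie
+//	}
+//	if result.valid() {
+//		result.forEach(func(key, val []byte) error { return nil }) // verified
+//	} else {
+//		origin = result.last() // fall back to trie iteration from here
+//	}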
+
+// proveRange proves the snapshot segment with particular prefix is "valid".
+// The iteration start point is assigned if the iterator is restored from the
+// last interruption, and max limits the amount of data involved in each
+// iteration.
+//
+// The proof result will be returned if the range proving is finished, otherwise
+// the error will be returned to abort the entire procedure.
+func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
+ var (
+ keys [][]byte
+ vals [][]byte
+ proof = rawdb.NewMemoryDatabase()
+ diskMore = false
+ iter = ctx.iterator(kind)
+ start = time.Now()
+ min = append(prefix, origin...)
+ )
+ for iter.Next() {
+		// Ensure the iterated item is always equal to or larger than the given origin.
+ key := iter.Key()
+ if bytes.Compare(key, min) < 0 {
+ return nil, errors.New("invalid iteration position")
+ }
+		// Ensure the iterated item still falls within the specified prefix.
+		// If not, all items in the specified area have been visited. Move
+		// the iterator a step back since we iterated one extra element out.
+ if !bytes.Equal(key[:len(prefix)], prefix) {
+ iter.Hold()
+ break
+ }
+ // Break if we've reached the max size, and signal that we're not
+		// done yet. Move the iterator a step back since we iterated one
+ // extra element out.
+ if len(keys) == max {
+ iter.Hold()
+ diskMore = true
+ break
+ }
+ keys = append(keys, common.CopyBytes(key[len(prefix):]))
+
+ if valueConvertFn == nil {
+ vals = append(vals, common.CopyBytes(iter.Value()))
+ } else {
+ val, err := valueConvertFn(iter.Value())
+ if err != nil {
+ // Special case, the state data is corrupted (invalid slim-format account),
+				// don't abort the entire procedure directly. Instead, let the fallback
+				// generation heal the invalid data.
+				//
+				// Append the original value here to keep the number of keys and
+				// values aligned.
+ vals = append(vals, common.CopyBytes(iter.Value()))
+ utils.Logger().Error().Err(err).Msg("Failed to convert account state data")
+ } else {
+ vals = append(vals, val)
+ }
+ }
+ }
+ // Update metrics for database iteration and merkle proving
+ if kind == snapStorage {
+ snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
+ } else {
+ snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
+ }
+ defer func(start time.Time) {
+ if kind == snapStorage {
+ snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
+ } else {
+ snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
+ }
+ }(time.Now())
+
+ // The snap state is exhausted, pass the entire key/val set for verification
+ root := trieId.Root
+ if origin == nil && !diskMore {
+ stackTr := trie.NewStackTrie(nil)
+ for i, key := range keys {
+ stackTr.TryUpdate(key, vals[i])
+ }
+ if gotRoot := stackTr.Hash(); gotRoot != root {
+ return &proofResult{
+ keys: keys,
+ vals: vals,
+ proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
+ }, nil
+ }
+ return &proofResult{keys: keys, vals: vals}, nil
+ }
+ // Snap state is chunked, generate edge proofs for verification.
+ tr, err := trie.New(trieId, dl.triedb)
+ if err != nil {
+ ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
+ return nil, errMissingTrie
+ }
+	// First, find out the key of the last iterated element.
+ var last []byte
+ if len(keys) > 0 {
+ last = keys[len(keys)-1]
+ }
+ // Generate the Merkle proofs for the first and last element
+ if origin == nil {
+ origin = common.Hash{}.Bytes()
+ }
+ if err := tr.Prove(origin, 0, proof); err != nil {
+ utils.Logger().Debug().Err(err).
+ Msg("Failed to prove range")
+
+ return &proofResult{
+ keys: keys,
+ vals: vals,
+ diskMore: diskMore,
+ proofErr: err,
+ tr: tr,
+ }, nil
+ }
+ if last != nil {
+ if err := tr.Prove(last, 0, proof); err != nil {
+ utils.Logger().Debug().Err(err).Str("kind", kind).Bytes("last", last).Msg("Failed to prove range")
+ return &proofResult{
+ keys: keys,
+ vals: vals,
+ diskMore: diskMore,
+ proofErr: err,
+ tr: tr,
+ }, nil
+ }
+ }
+	// Verify the snapshot segment with the range prover, ensuring that all flat
+	// states in this range correspond to the merkle trie.
+ cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
+	return &proofResult{
+		keys:     keys,
+		vals:     vals,
+		diskMore: diskMore,
+		trieMore: cont,
+		proofErr: err,
+		tr:       tr,
+	}, nil
+}
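+
+// A rough sketch of how callers interpret a proveRange result (illustrative
+// only; generateRange below is the authoritative handling):
+//
+//	result, err := dl.proveRange(ctx, trieId, prefix, kind, origin, max, nil)
+//	if err != nil {
+//		// unrecoverable, e.g. the trie is missing; abort the generation
+//	} else if result.valid() {
+//		// flat state matches the trie; keep it and advance past result.last()
+//	} else {
+//		// proof failed; fall back to trie iteration to regenerate the range
+//	}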
+
+// onStateCallback is a function that is called by generateRange, when processing a range of
+// accounts or storage slots. For each element, the callback is invoked.
+//
+// - If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
+// - If 'write' is true, then this element needs to be updated with the 'val'.
+// - If 'write' is false, then this element is already correct, and needs no update.
+// The 'val' is the canonical encoding of the value (not the slim format for accounts)
+//
+// However, for accounts, the storage trie of the account needs to be checked. Also,
+// dangling storages (storage exists but the corresponding account is missing) need
+// to be cleaned up.
+type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
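+
+// An illustrative callback shape (removeStale and persist are hypothetical
+// helpers; the real implementations are onAccount and onStorage below):
+//
+//	onState := func(key []byte, val []byte, write bool, delete bool) error {
+//		if delete {
+//			return removeStale(key) // drop the stale snapshot entry
+//		}
+//		if write {
+//			return persist(key, val) // (re)write the snapshot entry
+//		}
+//		return nil // element already correct, nothing to do
+//	}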
+
+// generateRange generates the state segment with particular prefix. Generation can
+// either verify the correctness of existing state through range-proof and skip
+// generation, or iterate trie to regenerate state on demand.
+func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
+ // Use range prover to check the validity of the flat state in the range
+ result, err := dl.proveRange(ctx, trieId, prefix, kind, origin, max, valueConvertFn)
+ if err != nil {
+ return false, nil, err
+ }
+ last := result.last()
+
+ // Construct contextual logger
+ logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
+ if len(origin) > 0 {
+ logCtx = append(logCtx, "origin", hexutil.Encode(origin))
+ }
+	logger := utils.GetLogger().New(logCtx...)
+
+ // The range prover says the range is correct, skip trie iteration
+ if result.valid() {
+ snapSuccessfulRangeProofMeter.Mark(1)
+ logger.Trace("Proved state range", "last", hexutil.Encode(last))
+
+ // The verification is passed, process each state with the given
+ // callback function. If this state represents a contract, the
+ // corresponding storage check will be performed in the callback
+ if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
+ return false, nil, err
+ }
+ // Only abort the iteration when both database and trie are exhausted
+ return !result.diskMore && !result.trieMore, last, nil
+ }
+ logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
+ snapFailedRangeProofMeter.Mark(1)
+
+	// Special case: the entire trie is missing. In the original trie scheme, all
+	// the duplicated subtries are filtered out (only one copy of the data is
+	// stored). In the snapshot model, however, the storage tries belonging to
+	// different contracts are all kept even if they are duplicated. Track this
+	// case to some extent to remove the noise from the statistics.
+ if origin == nil && last == nil {
+ meter := snapMissallAccountMeter
+ if kind == snapStorage {
+ meter = snapMissallStorageMeter
+ }
+ meter.Mark(1)
+ }
+ // We use the snap data to build up a cache which can be used by the
+ // main account trie as a primary lookup when resolving hashes
+ var resolver trie.NodeResolver
+ if len(result.keys) > 0 {
+ mdb := rawdb.NewMemoryDatabase()
+ tdb := trie.NewDatabase(mdb)
+ snapTrie := trie.NewEmpty(tdb)
+ for i, key := range result.keys {
+ snapTrie.Update(key, result.vals[i])
+ }
+ root, nodes := snapTrie.Commit(false)
+ if nodes != nil {
+ tdb.Update(trie.NewWithNodeSet(nodes))
+ tdb.Commit(root, false)
+ }
+ resolver = func(owner common.Hash, path []byte, hash common.Hash) []byte {
+ return rawdb.ReadTrieNode(mdb, owner, path, hash, tdb.Scheme())
+ }
+ }
+ // Construct the trie for state iteration, reuse the trie
+ // if it's already opened with some nodes resolved.
+ tr := result.tr
+ if tr == nil {
+ tr, err = trie.New(trieId, dl.triedb)
+ if err != nil {
+ ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
+ return false, nil, errMissingTrie
+ }
+ }
+ var (
+ trieMore bool
+ nodeIt = tr.NodeIterator(origin)
+ iter = trie.NewIterator(nodeIt)
+ kvkeys, kvvals = result.keys, result.vals
+
+ // counters
+ count = 0 // number of states delivered by iterator
+ created = 0 // states created from the trie
+ updated = 0 // states updated from the trie
+ deleted = 0 // states not in trie, but were in snapshot
+ untouched = 0 // states already correct
+
+ // timers
+ start = time.Now()
+ internal time.Duration
+ )
+ nodeIt.AddResolver(resolver)
+
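+	// The trie iterator and the sorted flat-state keys are merged below like
+	// two sorted streams: snapshot keys smaller than the current trie key are
+	// stale and deleted, an equal key is kept (or updated if the value
+	// differs), and keys present only in the trie are created.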
+ for iter.Next() {
+ if last != nil && bytes.Compare(iter.Key, last) > 0 {
+ trieMore = true
+ break
+ }
+ count++
+ write := true
+ created++
+ for len(kvkeys) > 0 {
+ if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
+ // delete the key
+ istart := time.Now()
+ if err := onState(kvkeys[0], nil, false, true); err != nil {
+ return false, nil, err
+ }
+ kvkeys = kvkeys[1:]
+ kvvals = kvvals[1:]
+ deleted++
+ internal += time.Since(istart)
+ continue
+ } else if cmp == 0 {
+ // the snapshot key can be overwritten
+ created--
+ if write = !bytes.Equal(kvvals[0], iter.Value); write {
+ updated++
+ } else {
+ untouched++
+ }
+ kvkeys = kvkeys[1:]
+ kvvals = kvvals[1:]
+ }
+ break
+ }
+ istart := time.Now()
+ if err := onState(iter.Key, iter.Value, write, false); err != nil {
+ return false, nil, err
+ }
+ internal += time.Since(istart)
+ }
+ if iter.Err != nil {
+ return false, nil, iter.Err
+ }
+ // Delete all stale snapshot states remaining
+ istart := time.Now()
+ for _, key := range kvkeys {
+ if err := onState(key, nil, false, true); err != nil {
+ return false, nil, err
+ }
+ deleted += 1
+ }
+ internal += time.Since(istart)
+
+ // Update metrics for counting trie iteration
+ if kind == snapStorage {
+ snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
+ } else {
+ snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
+ }
+ logger.Debug("Regenerated state range", "root", trieId.Root, "last", hexutil.Encode(last),
+ "count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)
+
+ // If there are either more trie items, or there are more snap items
+ // (in the next segment), then we need to keep working
+ return !trieMore && !result.diskMore, last, nil
+}
+
+// checkAndFlush checks if an interruption signal is received or the
+// batch size has exceeded the allowance.
+func (dl *diskLayer) checkAndFlush(ctx *generatorContext, current []byte) error {
+ var abort chan *generatorStats
+ select {
+ case abort = <-dl.genAbort:
+ default:
+ }
+ if ctx.batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
+ if bytes.Compare(current, dl.genMarker) < 0 {
+ utils.Logger().Error().
+ Str("current", fmt.Sprintf("%x", current)).
+ Str("genMarker", fmt.Sprintf("%x", dl.genMarker)).
+ Msg("Snapshot generator went backwards")
+ }
+		// Flush out the batch no matter whether it's empty or not. It's
+		// possible that all the states were recovered and the generation
+		// indeed made progress.
+ journalProgress(ctx.batch, current, ctx.stats)
+
+ if err := ctx.batch.Write(); err != nil {
+ return err
+ }
+ ctx.batch.Reset()
+
+ dl.lock.Lock()
+ dl.genMarker = current
+ dl.lock.Unlock()
+
+ if abort != nil {
+ ctx.stats.Log("Aborting state snapshot generation", dl.root, current)
+ return newAbortErr(abort) // bubble up an error for interruption
+ }
+		// Don't hold the iterators too long, release them to let the compactor work.
+ ctx.reopenIterator(snapAccount)
+ ctx.reopenIterator(snapStorage)
+ }
+ if time.Since(ctx.logged) > 8*time.Second {
+ ctx.stats.Log("Generating state snapshot", dl.root, current)
+ ctx.logged = time.Now()
+ }
+ return nil
+}
+
+// generateStorages generates the missing storage slots of the specific contract.
+// It's supposed to restart the generation from the given origin position.
+func generateStorages(ctx *generatorContext, dl *diskLayer, stateRoot common.Hash, account common.Hash, storageRoot common.Hash, storeMarker []byte) error {
+ onStorage := func(key []byte, val []byte, write bool, delete bool) error {
+ defer func(start time.Time) {
+ snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ if delete {
+ rawdb.DeleteStorageSnapshot(ctx.batch, account, common.BytesToHash(key))
+ snapWipedStorageMeter.Mark(1)
+ return nil
+ }
+ if write {
+ rawdb.WriteStorageSnapshot(ctx.batch, account, common.BytesToHash(key), val)
+ snapGeneratedStorageMeter.Mark(1)
+ } else {
+ snapRecoveredStorageMeter.Mark(1)
+ }
+ ctx.stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
+ ctx.stats.slots++
+
+ // If we've exceeded our batch allowance or termination was requested, flush to disk
+ if err := dl.checkAndFlush(ctx, append(account[:], key...)); err != nil {
+ return err
+ }
+ return nil
+ }
+ // Loop for re-generating the missing storage slots.
+ var origin = common.CopyBytes(storeMarker)
+ for {
+ id := trie.StorageTrieID(stateRoot, account, storageRoot)
+ exhausted, last, err := dl.generateRange(ctx, id, append(rawdb.SnapshotStoragePrefix, account.Bytes()...), snapStorage, origin, storageCheckRange, onStorage, nil)
+ if err != nil {
+			return err // The procedure is aborted, either by an external signal or an internal error.
+ }
+ // Abort the procedure if the entire contract storage is generated
+ if exhausted {
+ break
+ }
+ if origin = increaseKey(last); origin == nil {
+ break // special case, the last is 0xffffffff...fff
+ }
+ }
+ return nil
+}
+
+// generateAccounts generates the missing snapshot accounts as well as their
+// storage slots in the main trie. It's supposed to restart the generation
+// from the given origin position.
+func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) error {
+ onAccount := func(key []byte, val []byte, write bool, delete bool) error {
+ // Make sure to clear all dangling storages before this account
+ account := common.BytesToHash(key)
+ ctx.removeStorageBefore(account)
+
+ start := time.Now()
+ if delete {
+ rawdb.DeleteAccountSnapshot(ctx.batch, account)
+ snapWipedAccountMeter.Mark(1)
+ snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
+
+ ctx.removeStorageAt(account)
+ return nil
+ }
+ // Retrieve the current account and flatten it into the internal format
+ var acc struct {
+ Nonce uint64
+ Balance *big.Int
+ Root common.Hash
+ CodeHash []byte
+ }
+ if err := rlp.DecodeBytes(val, &acc); err != nil {
+ utils.Logger().Fatal().Err(err).Msg("Invalid account encountered during snapshot creation")
+ }
+ // If the account is not yet in-progress, write it out
+ if accMarker == nil || !bytes.Equal(account[:], accMarker) {
+ dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
+ if !write {
+ if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) {
+ dataLen -= 32
+ }
+ if acc.Root == types.EmptyRootHash {
+ dataLen -= 32
+ }
+ snapRecoveredAccountMeter.Mark(1)
+ } else {
+ data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+ dataLen = len(data)
+ rawdb.WriteAccountSnapshot(ctx.batch, account, data)
+ snapGeneratedAccountMeter.Mark(1)
+ }
+ ctx.stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
+ ctx.stats.accounts++
+ }
+		// If the snap generation arrives here after an interruption, genMarker may
+		// go backward when the last genMarker consisted of both accountHash and storageHash
+ marker := account[:]
+ if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
+ marker = dl.genMarker[:]
+ }
+ // If we've exceeded our batch allowance or termination was requested, flush to disk
+ if err := dl.checkAndFlush(ctx, marker); err != nil {
+ return err
+ }
+ snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds()) // let's count flush time as well
+
+		// If the iterated account is a contract, create a further loop to
+		// verify or regenerate the contract storage.
+ if acc.Root == types.EmptyRootHash {
+ ctx.removeStorageAt(account)
+ } else {
+ var storeMarker []byte
+ if accMarker != nil && bytes.Equal(account[:], accMarker) && len(dl.genMarker) > common.HashLength {
+ storeMarker = dl.genMarker[common.HashLength:]
+ }
+ if err := generateStorages(ctx, dl, dl.root, account, acc.Root, storeMarker); err != nil {
+ return err
+ }
+ }
+		// Some account was processed, unmark the marker
+ accMarker = nil
+ return nil
+ }
+	// Always reset the initial account range to 1 whenever recovering from an
+	// interruption. TODO(rjl493456442) can we remove it?
+ var accountRange = accountCheckRange
+ if len(accMarker) > 0 {
+ accountRange = 1
+ }
+ origin := common.CopyBytes(accMarker)
+ for {
+ id := trie.StateTrieID(dl.root)
+ exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, FullAccountRLP)
+ if err != nil {
+			return err // The procedure is aborted, either by an external signal or an internal error.
+ }
+ origin = increaseKey(last)
+
+		// Last step, clean up the storages after the last account.
+		// All the remaining storages should be treated as dangling.
+ if origin == nil || exhausted {
+ ctx.removeStorageLeft()
+ break
+ }
+ accountRange = accountCheckRange
+ }
+ return nil
+}
+
+// generate is a background thread that iterates over the state and storage tries,
+// constructing the state snapshot. All the arguments are purely for statistics
+// gathering and logging, since the method surfs the blocks as they arrive, often
+// being restarted.
+func (dl *diskLayer) generate(stats *generatorStats) {
+ var (
+ accMarker []byte
+ abort chan *generatorStats
+ )
+ if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
+ accMarker = dl.genMarker[:common.HashLength]
+ }
+ stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)
+
+	// Initialize the global generator context. The snapshot iterators are
+	// opened at the interrupted position because the assumption holds that
+	// all the snapshot data before the marker was generated correctly.
+	// Even if the snapshot data was updated during the interruption (before
+	// or at the marker), the assumption still holds.
+	// The account or storage slot at the interruption point will be
+	// processed twice by the generator (it was already processed in the
+	// last run), but that's fine.
+ ctx := newGeneratorContext(stats, dl.diskdb, accMarker, dl.genMarker)
+ defer ctx.close()
+
+ if err := generateAccounts(ctx, dl, accMarker); err != nil {
+		// Extract the received interruption signal if it exists
+ if aerr, ok := err.(*abortErr); ok {
+ abort = aerr.abort
+ }
+		// Aborted by internal error, wait for the signal
+ if abort == nil {
+ abort = <-dl.genAbort
+ }
+ abort <- stats
+ return
+ }
+	// Snapshot fully generated, set the marker to nil.
+	// Note that even if there is nothing to commit, we persist the
+	// generator anyway to mark the snapshot as complete.
+ journalProgress(ctx.batch, nil, stats)
+ if err := ctx.batch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to flush batch")
+
+ abort = <-dl.genAbort
+ abort <- stats
+ return
+ }
+ ctx.batch.Reset()
+
+ utils.Logger().Info().
+ Uint64("accounts", stats.accounts).
+ Uint64("slots", stats.slots).
+ Interface("storage", stats.storage).
+ Uint64("dangling", stats.dangling).
+ Interface("elapsed", common.PrettyDuration(time.Since(stats.start))).
+ Msg("Generated state snapshot")
+
+ dl.lock.Lock()
+ dl.genMarker = nil
+ close(dl.genPending)
+ dl.lock.Unlock()
+
+ // Someone will be looking for us, wait it out
+ abort = <-dl.genAbort
+ abort <- nil
+}
+
+// increaseKey increases the input key by one. It returns nil if the entire
+// addition overflows (i.e. the key was 0xff...ff). The input slice is
+// modified in place.
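+//
+// For example, increaseKey([]byte{0x01, 0xff}) yields []byte{0x02, 0x00},
+// while increaseKey([]byte{0xff, 0xff}) returns nil.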
+func increaseKey(key []byte) []byte {
+ for i := len(key) - 1; i >= 0; i-- {
+ key[i]++
+ if key[i] != 0x0 {
+ return key
+ }
+ }
+ return nil
+}
+
+// abortErr wraps a received interruption signal to represent that the
+// generation was aborted by an external process.
+type abortErr struct {
+ abort chan *generatorStats
+}
+
+func newAbortErr(abort chan *generatorStats) error {
+ return &abortErr{abort: abort}
+}
+
+func (err *abortErr) Error() string {
+ return "aborted"
+}
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
new file mode 100644
index 0000000000..c5a6725f40
--- /dev/null
+++ b/core/state/snapshot/generate_test.go
@@ -0,0 +1,861 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "fmt"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "golang.org/x/crypto/sha3"
+)
+
+func hashData(input []byte) common.Hash {
+ var hasher = sha3.NewLegacyKeccak256()
+ var hash common.Hash
+ hasher.Reset()
+ hasher.Write(input)
+ hasher.Sum(hash[:0])
+ return hash
+}
+
+// Tests snapshot generation from an empty database.
+func TestGeneration(t *testing.T) {
+ // We can't use statedb to make a test trie (circular dependency), so make
+ // a fake one manually. We're going with a small account trie of 3 accounts,
+// two of which also have the same 3-slot storage trie attached.
+ var helper = newHelper()
+ stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)
+
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+
+ root, snap := helper.CommitAndGenerate()
+ if have, want := root, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"); have != want {
+ t.Fatalf("have %#x want %#x", have, want)
+ }
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests snapshot generation with existent flat state.
+func TestGenerateExistentState(t *testing.T) {
+ // We can't use statedb to make a test trie (circular dependency), so make
+ // a fake one manually. We're going with a small account trie of 3 accounts,
+// two of which also have the same 3-slot storage trie attached.
+ var helper = newHelper()
+
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+
+ stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ root, snap := helper.CommitAndGenerate()
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
+ t.Helper()
+
+ accIt := snap.AccountIterator(common.Hash{})
+ defer accIt.Release()
+
+ snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate,
+ func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+ storageIt, _ := snap.StorageIterator(accountHash, common.Hash{})
+ defer storageIt.Release()
+
+ hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ return hash, nil
+ }, newGenerateStats(), true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if snapRoot != trieRoot {
+		t.Fatalf("snaproot: %#x != trieroot: %#x", snapRoot, trieRoot)
+ }
+ if err := CheckDanglingStorage(snap.diskdb); err != nil {
+ t.Fatalf("Detected dangling storages: %v", err)
+ }
+}
+
+type testHelper struct {
+ diskdb ethdb.Database
+ triedb *trie.Database
+ accTrie *trie.StateTrie
+ nodes *trie.MergedNodeSet
+}
+
+func newHelper() *testHelper {
+ diskdb := rawdb.NewMemoryDatabase()
+ triedb := trie.NewDatabase(diskdb)
+ accTrie, _ := trie.NewStateTrie(trie.StateTrieID(common.Hash{}), triedb)
+ return &testHelper{
+ diskdb: diskdb,
+ triedb: triedb,
+ accTrie: accTrie,
+ nodes: trie.NewMergedNodeSet(),
+ }
+}
+
+func (t *testHelper) addTrieAccount(acckey string, acc *Account) {
+ val, _ := rlp.EncodeToBytes(acc)
+ t.accTrie.Update([]byte(acckey), val)
+}
+
+func (t *testHelper) addSnapAccount(acckey string, acc *Account) {
+ val, _ := rlp.EncodeToBytes(acc)
+ key := hashData([]byte(acckey))
+ rawdb.WriteAccountSnapshot(t.diskdb, key, val)
+}
+
+func (t *testHelper) addAccount(acckey string, acc *Account) {
+ t.addTrieAccount(acckey, acc)
+ t.addSnapAccount(acckey, acc)
+}
+
+func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string) {
+ accHash := hashData([]byte(accKey))
+ for i, key := range keys {
+ rawdb.WriteStorageSnapshot(t.diskdb, accHash, hashData([]byte(key)), []byte(vals[i]))
+ }
+}
+
+func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) []byte {
+ id := trie.StorageTrieID(stateRoot, owner, common.Hash{})
+ stTrie, _ := trie.NewStateTrie(id, t.triedb)
+ for i, k := range keys {
+ stTrie.Update([]byte(k), []byte(vals[i]))
+ }
+ if !commit {
+ return stTrie.Hash().Bytes()
+ }
+ root, nodes := stTrie.Commit(false)
+ if nodes != nil {
+ t.nodes.Merge(nodes)
+ }
+ return root.Bytes()
+}
+
+func (t *testHelper) Commit() common.Hash {
+ root, nodes := t.accTrie.Commit(true)
+ if nodes != nil {
+ t.nodes.Merge(nodes)
+ }
+ t.triedb.Update(t.nodes)
+ t.triedb.Commit(root, false)
+ return root
+}
+
+func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) {
+ root := t.Commit()
+ snap := generateSnapshot(t.diskdb, t.triedb, 16, root)
+ return root, snap
+}
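+
+// A typical pattern in the tests below (sketch): build a storage trie, attach
+// it to an account in the account trie (optionally mirroring both into the
+// snapshot database), then commit and kick off generation:
+//
+//	helper := newHelper()
+//	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
+//		[]string{"key-1"}, []string{"val-1"}, true)
+//	helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot,
+//		CodeHash: types.EmptyCodeHash.Bytes()})
+//	root, snap := helper.CommitAndGenerate()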
+
+// Tests snapshot generation with existent flat state, where the flat state
+// contains some errors:
+// - the contract with empty storage root but has storage entries in the disk
+// - the contract with non empty storage root but empty storage slots
+// - the contract (non-empty storage) misses some storage slots
+// - miss in the beginning
+// - miss in the middle
+// - miss in the end
+//
+// - the contract (non-empty storage) has wrong storage slots
+// - wrong slots in the beginning
+// - wrong slots in the middle
+// - wrong slots in the end
+//
+// - the contract (non-empty storage) has extra storage slots
+// - extra slots in the beginning
+// - extra slots in the middle
+// - extra slots in the end
+func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
+ helper := newHelper()
+
+ // Account one, empty root but non-empty database
+ helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ // Account two, non empty root but empty database
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+
+ // Miss slots
+ {
+ // Account three, non empty root but misses slots in the beginning
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})
+
+ // Account four, non empty root but misses slots in the middle
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})
+
+ // Account five, non empty root but misses slots in the end
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
+ }
+
+ // Wrong storage slots
+ {
+ // Account six, non empty root but wrong slots in the beginning
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})
+
+ // Account seven, non empty root but wrong slots in the middle
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})
+
+ // Account eight, non empty root but wrong slots in the end
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})
+
+ // Account 9, non empty root but rotated slots
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
+ }
+
+ // Extra storage slots
+ {
+ // Account 10, non empty root but extra slots in the beginning
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})
+
+ // Account 11, non empty root but extra slots in the middle
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})
+
+ // Account 12, non empty root but extra slots in the end
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
+ }
+
+ root, snap := helper.CommitAndGenerate()
+ t.Logf("Root: %#x\n", root) // Root = 0x8746cce9fd9c658b2cfd639878ed6584b7a2b3e73bb40f607fcfa156002429a0
+
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests snapshot generation with existent flat state, where the flat state
+// contains some errors:
+// - missing accounts
+// - wrong accounts
+// - extra accounts
+func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
+ helper := newHelper()
+
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+
+ // Trie accounts [acc-1, acc-2, acc-3, acc-4, acc-6]
+ // Extra accounts [acc-0, acc-5, acc-7]
+
+ // Missing accounts, only in the trie
+ {
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning
+ helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle
+ helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End
+ }
+
+ // Wrong accounts
+ {
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
+
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ }
+
+ // Extra accounts, only in the snap
+ {
+ helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning
+ helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle
+ helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // after the end
+ }
+
+ root, snap := helper.CommitAndGenerate()
+ t.Logf("Root: %#x\n", root) // Root = 0x825891472281463511e7ebcc7f109e4f9200c20fa384754e11fd605cd98464e8
+
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests that snapshot generation errors out correctly in case of a missing trie
+// node in the account trie.
+func TestGenerateCorruptAccountTrie(t *testing.T) {
+ // We can't use statedb to make a test trie (circular dependency), so make
+ // a fake one manually. We're going with a small account trie of 3 accounts,
+ // without any storage slots to keep the test smaller.
+ helper := newHelper()
+
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
+
+ root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
+
+ // Delete an account trie leaf and ensure the generator chokes
+ helper.triedb.Commit(root, false)
+ helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
+
+ snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+ t.Errorf("Snapshot generated against corrupt account trie")
+
+ case <-time.After(time.Second):
+		// Not generated fast enough, hopefully blocked inside on a missing trie node failure
+ }
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests that snapshot generation errors out correctly in case of a missing root
+// trie node for a storage trie. It's similar to internal corruption but it is
+// handled differently inside the generator.
+func TestGenerateMissingStorageTrie(t *testing.T) {
+ // We can't use statedb to make a test trie (circular dependency), so make
+ // a fake one manually. We're going with a small account trie of 3 accounts,
+// two of which also have the same 3-slot storage trie attached.
+ helper := newHelper()
+
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+
+ root := helper.Commit()
+
+ // Delete a storage trie root and ensure the generator chokes
+ helper.diskdb.Delete(stRoot)
+
+ snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+ t.Errorf("Snapshot generated against corrupt storage trie")
+
+ case <-time.After(time.Second):
+		// Not generated fast enough, hopefully blocked inside on a missing trie node failure
+ }
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests that snapshot generation errors out correctly in case of a missing trie
+// node in a storage trie.
+func TestGenerateCorruptStorageTrie(t *testing.T) {
+ // We can't use statedb to make a test trie (circular dependency), so make
+ // a fake one manually. We're going with a small account trie of 3 accounts,
+// two of which also have the same 3-slot storage trie attached.
+ helper := newHelper()
+
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+
+ root := helper.Commit()
+
+ // Delete a storage trie leaf and ensure the generator chokes
+ helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
+
+ snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+ t.Errorf("Snapshot generated against corrupt storage trie")
+
+ case <-time.After(time.Second):
+		// Not generated fast enough, hopefully blocked inside on a missing trie node failure
+ }
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests snapshot generation when an extra account with storage exists in the snap state.
+func TestGenerateWithExtraAccounts(t *testing.T) {
+ helper := newHelper()
+ {
+ // Account one in the trie
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
+ []string{"key-1", "key-2", "key-3", "key-4", "key-5"},
+ []string{"val-1", "val-2", "val-3", "val-4", "val-5"},
+ true,
+ )
+ acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+
+ // Identical in the snap
+ key := hashData([]byte("acc-1"))
+ rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-4")), []byte("val-4"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-5")), []byte("val-5"))
+ }
+ {
+ // Account two exists only in the snapshot
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")),
+ []string{"key-1", "key-2", "key-3", "key-4", "key-5"},
+ []string{"val-1", "val-2", "val-3", "val-4", "val-5"},
+ true,
+ )
+ acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ key := hashData([]byte("acc-2"))
+ rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-1")), []byte("b-val-1"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3"))
+ }
+ root := helper.Commit()
+
+ // To verify the test: If we now inspect the snap db, there should exist extraneous storage items
+ if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil {
+ t.Fatalf("expected snap storage to exist")
+ }
+ snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+ // If we now inspect the snap db, there should exist no extraneous storage items
+ if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
+ t.Fatalf("expected slot to be removed, got %v", string(data))
+ }
+}
+
+// Tests snapshot generation when an extra account with storage exists in the snap state.
+func TestGenerateWithManyExtraAccounts(t *testing.T) {
+ helper := newHelper()
+ {
+ // Account one in the trie
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
+ []string{"key-1", "key-2", "key-3"},
+ []string{"val-1", "val-2", "val-3"},
+ true,
+ )
+ acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+
+ // Identical in the snap
+ key := hashData([]byte("acc-1"))
+ rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
+ }
+ {
+		// 1000 accounts exist only in the snapshot
+ for i := 0; i < 1000; i++ {
+ acc := &Account{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
+ rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
+ }
+ }
+ root, snap := helper.CommitAndGenerate()
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests this case:
+// maxAccountRange 3
+// snapshot-accounts: 01, 02, 03, 04, 05, 06, 07
+// trie-accounts:             03,             07
+//
+// We iterate three snapshot accounts (max = 3) from the database. They are 0x01, 0x02, 0x03.
+// The trie has a lot of deletions.
+// So in the trie, we iterate 2 entries: 0x03, 0x07. We create 0x07 in the database and abort the procedure, because the trie is exhausted.
+// But in the database, we still have the stale accounts 0x04, 0x05. They are not iterated yet, but the procedure is finished.
+func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
+ accountCheckRange = 3
+ helper := newHelper()
+ {
+ acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
+ helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val)
+
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x01"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x03"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x04"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x05"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x06"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x07"), val)
+ }
+ root, snap := helper.CommitAndGenerate()
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// TestGenerateWithMalformedSnapdata tests what happens if we have some junk
+// in the snapshot database, which cannot be parsed back to an account
+func TestGenerateWithMalformedSnapdata(t *testing.T) {
+ accountCheckRange = 3
+ helper := newHelper()
+ {
+ acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
+
+ junk := make([]byte, 100)
+ copy(junk, []byte{0xde, 0xad})
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), junk)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x03"), junk)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x04"), junk)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x05"), junk)
+ }
+ root, snap := helper.CommitAndGenerate()
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+ // If we now inspect the snap db, there should exist no extraneous storage items
+ if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
+ t.Fatalf("expected slot to be removed, got %v", string(data))
+ }
+}
+
+func TestGenerateFromEmptySnap(t *testing.T) {
+ //enableLogging()
+ accountCheckRange = 10
+ storageCheckRange = 20
+ helper := newHelper()
+	// Add 400 accounts to the trie
+ for i := 0; i < 400; i++ {
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
+ &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ }
+ root, snap := helper.CommitAndGenerate()
+ t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4
+
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests snapshot generation with existent flat state, where the flat state
+// storage is correct, but incomplete.
+// The incomplete part is on the second range
+// snap: [ 0x01, 0x02, 0x03, 0x04] , [ 0x05, 0x06, 0x07, {missing}] (with storageCheck = 4)
+// trie: 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
+// This hits a case where the snap verification passes, but there are more elements in the trie
+// which we must also add.
+func TestGenerateWithIncompleteStorage(t *testing.T) {
+ storageCheckRange = 4
+ helper := newHelper()
+ stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"}
+ stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"}
+ // We add 8 accounts, each one is missing exactly one of the storage slots. This means
+ // we don't have to order the keys and figure out exactly which hash-key winds up
+ // on the sensitive spots at the boundaries
+ for i := 0; i < 8; i++ {
+ accKey := fmt.Sprintf("acc-%d", i)
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(accKey)), stKeys, stVals, true)
+ helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ var moddedKeys []string
+ var moddedVals []string
+ for ii := 0; ii < 8; ii++ {
+ if ii != i {
+ moddedKeys = append(moddedKeys, stKeys[ii])
+ moddedVals = append(moddedVals, stVals[ii])
+ }
+ }
+ helper.addSnapStorage(accKey, moddedKeys, moddedVals)
+ }
+ root, snap := helper.CommitAndGenerate()
+ t.Logf("Root: %#x\n", root) // Root: 0xca73f6f05ba4ca3024ef340ef3dfca8fdabc1b677ff13f5a9571fd49c16e67ff
+
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+func incKey(key []byte) []byte {
+ for i := len(key) - 1; i >= 0; i-- {
+ key[i]++
+ if key[i] != 0x0 {
+ break
+ }
+ }
+ return key
+}
+
+func decKey(key []byte) []byte {
+ for i := len(key) - 1; i >= 0; i-- {
+ key[i]--
+ if key[i] != 0xff {
+ break
+ }
+ }
+ return key
+}
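+
+// For example, incKey([]byte{0x01, 0xff}) yields []byte{0x02, 0x00} and
+// decKey([]byte{0x02, 0x00}) yields []byte{0x01, 0xff}. Unlike increaseKey,
+// these helpers return the (mutated) key even when it wraps around.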
+
+func populateDangling(disk ethdb.KeyValueStore) {
+ populate := func(accountHash common.Hash, keys []string, vals []string) {
+ for i, key := range keys {
+ rawdb.WriteStorageSnapshot(disk, accountHash, hashData([]byte(key)), []byte(vals[i]))
+ }
+ }
+ // Dangling storages of the "first" account
+ populate(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ // Dangling storages of the "last" account
+ populate(common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ // Dangling storages around the account 1
+ hash := decKey(hashData([]byte("acc-1")).Bytes())
+ populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+ hash = incKey(hashData([]byte("acc-1")).Bytes())
+ populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ // Dangling storages around the account 2
+ hash = decKey(hashData([]byte("acc-2")).Bytes())
+ populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+ hash = incKey(hashData([]byte("acc-2")).Bytes())
+ populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ // Dangling storages around the account 3
+ hash = decKey(hashData([]byte("acc-3")).Bytes())
+ populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+ hash = incKey(hashData([]byte("acc-3")).Bytes())
+ populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ // Dangling storages of the random account
+ populate(randomHash(), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+ populate(randomHash(), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+ populate(randomHash(), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+}
+
+// Tests snapshot generation with dangling storages. Dangling storage means
+// the storage data exists while the corresponding account data is missing.
+//
+// This test will populate some dangling storages to see if they can be cleaned up.
+func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
+ var helper = newHelper()
+
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+
+ helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+ helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ populateDangling(helper.diskdb)
+
+ root, snap := helper.CommitAndGenerate()
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests snapshot generation with dangling storages, i.e. storage data that
+// exists on disk while the corresponding account data is missing.
+//
+// This test populates some dangling storages to verify that they are cleaned up.
+func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) {
+ var helper = newHelper()
+
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+
+ populateDangling(helper.diskdb)
+
+ root, snap := helper.CommitAndGenerate()
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+
+ case <-time.After(3 * time.Second):
+ t.Errorf("Snapshot generation failed")
+ }
+ checkSnapRoot(t, snap, root)
+
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
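
Note for the two tests above: the cleanup they exercise can also be asserted directly against the disk layout. A minimal sketch, assuming helper.diskdb and the rawdb snapshot key scheme used in populateDangling (assertNoDanglingStorage is a hypothetical helper, not part of this diff):

func assertNoDanglingStorage(t *testing.T, db ethdb.KeyValueStore, account common.Hash) {
	// Scan the storage-snapshot namespace of the given account hash; after
	// a successful generation run no dangling slots should remain.
	it := db.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), nil)
	defer it.Release()
	for it.Next() {
		t.Fatalf("dangling storage slot survived generation: %x", it.Key())
	}
}
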
diff --git a/core/state/snapshot/holdable_iterator.go b/core/state/snapshot/holdable_iterator.go
new file mode 100644
index 0000000000..1e86ff9d82
--- /dev/null
+++ b/core/state/snapshot/holdable_iterator.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// holdableIterator is a wrapper around a database iterator. It extends the
+// basic iterator interface by adding Hold, which pins the element the iterator
+// is currently positioned on so that it is served again on the next call to Next.
+type holdableIterator struct {
+ it ethdb.Iterator
+ key []byte
+ val []byte
+ atHeld bool
+}
+
+// newHoldableIterator initializes the holdableIterator with the given iterator.
+func newHoldableIterator(it ethdb.Iterator) *holdableIterator {
+ return &holdableIterator{it: it}
+}
+
+// Hold retains the element the iterator is currently positioned on, to be
+// served again on the next call to Next.
+func (it *holdableIterator) Hold() {
+ if it.it.Key() == nil {
+ return // nothing to hold
+ }
+ it.key = common.CopyBytes(it.it.Key())
+ it.val = common.CopyBytes(it.it.Value())
+ it.atHeld = false
+}
+
+// Next moves the iterator to the next key/value pair. It returns false if the
+// iterator is exhausted.
+func (it *holdableIterator) Next() bool {
+ if !it.atHeld && it.key != nil {
+ it.atHeld = true
+ } else if it.atHeld {
+ it.atHeld = false
+ it.key = nil
+ it.val = nil
+ }
+ if it.key != nil {
+ return true // shifted to locally held value
+ }
+ return it.it.Next()
+}
+
+// Error returns any accumulated error. Exhausting all the key/value pairs
+// is not considered to be an error.
+func (it *holdableIterator) Error() error { return it.it.Error() }
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing error.
+func (it *holdableIterator) Release() {
+ it.atHeld = false
+ it.key = nil
+ it.val = nil
+ it.it.Release()
+}
+
+// Key returns the key of the current key/value pair, or nil if done. The caller
+// should not modify the contents of the returned slice, and its contents may
+// change on the next call to Next.
+func (it *holdableIterator) Key() []byte {
+ if it.key != nil {
+ return it.key
+ }
+ return it.it.Key()
+}
+
+// Value returns the value of the current key/value pair, or nil if done. The
+// caller should not modify the contents of the returned slice, and its contents
+// may change on the next call to Next.
+func (it *holdableIterator) Value() []byte {
+ if it.val != nil {
+ return it.val
+ }
+ return it.it.Value()
+}
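
The Hold/Next contract above is easiest to see in a short usage sketch (db is assumed to be a key-value store holding "k1", "k2", ... as in the tests below):

it := newHoldableIterator(db.NewIterator(nil, nil))
it.Next() // positioned on the first pair, "k1"
it.Hold() // pin "k1" locally
it.Next() // serves the held "k1" again
it.Next() // resumes the wrapped iterator on "k2"
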
diff --git a/core/state/snapshot/holdable_iterator_test.go b/core/state/snapshot/holdable_iterator_test.go
new file mode 100644
index 0000000000..76a2c75c0c
--- /dev/null
+++ b/core/state/snapshot/holdable_iterator_test.go
@@ -0,0 +1,163 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/harmony-one/harmony/core/rawdb"
+)
+
+func TestIteratorHold(t *testing.T) {
+ // Create the key-value data store
+ var (
+ content = map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}
+ order = []string{"k1", "k2", "k3"}
+ db = rawdb.NewMemoryDatabase()
+ )
+ for key, val := range content {
+ if err := db.Put([]byte(key), []byte(val)); err != nil {
+ t.Fatalf("failed to insert item %s:%s into database: %v", key, val, err)
+ }
+ }
+ // Iterate over the database with the given configs and verify the results
+ it, idx := newHoldableIterator(db.NewIterator(nil, nil)), 0
+
+	// Nothing should be affected by calling Hold on an uninitialized iterator
+ it.Hold()
+
+ for it.Next() {
+ if len(content) <= idx {
+ t.Errorf("more items than expected: checking idx=%d (key %q), expecting len=%d", idx, it.Key(), len(order))
+ break
+ }
+ if !bytes.Equal(it.Key(), []byte(order[idx])) {
+ t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
+ }
+ if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
+ t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
+ }
+		// Should be safe to call Hold multiple times
+ it.Hold()
+ it.Hold()
+
+		// Shift iterator to the held element
+ it.Next()
+ if !bytes.Equal(it.Key(), []byte(order[idx])) {
+ t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
+ }
+ if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
+ t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
+ }
+
+		// A Hold/Next combo should always work
+ it.Hold()
+ it.Next()
+ if !bytes.Equal(it.Key(), []byte(order[idx])) {
+ t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
+ }
+ if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
+ t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
+ }
+ idx++
+ }
+ if err := it.Error(); err != nil {
+ t.Errorf("iteration failed: %v", err)
+ }
+ if idx != len(order) {
+ t.Errorf("iteration terminated prematurely: have %d, want %d", idx, len(order))
+ }
+ db.Close()
+}
+
+func TestReopenIterator(t *testing.T) {
+ var (
+ content = map[common.Hash]string{
+ common.HexToHash("a1"): "v1",
+ common.HexToHash("a2"): "v2",
+ common.HexToHash("a3"): "v3",
+ common.HexToHash("a4"): "v4",
+ common.HexToHash("a5"): "v5",
+ common.HexToHash("a6"): "v6",
+ }
+ order = []common.Hash{
+ common.HexToHash("a1"),
+ common.HexToHash("a2"),
+ common.HexToHash("a3"),
+ common.HexToHash("a4"),
+ common.HexToHash("a5"),
+ common.HexToHash("a6"),
+ }
+ db = rawdb.NewMemoryDatabase()
+ )
+ for key, val := range content {
+ rawdb.WriteAccountSnapshot(db, key, []byte(val))
+ }
+ checkVal := func(it *holdableIterator, index int) {
+ if !bytes.Equal(it.Key(), append(rawdb.SnapshotAccountPrefix, order[index].Bytes()...)) {
+ t.Fatalf("Unexpected data entry key, want %v got %v", order[index], it.Key())
+ }
+ if !bytes.Equal(it.Value(), []byte(content[order[index]])) {
+ t.Fatalf("Unexpected data entry key, want %v got %v", []byte(content[order[index]]), it.Value())
+ }
+ }
+ // Iterate over the database with the given configs and verify the results
+ ctx, idx := newGeneratorContext(&generatorStats{}, db, nil, nil), -1
+
+ idx++
+ ctx.account.Next()
+ checkVal(ctx.account, idx)
+
+ ctx.reopenIterator(snapAccount)
+ idx++
+ ctx.account.Next()
+ checkVal(ctx.account, idx)
+
+ // reopen twice
+ ctx.reopenIterator(snapAccount)
+ ctx.reopenIterator(snapAccount)
+ idx++
+ ctx.account.Next()
+ checkVal(ctx.account, idx)
+
+ // reopen iterator with held value
+ ctx.account.Next()
+ ctx.account.Hold()
+ ctx.reopenIterator(snapAccount)
+ idx++
+ ctx.account.Next()
+ checkVal(ctx.account, idx)
+
+ // reopen twice iterator with held value
+ ctx.account.Next()
+ ctx.account.Hold()
+ ctx.reopenIterator(snapAccount)
+ ctx.reopenIterator(snapAccount)
+ idx++
+ ctx.account.Next()
+ checkVal(ctx.account, idx)
+
+ // shift to the end and reopen
+ ctx.account.Next() // the end
+ ctx.reopenIterator(snapAccount)
+ ctx.account.Next()
+ if ctx.account.Key() != nil {
+ t.Fatal("Unexpected iterated entry")
+ }
+}
diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go
new file mode 100644
index 0000000000..44127ae6fd
--- /dev/null
+++ b/core/state/snapshot/iterator.go
@@ -0,0 +1,400 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/harmony-one/harmony/core/rawdb"
+)
+
+// Iterator is an iterator to step over all the accounts or the specific
+// storage in a snapshot which may or may not be composed of multiple layers.
+type Iterator interface {
+ // Next steps the iterator forward one element, returning false if exhausted,
+ // or an error if iteration failed for some reason (e.g. root being iterated
+ // becomes stale and garbage collected).
+ Next() bool
+
+ // Error returns any failure that occurred during iteration, which might have
+ // caused a premature iteration exit (e.g. snapshot stack becoming stale).
+ Error() error
+
+ // Hash returns the hash of the account or storage slot the iterator is
+ // currently at.
+ Hash() common.Hash
+
+ // Release releases associated resources. Release should always succeed and
+ // can be called multiple times without causing error.
+ Release()
+}
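+
+// A typical consumption loop mirrors ethdb.Iterator; the sketch below is
+// illustrative only, with makeIterator standing in for any concrete
+// constructor in this package:
+//
+//	it := makeIterator()
+//	defer it.Release()
+//	for it.Next() {
+//		process(it.Hash()) // caller-supplied handling
+//	}
+//	if err := it.Error(); err != nil {
+//		// e.g. the snapshot stack went stale mid-iteration
+//	}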
+
+// AccountIterator is an iterator to step over all the accounts in a snapshot,
+// which may or may not be composed of multiple layers.
+type AccountIterator interface {
+ Iterator
+
+ // Account returns the RLP encoded slim account the iterator is currently at.
+	// An error will be returned if the iterator becomes invalid.
+ Account() []byte
+}
+
+// StorageIterator is an iterator to step over the specific storage in a snapshot,
+// which may or may not be composed of multiple layers.
+type StorageIterator interface {
+ Iterator
+
+ // Slot returns the storage slot the iterator is currently at. An error will
+	// be returned if the iterator becomes invalid.
+ Slot() []byte
+}
+
+// diffAccountIterator is an account iterator that steps over the accounts (both
+// live and deleted) contained within a single diff layer. Higher order iterators
+// will use the deleted accounts to skip deeper iterators.
+type diffAccountIterator struct {
+ // curHash is the current hash the iterator is positioned on. The field is
+ // explicitly tracked since the referenced diff layer might go stale after
+ // the iterator was positioned and we don't want to fail accessing the old
+ // hash as long as the iterator is not touched any more.
+ curHash common.Hash
+
+ layer *diffLayer // Live layer to retrieve values from
+ keys []common.Hash // Keys left in the layer to iterate
+ fail error // Any failures encountered (stale)
+}
+
+// AccountIterator creates an account iterator over a single diff layer.
+func (dl *diffLayer) AccountIterator(seek common.Hash) AccountIterator {
+ // Seek out the requested starting account
+ hashes := dl.AccountList()
+ index := sort.Search(len(hashes), func(i int) bool {
+ return bytes.Compare(seek[:], hashes[i][:]) <= 0
+ })
+	// Assemble and return the already seeked iterator
+ return &diffAccountIterator{
+ layer: dl,
+ keys: hashes[index:],
+ }
+}
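+
+// The sort.Search above makes the seek position inclusive: iteration starts at
+// the first hash that is >= seek. A hypothetical illustration:
+//
+//	hashes = [0x11.., 0x33.., 0x55..]
+//	AccountIterator(0x33..) // starts at 0x33..
+//	AccountIterator(0x34..) // starts at 0x55..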
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diffAccountIterator) Next() bool {
+ // If the iterator was already stale, consider it a programmer error. Although
+ // we could just return false here, triggering this path would probably mean
+// somebody forgot to check for Error, so let's blow up instead of undefined
+ // behavior that's hard to debug.
+ if it.fail != nil {
+ panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
+ }
+ // Stop iterating if all keys were exhausted
+ if len(it.keys) == 0 {
+ return false
+ }
+ if it.layer.Stale() {
+ it.fail, it.keys = ErrSnapshotStale, nil
+ return false
+ }
+ // Iterator seems to be still alive, retrieve and cache the live hash
+ it.curHash = it.keys[0]
+ // key cached, shift the iterator and notify the user of success
+ it.keys = it.keys[1:]
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (it *diffAccountIterator) Error() error {
+ return it.fail
+}
+
+// Hash returns the hash of the account the iterator is currently at.
+func (it *diffAccountIterator) Hash() common.Hash {
+ return it.curHash
+}
+
+// Account returns the RLP encoded slim account the iterator is currently at.
+// This method may _fail_ if the underlying layer has been flattened between
+// the call to Next and Account. That type of error will set it.fail.
+// This method assumes that flattening does not delete elements from
+// the accountdata mapping (writing nil into it is fine though), and will panic
+// if elements have been deleted.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (it *diffAccountIterator) Account() []byte {
+ it.layer.lock.RLock()
+ blob, ok := it.layer.accountData[it.curHash]
+ if !ok {
+ if _, ok := it.layer.destructSet[it.curHash]; ok {
+ it.layer.lock.RUnlock()
+ return nil
+ }
+ panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
+ }
+ it.layer.lock.RUnlock()
+ if it.layer.Stale() {
+ it.fail, it.keys = ErrSnapshotStale, nil
+ }
+ return blob
+}
+
+// Release is a noop for diff account iterators as there are no held resources.
+func (it *diffAccountIterator) Release() {}
+
+// diskAccountIterator is an account iterator that steps over the live accounts
+// contained within a disk layer.
+type diskAccountIterator struct {
+ layer *diskLayer
+ it ethdb.Iterator
+}
+
+// AccountIterator creates an account iterator over a disk layer.
+func (dl *diskLayer) AccountIterator(seek common.Hash) AccountIterator {
+ pos := common.TrimRightZeroes(seek[:])
+ return &diskAccountIterator{
+ layer: dl,
+ it: dl.diskdb.NewIterator(rawdb.SnapshotAccountPrefix, pos),
+ }
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diskAccountIterator) Next() bool {
+ // If the iterator was already exhausted, don't bother
+ if it.it == nil {
+ return false
+ }
+ // Try to advance the iterator and release it if we reached the end
+ for {
+ if !it.it.Next() {
+ it.it.Release()
+ it.it = nil
+ return false
+ }
+ if len(it.it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
+ break
+ }
+ }
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+//
+// For the disk iterator this surfaces any error reported by the underlying
+// database iterator; once exhausted and released, it always reports nil.
+func (it *diskAccountIterator) Error() error {
+ if it.it == nil {
+ return nil // Iterator is exhausted and released
+ }
+ return it.it.Error()
+}
+
+// Hash returns the hash of the account the iterator is currently at.
+func (it *diskAccountIterator) Hash() common.Hash {
+ return common.BytesToHash(it.it.Key()) // The prefix will be truncated
+}
+
+// Account returns the RLP encoded slim account the iterator is currently at.
+func (it *diskAccountIterator) Account() []byte {
+ return it.it.Value()
+}
+
+// Release releases the database snapshot held during iteration.
+func (it *diskAccountIterator) Release() {
+ // The iterator is auto-released on exhaustion, so make sure it's still alive
+ if it.it != nil {
+ it.it.Release()
+ it.it = nil
+ }
+}
+
+// diffStorageIterator is a storage iterator that steps over the specific storage
+// (both live and deleted) contained within a single diff layer. Higher order
+// iterators will use the deleted slot to skip deeper iterators.
+type diffStorageIterator struct {
+ // curHash is the current hash the iterator is positioned on. The field is
+ // explicitly tracked since the referenced diff layer might go stale after
+ // the iterator was positioned and we don't want to fail accessing the old
+ // hash as long as the iterator is not touched any more.
+ curHash common.Hash
+ account common.Hash
+
+ layer *diffLayer // Live layer to retrieve values from
+ keys []common.Hash // Keys left in the layer to iterate
+ fail error // Any failures encountered (stale)
+}
+
+// StorageIterator creates a storage iterator over a single diff layer.
+// Besides the storage iterator, an additional "destructed" flag is returned.
+// If it is true, the whole storage was destructed in this layer (and possibly
+// recreated), so deeper layers need not be consulted for storage retrieval.
+func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) {
+	// Create the storage iterator for this account even if it's marked as
+	// destructed. The iterator is for the new instance which just shares the
+	// address with the deleted one.
+ hashes, destructed := dl.StorageList(account)
+ index := sort.Search(len(hashes), func(i int) bool {
+ return bytes.Compare(seek[:], hashes[i][:]) <= 0
+ })
+	// Assemble and return the already seeked iterator
+ return &diffStorageIterator{
+ layer: dl,
+ account: account,
+ keys: hashes[index:],
+ }, destructed
+}
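+
+// How callers stack these per-layer iterators, sketched with hypothetical
+// locals (compare newFastIterator in iterator_fast.go):
+//
+//	it, destructed := layer.StorageIterator(account, common.Hash{})
+//	iters = append(iters, it)
+//	if destructed {
+//		// The storage was wiped in this layer; deeper layers only hold
+//		// data of the pre-destruct incarnation, so stop descending.
+//		return iters
+//	}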
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diffStorageIterator) Next() bool {
+ // If the iterator was already stale, consider it a programmer error. Although
+ // we could just return false here, triggering this path would probably mean
+// somebody forgot to check for Error, so let's blow up instead of undefined
+ // behavior that's hard to debug.
+ if it.fail != nil {
+ panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
+ }
+ // Stop iterating if all keys were exhausted
+ if len(it.keys) == 0 {
+ return false
+ }
+ if it.layer.Stale() {
+ it.fail, it.keys = ErrSnapshotStale, nil
+ return false
+ }
+ // Iterator seems to be still alive, retrieve and cache the live hash
+ it.curHash = it.keys[0]
+ // key cached, shift the iterator and notify the user of success
+ it.keys = it.keys[1:]
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (it *diffStorageIterator) Error() error {
+ return it.fail
+}
+
+// Hash returns the hash of the storage slot the iterator is currently at.
+func (it *diffStorageIterator) Hash() common.Hash {
+ return it.curHash
+}
+
+// Slot returns the raw storage slot value the iterator is currently at.
+// This method may _fail_ if the underlying layer has been flattened between
+// the call to Next and Value. That type of error will set it.fail.
+// This method assumes that flattening does not delete elements from
+// the storage mapping (writing nil into it is fine though), and will panic
+// if elements have been deleted.
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (it *diffStorageIterator) Slot() []byte {
+ it.layer.lock.RLock()
+ storage, ok := it.layer.storageData[it.account]
+ if !ok {
+ panic(fmt.Sprintf("iterator referenced non-existent account storage: %x", it.account))
+ }
+	// Storage slot might be nil (deleted), but the key must exist
+ blob, ok := storage[it.curHash]
+ if !ok {
+ panic(fmt.Sprintf("iterator referenced non-existent storage slot: %x", it.curHash))
+ }
+ it.layer.lock.RUnlock()
+ if it.layer.Stale() {
+ it.fail, it.keys = ErrSnapshotStale, nil
+ }
+ return blob
+}
+
+// Release is a noop for diff storage iterators as there are no held resources.
+func (it *diffStorageIterator) Release() {}
+
+// diskStorageIterator is a storage iterator that steps over the live storage
+// contained within a disk layer.
+type diskStorageIterator struct {
+ layer *diskLayer
+ account common.Hash
+ it ethdb.Iterator
+}
+
+// StorageIterator creates a storage iterator over a disk layer.
+// If the whole storage is destructed, then all entries in the disk
+// layer are deleted already. So the "destructed" flag returned here
+// is always false.
+func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) {
+ pos := common.TrimRightZeroes(seek[:])
+ return &diskStorageIterator{
+ layer: dl,
+ account: account,
+ it: dl.diskdb.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
+ }, false
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diskStorageIterator) Next() bool {
+ // If the iterator was already exhausted, don't bother
+ if it.it == nil {
+ return false
+ }
+ // Try to advance the iterator and release it if we reached the end
+ for {
+ if !it.it.Next() {
+ it.it.Release()
+ it.it = nil
+ return false
+ }
+ if len(it.it.Key()) == len(rawdb.SnapshotStoragePrefix)+common.HashLength+common.HashLength {
+ break
+ }
+ }
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+//
+// For the disk iterator this surfaces any error reported by the underlying
+// database iterator; once exhausted and released, it always reports nil.
+func (it *diskStorageIterator) Error() error {
+ if it.it == nil {
+ return nil // Iterator is exhausted and released
+ }
+ return it.it.Error()
+}
+
+// Hash returns the hash of the storage slot the iterator is currently at.
+func (it *diskStorageIterator) Hash() common.Hash {
+ return common.BytesToHash(it.it.Key()) // The prefix will be truncated
+}
+
+// Slot returns the raw storage slot content the iterator is currently at.
+func (it *diskStorageIterator) Slot() []byte {
+ return it.it.Value()
+}
+
+// Release releases the database snapshot held during iteration.
+func (it *diskStorageIterator) Release() {
+ // The iterator is auto-released on exhaustion, so make sure it's still alive
+ if it.it != nil {
+ it.it.Release()
+ it.it = nil
+ }
+}
diff --git a/core/state/snapshot/iterator_binary.go b/core/state/snapshot/iterator_binary.go
new file mode 100644
index 0000000000..22184b2545
--- /dev/null
+++ b/core/state/snapshot/iterator_binary.go
@@ -0,0 +1,213 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// binaryIterator is a simplistic iterator to step over the accounts or storage
+// in a snapshot, which may or may not be composed of multiple layers.
+// Performance-wise this iterator is slow; it's meant for cross-validating the fast one.
+type binaryIterator struct {
+ a Iterator
+ b Iterator
+ aDone bool
+ bDone bool
+ accountIterator bool
+ k common.Hash
+ account common.Hash
+ fail error
+}
+
+// initBinaryAccountIterator creates a simplistic iterator to step over all the
+// accounts in a slow, but easily verifiable way. Note this function is used for
+// initialization, use `newBinaryAccountIterator` as the API.
+func (dl *diffLayer) initBinaryAccountIterator() Iterator {
+ parent, ok := dl.parent.(*diffLayer)
+ if !ok {
+ l := &binaryIterator{
+ a: dl.AccountIterator(common.Hash{}),
+ b: dl.Parent().AccountIterator(common.Hash{}),
+ accountIterator: true,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+ }
+ l := &binaryIterator{
+ a: dl.AccountIterator(common.Hash{}),
+ b: parent.initBinaryAccountIterator(),
+ accountIterator: true,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+}
+
+// initBinaryStorageIterator creates a simplistic iterator to step over all the
+// storage slots in a slow, but easily verifiable way. Note this function is used
+// for initialization, use `newBinaryStorageIterator` as the API.
+func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) Iterator {
+ parent, ok := dl.parent.(*diffLayer)
+ if !ok {
+ // If the storage in this layer is already destructed, discard all
+		// deeper layers but still return a valid single-branch iterator.
+ a, destructed := dl.StorageIterator(account, common.Hash{})
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+		// The parent is the disk layer, which never reports "destructed",
+		// so it needs no special handling.
+ b, _ := dl.Parent().StorageIterator(account, common.Hash{})
+ l := &binaryIterator{
+ a: a,
+ b: b,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+ }
+ // If the storage in this layer is already destructed, discard all
+	// deeper layers but still return a valid single-branch iterator.
+ a, destructed := dl.StorageIterator(account, common.Hash{})
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+ l := &binaryIterator{
+ a: a,
+ b: parent.initBinaryStorageIterator(account),
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+}
+
+// Next steps the iterator forward one element, returning false if exhausted,
+// or an error if iteration failed for some reason (e.g. root being iterated
+// becomes stale and garbage collected).
+func (it *binaryIterator) Next() bool {
+ if it.aDone && it.bDone {
+ return false
+ }
+first:
+ if it.aDone {
+ it.k = it.b.Hash()
+ it.bDone = !it.b.Next()
+ return true
+ }
+ if it.bDone {
+ it.k = it.a.Hash()
+ it.aDone = !it.a.Next()
+ return true
+ }
+ nextA, nextB := it.a.Hash(), it.b.Hash()
+ if diff := bytes.Compare(nextA[:], nextB[:]); diff < 0 {
+ it.aDone = !it.a.Next()
+ it.k = nextA
+ return true
+ } else if diff == 0 {
+ // Now we need to advance one of them
+ it.aDone = !it.a.Next()
+ goto first
+ }
+ it.bDone = !it.b.Next()
+ it.k = nextB
+ return true
+}
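+
+// A short trace of the merge above, for two branches a = [1, 3] and b = [1, 2]
+// (hypothetical hashes written as single bytes):
+//
+//	Next() -> k = 1 // equal heads: a is advanced first, then 1 is taken
+//	Next() -> k = 2 // b's head is smaller
+//	Next() -> k = 3 // b exhausted, drain a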
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (it *binaryIterator) Error() error {
+ return it.fail
+}
+
+// Hash returns the hash of the account the iterator is currently at.
+func (it *binaryIterator) Hash() common.Hash {
+ return it.k
+}
+
+// Account returns the RLP encoded slim account the iterator is currently at, or
+// nil if the iterated snapshot stack became stale (you can check Error after
+// to see if it failed or not).
+//
+// Note the returned account is not a copy, please don't modify it.
+func (it *binaryIterator) Account() []byte {
+ if !it.accountIterator {
+ return nil
+ }
+ // The topmost iterator must be `diffAccountIterator`
+ blob, err := it.a.(*diffAccountIterator).layer.AccountRLP(it.k)
+ if err != nil {
+ it.fail = err
+ return nil
+ }
+ return blob
+}
+
+// Slot returns the raw storage slot data the iterator is currently at, or
+// nil if the iterated snapshot stack became stale (you can check Error after
+// to see if it failed or not).
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (it *binaryIterator) Slot() []byte {
+ if it.accountIterator {
+ return nil
+ }
+ blob, err := it.a.(*diffStorageIterator).layer.Storage(it.account, it.k)
+ if err != nil {
+ it.fail = err
+ return nil
+ }
+ return blob
+}
+
+// Release recursively releases all the iterators in the stack.
+func (it *binaryIterator) Release() {
+ it.a.Release()
+ it.b.Release()
+}
+
+// newBinaryAccountIterator creates a simplistic account iterator to step over
+// all the accounts in a slow, but easily verifiable way.
+func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
+ iter := dl.initBinaryAccountIterator()
+ return iter.(AccountIterator)
+}
+
+// newBinaryStorageIterator creates a simplistic storage iterator to step over
+// all the storage slots in a slow, but easily verifiable way.
+func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator {
+ iter := dl.initBinaryStorageIterator(account)
+ return iter.(StorageIterator)
+}
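
Cross-validation is the point of this slow iterator. A test-style sketch, assuming tree references the same snapshot stack as the diff layer dl (variable names are hypothetical):

// Walk the obvious binary iterator and the optimized fast one in lockstep.
slow := dl.newBinaryAccountIterator()
fast, _ := newFastAccountIterator(tree, dl.Root(), common.Hash{})
for slow.Next() {
	if !fast.Next() || fast.Hash() != slow.Hash() {
		t.Fatalf("iterator mismatch at %x", slow.Hash())
	}
}
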
diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go
new file mode 100644
index 0000000000..1a042c7cd3
--- /dev/null
+++ b/core/state/snapshot/iterator_fast.go
@@ -0,0 +1,350 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// weightedIterator is an iterator with an assigned weight. It is used to prioritise
+// which account or storage slot is the correct one if multiple iterators find the
+// same one (modified in multiple consecutive blocks).
+type weightedIterator struct {
+ it Iterator
+ priority int
+}
+
+// weightedIterators is a set of iterators implementing the sort.Interface.
+type weightedIterators []*weightedIterator
+
+// Len implements sort.Interface, returning the number of active iterators.
+func (its weightedIterators) Len() int { return len(its) }
+
+// Less implements sort.Interface, returning which of two iterators in the stack
+// is before the other.
+func (its weightedIterators) Less(i, j int) bool {
+ // Order the iterators primarily by the account hashes
+ hashI := its[i].it.Hash()
+ hashJ := its[j].it.Hash()
+
+ switch bytes.Compare(hashI[:], hashJ[:]) {
+ case -1:
+ return true
+ case 1:
+ return false
+ }
+ // Same account/storage-slot in multiple layers, split by priority
+ return its[i].priority < its[j].priority
+}
+
+// Swap implements sort.Interface, swapping two entries in the iterator stack.
+func (its weightedIterators) Swap(i, j int) {
+ its[i], its[j] = its[j], its[i]
+}
+
+// fastIterator is a more optimized multi-layer iterator which maintains a
+// direct mapping of all iterators leading down to the bottom layer.
+type fastIterator struct {
+ tree *Tree // Snapshot tree to reinitialize stale sub-iterators with
+ root common.Hash // Root hash to reinitialize stale sub-iterators through
+
+ curAccount []byte
+ curSlot []byte
+
+ iterators weightedIterators
+ initiated bool
+ account bool
+ fail error
+}
+
+// newFastIterator creates a new hierarchical account or storage iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously.
+func newFastIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) {
+ snap := tree.Snapshot(root)
+ if snap == nil {
+ return nil, fmt.Errorf("unknown snapshot: %x", root)
+ }
+ fi := &fastIterator{
+ tree: tree,
+ root: root,
+ account: accountIterator,
+ }
+ current := snap.(snapshot)
+ for depth := 0; current != nil; depth++ {
+ if accountIterator {
+ fi.iterators = append(fi.iterators, &weightedIterator{
+ it: current.AccountIterator(seek),
+ priority: depth,
+ })
+ } else {
+			// If the whole storage is destructed in this layer, don't
+			// bother with deeper layers anymore. But we should still keep
+			// the iterator for this layer, since it can contain some
+			// valid slots which belong to the re-created account.
+ it, destructed := current.StorageIterator(account, seek)
+ fi.iterators = append(fi.iterators, &weightedIterator{
+ it: it,
+ priority: depth,
+ })
+ if destructed {
+ break
+ }
+ }
+ current = current.Parent()
+ }
+ fi.init()
+ return fi, nil
+}
+
+// init walks over all the iterators and resolves any clashes between them, after
+// which it prepares the stack for step-by-step iteration.
+func (fi *fastIterator) init() {
+	// Track which account hashes the iterators are positioned on
+ var positioned = make(map[common.Hash]int)
+
+ // Position all iterators and track how many remain live
+ for i := 0; i < len(fi.iterators); i++ {
+ // Retrieve the first element and if it clashes with a previous iterator,
+ // advance either the current one or the old one. Repeat until nothing is
+ // clashing any more.
+ it := fi.iterators[i]
+ for {
+ // If the iterator is exhausted, drop it off the end
+ if !it.it.Next() {
+ it.it.Release()
+ last := len(fi.iterators) - 1
+
+ fi.iterators[i] = fi.iterators[last]
+ fi.iterators[last] = nil
+ fi.iterators = fi.iterators[:last]
+
+ i--
+ break
+ }
+ // The iterator is still alive, check for collisions with previous ones
+ hash := it.it.Hash()
+ if other, exist := positioned[hash]; !exist {
+ positioned[hash] = i
+ break
+ } else {
+ // Iterators collide, one needs to be progressed, use priority to
+ // determine which.
+ //
+ // This whole else-block can be avoided, if we instead
+ // do an initial priority-sort of the iterators. If we do that,
+ // then we'll only wind up here if a lower-priority (preferred) iterator
+ // has the same value, and then we will always just continue.
+ // However, it costs an extra sort, so it's probably not better
+ if fi.iterators[other].priority < it.priority {
+ // The 'it' should be progressed
+ continue
+ } else {
+ // The 'other' should be progressed, swap them
+ it = fi.iterators[other]
+ fi.iterators[other], fi.iterators[i] = fi.iterators[i], fi.iterators[other]
+ continue
+ }
+ }
+ }
+ }
+ // Re-sort the entire list
+ sort.Sort(fi.iterators)
+ fi.initiated = false
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (fi *fastIterator) Next() bool {
+ if len(fi.iterators) == 0 {
+ return false
+ }
+ if !fi.initiated {
+ // Don't forward first time -- we had to 'Next' once in order to
+ // do the sorting already
+ fi.initiated = true
+ if fi.account {
+ fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
+ } else {
+ fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
+ }
+ if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
+ fi.fail = innerErr
+ return false
+ }
+ if fi.curAccount != nil || fi.curSlot != nil {
+ return true
+ }
+ // Implicit else: we've hit a nil-account or nil-slot, and need to
+ // fall through to the loop below to land on something non-nil
+ }
+ // If an account or a slot is deleted in one of the layers, the key will
+ // still be there, but the actual value will be nil. However, the iterator
+ // should not export nil-values (but instead simply omit the key), so we
+ // need to loop here until we either
+ // - get a non-nil value,
+ // - hit an error,
+ // - or exhaust the iterator
+ for {
+ if !fi.next(0) {
+ return false // exhausted
+ }
+ if fi.account {
+ fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
+ } else {
+ fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
+ }
+ if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
+ fi.fail = innerErr
+ return false // error
+ }
+ if fi.curAccount != nil || fi.curSlot != nil {
+ break // non-nil value found
+ }
+ }
+ return true
+}
+
+// next handles the next operation internally and should be invoked when we know
+// that two elements in the list may have the same value.
+//
+// For example, if the iterated hashes become [2,3,5,5,8,9,10], then we should
+// invoke next(3), which will call Next on elem 3 (the second '5') and will
+// cascade along the list, applying the same operation if needed.
+func (fi *fastIterator) next(idx int) bool {
+ // If this particular iterator got exhausted, remove it and return true (the
+ // next one is surely not exhausted yet, otherwise it would have been removed
+ // already).
+ if it := fi.iterators[idx].it; !it.Next() {
+ it.Release()
+
+ fi.iterators = append(fi.iterators[:idx], fi.iterators[idx+1:]...)
+ return len(fi.iterators) > 0
+ }
+ // If there's no one left to cascade into, return
+ if idx == len(fi.iterators)-1 {
+ return true
+ }
+ // We next-ed the iterator at 'idx', now we may have to re-sort that element
+ var (
+ cur, next = fi.iterators[idx], fi.iterators[idx+1]
+ curHash, nextHash = cur.it.Hash(), next.it.Hash()
+ )
+ if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
+		// It is still in the correct place
+ return true
+ } else if diff == 0 && cur.priority < next.priority {
+		// Still in the correct place, but we need to iterate on the next
+ fi.next(idx + 1)
+ return true
+ }
+ // At this point, the iterator is in the wrong location, but the remaining
+ // list is sorted. Find out where to move the item.
+ clash := -1
+ index := sort.Search(len(fi.iterators), func(n int) bool {
+ // The iterator always advances forward, so anything before the old slot
+ // is known to be behind us, so just skip them altogether. This actually
+ // is an important clause since the sort order got invalidated.
+ if n < idx {
+ return false
+ }
+ if n == len(fi.iterators)-1 {
+ // Can always place an elem last
+ return true
+ }
+ nextHash := fi.iterators[n+1].it.Hash()
+ if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
+ return true
+ } else if diff > 0 {
+ return false
+ }
+ // The elem we're placing it next to has the same value,
+ // so whichever winds up on n+1 will need further iteration
+ clash = n + 1
+
+ return cur.priority < fi.iterators[n+1].priority
+ })
+ fi.move(idx, index)
+ if clash != -1 {
+ fi.next(clash)
+ }
+ return true
+}
+
+// move advances an iterator to another position in the list.
+func (fi *fastIterator) move(index, newpos int) {
+ elem := fi.iterators[index]
+ copy(fi.iterators[index:], fi.iterators[index+1:newpos+1])
+ fi.iterators[newpos] = elem
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (fi *fastIterator) Error() error {
+ return fi.fail
+}
+
+// Hash returns the current key
+func (fi *fastIterator) Hash() common.Hash {
+ return fi.iterators[0].it.Hash()
+}
+
+// Account returns the current account blob.
+// Note the returned account is not a copy, please don't modify it.
+func (fi *fastIterator) Account() []byte {
+ return fi.curAccount
+}
+
+// Slot returns the current storage slot.
+// Note the returned slot is not a copy, please don't modify it.
+func (fi *fastIterator) Slot() []byte {
+ return fi.curSlot
+}
+
+// Release iterates over all the remaining live layer iterators and releases each
+// of them individually.
+func (fi *fastIterator) Release() {
+ for _, it := range fi.iterators {
+ it.it.Release()
+ }
+ fi.iterators = nil
+}
+
+// Debug is a convenience helper during testing
+func (fi *fastIterator) Debug() {
+ for _, it := range fi.iterators {
+ fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0])
+ }
+ fmt.Println()
+}
+
+// newFastAccountIterator creates a new hierarchical account iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously.
+func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) {
+ return newFastIterator(tree, root, common.Hash{}, seek, true)
+}
+
+// newFastStorageIterator creates a new hierarchical storage iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously.
+func newFastStorageIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ return newFastIterator(tree, root, account, seek, false)
+}
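
Putting the fast iterator to work, a minimal sketch (tree and root are assumed to reference a live snapshot stack):

it, err := newFastAccountIterator(tree, root, common.Hash{})
if err != nil {
	return err // unknown snapshot root
}
defer it.Release()
for it.Next() {
	// Account() is already resolved to the shallowest live value across all
	// layers; deleted accounts are skipped rather than returned as nil.
	_ = it.Account()
}
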
diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go
new file mode 100644
index 0000000000..0296802d76
--- /dev/null
+++ b/core/state/snapshot/iterator_test.go
@@ -0,0 +1,1047 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ crand "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/harmony-one/harmony/core/rawdb"
+)
+
+// TestAccountIteratorBasics tests some simple single-layer (diff and disk) iteration
+func TestAccountIteratorBasics(t *testing.T) {
+ var (
+ destructs = make(map[common.Hash]struct{})
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ // Fill up a parent
+ for i := 0; i < 100; i++ {
+ h := randomHash()
+ data := randomAccount()
+
+ accounts[h] = data
+ if rand.Intn(4) == 0 {
+ destructs[h] = struct{}{}
+ }
+ if rand.Intn(2) == 0 {
+ accStorage := make(map[common.Hash][]byte)
+ value := make([]byte, 32)
+ crand.Read(value)
+ accStorage[randomHash()] = value
+ storage[h] = accStorage
+ }
+ }
+ // Add some (identical) layers on top
+ diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+ it := diffLayer.AccountIterator(common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+
+ diskLayer := diffToDisk(diffLayer)
+ it = diskLayer.AccountIterator(common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+}
+
+// TestStorageIteratorBasics tests some simple single-layer (diff and disk) iteration for storage
+func TestStorageIteratorBasics(t *testing.T) {
+ var (
+ nilStorage = make(map[common.Hash]int)
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ // Fill some random data
+ for i := 0; i < 10; i++ {
+ h := randomHash()
+ accounts[h] = randomAccount()
+
+ accStorage := make(map[common.Hash][]byte)
+ value := make([]byte, 32)
+
+ var nilstorage int
+ for i := 0; i < 100; i++ {
+ crand.Read(value)
+ if rand.Intn(2) == 0 {
+ accStorage[randomHash()] = common.CopyBytes(value)
+ } else {
+ accStorage[randomHash()] = nil // delete slot
+ nilstorage += 1
+ }
+ }
+ storage[h] = accStorage
+ nilStorage[h] = nilstorage
+ }
+ // Add some (identical) layers on top
+ diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
+ for account := range accounts {
+ it, _ := diffLayer.StorageIterator(account, common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+ }
+
+ diskLayer := diffToDisk(diffLayer)
+ for account := range accounts {
+ it, _ := diskLayer.StorageIterator(account, common.Hash{})
+ verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator
+ }
+}
+
+type testIterator struct {
+ values []byte
+}
+
+func newTestIterator(values ...byte) *testIterator {
+ return &testIterator{values}
+}
+
+func (ti *testIterator) Seek(common.Hash) {
+ panic("implement me")
+}
+
+func (ti *testIterator) Next() bool {
+ ti.values = ti.values[1:]
+ return len(ti.values) > 0
+}
+
+func (ti *testIterator) Error() error {
+ return nil
+}
+
+func (ti *testIterator) Hash() common.Hash {
+ return common.BytesToHash([]byte{ti.values[0]})
+}
+
+func (ti *testIterator) Account() []byte {
+ return nil
+}
+
+func (ti *testIterator) Slot() []byte {
+ return nil
+}
+
+func (ti *testIterator) Release() {}
+
+func TestFastIteratorBasics(t *testing.T) {
+ type testCase struct {
+ lists [][]byte
+ expKeys []byte
+ }
+ for i, tc := range []testCase{
+ {lists: [][]byte{{0, 1, 8}, {1, 2, 8}, {2, 9}, {4},
+ {7, 14, 15}, {9, 13, 15, 16}},
+ expKeys: []byte{0, 1, 2, 4, 7, 8, 9, 13, 14, 15, 16}},
+ {lists: [][]byte{{0, 8}, {1, 2, 8}, {7, 14, 15}, {8, 9},
+ {9, 10}, {10, 13, 15, 16}},
+ expKeys: []byte{0, 1, 2, 7, 8, 9, 10, 13, 14, 15, 16}},
+ } {
+ var iterators []*weightedIterator
+ for i, data := range tc.lists {
+ it := newTestIterator(data...)
+ iterators = append(iterators, &weightedIterator{it, i})
+ }
+ fi := &fastIterator{
+ iterators: iterators,
+ initiated: false,
+ }
+ count := 0
+ for fi.Next() {
+ if got, exp := fi.Hash()[31], tc.expKeys[count]; exp != got {
+ t.Errorf("tc %d, [%d]: got %d exp %d", i, count, got, exp)
+ }
+ count++
+ }
+ }
+}
+
+type verifyContent int
+
+const (
+ verifyNothing verifyContent = iota
+ verifyAccount
+ verifyStorage
+)
+
+func verifyIterator(t *testing.T, expCount int, it Iterator, verify verifyContent) {
+ t.Helper()
+
+ var (
+ count = 0
+ last = common.Hash{}
+ )
+ for it.Next() {
+ hash := it.Hash()
+ if bytes.Compare(last[:], hash[:]) >= 0 {
+ t.Errorf("wrong order: %x >= %x", last, hash)
+ }
+ count++
+ if verify == verifyAccount && len(it.(AccountIterator).Account()) == 0 {
+ t.Errorf("iterator returned nil-value for hash %x", hash)
+ } else if verify == verifyStorage && len(it.(StorageIterator).Slot()) == 0 {
+ t.Errorf("iterator returned nil-value for hash %x", hash)
+ }
+ last = hash
+ }
+ if count != expCount {
+ t.Errorf("iterator count mismatch: have %d, want %d", count, expCount)
+ }
+ if err := it.Error(); err != nil {
+ t.Errorf("iterator failed: %v", err)
+ }
+}
+
+// TestAccountIteratorTraversal tests some simple multi-layer iteration.
+func TestAccountIteratorTraversal(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xcc", "0xf0", "0xff"), nil)
+
+ // Verify the single and multi-layer iterators
+ head := snaps.Snapshot(common.HexToHash("0x04"))
+
+ verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
+ verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+ it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+ verifyIterator(t, 7, it, verifyAccount)
+ it.Release()
+
+	// Verify that the functionality still works after persisting some
+	// bottom-most layers to disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x04"), 2)
+ verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+ verifyIterator(t, 7, it, verifyAccount)
+ it.Release()
+}
+
+func TestStorageIteratorTraversal(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
+
+ // Verify the single and multi-layer iterators
+ head := snaps.Snapshot(common.HexToHash("0x04"))
+
+ diffIter, _ := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 3, diffIter, verifyNothing)
+ verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+
+ it, _ := snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 6, it, verifyStorage)
+ it.Release()
+
+	// Verify that the functionality still works after persisting some
+	// bottom-most layers to disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x04"), 2)
+ verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 6, it, verifyStorage)
+ it.Release()
+}
+
+// TestAccountIteratorTraversalValues tests some multi-layer iteration, where we
+// also expect the correct values to show up.
+func TestAccountIteratorTraversalValues(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Create a batch of account sets to seed subsequent layers with
+ var (
+ a = make(map[common.Hash][]byte)
+ b = make(map[common.Hash][]byte)
+ c = make(map[common.Hash][]byte)
+ d = make(map[common.Hash][]byte)
+ e = make(map[common.Hash][]byte)
+ f = make(map[common.Hash][]byte)
+ g = make(map[common.Hash][]byte)
+ h = make(map[common.Hash][]byte)
+ )
+ for i := byte(2); i < 0xff; i++ {
+ a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i))
+ if i > 20 && i%2 == 0 {
+ b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i))
+ }
+ if i%4 == 0 {
+ c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i))
+ }
+ if i%7 == 0 {
+ d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i))
+ }
+ if i%8 == 0 {
+ e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i))
+ }
+		if i > 50 && i < 85 {
+ f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i))
+ }
+ if i%64 == 0 {
+ g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i))
+ }
+ if i%128 == 0 {
+ h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i))
+ }
+ }
+ // Assemble a stack of snapshots from the account layers
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil)
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil)
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil)
+ snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil)
+ snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil)
+ snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil)
+ snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil)
+ snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)
+
+ it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
+ head := snaps.Snapshot(common.HexToHash("0x09"))
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.AccountRLP(hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected account: %v", err)
+ }
+ if have := it.Account(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
+
+	// Verify that the functionality still works after persisting some
+	// bottom-most layers to disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x09"), 2)
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.AccountRLP(hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected account: %v", err)
+ }
+ if have := it.Account(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
+}
+
+func TestStorageIteratorTraversalValues(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ wrapStorage := func(storage map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
+ return map[common.Hash]map[common.Hash][]byte{
+ common.HexToHash("0xaa"): storage,
+ }
+ }
+ // Create a batch of storage sets to seed subsequent layers with
+ var (
+ a = make(map[common.Hash][]byte)
+ b = make(map[common.Hash][]byte)
+ c = make(map[common.Hash][]byte)
+ d = make(map[common.Hash][]byte)
+ e = make(map[common.Hash][]byte)
+ f = make(map[common.Hash][]byte)
+ g = make(map[common.Hash][]byte)
+ h = make(map[common.Hash][]byte)
+ )
+ for i := byte(2); i < 0xff; i++ {
+ a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i))
+ if i > 20 && i%2 == 0 {
+ b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i))
+ }
+ if i%4 == 0 {
+ c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i))
+ }
+ if i%7 == 0 {
+ d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i))
+ }
+ if i%8 == 0 {
+ e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i))
+ }
+ if i > 50 && i < 85 {
+ f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i))
+ }
+ if i%64 == 0 {
+ g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i))
+ }
+ if i%128 == 0 {
+ h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i))
+ }
+ }
+ // Assemble a stack of snapshots from the storage layers
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
+ snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
+ snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
+ snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
+
+ it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
+ head := snaps.Snapshot(common.HexToHash("0x09"))
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.Storage(common.HexToHash("0xaa"), hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected storage slot: %v", err)
+ }
+ if have := it.Slot(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
+
+ // Test that the functionality still works after persisting some
+ // bottom-most layers into the disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x09"), 2)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.Storage(common.HexToHash("0xaa"), hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected slot: %v", err)
+ }
+ if have := it.Slot(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
+}
+
+// This testcase is notorious: all layers contain the exact same 200 accounts.
+func TestAccountIteratorLargeTraversal(t *testing.T) {
+ // Create a custom account factory to recreate the same addresses
+ makeAccounts := func(num int) map[common.Hash][]byte {
+ accounts := make(map[common.Hash][]byte)
+ for i := 0; i < num; i++ {
+ h := common.Hash{}
+ binary.BigEndian.PutUint64(h[:], uint64(i+1))
+ accounts[h] = randomAccount()
+ }
+ return accounts
+ }
+ // Build up a large stack of snapshots
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ for i := 1; i < 128; i++ {
+ snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
+ }
+ // Iterate the entire stack and ensure everything is hit only once
+ head := snaps.Snapshot(common.HexToHash("0x80"))
+ verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
+ verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+ it, _ := snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
+ verifyIterator(t, 200, it, verifyAccount)
+ it.Release()
+
+ // Test that the functionality still works after persisting some
+ // bottom-most layers into the disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x80"), 2)
+
+ verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
+ verifyIterator(t, 200, it, verifyAccount)
+ it.Release()
+}
+
+// TestAccountIteratorFlattening tests what happens when we
+// - have a live iterator on child C (parent C1 -> C2 .. CN)
+// - flatten C2 all the way into CN
+// - continue iterating
+func TestAccountIteratorFlattening(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Create a stack of diffs on top
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xcc", "0xf0", "0xff"), nil)
+
+ // Create an iterator and flatten the data from underneath it
+ it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+ defer it.Release()
+
+ if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil {
+ t.Fatalf("failed to flatten snapshot stack: %v", err)
+ }
+ //verifyIterator(t, 7, it)
+}
+
+func TestAccountIteratorSeek(t *testing.T) {
+ // Create a snapshot stack with some initial data
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xcc", "0xf0", "0xff"), nil)
+
+ // Account set is now
+ // 02: aa, ee, f0, ff
+ // 03: aa, bb, dd, ee, f0 (, f0), ff
+ // 04: aa, bb, cc, dd, ee, f0 (, f0), ff (, ff)
+ // Construct various iterators and ensure their traversal is correct
+ it, _ := snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xdd"))
+ defer it.Release()
+ verifyIterator(t, 3, it, verifyAccount) // expected: ee, f0, ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"))
+ defer it.Release()
+ verifyIterator(t, 4, it, verifyAccount) // expected: aa, ee, f0, ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff"))
+ defer it.Release()
+ verifyIterator(t, 1, it, verifyAccount) // expected: ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff1"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyAccount) // expected: nothing
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xbb"))
+ defer it.Release()
+ verifyIterator(t, 6, it, verifyAccount) // expected: bb, cc, dd, ee, f0, ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xef"))
+ defer it.Release()
+ verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xf0"))
+ defer it.Release()
+ verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff"))
+ defer it.Release()
+ verifyIterator(t, 1, it, verifyAccount) // expected: ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff1"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyAccount) // expected: nothing
+}
+
+func TestStorageIteratorSeek(t *testing.T) {
+ // Create a snapshot stack with some initial data
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
+
+ // Storage set is now
+ // 02: 01, 03, 05
+ // 03: 01, 02, 03, 05 (, 05), 06
+ // 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08
+ // Construct various iterators and ensure their traversal is correct
+ it, _ := snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
+ defer it.Release()
+ verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x02"))
+ defer it.Release()
+ verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x5"))
+ defer it.Release()
+ verifyIterator(t, 1, it, verifyStorage) // expected: 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x6"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyStorage) // expected: nothing
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
+ defer it.Release()
+ verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x05"))
+ defer it.Release()
+ verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x08"))
+ defer it.Release()
+ verifyIterator(t, 1, it, verifyStorage) // expected: 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x09"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyStorage) // expected: nothing
+}
+
+// TestAccountIteratorDeletions tests that the iterator behaves correctly when there are
+// deleted accounts (where the Account() value is nil). The iterator
+// should not output any accounts or nil-values for those cases.
+func TestAccountIteratorDeletions(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
+ nil, randomAccountSet("0x11", "0x22", "0x33"), nil)
+
+ deleted := common.HexToHash("0x22")
+ destructed := map[common.Hash]struct{}{
+ deleted: {},
+ }
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
+ destructed, randomAccountSet("0x11", "0x33"), nil)
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
+ nil, randomAccountSet("0x33", "0x44", "0x55"), nil)
+
+ // The output should be 11,33,44,55
+ it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+ // Do a quick check
+ verifyIterator(t, 4, it, verifyAccount)
+ it.Release()
+
+ // And a more detailed verification that we indeed do not see '0x22'
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+ defer it.Release()
+ for it.Next() {
+ hash := it.Hash()
+ if it.Account() == nil {
+ t.Errorf("iterator returned nil-value for hash %x", hash)
+ }
+ if hash == deleted {
+ t.Errorf("expected deleted elem %x to not be returned by iterator", deleted)
+ }
+ }
+}
+
+func TestStorageIteratorDeletions(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
+
+ // The output should be 02,04,05,06
+ it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 4, it, verifyStorage)
+ it.Release()
+
+ // The output should be 04,05,06
+ it, _ = snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.HexToHash("0x03"))
+ verifyIterator(t, 3, it, verifyStorage)
+ it.Release()
+
+ // Destruct the whole storage
+ destructed := map[common.Hash]struct{}{
+ common.HexToHash("0xaa"): {},
+ }
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 0, it, verifyStorage)
+ it.Release()
+
+ // Re-insert the slots of the same account
+ snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
+
+ // The output should be 07,08,09
+ it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 3, it, verifyStorage)
+ it.Release()
+
+ // Destruct the whole storage but re-create the account in the same layer
+ snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
+ it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
+ it.Release()
+
+ verifyIterator(t, 2, snaps.Snapshot(common.HexToHash("0x06")).(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+}
+
+// BenchmarkAccountIteratorTraversal is a bit notorious -- all layers contain the
+// exact same 200 accounts. That means that we need to process 20,000 items, but
+// only spit out 200 values eventually.
+//
+// The value-fetching benchmark is easy on the binary iterator, since it never has to reach
+// down at any depth for retrieving the values -- all are on the topmost layer
+//
+// BenchmarkAccountIteratorTraversal/binary_iterator_keys-6 2239 483674 ns/op
+// BenchmarkAccountIteratorTraversal/binary_iterator_values-6 2403 501810 ns/op
+// BenchmarkAccountIteratorTraversal/fast_iterator_keys-6 1923 677966 ns/op
+// BenchmarkAccountIteratorTraversal/fast_iterator_values-6 1741 649967 ns/op
+func BenchmarkAccountIteratorTraversal(b *testing.B) {
+ // Create a custom account factory to recreate the same addresses
+ makeAccounts := func(num int) map[common.Hash][]byte {
+ accounts := make(map[common.Hash][]byte)
+ for i := 0; i < num; i++ {
+ h := common.Hash{}
+ binary.BigEndian.PutUint64(h[:], uint64(i+1))
+ accounts[h] = randomAccount()
+ }
+ return accounts
+ }
+ // Build up a large stack of snapshots
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ for i := 1; i <= 100; i++ {
+ snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
+ }
+ // We call this once before the benchmark, so the creation of
+ // sorted accountlists are not included in the results.
+ head := snaps.Snapshot(common.HexToHash("0x65"))
+ head.(*diffLayer).newBinaryAccountIterator()
+
+ b.Run("binary iterator keys", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ got := 0
+ it := head.(*diffLayer).newBinaryAccountIterator()
+ for it.Next() {
+ got++
+ }
+ if exp := 200; got != exp {
+ b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+ }
+ }
+ })
+ b.Run("binary iterator values", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ got := 0
+ it := head.(*diffLayer).newBinaryAccountIterator()
+ for it.Next() {
+ got++
+ head.(*diffLayer).accountRLP(it.Hash(), 0)
+ }
+ if exp := 200; got != exp {
+ b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+ }
+ }
+ })
+ b.Run("fast iterator keys", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
+ defer it.Release()
+
+ got := 0
+ for it.Next() {
+ got++
+ }
+ if exp := 200; got != exp {
+ b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+ }
+ }
+ })
+ b.Run("fast iterator values", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
+ defer it.Release()
+
+ got := 0
+ for it.Next() {
+ got++
+ it.Account()
+ }
+ if exp := 200; got != exp {
+ b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+ }
+ }
+ })
+}
+
+// BenchmarkAccountIteratorLargeBaselayer is a pretty realistic benchmark, where
+// the baselayer is a lot larger than the upper layer.
+//
+// This is heavy on the binary iterator, which in most cases will have to
+// recurse through all ~100 layers for the majority of the values.
+//
+// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(keys)-6 514 1971999 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(values)-6 61 18997492 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(keys)-6 10000 114385 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(values)-6 4047 296823 ns/op
+func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
+ // Create a custom account factory to recreate the same addresses
+ makeAccounts := func(num int) map[common.Hash][]byte {
+ accounts := make(map[common.Hash][]byte)
+ for i := 0; i < num; i++ {
+ h := common.Hash{}
+ binary.BigEndian.PutUint64(h[:], uint64(i+1))
+ accounts[h] = randomAccount()
+ }
+ return accounts
+ }
+ // Build up a large stack of snapshots
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
+ for i := 2; i <= 100; i++ {
+ snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
+ }
+ // We call this once before the benchmark, so the creation of
+ // sorted accountlists are not included in the results.
+ head := snaps.Snapshot(common.HexToHash("0x65"))
+ head.(*diffLayer).newBinaryAccountIterator()
+
+ b.Run("binary iterator (keys)", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ got := 0
+ it := head.(*diffLayer).newBinaryAccountIterator()
+ for it.Next() {
+ got++
+ }
+ if exp := 2000; got != exp {
+ b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+ }
+ }
+ })
+ b.Run("binary iterator (values)", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ got := 0
+ it := head.(*diffLayer).newBinaryAccountIterator()
+ for it.Next() {
+ got++
+ v := it.Hash()
+ head.(*diffLayer).accountRLP(v, 0)
+ }
+ if exp := 2000; got != exp {
+ b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+ }
+ }
+ })
+ b.Run("fast iterator (keys)", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
+ defer it.Release()
+
+ got := 0
+ for it.Next() {
+ got++
+ }
+ if exp := 2000; got != exp {
+ b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+ }
+ }
+ })
+ b.Run("fast iterator (values)", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
+ defer it.Release()
+
+ got := 0
+ for it.Next() {
+ it.Account()
+ got++
+ }
+ if exp := 2000; got != exp {
+ b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+ }
+ }
+ })
+}
+
+/*
+func BenchmarkBinaryAccountIteration(b *testing.B) {
+ benchmarkAccountIteration(b, func(snap snapshot) AccountIterator {
+ return snap.(*diffLayer).newBinaryAccountIterator()
+ })
+}
+
+func BenchmarkFastAccountIteration(b *testing.B) {
+ benchmarkAccountIteration(b, newFastAccountIterator)
+}
+
+func benchmarkAccountIteration(b *testing.B, iterator func(snap snapshot) AccountIterator) {
+ // Create a diff stack and randomize the accounts across them
+ layers := make([]map[common.Hash][]byte, 128)
+ for i := 0; i < len(layers); i++ {
+ layers[i] = make(map[common.Hash][]byte)
+ }
+ for i := 0; i < b.N; i++ {
+ depth := rand.Intn(len(layers))
+ layers[depth][randomHash()] = randomAccount()
+ }
+ stack := snapshot(emptyLayer())
+ for _, layer := range layers {
+ stack = stack.Update(common.Hash{}, layer, nil, nil)
+ }
+ // Reset the timers and report all the stats
+ it := iterator(stack)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for it.Next() {
+ }
+}
+*/
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
new file mode 100644
index 0000000000..a724581164
--- /dev/null
+++ b/core/state/snapshot/journal.go
@@ -0,0 +1,374 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+const journalVersion uint64 = 0
+
+// journalGenerator is a disk layer entry containing the generator progress marker.
+type journalGenerator struct {
+ // Indicator of whether the database was in the process of being wiped.
+ // It's deprecated but kept here for backward compatibility.
+ Wiping bool
+
+ Done bool // Whether the generator finished creating the snapshot
+ Marker []byte // Generation progress marker, the last processed account (and storage) hash
+ Accounts uint64 // Number of accounts indexed so far
+ Slots uint64 // Number of storage slots indexed so far
+ Storage uint64 // Total account and storage slot data indexed so far, in bytes
+}
+
+// journalDestruct is an account deletion entry in a diffLayer's disk journal.
+type journalDestruct struct {
+ Hash common.Hash
+}
+
+// journalAccount is an account entry in a diffLayer's disk journal.
+type journalAccount struct {
+ Hash common.Hash
+ Blob []byte
+}
+
+// journalStorage is an account's storage map in a diffLayer's disk journal.
+type journalStorage struct {
+ Hash common.Hash
+ Keys []common.Hash
+ Vals [][]byte
+}
+
+// ParseGeneratorStatus turns a journalled generator blob into a human-readable
+// progress summary, returning an empty string if the blob is missing or
+// cannot be decoded.
+func ParseGeneratorStatus(generatorBlob []byte) string {
+ if len(generatorBlob) == 0 {
+ return ""
+ }
+ var generator journalGenerator
+ if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
+ utils.Logger().Warn().Err(err).Msg("failed to decode snapshot generator")
+ return ""
+ }
+ // Figure out whether we're after or within an account
+ var m string
+ switch marker := generator.Marker; len(marker) {
+ case common.HashLength:
+ m = fmt.Sprintf("at %#x", marker)
+ case 2 * common.HashLength:
+ m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:])
+ default:
+ m = fmt.Sprintf("%#x", marker)
+ }
+ return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`,
+ generator.Done, generator.Accounts, generator.Slots, generator.Storage, m)
+}
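+
+// As a minimal usage sketch (assuming an open key-value store db), the raw
+// generator blob can be read straight from the database and pretty-printed:
+//
+//	blob := rawdb.ReadSnapshotGenerator(db)
+//	fmt.Println(ParseGeneratorStatus(blob))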
+
+// loadAndParseJournal tries to parse the snapshot journal in the latest format.
+func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+ // Retrieve the disk layer generator. It must exist no matter whether
+ // the snapshot is fully generated or not. Otherwise the entire disk
+ // layer is invalid.
+ generatorBlob := rawdb.ReadSnapshotGenerator(db)
+ if len(generatorBlob) == 0 {
+ return nil, journalGenerator{}, errors.New("missing snapshot generator")
+ }
+ var generator journalGenerator
+ if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
+ return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err)
+ }
+ // Retrieve the diff layer journal. It's possible that the journal is
+ // non-existent, e.g. the disk layer was generating while Geth crashed
+ // without persisting the diff journal.
+ // So if there is no journal, or the journal is invalid (e.g. it doesn't
+ // match the disk layer, or it's a legacy-format journal, etc.), we just
+ // discard all diffs and try to recover them later.
+ var current snapshot = base
+ err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error {
+ current = newDiffLayer(current, root, destructSet, accountData, storageData)
+ return nil
+ })
+ if err != nil {
+ return base, generator, nil
+ }
+ return current, generator, nil
+}
+
+// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash, cache int, recovery bool, noBuild bool) (snapshot, bool, error) {
+ // If snapshotting is disabled (initial sync in progress), don't do anything,
+ // wait for the chain to permit us to do something meaningful
+ if rawdb.ReadSnapshotDisabled(diskdb) {
+ return nil, true, nil
+ }
+ // Retrieve the root of the persisted snapshot, failing if no snapshot
+ // is present in the database (or it crashed mid-update).
+ baseRoot := rawdb.ReadSnapshotRoot(diskdb)
+ if baseRoot == (common.Hash{}) {
+ return nil, false, errors.New("missing or corrupted snapshot")
+ }
+ base := &diskLayer{
+ diskdb: diskdb,
+ triedb: triedb,
+ cache: fastcache.New(cache * 1024 * 1024),
+ root: baseRoot,
+ }
+ snapshot, generator, err := loadAndParseJournal(diskdb, base)
+ if err != nil {
+ utils.Logger().Warn().Err(err).Msg("Failed to load journal")
+ return nil, false, err
+ }
+ // Entire snapshot journal loaded, sanity check the head. If the loaded
+ // snapshot doesn't match the current state root, print a warning log
+ // or discard the entire snapshot if it's a legacy snapshot.
+ //
+ // Possible scenario: Geth crashed without persisting the journal and then
+ // restarted; the head is rewound to a point with available state (trie)
+ // which is below the snapshot. In this case the snapshot can be recovered
+ // by re-executing blocks, but right now that's unavailable.
+ if head := snapshot.Root(); head != root {
+ // If it's a legacy snapshot, or it's a new-format snapshot but
+ // not in recovery mode, return the error here to force a rebuild
+ // of the entire snapshot.
+ if !recovery {
+ return nil, false, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+ }
+ // It's in snapshot recovery, so the assumption is that
+ // the disk layer is always higher than the chain head. It can
+ // eventually be recovered once the chain head moves beyond the
+ // disk layer.
+ utils.Logger().Warn().
+ Interface("snaproot", head).
+ Interface("chainroot", root).
+ Msg("Snapshot is not continuous with chain")
+ }
+ // Load the disk layer status from the generator if it's not complete
+ if !generator.Done {
+ base.genMarker = generator.Marker
+ if base.genMarker == nil {
+ base.genMarker = []byte{}
+ }
+ }
+ // Everything loaded correctly, resume any suspended operations
+ // if the background generation is allowed
+ if !generator.Done && !noBuild {
+ base.genPending = make(chan struct{})
+ base.genAbort = make(chan chan *generatorStats)
+
+ var origin uint64
+ if len(generator.Marker) >= 8 {
+ origin = binary.BigEndian.Uint64(generator.Marker)
+ }
+ go base.generate(&generatorStats{
+ origin: origin,
+ start: time.Now(),
+ accounts: generator.Accounts,
+ slots: generator.Slots,
+ storage: common.StorageSize(generator.Storage),
+ })
+ }
+ return snapshot, false, nil
+}
+
+// Journal terminates any in-progress snapshot generation, also implicitly pushing
+// the progress into the database.
+func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
+ // If the snapshot is currently being generated, abort it
+ var stats *generatorStats
+ if dl.genAbort != nil {
+ abort := make(chan *generatorStats)
+ dl.genAbort <- abort
+
+ if stats = <-abort; stats != nil {
+ stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
+ }
+ }
+ // Ensure the layer didn't get stale
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return common.Hash{}, ErrSnapshotStale
+ }
+ // Ensure the generator stats are written even if none were run this cycle
+ journalProgress(dl.diskdb, dl.genMarker, stats)
+
+ utils.Logger().Debug().Interface("root", dl.root).Msg("Journalled disk layer")
+ return dl.root, nil
+}
+
+// Journal writes the memory layer contents into a buffer to be stored in the
+// database as the snapshot journal.
+func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
+ // Journal the parent first
+ base, err := dl.parent.Journal(buffer)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Ensure the layer didn't get stale
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.Stale() {
+ return common.Hash{}, ErrSnapshotStale
+ }
+ // Everything below was journalled, persist this layer too
+ if err := rlp.Encode(buffer, dl.root); err != nil {
+ return common.Hash{}, err
+ }
+ destructs := make([]journalDestruct, 0, len(dl.destructSet))
+ for hash := range dl.destructSet {
+ destructs = append(destructs, journalDestruct{Hash: hash})
+ }
+ if err := rlp.Encode(buffer, destructs); err != nil {
+ return common.Hash{}, err
+ }
+ accounts := make([]journalAccount, 0, len(dl.accountData))
+ for hash, blob := range dl.accountData {
+ accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
+ }
+ if err := rlp.Encode(buffer, accounts); err != nil {
+ return common.Hash{}, err
+ }
+ storage := make([]journalStorage, 0, len(dl.storageData))
+ for hash, slots := range dl.storageData {
+ keys := make([]common.Hash, 0, len(slots))
+ vals := make([][]byte, 0, len(slots))
+ for key, val := range slots {
+ keys = append(keys, key)
+ vals = append(vals, val)
+ }
+ storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
+ }
+ if err := rlp.Encode(buffer, storage); err != nil {
+ return common.Hash{}, err
+ }
+ utils.Logger().Debug().Interface("root", dl.root).Interface("parent", dl.parent.Root()).Msg("Journalled diff layer")
+ return base, nil
+}
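+
+// For reference, each journalled diff layer thus contributes four consecutive
+// RLP items to the buffer, which iterateJournal below decodes in the same order:
+//
+//	rlp(root), rlp([]journalDestruct), rlp([]journalAccount), rlp([]journalStorage)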
+
+// journalCallback is a function which is invoked by iterateJournal, every
+// time a difflayer is loaded from disk.
+type journalCallback = func(parent common.Hash, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error
+
+// iterateJournal iterates through the journalled difflayers, loading them from
+// the database, and invoking the callback for each loaded layer.
+// The order is incremental, starting with the bottom-most difflayer and going towards
+// the most recent layer.
+// This method returns error either if there was some error reading from disk,
+// OR if the callback returns an error when invoked.
+func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
+ journal := rawdb.ReadSnapshotJournal(db)
+ if len(journal) == 0 {
+ utils.Logger().Warn().Str("diffs", "missing").Msg("Loaded snapshot journal")
+ return nil
+ }
+ r := rlp.NewStream(bytes.NewReader(journal), 0)
+ // Firstly, resolve the first element as the journal version
+ version, err := r.Uint64()
+ if err != nil {
+ utils.Logger().Warn().Err(err).Msg("Failed to resolve the journal version")
+ return errors.New("failed to resolve journal version")
+ }
+ if version != journalVersion {
+ utils.Logger().Warn().
+ Uint64("required", journalVersion).
+ Uint64("got", version).
+ Msg("Discarded the snapshot journal with wrong version")
+
+ return errors.New("wrong journal version")
+ }
+ // Secondly, resolve the disk layer root, ensuring it's continuous
+ // with the disk layer. Note that at this point we know the journal
+ // has the correct version, so we expect everything to resolve properly.
+ var parent common.Hash
+ if err := r.Decode(&parent); err != nil {
+ return errors.New("missing disk layer root")
+ }
+ if baseRoot := rawdb.ReadSnapshotRoot(db); baseRoot != parent {
+ utils.Logger().Warn().
+ Interface("disk_root", baseRoot).
+ Str("diffs", "unmatched").
+ Msg("Loaded snapshot journal")
+
+ return fmt.Errorf("mismatched disk and diff layers")
+ }
+ for {
+ var (
+ root common.Hash
+ destructs []journalDestruct
+ accounts []journalAccount
+ storage []journalStorage
+ destructSet = make(map[common.Hash]struct{})
+ accountData = make(map[common.Hash][]byte)
+ storageData = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ // Read the next diff journal entry
+ if err := r.Decode(&root); err != nil {
+ // The first read may fail with EOF, marking the end of the journal
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ return fmt.Errorf("load diff root: %v", err)
+ }
+ if err := r.Decode(&destructs); err != nil {
+ return fmt.Errorf("load diff destructs: %v", err)
+ }
+ if err := r.Decode(&accounts); err != nil {
+ return fmt.Errorf("load diff accounts: %v", err)
+ }
+ if err := r.Decode(&storage); err != nil {
+ return fmt.Errorf("load diff storage: %v", err)
+ }
+ for _, entry := range destructs {
+ destructSet[entry.Hash] = struct{}{}
+ }
+ for _, entry := range accounts {
+ if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
+ accountData[entry.Hash] = entry.Blob
+ } else {
+ accountData[entry.Hash] = nil
+ }
+ }
+ for _, entry := range storage {
+ slots := make(map[common.Hash][]byte)
+ for i, key := range entry.Keys {
+ if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
+ slots[key] = entry.Vals[i]
+ } else {
+ slots[key] = nil
+ }
+ }
+ storageData[entry.Hash] = slots
+ }
+ if err := callback(parent, root, destructSet, accountData, storageData); err != nil {
+ return err
+ }
+ parent = root
+ }
+}
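+
+// A minimal usage sketch (assuming an open ethdb.KeyValueReader db): count the
+// journalled diff layers without materializing them:
+//
+//	layers := 0
+//	err := iterateJournal(db, func(parent, root common.Hash,
+//		destructs map[common.Hash]struct{},
+//		accounts map[common.Hash][]byte,
+//		storage map[common.Hash]map[common.Hash][]byte) error {
+//		layers++
+//		return nil
+//	})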
diff --git a/core/state/snapshot/metrics.go b/core/state/snapshot/metrics.go
new file mode 100644
index 0000000000..b2e884588b
--- /dev/null
+++ b/core/state/snapshot/metrics.go
@@ -0,0 +1,53 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+// Metrics in generation
+var (
+ snapGeneratedAccountMeter = metrics.NewRegisteredMeter("state/snapshot/generation/account/generated", nil)
+ snapRecoveredAccountMeter = metrics.NewRegisteredMeter("state/snapshot/generation/account/recovered", nil)
+ snapWipedAccountMeter = metrics.NewRegisteredMeter("state/snapshot/generation/account/wiped", nil)
+ snapMissallAccountMeter = metrics.NewRegisteredMeter("state/snapshot/generation/account/missall", nil)
+ snapGeneratedStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/generated", nil)
+ snapRecoveredStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/recovered", nil)
+ snapWipedStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/wiped", nil)
+ snapMissallStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/missall", nil)
+ snapDanglingStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/dangling", nil)
+ snapSuccessfulRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/success", nil)
+ snapFailedRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/failure", nil)
+
+ // snapAccountProveCounter measures time spent on the account proving
+ snapAccountProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/prove", nil)
+ // snapAccountTrieReadCounter measures time spent on the account trie iteration
+ snapAccountTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/trieread", nil)
+ // snapAccountSnapReadCounter measures time spent on the snapshot account iteration
+ snapAccountSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/snapread", nil)
+ // snapAccountWriteCounter measures time spent on writing/updating/deleting accounts
+ snapAccountWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/write", nil)
+ // snapStorageProveCounter measures time spent on storage proving
+ snapStorageProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/prove", nil)
+ // snapStorageTrieReadCounter measures time spent on the storage trie iteration
+ snapStorageTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/trieread", nil)
+ // snapStorageSnapReadCounter measures time spent on the snapshot storage iteration
+ snapStorageSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/snapread", nil)
+ // snapStorageWriteCounter measures time spent on writing/updating storages
+ snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil)
+ // snapStorageCleanCounter measures time spent on deleting storages
+ snapStorageCleanCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/clean", nil)
+)
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
new file mode 100644
index 0000000000..47dc1a3a17
--- /dev/null
+++ b/core/state/snapshot/snapshot.go
@@ -0,0 +1,854 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package snapshot implements a journalled, dynamic state dump.
+package snapshot
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+var (
+ snapshotCleanAccountHitMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
+ snapshotCleanAccountMissMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
+ snapshotCleanAccountInexMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
+ snapshotCleanAccountReadMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
+ snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)
+
+ snapshotCleanStorageHitMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
+ snapshotCleanStorageMissMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
+ snapshotCleanStorageInexMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
+ snapshotCleanStorageReadMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
+ snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)
+
+ snapshotDirtyAccountHitMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
+ snapshotDirtyAccountMissMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
+ snapshotDirtyAccountInexMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
+ snapshotDirtyAccountReadMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
+ snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)
+
+ snapshotDirtyStorageHitMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
+ snapshotDirtyStorageMissMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
+ snapshotDirtyStorageInexMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
+ snapshotDirtyStorageReadMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
+ snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)
+
+ snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+ snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
+ snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
+ snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
+ snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)
+
+ snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
+ snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)
+
+ snapshotBloomAccountTrueHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
+ snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
+ snapshotBloomAccountMissMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)
+
+ snapshotBloomStorageTrueHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
+ snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
+ snapshotBloomStorageMissMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)
+
+ // ErrSnapshotStale is returned from data accessors if the underlying snapshot
+ // layer had been invalidated due to the chain progressing forward far enough
+ // to not maintain the layer's original state.
+ ErrSnapshotStale = errors.New("snapshot stale")
+
+ // ErrNotCoveredYet is returned from data accessors if the underlying snapshot
+ // is being generated currently and the requested data item is not yet in the
+ // range of accounts covered.
+ ErrNotCoveredYet = errors.New("not covered yet")
+
+ // ErrNotConstructed is returned if a caller wants to iterate the snapshot
+ // while the generation is not finished yet.
+ ErrNotConstructed = errors.New("snapshot is not constructed")
+
+ // errSnapshotCycle is returned if a snapshot is attempted to be inserted
+ // that forms a cycle in the snapshot tree.
+ errSnapshotCycle = errors.New("snapshot cycle")
+)
+
+// Snapshot represents the functionality supported by a snapshot storage layer.
+type Snapshot interface {
+ // Root returns the root hash for which this snapshot was made.
+ Root() common.Hash
+
+ // Account directly retrieves the account associated with a particular hash in
+ // the snapshot slim data format.
+ Account(hash common.Hash) (*Account, error)
+
+ // AccountRLP directly retrieves the account RLP associated with a particular
+ // hash in the snapshot slim data format.
+ AccountRLP(hash common.Hash) ([]byte, error)
+
+ // Storage directly retrieves the storage data associated with a particular hash,
+ // within a particular account.
+ Storage(accountHash, storageHash common.Hash) ([]byte, error)
+}
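+
+// A minimal read sketch (hypothetical hashes, error handling elided); note that
+// all three accessors address data by hash, never by address:
+//
+//	snap := snaps.Snapshot(blockRoot)
+//	acc, _ := snap.Account(accountHash)               // decoded slim-format account
+//	blob, _ := snap.AccountRLP(accountHash)           // raw RLP blob, nil if deleted
+//	slot, _ := snap.Storage(accountHash, storageHash) // raw storage slot value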
+
+// snapshot is the internal version of the snapshot data layer that supports some
+// additional methods compared to the public API.
+type snapshot interface {
+ Snapshot
+
+ // Parent returns the subsequent layer of a snapshot, or nil if the base was
+ // reached.
+ //
+ // Note, the method is an internal helper to avoid type switching between the
+ // disk and diff layers. There is no locking involved.
+ Parent() snapshot
+
+ // Update creates a new layer on top of the existing snapshot diff tree with
+ // the specified data items.
+ //
+ // Note, the maps are retained by the method to avoid copying everything.
+ Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
+
+ // Journal commits an entire diff hierarchy to disk into a single journal entry.
+ // This is meant to be used during shutdown to persist the snapshot without
+ // flattening everything down (bad for reorgs).
+ Journal(buffer *bytes.Buffer) (common.Hash, error)
+
+ // Stale returns whether this layer has become stale (was flattened across) or
+ // if it's still live.
+ Stale() bool
+
+ // AccountIterator creates an account iterator over an arbitrary layer.
+ AccountIterator(seek common.Hash) AccountIterator
+
+ // StorageIterator creates a storage iterator over an arbitrary layer.
+ StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
+}
+
+// Config includes the configurations for snapshots.
+type Config struct {
+ CacheSize int // Megabytes permitted to be used for read caches
+ Recovery bool // Indicator that the snapshot is in recovery mode
+ NoBuild bool // Indicator that snapshot generation is disallowed
+ AsyncBuild bool // Whether snapshot generation is allowed to run asynchronously
+}
+
+// Tree is an Ethereum state snapshot tree. It consists of one persistent base
+// layer backed by a key-value store, on top of which arbitrarily many in-memory
+// diff layers are stacked. The memory diffs can form a tree with branching, but
+// the disk layer is a singleton common to all. If a reorg goes deeper than the
+// disk layer, everything needs to be deleted.
+//
+// The goal of a state snapshot is twofold: to allow direct access to account and
+// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
+// cheap iteration of the account/storage tries for sync aid.
+type Tree struct {
+ config Config // Snapshots configurations
+ diskdb ethdb.KeyValueStore // Persistent database to store the snapshot
+ triedb *trie.Database // In-memory cache to access the trie through
+ layers map[common.Hash]snapshot // Collection of all known layers
+ lock sync.RWMutex
+
+ // Test hooks
+ onFlatten func() // Hook invoked when the bottom most diff layers are flattened
+}
+
+// New attempts to load an already existing snapshot from a persistent key-value
+// store (with a number of memory layers from a journal), ensuring that the head
+// of the snapshot matches the expected one.
+//
+// If the snapshot is missing or the disk layer is broken, the snapshot will be
+// reconstructed using both the existing data and the state trie.
+// The repair happens on a background thread.
+//
+// If the memory layers in the journal do not match the disk layer (e.g. there is
+// a gap) or the journal is missing, there are two repair cases:
+//
+// - if the 'recovery' parameter is true, memory diff-layers and the disk-layer
+// will all be kept. This case happens when the snapshot is 'ahead' of the
+// state trie.
+// - otherwise, the entire snapshot is considered invalid and will be recreated on
+// a background thread.
+func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash) (*Tree, error) {
+ // Create a new, empty snapshot tree
+ snap := &Tree{
+ config: config,
+ diskdb: diskdb,
+ triedb: triedb,
+ layers: make(map[common.Hash]snapshot),
+ }
+ // Attempt to load a previously persisted snapshot and rebuild one if failed
+ head, disabled, err := loadSnapshot(diskdb, triedb, root, config.CacheSize, config.Recovery, config.NoBuild)
+ if disabled {
+ utils.Logger().Warn().Err(err).Msg("Snapshot maintenance disabled (syncing)")
+ return snap, nil
+ }
+ // Create the building waiter iff the background generation is allowed
+ if !config.NoBuild && !config.AsyncBuild {
+ defer snap.waitBuild()
+ }
+ if err != nil {
+ utils.Logger().Warn().Err(err).Msg("Failed to load snapshot")
+ if !config.NoBuild {
+ snap.Rebuild(root)
+ return snap, nil
+ }
+ return nil, err // Bail out the error, don't rebuild automatically.
+ }
+ // Existing snapshot loaded, seed all the layers
+ for head != nil {
+ snap.layers[head.Root()] = head
+ head = head.Parent()
+ }
+ return snap, nil
+}
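+
+// A minimal construction sketch (diskdb, triedb and the roots are assumed to
+// exist in the caller's context): load or rebuild a tree, then link a child
+// diff layer on top of the head:
+//
+//	snaps, err := New(Config{CacheSize: 256, AsyncBuild: true}, diskdb, triedb, headRoot)
+//	if err != nil {
+//		return err
+//	}
+//	if err := snaps.Update(childRoot, headRoot, nil, accounts, storage); err != nil {
+//		return err
+//	}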
+
+// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
+// to be used by tests to ensure we're testing what we believe we are.
+func (t *Tree) waitBuild() {
+ // Find the rebuild termination channel
+ var done chan struct{}
+
+ t.lock.RLock()
+ for _, layer := range t.layers {
+ if layer, ok := layer.(*diskLayer); ok {
+ done = layer.genPending
+ break
+ }
+ }
+ t.lock.RUnlock()
+
+ // Wait until the snapshot is generated
+ if done != nil {
+ <-done
+ }
+}
+
+// Disable interrupts any pending snapshot generator, deletes all the snapshot
+// layers in memory and marks snapshots disabled globally. In order to resume
+// the snapshot functionality, the caller must invoke Rebuild.
+func (t *Tree) Disable() {
+ // Interrupt any live snapshot layers
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ for _, layer := range t.layers {
+ switch layer := layer.(type) {
+ case *diskLayer:
+ // If the base layer is generating, abort it
+ if layer.genAbort != nil {
+ abort := make(chan *generatorStats)
+ layer.genAbort <- abort
+ <-abort
+ }
+ // Layer should be inactive now, mark it as stale
+ layer.lock.Lock()
+ layer.stale = true
+ layer.lock.Unlock()
+
+ case *diffLayer:
+ // If the layer is a simple diff, simply mark as stale
+ layer.lock.Lock()
+ atomic.StoreUint32(&layer.stale, 1)
+ layer.lock.Unlock()
+
+ default:
+ panic(fmt.Sprintf("unknown layer type: %T", layer))
+ }
+ }
+ t.layers = map[common.Hash]snapshot{}
+
+ // Delete all snapshot liveness information from the database
+ batch := t.diskdb.NewBatch()
+
+ rawdb.WriteSnapshotDisabled(batch)
+ rawdb.DeleteSnapshotRoot(batch)
+ rawdb.DeleteSnapshotJournal(batch)
+ rawdb.DeleteSnapshotGenerator(batch)
+ rawdb.DeleteSnapshotRecoveryNumber(batch)
+ // Note, we don't delete the sync progress
+
+ if err := batch.Write(); err != nil {
+ utils.Logger().Fatal().Err(err).Msg("Failed to disable snapshots")
+ }
+}
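+
+// A lifecycle sketch (caller context assumed): snapshots are disabled for the
+// duration of heavy sync work and rebuilt afterwards via Rebuild:
+//
+//	snaps.Disable()
+//	// ... sync without snapshot maintenance ...
+//	snaps.Rebuild(newHeadRoot)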
+
+// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
+// snapshot is maintained for that block.
+func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return t.layers[blockRoot]
+}
+
+// Snapshots returns all visited layers, starting from the topmost layer with the
+// specific root and traversing downward. The number of layers returned is capped
+// by the given limit. If nodisk is set, the disk layer is excluded.
+func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ if limits == 0 {
+ return nil
+ }
+ layer := t.layers[root]
+ if layer == nil {
+ return nil
+ }
+ var ret []Snapshot
+ for {
+ if _, isdisk := layer.(*diskLayer); isdisk && nodisk {
+ break
+ }
+ ret = append(ret, layer)
+ limits -= 1
+ if limits == 0 {
+ break
+ }
+ parent := layer.Parent()
+ if parent == nil {
+ break
+ }
+ layer = parent
+ }
+ return ret
+}
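+
+// For example (hypothetical head root): fetch at most 16 in-memory diff layers
+// above the disk layer, starting at the current head:
+//
+//	diffs := snaps.Snapshots(headRoot, 16, true)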
+
+// Update adds a new snapshot into the tree, if that can be linked to an existing
+// old parent. It is disallowed to insert a disk layer (the origin of all).
+func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+ // Reject noop updates to avoid self-loops in the snapshot tree. This is a
+ // special case that can only happen for Clique networks where empty blocks
+ // don't modify the state (0 block subsidy).
+ //
+ // Although we could silently ignore this internally, it should be the caller's
+ // responsibility to avoid even attempting to insert such a snapshot.
+ if blockRoot == parentRoot {
+ return errSnapshotCycle
+ }
+ // Generate a new snapshot on top of the parent
+ parent := t.Snapshot(parentRoot)
+ if parent == nil {
+ return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
+ }
+ snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
+
+ // Save the new snapshot for later
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ t.layers[snap.root] = snap
+ return nil
+}
+
+// Cap traverses downwards the snapshot tree from a head block hash until the
+// number of allowed layers is crossed. All layers beyond the permitted number
+// are flattened downwards.
+//
+// Note, the final diff layer count in general will be one more than the amount
+// requested. This happens because the bottom-most diff layer is the accumulator
+// which may or may not overflow and cascade to disk. Since this last layer's
+// survival is only known *after* capping, we need to omit it from the count if
+// we want to ensure that *at least* the requested number of diff layers remain.
+func (t *Tree) Cap(root common.Hash, layers int) error {
+ // Retrieve the head snapshot to cap from
+ snap := t.Snapshot(root)
+ if snap == nil {
+ return fmt.Errorf("snapshot [%#x] missing", root)
+ }
+ diff, ok := snap.(*diffLayer)
+ if !ok {
+ return fmt.Errorf("snapshot [%#x] is disk layer", root)
+ }
+ // If the generator is still running, use a more aggressive cap
+ diff.origin.lock.RLock()
+ if diff.origin.genMarker != nil && layers > 8 {
+ layers = 8
+ }
+ diff.origin.lock.RUnlock()
+
+ // Run the internal capping and discard all stale layers
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Flattening the bottom-most diff layer requires special casing since there's
+ // no child to rewire to the grandparent. In that case we can fake a temporary
+ // child for the capping and then remove it.
+ if layers == 0 {
+ // If full commit was requested, flatten the diffs and merge onto disk
+ diff.lock.RLock()
+ base := diffToDisk(diff.flatten().(*diffLayer))
+ diff.lock.RUnlock()
+
+ // Replace the entire snapshot tree with the flat base
+ t.layers = map[common.Hash]snapshot{base.root: base}
+ return nil
+ }
+ persisted := t.cap(diff, layers)
+
+ // Remove any layer that is stale or links into a stale layer
+ children := make(map[common.Hash][]common.Hash)
+ for root, snap := range t.layers {
+ if diff, ok := snap.(*diffLayer); ok {
+ parent := diff.parent.Root()
+ children[parent] = append(children[parent], root)
+ }
+ }
+ var remove func(root common.Hash)
+ remove = func(root common.Hash) {
+ delete(t.layers, root)
+ for _, child := range children[root] {
+ remove(child)
+ }
+ delete(children, root)
+ }
+ for root, snap := range t.layers {
+ if snap.Stale() {
+ remove(root)
+ }
+ }
+ // If the disk layer was modified, regenerate all the cumulative blooms
+ if persisted != nil {
+ var rebloom func(root common.Hash)
+ rebloom = func(root common.Hash) {
+ if diff, ok := t.layers[root].(*diffLayer); ok {
+ diff.rebloom(persisted)
+ }
+ for _, child := range children[root] {
+ rebloom(child)
+ }
+ }
+ rebloom(persisted.root)
+ }
+ return nil
+}
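+
+// Worked example (illustrative): Cap(head, 128) keeps up to 128 diff layers
+// above the bottom accumulator, so the tree ends up with at most 130 layers
+// in total: 128 diffs + 1 accumulator + 1 disk layer.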
+
+// cap traverses downwards the diff tree until the number of allowed layers is
+// crossed. All diffs beyond the permitted number are flattened downwards. If the
+// layer limit is reached, memory cap is also enforced (but not before).
+//
+// The method returns the new disk layer if diffs were persisted into it.
+//
+// Note, the final diff layer count in general will be one more than the amount
+// requested. This happens because the bottom-most diff layer is the accumulator
+// which may or may not overflow and cascade to disk. Since this last layer's
+// survival is only known *after* capping, we need to omit it from the count if
+// we want to ensure that *at least* the requested number of diff layers remain.
+func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
+ // Dive until we run out of layers or reach the persistent database
+ for i := 0; i < layers-1; i++ {
+ // If we still have diff layers below, continue down
+ if parent, ok := diff.parent.(*diffLayer); ok {
+ diff = parent
+ } else {
+ // Diff stack too shallow, return without modifications
+ return nil
+ }
+ }
+ // We're out of layers, flatten anything below, stopping if it's the disk or if
+ // the memory limit is not yet exceeded.
+ switch parent := diff.parent.(type) {
+ case *diskLayer:
+ return nil
+
+ case *diffLayer:
+ // Hold the write lock until the flattened parent is linked correctly.
+ // Otherwise, the stale layer may be accessed by external reads in the
+ // meantime.
+ diff.lock.Lock()
+ defer diff.lock.Unlock()
+
+ // Flatten the parent into the grandparent. The flattening internally obtains a
+ // write lock on grandparent.
+ flattened := parent.flatten().(*diffLayer)
+ t.layers[flattened.root] = flattened
+
+ // Invoke the hook if it's registered. Ugly hack.
+ if t.onFlatten != nil {
+ t.onFlatten()
+ }
+ diff.parent = flattened
+ if flattened.memory < aggregatorMemoryLimit {
+ // Accumulator layer is smaller than the limit, so we can abort, unless
+ // there's a snapshot being generated currently. In that case, the trie
+ // will move from underneath the generator so we **must** merge all the
+ // partial data down into the snapshot and restart the generation.
+ if flattened.parent.(*diskLayer).genAbort == nil {
+ return nil
+ }
+ }
+ default:
+ panic(fmt.Sprintf("unknown data layer: %T", parent))
+ }
+ // If the bottom-most layer is larger than our memory cap, persist to disk
+ bottom := diff.parent.(*diffLayer)
+
+ bottom.lock.RLock()
+ base := diffToDisk(bottom)
+ bottom.lock.RUnlock()
+
+ t.layers[base.root] = base
+ diff.parent = base
+ return base
+}
+
+// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
+// it. The method will panic if called on a non-bottom-most diff layer.
+//
+// The disk layer persistence should be operated in an atomic way. All updates
+// should be discarded if the whole transition is not finished.
+func diffToDisk(bottom *diffLayer) *diskLayer {
+ var (
+ base = bottom.parent.(*diskLayer)
+ batch = base.diskdb.NewBatch()
+ stats *generatorStats
+ )
+ // If the disk layer is running a snapshot generator, abort it
+ if base.genAbort != nil {
+ abort := make(chan *generatorStats)
+ base.genAbort <- abort
+ stats = <-abort
+ }
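+ // The returned stats are reused below: journalProgress persists them and,
+ // if generation was unfinished, the new disk layer resumes with them.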
+ // Put the deletion in the batch writer, flush all updates in the final step.
+ rawdb.DeleteSnapshotRoot(batch)
+
+ // Mark the original base as stale as we're going to create a new wrapper
+ base.lock.Lock()
+ if base.stale {
+ panic("parent disk layer is stale") // we've committed into the same base from two children, boo
+ }
+ base.stale = true
+ base.lock.Unlock()
+
+ // Destroy all the destructed accounts from the database
+ for hash := range bottom.destructSet {
+ // Skip any account not covered yet by the snapshot
+ if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
+ continue
+ }
+ // Remove all storage slots
+ rawdb.DeleteAccountSnapshot(batch, hash)
+ base.cache.Set(hash[:], nil)
+
+ it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
+ for it.Next() {
+ key := it.Key()
+ batch.Delete(key)
+ base.cache.Del(key[1:])
+ snapshotFlushStorageItemMeter.Mark(1)
+
+ // Ensure we don't delete too much data blindly (contract can be
+ // huge). It's ok to flush, the root will go missing in case of a
+ // crash and we'll detect and regenerate the snapshot.
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ utils.Logger().Fatal().Err(err).Msg("Failed to write storage deletions")
+ }
+ batch.Reset()
+ }
+ }
+ it.Release()
+ }
+ // Push all updated accounts into the database
+ for hash, data := range bottom.accountData {
+ // Skip any account not covered yet by the snapshot
+ if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
+ continue
+ }
+ // Push the account to disk
+ rawdb.WriteAccountSnapshot(batch, hash, data)
+ base.cache.Set(hash[:], data)
+ snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
+
+ snapshotFlushAccountItemMeter.Mark(1)
+ snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
+
+ // Ensure we don't write too much data blindly. It's ok to flush, the
+ // root will go missing in case of a crash and we'll detect and regen
+ // the snapshot.
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ utils.Logger().Fatal().Err(err).Msg("Failed to write storage deletions")
+ }
+ batch.Reset()
+ }
+ }
+ // Push all the storage slots into the database
+ for accountHash, storage := range bottom.storageData {
+ // Skip any account not covered yet by the snapshot
+ if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
+ continue
+ }
+ // Generation might be mid-account, track that case too
+ midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])
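+ // (the generator marker is either a 32-byte account hash or a 64-byte
+ // account+slot hash, so its first common.HashLength bytes identify the account)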
+
+ for storageHash, data := range storage {
+ // Skip any slot not covered yet by the snapshot
+ if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
+ continue
+ }
+ if len(data) > 0 {
+ rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
+ base.cache.Set(append(accountHash[:], storageHash[:]...), data)
+ snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
+ } else {
+ rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
+ base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
+ }
+ snapshotFlushStorageItemMeter.Mark(1)
+ snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
+ }
+ }
+ // Update the snapshot block marker and write any remainder data
+ rawdb.WriteSnapshotRoot(batch, bottom.root)
+
+ // Write out the generator progress marker and report
+ journalProgress(batch, base.genMarker, stats)
+
+ // Flush all the updates in the single db operation. Ensure the
+ // disk layer transition is atomic.
+ if err := batch.Write(); err != nil {
+ utils.Logger().Fatal().Err(err).Msg("Failed to write leftover snapshot")
+ }
+ utils.Logger().Debug().Interface("root", bottom.root).Bool("complete", base.genMarker == nil).Msg("Journalled disk layer")
+ res := &diskLayer{
+ root: bottom.root,
+ cache: base.cache,
+ diskdb: base.diskdb,
+ triedb: base.triedb,
+ genMarker: base.genMarker,
+ genPending: base.genPending,
+ }
+ // If snapshot generation hasn't finished yet, port over all the stats and
+ // continue where the previous round left off.
+ //
+ // Note, the `base.genAbort` comparison is not used normally, it's checked
+ // to allow the tests to play with the marker without triggering this path.
+ if base.genMarker != nil && base.genAbort != nil {
+ res.genMarker = base.genMarker
+ res.genAbort = make(chan chan *generatorStats)
+ go res.generate(stats)
+ }
+ return res
+}
+
+// Journal commits an entire diff hierarchy to disk into a single journal entry.
+// This is meant to be used during shutdown to persist the snapshot without
+// flattening everything down (bad for reorgs).
+//
+// The method returns the root hash of the base layer that needs to be persisted
+// to disk as a trie too to allow continuing any pending generation op.
+func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
+ // Retrieve the head snapshot to journal from
+ snap := t.Snapshot(root)
+ if snap == nil {
+ return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
+ }
+ // Run the journaling
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Firstly, write out the metadata of the journal
+ journal := new(bytes.Buffer)
+ if err := rlp.Encode(journal, journalVersion); err != nil {
+ return common.Hash{}, err
+ }
+ diskroot := t.diskRoot()
+ if diskroot == (common.Hash{}) {
+ return common.Hash{}, errors.New("invalid disk root")
+ }
+ // Secondly, write out the disk layer root to ensure the
+ // diff journal is continuous with the disk layer.
+ if err := rlp.Encode(journal, diskroot); err != nil {
+ return common.Hash{}, err
+ }
+ // Finally write out the journal of each layer in reverse order.
+ base, err := snap.(snapshot).Journal(journal)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Store the journal into the database and return
+ rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
+ return base, nil
+}
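+
+// The journal written above is, conceptually, the concatenation
+//
+//	RLP(journalVersion) || RLP(diskRoot) || RLP(diff 1) || ... || RLP(diff n)
+//
+// with the diff layers journalled bottom-up (an illustrative summary of the
+// encoding, not part of the original comments).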
+
+// Rebuild wipes all available snapshot data from the persistent database and
+// discards all caches and diff layers. Afterwards, it starts a new snapshot
+// generator with the given root hash.
+func (t *Tree) Rebuild(root common.Hash) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Firstly, delete any recovery flag in the database, since we are now
+ // building a brand new snapshot. Also re-enable the snapshot feature.
+ rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
+ rawdb.DeleteSnapshotDisabled(t.diskdb)
+
+ // Iterate over and mark all layers stale
+ for _, layer := range t.layers {
+ switch layer := layer.(type) {
+ case *diskLayer:
+ // If the base layer is generating, abort it and wait for it to stop
+ if layer.genAbort != nil {
+ abort := make(chan *generatorStats)
+ layer.genAbort <- abort
+ <-abort
+ }
+ // Layer should be inactive now, mark it as stale
+ layer.lock.Lock()
+ layer.stale = true
+ layer.lock.Unlock()
+
+ case *diffLayer:
+ // If the layer is a simple diff, simply mark as stale
+ layer.lock.Lock()
+ atomic.StoreUint32(&layer.stale, 1)
+ layer.lock.Unlock()
+
+ default:
+ panic(fmt.Sprintf("unknown layer type: %T", layer))
+ }
+ }
+ // Start generating a new snapshot from scratch on a background thread. The
+ // generator will run a wiper first if there's not one running right now.
+ utils.Logger().Info().Msg("Rebuilding state snapshot")
+ t.layers = map[common.Hash]snapshot{
+ root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
+ }
+}
+
+// AccountIterator creates a new account iterator for the specified root hash and
+// seeks to a starting account hash.
+func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
+ ok, err := t.generating()
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ return nil, ErrNotConstructed
+ }
+ return newFastAccountIterator(t, root, seek)
+}
+
+// StorageIterator creates a new storage iterator for the specified root hash and
+// account. The iterator will be moved to the specified start position.
+func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ ok, err := t.generating()
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ return nil, ErrNotConstructed
+ }
+ return newFastStorageIterator(t, root, account, seek)
+}
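+
+// Usage sketch (illustrative): walking all accounts once generation has
+// finished; root is an assumed state root:
+//
+//	it, err := tree.AccountIterator(root, common.Hash{})
+//	if err != nil {
+//		return err // ErrNotConstructed while still generating
+//	}
+//	defer it.Release()
+//	for it.Next() {
+//		_ = it.Hash()
+//	}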
+
+// Verify iterates the whole state (all the accounts as well as the corresponding storages)
+// with the specific root and compares the re-computed hash with the original one.
+func (t *Tree) Verify(root common.Hash) error {
+ acctIt, err := t.AccountIterator(root, common.Hash{})
+ if err != nil {
+ return err
+ }
+ defer acctIt.Release()
+
+ got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+ storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
+ if err != nil {
+ return common.Hash{}, err
+ }
+ defer storageIt.Release()
+
+ hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ return hash, nil
+ }, newGenerateStats(), true)
+
+ if err != nil {
+ return err
+ }
+ if got != root {
+ return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
+ }
+ return nil
+}
+
+// disklayer is an internal helper function to return the disk layer.
+// The lock of snapTree is assumed to be held already.
+func (t *Tree) disklayer() *diskLayer {
+ var snap snapshot
+ for _, s := range t.layers {
+ snap = s
+ break
+ }
+ if snap == nil {
+ return nil
+ }
+ switch layer := snap.(type) {
+ case *diskLayer:
+ return layer
+ case *diffLayer:
+ return layer.origin
+ default:
+ panic(fmt.Sprintf("%T: undefined layer", snap))
+ }
+}
+
+// diskRoot is an internal helper function to return the disk layer root.
+// The lock of snapTree is assumed to be held already.
+func (t *Tree) diskRoot() common.Hash {
+ disklayer := t.disklayer()
+ if disklayer == nil {
+ return common.Hash{}
+ }
+ return disklayer.Root()
+}
+
+// generating is an internal helper function which reports whether the snapshot
+// is still under construction.
+func (t *Tree) generating() (bool, error) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ layer := t.disklayer()
+ if layer == nil {
+ return false, errors.New("disk layer is missing")
+ }
+ layer.lock.RLock()
+ defer layer.lock.RUnlock()
+ return layer.genMarker != nil, nil
+}
+
+// DiskRoot is an external helper function to return the disk layer root.
+func (t *Tree) DiskRoot() common.Hash {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ return t.diskRoot()
+}
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
new file mode 100644
index 0000000000..951576bc8f
--- /dev/null
+++ b/core/state/snapshot/snapshot_test.go
@@ -0,0 +1,488 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ crand "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/harmony-one/harmony/core/rawdb"
+)
+
+// randomHash generates a random blob of data and returns it as a hash.
+func randomHash() common.Hash {
+ var hash common.Hash
+ if n, err := crand.Read(hash[:]); n != common.HashLength || err != nil {
+ panic(err)
+ }
+ return hash
+}
+
+// randomAccount generates a random account and returns it RLP encoded.
+func randomAccount() []byte {
+ root := randomHash()
+ a := Account{
+ Balance: big.NewInt(rand.Int63()),
+ Nonce: rand.Uint64(),
+ Root: root[:],
+ CodeHash: types.EmptyCodeHash[:],
+ }
+ data, _ := rlp.EncodeToBytes(a)
+ return data
+}
+
+// randomAccountSet generates a set of random accounts with the given strings as
+// the account address hashes.
+func randomAccountSet(hashes ...string) map[common.Hash][]byte {
+ accounts := make(map[common.Hash][]byte)
+ for _, hash := range hashes {
+ accounts[common.HexToHash(hash)] = randomAccount()
+ }
+ return accounts
+}
+
+// randomStorageSet generates a set of random slots with the given strings as
+// the slot addresses.
+func randomStorageSet(accounts []string, hashes [][]string, nilStorage [][]string) map[common.Hash]map[common.Hash][]byte {
+ storages := make(map[common.Hash]map[common.Hash][]byte)
+ for index, account := range accounts {
+ storages[common.HexToHash(account)] = make(map[common.Hash][]byte)
+
+ if index < len(hashes) {
+ hashes := hashes[index]
+ for _, hash := range hashes {
+ storages[common.HexToHash(account)][common.HexToHash(hash)] = randomHash().Bytes()
+ }
+ }
+ if index < len(nilStorage) {
+ nils := nilStorage[index]
+ for _, hash := range nils {
+ storages[common.HexToHash(account)][common.HexToHash(hash)] = nil
+ }
+ }
+ }
+ return storages
+}
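+
+// For example (illustrative), randomStorageSet([]string{"0xa1"}, [][]string{{"0x01"}}, nil)
+// yields account 0xa1 with one random-valued slot at key 0x01; nilStorage
+// entries produce nil slot values, modelling deleted slots.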
+
+// Tests that if a disk layer becomes stale, no active external references will
+// be returned with junk data. This version of the test flattens every diff layer
+// to check internal corner case around the bottom-most memory accumulator.
+func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Retrieve a reference to the base and commit a diff on top
+ ref := snaps.Snapshot(base.root)
+
+ accounts := map[common.Hash][]byte{
+ common.HexToHash("0xa1"): randomAccount(),
+ }
+ if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ t.Fatalf("failed to create a diff layer: %v", err)
+ }
+ if n := len(snaps.layers); n != 2 {
+ t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 2)
+ }
+ // Commit the diff layer onto the disk and ensure it's persisted
+ if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
+ t.Fatalf("failed to merge diff layer onto disk: %v", err)
+ }
+ // Since the base layer was modified, ensure that data retrievals on the external reference fail
+ if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
+ t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
+ }
+ if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
+ t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
+ }
+ if n := len(snaps.layers); n != 1 {
+ t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 1)
+ fmt.Println(snaps.layers)
+ }
+}
+
+// Tests that if a disk layer becomes stale, no active external references will
+// be returned with junk data. This version of the test retains the bottom diff
+// layer to check the usual mode of operation where the accumulator is retained.
+func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Retrieve a reference to the base and commit two diffs on top
+ ref := snaps.Snapshot(base.root)
+
+ accounts := map[common.Hash][]byte{
+ common.HexToHash("0xa1"): randomAccount(),
+ }
+ if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ t.Fatalf("failed to create a diff layer: %v", err)
+ }
+ if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+ t.Fatalf("failed to create a diff layer: %v", err)
+ }
+ if n := len(snaps.layers); n != 3 {
+ t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3)
+ }
+ // Commit the diff layer onto the disk and ensure it's persisted
+ defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit)
+ aggregatorMemoryLimit = 0
+
+ if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil {
+ t.Fatalf("failed to merge accumulator onto disk: %v", err)
+ }
+ // Since the base layer was modified, ensure that data retrievals on the external reference fail
+ if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
+ t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
+ }
+ if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
+ t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
+ }
+ if n := len(snaps.layers); n != 2 {
+ t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2)
+ fmt.Println(snaps.layers)
+ }
+}
+
+// Tests that if a diff layer becomes stale, no active external references will
+// be returned with junk data. This version of the test retains the bottom diff
+// layer to check the usual mode of operation where the accumulator is retained.
+func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Commit three diffs on top and retrieve a reference to the bottommost
+ accounts := map[common.Hash][]byte{
+ common.HexToHash("0xa1"): randomAccount(),
+ }
+ if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ t.Fatalf("failed to create a diff layer: %v", err)
+ }
+ if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+ t.Fatalf("failed to create a diff layer: %v", err)
+ }
+ if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
+ t.Fatalf("failed to create a diff layer: %v", err)
+ }
+ if n := len(snaps.layers); n != 4 {
+ t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 4)
+ }
+ ref := snaps.Snapshot(common.HexToHash("0x02"))
+
+ // Doing a Cap operation with many allowed layers should be a no-op
+ exp := len(snaps.layers)
+ if err := snaps.Cap(common.HexToHash("0x04"), 2000); err != nil {
+ t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
+ }
+ if got := len(snaps.layers); got != exp {
+ t.Errorf("layers modified, got %d exp %d", got, exp)
+ }
+ // Flatten the diff layer into the bottom accumulator
+ if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil {
+ t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
+ }
+ // Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail
+ if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
+ t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
+ }
+ if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
+ t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
+ }
+ if n := len(snaps.layers); n != 3 {
+ t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 3)
+ fmt.Println(snaps.layers)
+ }
+}
+
+// TestPostCapBasicDataAccess tests some functionality regarding capping/flattening.
+func TestPostCapBasicDataAccess(t *testing.T) {
+ // setAccount is a helper to construct a random account entry and assign it to
+ // an account slot in a snapshot
+ setAccount := func(accKey string) map[common.Hash][]byte {
+ return map[common.Hash][]byte{
+ common.HexToHash(accKey): randomAccount(),
+ }
+ }
+ // Create a starting base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // The lowest difflayer
+ snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+ snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+ snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
+
+ snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+ snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
+
+ // checkExist verifies if an account exists in a snapshot
+ checkExist := func(layer *diffLayer, key string) error {
+ if data, _ := layer.Account(common.HexToHash(key)); data == nil {
+ return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key))
+ }
+ return nil
+ }
+ // shouldErr checks that an account access errors as expected
+ shouldErr := func(layer *diffLayer, key string) error {
+ if data, err := layer.Account(common.HexToHash(key)); err == nil {
+ return fmt.Errorf("expected error, got data %x", data)
+ }
+ return nil
+ }
+ // check basics
+ snap := snaps.Snapshot(common.HexToHash("0xb3")).(*diffLayer)
+
+ if err := checkExist(snap, "0xa1"); err != nil {
+ t.Error(err)
+ }
+ if err := checkExist(snap, "0xb2"); err != nil {
+ t.Error(err)
+ }
+ if err := checkExist(snap, "0xb3"); err != nil {
+ t.Error(err)
+ }
+ // Cap to a bad root should fail
+ if err := snaps.Cap(common.HexToHash("0x1337"), 0); err == nil {
+ t.Errorf("expected error, got none")
+ }
+ // Now, merge the a-chain
+ snaps.Cap(common.HexToHash("0xa3"), 0)
+
+ // At this point, a2 got merged into a1. Thus, a1 is now modified, and as a1 is
+ // the parent of b2, b2 should no longer be able to iterate into parent.
+
+ // These should still be accessible
+ if err := checkExist(snap, "0xb2"); err != nil {
+ t.Error(err)
+ }
+ if err := checkExist(snap, "0xb3"); err != nil {
+ t.Error(err)
+ }
+ // But these would need iteration into the modified parent
+ if err := shouldErr(snap, "0xa1"); err != nil {
+ t.Error(err)
+ }
+ if err := shouldErr(snap, "0xa2"); err != nil {
+ t.Error(err)
+ }
+ if err := shouldErr(snap, "0xa3"); err != nil {
+ t.Error(err)
+ }
+ // Now, merge it again, just for fun. It should now error, since a3
+ // is a disk layer
+ if err := snaps.Cap(common.HexToHash("0xa3"), 0); err == nil {
+ t.Error("expected error capping the disk layer, got none")
+ }
+}
+
+// TestSnapshots tests the functionality for retrieving the snapshot
+// with a given head root and the desired depth.
+func TestSnapshots(t *testing.T) {
+ // setAccount is a helper to construct a random account entry and assign it to
+ // an account slot in a snapshot
+ setAccount := func(accKey string) map[common.Hash][]byte {
+ return map[common.Hash][]byte{
+ common.HexToHash(accKey): randomAccount(),
+ }
+ }
+ makeRoot := func(height uint64) common.Hash {
+ var buffer [8]byte
+ binary.BigEndian.PutUint64(buffer[:], height)
+ return common.BytesToHash(buffer[:])
+ }
+ // Create a starting base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: makeRoot(1),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Construct the snapshots with 129 layers, flattening whatever's above that
+ var (
+ last = common.HexToHash("0x01")
+ head common.Hash
+ )
+ for i := 0; i < 129; i++ {
+ head = makeRoot(uint64(i + 2))
+ snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil)
+ last = head
+ snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
+ }
+ var cases = []struct {
+ headRoot common.Hash
+ limit int
+ nodisk bool
+ expected int
+ expectBottom common.Hash
+ }{
+ {head, 0, false, 0, common.Hash{}},
+ {head, 64, false, 64, makeRoot(129 + 2 - 64)},
+ {head, 128, false, 128, makeRoot(3)}, // Normal diff layers, no accumulator
+ {head, 129, true, 129, makeRoot(2)}, // All diff layers, including accumulator
+ {head, 130, false, 130, makeRoot(1)}, // All diff layers + disk layer
+ }
+ for i, c := range cases {
+ layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk)
+ if len(layers) != c.expected {
+ t.Errorf("non-overflow test %d: returned snapshot layers are mismatched, want %v, got %v", i, c.expected, len(layers))
+ }
+ if len(layers) == 0 {
+ continue
+ }
+ bottommost := layers[len(layers)-1]
+ if bottommost.Root() != c.expectBottom {
+ t.Errorf("non-overflow test %d: snapshot mismatch, want %v, get %v", i, c.expectBottom, bottommost.Root())
+ }
+ }
+ // Above we've tested the normal capping, which leaves the accumulator live.
+ // Test that if the bottommost accumulator diff layer overflows the allowed
+ // memory limit, the snapshot tree gets capped to one less layer.
+ // Commit the diff layer onto the disk and ensure it's persisted
+ defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit)
+ aggregatorMemoryLimit = 0
+
+ snaps.Cap(head, 128) // 129 (128 diffs + 1 overflown accumulator + 1 disk)
+
+ cases = []struct {
+ headRoot common.Hash
+ limit int
+ nodisk bool
+ expected int
+ expectBottom common.Hash
+ }{
+ {head, 0, false, 0, common.Hash{}},
+ {head, 64, false, 64, makeRoot(129 + 2 - 64)},
+ {head, 128, false, 128, makeRoot(3)}, // All diff layers, accumulator was flattened
+ {head, 129, true, 128, makeRoot(3)}, // All diff layers, accumulator was flattened
+ {head, 130, false, 129, makeRoot(2)}, // All diff layers + disk layer
+ }
+ for i, c := range cases {
+ layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk)
+ if len(layers) != c.expected {
+ t.Errorf("overflow test %d: returned snapshot layers are mismatched, want %v, got %v", i, c.expected, len(layers))
+ }
+ if len(layers) == 0 {
+ continue
+ }
+ bottommost := layers[len(layers)-1]
+ if bottommost.Root() != c.expectBottom {
+ t.Errorf("overflow test %d: snapshot mismatch, want %v, get %v", i, c.expectBottom, bottommost.Root())
+ }
+ }
+}
+
+// TestReadStateDuringFlattening tests the scenario in which, while the bottom
+// diff layers are being merged (which marks them as stale), a read happens via
+// a pre-created top snapshot layer that tries to access the state in those
+// stale layers. It ensures the read retrieves the right state back (blocking
+// until the flattening is finished) instead of an unexpected error (snapshot
+// layer is stale).
+func TestReadStateDuringFlattening(t *testing.T) {
+ // setAccount is a helper to construct a random account entry and assign it to
+ // an account slot in a snapshot
+ setAccount := func(accKey string) map[common.Hash][]byte {
+ return map[common.Hash][]byte{
+ common.HexToHash(accKey): randomAccount(),
+ }
+ }
+ // Create a starting base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // 4 layers in total, 3 diff layers and 1 disk layer
+ snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+ snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+ snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+
+ // Obtain the topmost snapshot handler for state accessing
+ snap := snaps.Snapshot(common.HexToHash("0xa3"))
+
+ // Register the testing hook to access the state after flattening
+ var result = make(chan *Account)
+ snaps.onFlatten = func() {
+ // Spin up a thread to read the account from the pre-created
+ // snapshot handler. It's expected to be blocked.
+ go func() {
+ account, _ := snap.Account(common.HexToHash("0xa1"))
+ result <- account
+ }()
+ select {
+ case res := <-result:
+ t.Fatalf("Unexpected return %v", res)
+ case <-time.NewTimer(time.Millisecond * 300).C:
+ }
+ }
+ // Cap the snap tree, which will mark the bottom-most layer as stale.
+ snaps.Cap(common.HexToHash("0xa3"), 1)
+ select {
+ case account := <-result:
+ if account == nil {
+ t.Fatal("Failed to retrieve account")
+ }
+ case <-time.NewTimer(time.Millisecond * 300).C:
+ t.Fatal("Unexpected blocker")
+ }
+}
diff --git a/core/state/snapshot/sort.go b/core/state/snapshot/sort.go
new file mode 100644
index 0000000000..88841231d9
--- /dev/null
+++ b/core/state/snapshot/sort.go
@@ -0,0 +1,36 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// hashes is a helper to implement sort.Interface.
+type hashes []common.Hash
+
+// Len is the number of elements in the collection.
+func (hs hashes) Len() int { return len(hs) }
+
+// Less reports whether the element with index i should sort before the element
+// with index j.
+func (hs hashes) Less(i, j int) bool { return bytes.Compare(hs[i][:], hs[j][:]) < 0 }
+
+// Swap swaps the elements with indexes i and j.
+func (hs hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] }
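+
+// Usage sketch (illustrative): sort.Sort(hashes(hs)) orders a hash slice in
+// ascending byte order, which is the order the snapshot iterators expect.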
diff --git a/core/state/snapshot/utils.go b/core/state/snapshot/utils.go
new file mode 100644
index 0000000000..68c049a427
--- /dev/null
+++ b/core/state/snapshot/utils.go
@@ -0,0 +1,165 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+// CheckDanglingStorage iterates the snap storage data, and verifies that all
+// storage also has corresponding account data.
+func CheckDanglingStorage(chaindb ethdb.KeyValueStore) error {
+ if err := checkDanglingDiskStorage(chaindb); err != nil {
+ utils.Logger().Error().Err(err).Msg("Database check error")
+ }
+ return checkDanglingMemStorage(chaindb)
+}
+
+// checkDanglingDiskStorage checks if there is any 'dangling' storage data in the
+// disk-backed snapshot layer.
+func checkDanglingDiskStorage(chaindb ethdb.KeyValueStore) error {
+ var (
+ lastReport = time.Now()
+ start = time.Now()
+ lastKey []byte
+ it = rawdb.NewKeyLengthIterator(chaindb.NewIterator(rawdb.SnapshotStoragePrefix, nil), 1+2*common.HashLength)
+ )
+ utils.Logger().Info().Msg("Checking dangling snapshot disk storage")
+
+ defer it.Release()
+ for it.Next() {
+ k := it.Key()
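+ // Key layout: 1-byte prefix + 32-byte account hash + 32-byte slot hash,
+ // so bytes [1:33] are the account hash.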
+ accKey := k[1:33]
+ if bytes.Equal(accKey, lastKey) {
+ // No need to look up the account for every slot
+ continue
+ }
+ lastKey = common.CopyBytes(accKey)
+ if time.Since(lastReport) > time.Second*8 {
+ utils.Logger().Info().
+ Str("at", fmt.Sprintf("%#x", accKey)).
+ Interface("elapsed", common.PrettyDuration(time.Since(start))).
+ Msg("Iterating snap storage")
+ lastReport = time.Now()
+ }
+ if data := rawdb.ReadAccountSnapshot(chaindb, common.BytesToHash(accKey)); len(data) == 0 {
+ utils.Logger().Warn().
+ Str("account", fmt.Sprintf("%#x", accKey)).
+ Str("storagekey", fmt.Sprintf("%#x", k)).
+ Msg("Dangling storage - missing account")
+
+ return fmt.Errorf("dangling snapshot storage account %#x", accKey)
+ }
+ }
+ utils.Logger().Info().Err(it.Error()).
+ Interface("time", common.PrettyDuration(time.Since(start))).
+ Msg("Verified the snapshot disk storage")
+
+ return nil
+}
+
+// checkDanglingMemStorage checks if there is any 'dangling' storage in the journalled
+// snapshot difflayers.
+func checkDanglingMemStorage(db ethdb.KeyValueStore) error {
+ start := time.Now()
+ utils.Logger().Info().Msg("Checking dangling journalled storage")
+ err := iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+ for accHash := range storage {
+ if _, ok := accounts[accHash]; !ok {
+ utils.Logger().Error().
+ Str("account", fmt.Sprintf("%#x", accHash)).
+ Interface("root", root).
+ Msg("Dangling storage - missing account")
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ utils.Logger().Info().Err(err).Msg("Failed to resolve snapshot journal")
+ return err
+ }
+ utils.Logger().Info().Interface("time", common.PrettyDuration(time.Since(start))).Msg("Verified the snapshot journalled storage")
+ return nil
+}
+
+// CheckJournalAccount shows information about an account, from the disk layer and
+// up through the diff layers.
+func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error {
+ // Look up the disk layer first
+ baseRoot := rawdb.ReadSnapshotRoot(db)
+ fmt.Printf("Disklayer: Root: %x\n", baseRoot)
+ if data := rawdb.ReadAccountSnapshot(db, hash); data != nil {
+ account := new(Account)
+ if err := rlp.DecodeBytes(data, account); err != nil {
+ panic(err)
+ }
+ fmt.Printf("\taccount.nonce: %d\n", account.Nonce)
+ fmt.Printf("\taccount.balance: %x\n", account.Balance)
+ fmt.Printf("\taccount.root: %x\n", account.Root)
+ fmt.Printf("\taccount.codehash: %x\n", account.CodeHash)
+ }
+ // Check storage
+ {
+ it := rawdb.NewKeyLengthIterator(db.NewIterator(append(rawdb.SnapshotStoragePrefix, hash.Bytes()...), nil), 1+2*common.HashLength)
+ fmt.Printf("\tStorage:\n")
+ for it.Next() {
+ slot := it.Key()[33:]
+ fmt.Printf("\t\t%x: %x\n", slot, it.Value())
+ }
+ it.Release()
+ }
+ var depth = 0
+
+ return iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+ _, a := accounts[hash]
+ _, b := destructs[hash]
+ _, c := storage[hash]
+ depth++
+ if !a && !b && !c {
+ return nil
+ }
+ fmt.Printf("Disklayer+%d: Root: %x, parent %x\n", depth, root, pRoot)
+ if data, ok := accounts[hash]; ok {
+ account := new(Account)
+ if err := rlp.DecodeBytes(data, account); err != nil {
+ panic(err)
+ }
+ fmt.Printf("\taccount.nonce: %d\n", account.Nonce)
+ fmt.Printf("\taccount.balance: %x\n", account.Balance)
+ fmt.Printf("\taccount.root: %x\n", account.Root)
+ fmt.Printf("\taccount.codehash: %x\n", account.CodeHash)
+ }
+ if _, ok := destructs[hash]; ok {
+ fmt.Printf("\t Destructed!")
+ }
+ if data, ok := storage[hash]; ok {
+ fmt.Printf("\tStorage\n")
+ for k, v := range data {
+ fmt.Printf("\t\t%x: %x\n", k, v)
+ }
+ }
+ return nil
+ })
+}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 0d4fd7a800..262f5136ee 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -23,12 +23,12 @@ import (
"math/big"
"time"
- "github.com/ethereum/go-ethereum/metrics"
-
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
-
+ "github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/staking"
)
@@ -54,7 +54,7 @@ func (s Storage) String() (str string) {
// Copy ...
func (s Storage) Copy() Storage {
- cpy := make(Storage)
+ cpy := make(Storage, len(s))
for key, value := range s {
cpy[key] = value
}
@@ -67,18 +67,18 @@ func (s Storage) Copy() Storage {
// The usage pattern is as follows:
// First you need to obtain a state object.
// Account values can be accessed and modified through the object.
-// Finally, call CommitTrie to write the modified storage trie into a database.
+// Finally, call commitTrie to write the modified storage trie into a database.
type Object struct {
address common.Address
addrHash common.Hash // hash of ethereum address of the account
- data Account
+ data types.StateAccount
db *DB
// DB error.
// State objects are used by the consensus core and VM which are
// unable to deal with database-level errors. Any error that occurs
// during a database read is memoized here and will eventually be returned
- // by StateDB.Commit.
+ // by DB.Commit.
dbErr error
// Write caches.
@@ -93,9 +93,10 @@ type Object struct {
// Cache flags.
// When an object is marked suicided it will be delete from the trie
// during the "update" phase of the state transition.
- dirtyCode bool // true if the code was updated
- suicided bool
- deleted bool
+ validatorWrapper bool // true if the code belongs to validator wrapper
+ dirtyCode bool // true if the code was updated
+ suicided bool
+ deleted bool
}
// empty returns whether the account is considered empty.
@@ -113,15 +114,15 @@ type Account struct {
}
// newObject creates a state object.
-func newObject(db *DB, address common.Address, data Account) *Object {
+func newObject(db *DB, address common.Address, data types.StateAccount) *Object {
if data.Balance == nil {
data.Balance = new(big.Int)
}
if data.CodeHash == nil {
- data.CodeHash = emptyCodeHash
+ data.CodeHash = types.EmptyCodeHash.Bytes()
}
if data.Root == (common.Hash{}) {
- data.Root = emptyRoot
+ data.Root = types.EmptyRootHash
}
return &Object{
db: db,
@@ -136,7 +137,7 @@ func newObject(db *DB, address common.Address, data Account) *Object {
// EncodeRLP implements rlp.Encoder.
func (s *Object) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, s.data)
+ return rlp.Encode(w, &s.data)
}
// setError remembers the first non-nil error it is called with.
@@ -161,16 +162,27 @@ func (s *Object) touch() {
}
}
-func (s *Object) getTrie(db Database) Trie {
+// getTrie returns the associated storage trie. The trie will be opened
+// if it's not loaded previously. An error will be returned if trie can't
+// be loaded.
+func (s *Object) getTrie(db Database) (Trie, error) {
if s.trie == nil {
- var err error
- s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root)
- if err != nil {
- s.trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{})
- s.setError(fmt.Errorf("can't create storage trie: %v", err))
+ // Try fetching from prefetcher first
+ // We don't prefetch empty tries
+ if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil {
+ // When the miner is creating the pending state, there is no
+ // prefetcher
+ s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root)
+ }
+ if s.trie == nil {
+ tr, err := db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
+ if err != nil {
+ return nil, err
+ }
+ s.trie = tr
}
}
- return s.trie
+ return s.trie, nil
}
// GetState retrieves a value from the account storage trie.
@@ -201,16 +213,44 @@ func (s *Object) GetCommittedState(db Database, key common.Hash) common.Hash {
if value, cached := s.originStorage[key]; cached {
return value
}
- // Track the amount of time wasted on reading the storage trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageReads += time.Since(start) }(time.Now())
- }
- // Otherwise load the value from the database
- enc, err := s.getTrie(db).TryGet(key[:])
- if err != nil {
- s.setError(err)
+ // If the object was destructed in *this* block (and potentially resurrected),
+ // the storage has been cleared out, and we should *not* consult the previous
+ // database about any storage values. The only possible alternatives are:
+ // 1) resurrect happened, and new slot values were set -- those should
+ // have been handled via pendingStorage above.
+ // 2) we don't have new values, and can deliver an empty response back
+ if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed {
return common.Hash{}
}
+ // If no live objects are available, attempt to use snapshots
+ var (
+ enc []byte
+ err error
+ )
+ if s.db.snap != nil {
+ start := time.Now()
+ enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
+ if metrics.EnabledExpensive {
+ s.db.SnapshotStorageReads += time.Since(start)
+ }
+ }
+ // If the snapshot is unavailable or reading from it fails, load from the database.
+ if s.db.snap == nil || err != nil {
+ start := time.Now()
+ tr, err := s.getTrie(db)
+ if err != nil {
+ s.setError(err)
+ return common.Hash{}
+ }
+ enc, err = tr.TryGet(key.Bytes())
+ if metrics.EnabledExpensive {
+ s.db.StorageReads += time.Since(start)
+ }
+ if err != nil {
+ s.setError(err)
+ return common.Hash{}
+ }
+ }
var value common.Hash
if len(enc) > 0 {
_, content, _, err := rlp.Split(enc)
@@ -268,9 +308,16 @@ func (s *Object) setState(key, value common.Hash) {
// finalise moves all dirty storage slots into the pending area to be hashed or
// committed later. It is invoked at the end of every transaction.
-func (s *Object) finalise() {
+func (s *Object) finalise(prefetch bool) {
+ slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage))
for key, value := range s.dirtyStorage {
s.pendingStorage[key] = value
+ if value != s.originStorage[key] {
+ slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure
+ }
+ }
+ if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
+ s.db.prefetcher.prefetch(s.addrHash, s.data.Root, slotsToPrefetch)
}
if len(s.dirtyStorage) > 0 {
s.dirtyStorage = make(Storage)
@@ -278,16 +325,30 @@ func (s *Object) finalise() {
}
// updateTrie writes cached storage modifications into the object's storage trie.
-func (s *Object) updateTrie(db Database) Trie {
+// It will return nil if the trie has not been loaded and no changes have been
+// made. An error will be returned if the trie can't be loaded/updated correctly.
+func (s *Object) updateTrie(db Database) (Trie, error) {
// Make sure all dirty slots are finalized into the pending storage area
- s.finalise()
-
- // Track the amount of time wasted on updating the storge trie
+ s.finalise(false) // Don't prefetch anymore, pull directly if need be
+ if len(s.pendingStorage) == 0 {
+ return s.trie, nil
+ }
+ // Track the amount of time wasted on updating the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
}
+ // The snapshot storage map for the object
+ var (
+ storage map[common.Hash][]byte
+ hasher = s.db.hasher
+ )
+ tr, err := s.getTrie(db)
+ if err != nil {
+ s.setError(err)
+ return nil, err
+ }
// Insert all the pending updates into the trie
- tr := s.getTrie(db)
+ usedStorage := make([][]byte, 0, len(s.pendingStorage))
for key, value := range s.pendingStorage {
// Skip noop changes, persist actual changes
if value == s.originStorage[key] {
@@ -295,65 +356,101 @@ func (s *Object) updateTrie(db Database) Trie {
}
s.originStorage[key] = value
+ var v []byte
if (value == common.Hash{}) {
- s.setError(tr.TryDelete(key[:]))
- continue
+ if err := tr.TryDelete(key[:]); err != nil {
+ s.setError(err)
+ return nil, err
+ }
+ s.db.StorageDeleted += 1
+ } else {
+ // Encoding []byte cannot fail, ok to ignore the error.
+ v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
+ if err := tr.TryUpdate(key[:], v); err != nil {
+ s.setError(err)
+ return nil, err
+ }
+ s.db.StorageUpdated += 1
}
- // Encoding []byte cannot fail, ok to ignore the error.
- v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
- s.setError(tr.TryUpdate(key[:], v))
+ // If state snapshotting is active, cache the data til commit
+ if s.db.snap != nil {
+ if storage == nil {
+ // Retrieve the old storage map, if available, create a new one otherwise
+ if storage = s.db.snapStorage[s.addrHash]; storage == nil {
+ storage = make(map[common.Hash][]byte)
+ s.db.snapStorage[s.addrHash] = storage
+ }
+ }
+ storage[crypto.HashData(hasher, key[:])] = v // v will be nil if it's deleted
+ }
+ usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
+ }
+ if s.db.prefetcher != nil {
+ s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage)
}
if len(s.pendingStorage) > 0 {
s.pendingStorage = make(Storage)
}
- return tr
+ return tr, nil
}
-// UpdateRoot sets the trie root to the current root hash of
+// updateRoot sets the trie root to the current root hash of the storage trie.
+// Any error encountered while updating the trie is recorded via setError.
func (s *Object) updateRoot(db Database) {
- s.updateTrie(db)
-
- // Track the amount of time wasted on hashing the storge trie
+ tr, err := s.updateTrie(db)
+ if err != nil {
+ s.setError(fmt.Errorf("updateRoot (%x) error: %w", s.address, err))
+ return
+ }
+ // If nothing changed, don't bother with hashing anything
+ if tr == nil {
+ return
+ }
+ // Track the amount of time wasted on hashing the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
}
- s.data.Root = s.trie.Hash()
+ s.data.Root = tr.Hash()
}
-// CommitTrie the storage trie of the object to db.
-// This updates the trie root.
-func (s *Object) CommitTrie(db Database) error {
- s.updateTrie(db)
+// commitTrie submits the storage changes into the storage trie and re-computes
+// the root. In addition, all trie changes are collected in a node set and returned.
+func (s *Object) commitTrie(db Database) (*trie.NodeSet, error) {
+ tr, err := s.updateTrie(db)
+ if err != nil {
+ return nil, err
+ }
if s.dbErr != nil {
- return s.dbErr
+ return nil, s.dbErr
+ }
+ // If nothing changed, don't bother with committing anything
+ if tr == nil {
+ return nil, nil
}
- // Track the amount of time wasted on committing the storge trie
+ // Track the amount of time wasted on committing the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
}
- root, err := s.trie.Commit(nil)
- if err == nil {
- s.data.Root = root
- }
- return err
+ root, nodes := tr.Commit(false)
+ s.data.Root = root
+ return nodes, err
}
-// AddBalance removes amount from c's balance.
+// AddBalance adds amount to s's balance.
// It is used to add funds to the destination account of a transfer.
func (s *Object) AddBalance(amount *big.Int) {
- // EIP158: We must check emptiness for the objects such that the account
+ // EIP161: We must check emptiness for the objects such that the account
// clearing (0,0,0 objects) can take effect.
if amount.Sign() == 0 {
if s.empty() {
s.touch()
}
-
return
}
s.SetBalance(new(big.Int).Add(s.Balance(), amount))
}
-// SubBalance removes amount from c's balance.
+// SubBalance removes amount from s's balance.
// It is used to remove funds from the origin account of a transfer.
func (s *Object) SubBalance(amount *big.Int) {
if amount.Sign() == 0 {
@@ -362,7 +459,6 @@ func (s *Object) SubBalance(amount *big.Int) {
s.SetBalance(new(big.Int).Sub(s.Balance(), amount))
}
-// SetBalance ...
func (s *Object) SetBalance(amount *big.Int) {
s.db.journal.append(balanceChange{
account: &s.address,
@@ -379,18 +475,18 @@ func (s *Object) setBalance(amount *big.Int) {
func (s *Object) ReturnGas(gas *big.Int) {}
func (s *Object) deepCopy(db *DB) *Object {
- stateObject := newObject(db, s.address, s.data)
+ obj := newObject(db, s.address, s.data)
if s.trie != nil {
- stateObject.trie = db.db.CopyTrie(s.trie)
+ obj.trie = db.db.CopyTrie(s.trie)
}
- stateObject.code = s.code
- stateObject.dirtyStorage = s.dirtyStorage.Copy()
- stateObject.originStorage = s.originStorage.Copy()
- stateObject.pendingStorage = s.pendingStorage.Copy()
- stateObject.suicided = s.suicided
- stateObject.dirtyCode = s.dirtyCode
- stateObject.deleted = s.deleted
- return stateObject
+ obj.code = s.code
+ obj.dirtyStorage = s.dirtyStorage.Copy()
+ obj.originStorage = s.originStorage.Copy()
+ obj.pendingStorage = s.pendingStorage.Copy()
+ obj.suicided = s.suicided
+ obj.dirtyCode = s.dirtyCode
+ obj.deleted = s.deleted
+ return obj
}
//
@@ -402,14 +498,24 @@ func (s *Object) Address() common.Address {
return s.address
}
-// Code returns the contract code associated with this object, if any.
-func (s *Object) Code(db Database) []byte {
+// Code returns the contract/validator code associated with this object, if any.
+func (s *Object) Code(db Database, isValidatorCode bool) []byte {
if s.code != nil {
return s.code
}
- if bytes.Equal(s.CodeHash(), emptyCodeHash) {
+ if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
return nil
}
+ if s.validatorWrapper || isValidatorCode {
+ code, err := db.ValidatorCode(s.addrHash, common.BytesToHash(s.CodeHash()))
+ if err != nil {
+ s.setError(fmt.Errorf("can't load validator code hash %x: %v", s.CodeHash(), err))
+ }
+ if code != nil {
+ s.code = code
+ return code
+ }
+ }
code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash()))
if err != nil {
s.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
@@ -418,24 +524,49 @@ func (s *Object) Code(db Database) []byte {
return code
}
-// SetCode ...
-func (s *Object) SetCode(codeHash common.Hash, code []byte) {
- prevcode := s.Code(s.db.db)
+// CodeSize returns the size of the contract/validator code associated with this object,
+// or zero if none. This method is almost a mirror of Code, but uses a cache
+// inside the database to avoid loading codes seen recently.
+func (s *Object) CodeSize(db Database, isValidatorCode bool) int {
+ if s.code != nil {
+ return len(s.code)
+ }
+ if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
+ return 0
+ }
+ if s.validatorWrapper || isValidatorCode {
+ size, err := db.ValidatorCodeSize(s.addrHash, common.BytesToHash(s.CodeHash()))
+ if err != nil {
+ s.setError(fmt.Errorf("can't load validator code size %x: %v", s.CodeHash(), err))
+ }
+ if size > 0 {
+ return size
+ }
+ }
+ size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash()))
+ if err != nil {
+ s.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
+ }
+ return size
+}
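The isValidatorCode flag threaded through Code/CodeSize (and GetCode/SetCode further down) selects between two code spaces backed by the same hash field. A sketch of both lookups, reusing the state.DB setup from the sketch above (addresses and bytes are illustrative only):

    func codeSpacesSketch(sdb *state.DB, contractAddr, validatorAddr common.Address) {
        // Plain contract code: stored and read with isValidatorCode=false.
        sdb.SetCode(contractAddr, []byte{0x60, 0x60, 0x60, 0x40}, false)
        _ = sdb.GetCode(contractAddr, false)     // resolved via ContractCode
        _ = sdb.GetCodeSize(contractAddr, false) // resolved via ContractCodeSize

        // Validator wrappers: read with isValidatorCode=true, which tries
        // ValidatorCode first and only then falls back to contract code.
        _ = sdb.GetCode(validatorAddr, true)
    }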
+
+func (s *Object) SetCode(codeHash common.Hash, code []byte, isValidatorCode bool) {
+ prevcode := s.Code(s.db.db, isValidatorCode)
s.db.journal.append(codeChange{
account: &s.address,
prevhash: s.CodeHash(),
prevcode: prevcode,
})
- s.setCode(codeHash, code)
+ s.setCode(codeHash, code, isValidatorCode)
}
-func (s *Object) setCode(codeHash common.Hash, code []byte) {
+func (s *Object) setCode(codeHash common.Hash, code []byte, isValidatorCode bool) {
s.code = code
s.data.CodeHash = codeHash[:]
s.dirtyCode = true
+ s.validatorWrapper = isValidatorCode
}
-// SetNonce ...
func (s *Object) SetNonce(nonce uint64) {
s.db.journal.append(nonceChange{
account: &s.address,
@@ -448,26 +579,23 @@ func (s *Object) setNonce(nonce uint64) {
s.data.Nonce = nonce
}
-// CodeHash ...
func (s *Object) CodeHash() []byte {
return s.data.CodeHash
}
-// Balance ...
func (s *Object) Balance() *big.Int {
return s.data.Balance
}
-// Nonce ...
func (s *Object) Nonce() uint64 {
return s.data.Nonce
}
-// Value Never called, but must be present to allow stateObject to be used
+// Value is never called, but must be present to allow Object to be used
// as a vm.Account interface that also satisfies the vm.ContractRef
// interface. Interfaces are awesome.
func (s *Object) Value() *big.Int {
- panic("Value on stateObject should never be called")
+ panic("Value on state object should never be called")
}
// IsValidator checks whether it is a validator object
diff --git a/core/state/state_object_test.go b/core/state/state_object_test.go
new file mode 100644
index 0000000000..42fd778025
--- /dev/null
+++ b/core/state/state_object_test.go
@@ -0,0 +1,46 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+func BenchmarkCutOriginal(b *testing.B) {
+ value := common.HexToHash("0x01")
+ for i := 0; i < b.N; i++ {
+ bytes.TrimLeft(value[:], "\x00")
+ }
+}
+
+func BenchmarkCutsetterFn(b *testing.B) {
+ value := common.HexToHash("0x01")
+ cutSetFn := func(r rune) bool { return r == 0 }
+ for i := 0; i < b.N; i++ {
+ bytes.TrimLeftFunc(value[:], cutSetFn)
+ }
+}
+
+func BenchmarkCutCustomTrim(b *testing.B) {
+ value := common.HexToHash("0x01")
+ for i := 0; i < b.N; i++ {
+ common.TrimLeftZeroes(value[:])
+ }
+}
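These three benchmarks compare ways of stripping leading zero bytes; locally they can be reproduced with go test -run='^$' -bench=BenchmarkCut ./core/state. The semantics under test, as a sketch (TrimLeftZeroes is the go-ethereum helper benchmarked above):

    func trimSemantics() {
        v := common.HexToHash("0x01")          // 31 zero bytes, then 0x01
        trimmed := common.TrimLeftZeroes(v[:]) // -> []byte{0x01}
        _ = trimmed
    }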
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 37eb128866..9d209a80be 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -22,9 +22,10 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/core/rawdb"
)
type stateTest struct {
@@ -34,18 +35,20 @@ type stateTest struct {
func newStateTest() *stateTest {
db := rawdb.NewMemoryDatabase()
- sdb, _ := New(common.Hash{}, NewDatabase(db))
+ sdb, _ := New(common.Hash{}, NewDatabase(db), nil)
return &stateTest{db: db, state: sdb}
}
func TestDump(t *testing.T) {
- s := newStateTest()
+ db := rawdb.NewMemoryDatabase()
+ sdb, _ := New(common.Hash{}, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
+ s := &stateTest{db: db, state: sdb}
// generate a few entries
obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01}))
obj1.AddBalance(big.NewInt(22))
obj2 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
- obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
+ obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3}, false)
obj3 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x02}))
obj3.SetBalance(big.NewInt(44))
@@ -147,7 +150,7 @@ func TestSnapshotEmpty(t *testing.T) {
}
func TestSnapshot2(t *testing.T) {
- state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
stateobjaddr0 := common.BytesToAddress([]byte("so0"))
stateobjaddr1 := common.BytesToAddress([]byte("so1"))
@@ -163,19 +166,19 @@ func TestSnapshot2(t *testing.T) {
so0 := state.getStateObject(stateobjaddr0)
so0.SetBalance(big.NewInt(42))
so0.SetNonce(43)
- so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'})
+ so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'}, false)
so0.suicided = false
so0.deleted = false
state.setStateObject(so0)
root, _ := state.Commit(false)
- state.Reset(root)
+ state, _ = New(root, state.db, state.snaps)
// and one with deleted == true
so1 := state.getStateObject(stateobjaddr1)
so1.SetBalance(big.NewInt(52))
so1.SetNonce(53)
- so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'})
+ so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'}, false)
so1.suicided = true
so1.deleted = true
state.setStateObject(so1)
@@ -191,7 +194,7 @@ func TestSnapshot2(t *testing.T) {
so0Restored := state.getStateObject(stateobjaddr0)
// Update lazily-loaded values before comparing.
so0Restored.GetState(state.db, storageaddr)
- so0Restored.Code(state.db)
+ so0Restored.Code(state.db, false)
// non-deleted is equal (restored)
compareStateObjects(so0Restored, so0, t)
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 0a7cd3b564..30692e4d44 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -23,14 +23,16 @@ import (
"sort"
"time"
- "github.com/ethereum/go-ethereum/metrics"
-
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
- "github.com/harmony-one/harmony/core/types"
+ "github.com/harmony-one/harmony/core/rawdb"
+
+ types2 "github.com/harmony-one/harmony/core/types"
common2 "github.com/harmony-one/harmony/internal/common"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/numeric"
@@ -65,20 +67,32 @@ func (n *proofList) Delete(key []byte) error {
panic("not supported")
}
-// DB within the ethereum protocol are used to store anything
+// DB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
// * Contracts
// * Accounts
type DB struct {
- db Database
- trie Trie
+ db Database
+ prefetcher *triePrefetcher
+ trie Trie
+ hasher crypto.KeccakState
+
+ // originalRoot is the pre-state root, before any changes were made.
+ // It will be updated when the Commit is called.
+ originalRoot common.Hash
+
+ snaps *snapshot.Tree
+ snap snapshot.Snapshot
+ snapAccounts map[common.Hash][]byte
+ snapStorage map[common.Hash]map[common.Hash][]byte
// This map holds 'live' objects, which will get modified while processing a state transition.
- stateObjects map[common.Address]*Object
- stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
- stateObjectsDirty map[common.Address]struct{}
- stateValidators map[common.Address]*stk.ValidatorWrapper
+ stateObjects map[common.Address]*Object
+ stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
+ stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ stateObjectsDestruct map[common.Address]struct{} // State objects destructed in the block
+ stateValidators map[common.Address]*stk.ValidatorWrapper
// DB error.
// State objects are used by the consensus core and VM which are
@@ -93,45 +107,95 @@ type DB struct {
thash, bhash common.Hash // thash means hmy tx hash
ethTxHash common.Hash // ethTxHash is eth tx hash, use by tracer
txIndex int
- logs map[common.Hash][]*types.Log
+ logs map[common.Hash][]*types2.Log
logSize uint
preimages map[common.Hash][]byte
+ // Per-transaction access list
+ accessList *accessList
+
+ // Transient storage
+ transientStorage transientStorage
+
// Journal of state modifications. This is the backbone of
// Snapshot and RevertToSnapshot.
journal *journal
validRevisions []revision
- nextRevisionID int
+ nextRevisionId int
// Measurements gathered during execution for debugging purposes
- AccountReads time.Duration
- AccountHashes time.Duration
- AccountUpdates time.Duration
- AccountCommits time.Duration
- StorageReads time.Duration
- StorageHashes time.Duration
- StorageUpdates time.Duration
- StorageCommits time.Duration
+ AccountReads time.Duration
+ AccountHashes time.Duration
+ AccountUpdates time.Duration
+ AccountCommits time.Duration
+ StorageReads time.Duration
+ StorageHashes time.Duration
+ StorageUpdates time.Duration
+ StorageCommits time.Duration
+ SnapshotAccountReads time.Duration
+ SnapshotStorageReads time.Duration
+ SnapshotCommits time.Duration
+ TrieDBCommits time.Duration
+
+ AccountUpdated int
+ StorageUpdated int
+ AccountDeleted int
+ StorageDeleted int
}
// New creates a new state from a given trie.
-func New(root common.Hash, db Database) (*DB, error) {
+func New(root common.Hash, db Database, snaps *snapshot.Tree) (*DB, error) {
tr, err := db.OpenTrie(root)
if err != nil {
return nil, err
}
- return &DB{
- db: db,
- trie: tr,
- stateObjects: make(map[common.Address]*Object),
- stateObjectsPending: make(map[common.Address]struct{}),
- stateObjectsDirty: make(map[common.Address]struct{}),
- stateValidators: make(map[common.Address]*stk.ValidatorWrapper),
- logs: make(map[common.Hash][]*types.Log),
- preimages: make(map[common.Hash][]byte),
- journal: newJournal(),
- }, nil
+ sdb := &DB{
+ db: db,
+ trie: tr,
+ originalRoot: root,
+ snaps: snaps,
+ stateObjects: make(map[common.Address]*Object),
+ stateObjectsPending: make(map[common.Address]struct{}),
+ stateObjectsDirty: make(map[common.Address]struct{}),
+ stateObjectsDestruct: make(map[common.Address]struct{}),
+ stateValidators: make(map[common.Address]*stk.ValidatorWrapper),
+ logs: make(map[common.Hash][]*types2.Log),
+ preimages: make(map[common.Hash][]byte),
+ journal: newJournal(),
+ accessList: newAccessList(),
+ transientStorage: newTransientStorage(),
+ hasher: crypto.NewKeccakState(),
+ }
+ if sdb.snaps != nil {
+ if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
+ sdb.snapAccounts = make(map[common.Hash][]byte)
+ sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
+ }
+ }
+ return sdb, nil
+}
+
+// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
+// state trie concurrently while the state is mutated so that when we reach the
+// commit phase, most of the needed data is already hot.
+func (db *DB) StartPrefetcher(namespace string) {
+ if db.prefetcher != nil {
+ db.prefetcher.close()
+ db.prefetcher = nil
+ }
+ if db.snap != nil {
+ db.prefetcher = newTriePrefetcher(db.db, db.originalRoot, namespace)
+ }
+}
+
+// StopPrefetcher terminates a running prefetcher and reports any leftover stats
+// from the gathered metrics.
+func (db *DB) StopPrefetcher() {
+ if db.prefetcher != nil {
+ db.prefetcher.close()
+ db.prefetcher = nil
+ }
}
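A sketch of the intended lifecycle around block processing (the namespace is only a metrics label; note StartPrefetcher is a no-op unless the state was opened with a snapshot):

    func processBlockSketch(sdb *state.DB) {
        sdb.StartPrefetcher("chain") // warm trie nodes while txs execute
        defer sdb.StopPrefetcher()   // always stop it, or its goroutines leak
        // ... apply transactions, then IntermediateRoot/Commit as usual ...
    }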
// setError remembers the first non-nil error it is called with.
@@ -161,15 +225,14 @@ func (db *DB) Reset(root common.Hash) error {
db.bhash = common.Hash{}
db.ethTxHash = common.Hash{}
db.txIndex = 0
- db.logs = make(map[common.Hash][]*types.Log)
+ db.logs = make(map[common.Hash][]*types2.Log)
db.logSize = 0
db.preimages = make(map[common.Hash][]byte)
db.clearJournalAndRefund()
return nil
}
-// AddLog ...
-func (db *DB) AddLog(log *types.Log) {
+func (db *DB) AddLog(log *types2.Log) {
db.journal.append(addLogChange{txhash: db.thash})
log.TxHash = db.thash
@@ -180,14 +243,19 @@ func (db *DB) AddLog(log *types.Log) {
db.logSize++
}
-// GetLogs ...
-func (db *DB) GetLogs(hash common.Hash) []*types.Log {
- return db.logs[hash]
+// GetLogs returns the logs matching the specified transaction hash, and annotates
+// them with the given blockNumber and blockHash.
+func (db *DB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common.Hash) []*types2.Log {
+ logs := db.logs[hash]
+ for _, l := range logs {
+ l.BlockNumber = blockNumber
+ l.BlockHash = blockHash
+ }
+ return logs
}
-// Logs ...
-func (db *DB) Logs() []*types.Log {
- var logs []*types.Log
+func (db *DB) Logs() []*types2.Log {
+ var logs []*types2.Log
for _, lgs := range db.logs {
logs = append(logs, lgs...)
}
@@ -220,7 +288,7 @@ func (db *DB) AddRefund(gas uint64) {
func (db *DB) SubRefund(gas uint64) {
db.journal.append(refundChange{prev: db.refund})
if gas > db.refund {
- panic("Refund counter below zero")
+ panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, db.refund))
}
db.refund -= gas
}
@@ -240,18 +308,17 @@ func (db *DB) Empty(addr common.Address) bool {
// GetBalance retrieves the balance from the given address or 0 if object not found
func (db *DB) GetBalance(addr common.Address) *big.Int {
- stateObject := db.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Balance()
+ Object := db.getStateObject(addr)
+ if Object != nil {
+ return Object.Balance()
}
return common.Big0
}
-// GetNonce ...
func (db *DB) GetNonce(addr common.Address) uint64 {
- stateObject := db.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Nonce()
+ Object := db.getStateObject(addr)
+ if Object != nil {
+ return Object.Nonce()
}
return 0
@@ -262,12 +329,12 @@ func (db *DB) TxIndex() int {
return db.txIndex
}
-func (s *DB) TxHash() common.Hash {
- return s.thash
+func (db *DB) TxHash() common.Hash {
+ return db.thash
}
-func (s *DB) TxHashETH() common.Hash {
- return s.ethTxHash
+func (db *DB) TxHashETH() common.Hash {
+ return db.ethTxHash
}
// BlockHash returns the current block hash set by Prepare.
@@ -275,74 +342,73 @@ func (db *DB) BlockHash() common.Hash {
return db.bhash
}
-// GetCode ...
-func (db *DB) GetCode(addr common.Address) []byte {
- stateObject := db.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Code(db.db)
+func (db *DB) GetCode(addr common.Address, isValidatorCode bool) []byte {
+ Object := db.getStateObject(addr)
+ if Object != nil {
+ return Object.Code(db.db, isValidatorCode)
}
return nil
}
-// GetCodeSize ...
-func (db *DB) GetCodeSize(addr common.Address) int {
- stateObject := db.getStateObject(addr)
- if stateObject == nil {
- return 0
- }
- if stateObject.code != nil {
- return len(stateObject.code)
- }
- size, err := db.db.ContractCodeSize(
- stateObject.addrHash, common.BytesToHash(stateObject.CodeHash()),
- )
- if err != nil {
- db.setError(err)
+func (db *DB) GetCodeSize(addr common.Address, isValidatorCode bool) int {
+ Object := db.getStateObject(addr)
+ if Object != nil {
+ return Object.CodeSize(db.db, isValidatorCode)
}
- return size
+ return 0
}
-// GetCodeHash ...
func (db *DB) GetCodeHash(addr common.Address) common.Hash {
- stateObject := db.getStateObject(addr)
- if stateObject == nil {
+ Object := db.getStateObject(addr)
+ if Object == nil {
return common.Hash{}
}
- return common.BytesToHash(stateObject.CodeHash())
+ return common.BytesToHash(Object.CodeHash())
}
// GetState retrieves a value from the given account's storage trie.
func (db *DB) GetState(addr common.Address, hash common.Hash) common.Hash {
- stateObject := db.getStateObject(addr)
- if stateObject != nil {
- return stateObject.GetState(db.db, hash)
+ Object := db.getStateObject(addr)
+ if Object != nil {
+ return Object.GetState(db.db, hash)
}
return common.Hash{}
}
-// GetProof returns the MerkleProof for a given Account
-func (db *DB) GetProof(a common.Address) ([][]byte, error) {
+// GetProof returns the Merkle proof for a given account.
+func (db *DB) GetProof(addr common.Address) ([][]byte, error) {
+ return db.GetProofByHash(crypto.Keccak256Hash(addr.Bytes()))
+}
+
+// GetProofByHash returns the Merkle proof for a given account.
+func (db *DB) GetProofByHash(addrHash common.Hash) ([][]byte, error) {
var proof proofList
- err := db.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
- return [][]byte(proof), err
+ err := db.trie.Prove(addrHash[:], 0, &proof)
+ return proof, err
}
-// GetStorageProof returns the StorageProof for given key
+// GetStorageProof returns the Merkle proof for given storage slot.
func (db *DB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
- var proof proofList
- trie := db.StorageTrie(a)
+ trie, err := db.StorageTrie(a)
+ if err != nil {
+ return nil, err
+ }
if trie == nil {
- return proof, errors.New("storage trie for requested address does not exist")
+ return nil, errors.New("storage trie for requested address does not exist")
}
- err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
- return [][]byte(proof), err
+ var proof proofList
+ err = trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
+ if err != nil {
+ return nil, err
+ }
+ return proof, nil
}
// GetCommittedState retrieves a value from the given account's committed storage trie.
func (db *DB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
- stateObject := db.getStateObject(addr)
- if stateObject != nil {
- return stateObject.GetCommittedState(db.db, hash)
+ Object := db.getStateObject(addr)
+ if Object != nil {
+ return Object.GetCommittedState(db.db, hash)
}
return common.Hash{}
}
@@ -352,22 +418,25 @@ func (db *DB) Database() Database {
return db.db
}
-// StorageTrie returns the storage trie of an account.
-// The return value is a copy and is nil for non-existent accounts.
-func (db *DB) StorageTrie(addr common.Address) Trie {
- stateObject := db.getStateObject(addr)
- if stateObject == nil {
- return nil
+// StorageTrie returns the storage trie of an account. The return value is a copy
+// and is nil for non-existent accounts. An error will be returned if storage trie
+// is existent but can't be loaded correctly.
+func (db *DB) StorageTrie(addr common.Address) (Trie, error) {
+ Object := db.getStateObject(addr)
+ if Object == nil {
+ return nil, nil
}
- cpy := stateObject.deepCopy(db)
- return cpy.updateTrie(db.db)
+ cpy := Object.deepCopy(db)
+ if _, err := cpy.updateTrie(db.db); err != nil {
+ return nil, err
+ }
+ return cpy.getTrie(db.db)
}
-// HasSuicided ...
func (db *DB) HasSuicided(addr common.Address) bool {
- stateObject := db.getStateObject(addr)
- if stateObject != nil {
- return stateObject.suicided
+ Object := db.getStateObject(addr)
+ if Object != nil {
+ return Object.suicided
}
return false
}
@@ -378,49 +447,60 @@ func (db *DB) HasSuicided(addr common.Address) bool {
// AddBalance adds amount to the account associated with addr.
func (db *DB) AddBalance(addr common.Address, amount *big.Int) {
- stateObject := db.GetOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.AddBalance(amount)
+ Object := db.GetOrNewStateObject(addr)
+ if Object != nil {
+ Object.AddBalance(amount)
}
}
// SubBalance subtracts amount from the account associated with addr.
func (db *DB) SubBalance(addr common.Address, amount *big.Int) {
- stateObject := db.GetOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SubBalance(amount)
+ Object := db.GetOrNewStateObject(addr)
+ if Object != nil {
+ Object.SubBalance(amount)
}
}
-// SetBalance ...
func (db *DB) SetBalance(addr common.Address, amount *big.Int) {
- stateObject := db.GetOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SetBalance(amount)
+ Object := db.GetOrNewStateObject(addr)
+ if Object != nil {
+ Object.SetBalance(amount)
}
}
-// SetNonce ...
func (db *DB) SetNonce(addr common.Address, nonce uint64) {
- stateObject := db.GetOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SetNonce(nonce)
+ Object := db.GetOrNewStateObject(addr)
+ if Object != nil {
+ Object.SetNonce(nonce)
}
}
-// SetCode ...
-func (db *DB) SetCode(addr common.Address, code []byte) {
- stateObject := db.GetOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SetCode(crypto.Keccak256Hash(code), code)
+func (db *DB) SetCode(addr common.Address, code []byte, isValidatorCode bool) {
+ Object := db.GetOrNewStateObject(addr)
+ if Object != nil {
+ Object.SetCode(crypto.Keccak256Hash(code), code, isValidatorCode)
}
}
-// SetState ...
func (db *DB) SetState(addr common.Address, key, value common.Hash) {
- stateObject := db.GetOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SetState(db.db, key, value)
+ Object := db.GetOrNewStateObject(addr)
+ if Object != nil {
+ Object.SetState(db.db, key, value)
+ }
+}
+
+// SetStorage replaces the entire storage for the specified account with given
+// storage. This function should only be used for debugging.
+func (db *DB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
+ // SetStorage needs to wipe existing storage. We achieve this by pretending
+ // that the account self-destructed earlier in this block, by flagging
+ // it in stateObjectsDestruct. The effect of doing so is that storage lookups
+ // will not hit disk, since it is assumed that the disk data belongs
+ // to a previous incarnation of the object.
+ db.stateObjectsDestruct[addr] = struct{}{}
+ Object := db.GetOrNewStateObject(addr)
+ for k, v := range storage {
+ Object.SetState(db.db, k, v)
}
}
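A sketch of the debug-only usage this enables, e.g. state-override style tracing (slot values are illustrative):

    func overrideStorageSketch(sdb *state.DB, addr common.Address) {
        // Flags addr as destructed so old slots never resolve from disk,
        // then writes the replacement storage fresh.
        sdb.SetStorage(addr, map[common.Hash]common.Hash{
            common.HexToHash("0x01"): common.HexToHash("0x2a"),
        })
    }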
@@ -430,21 +510,50 @@ func (db *DB) SetState(addr common.Address, key, value common.Hash) {
// The account's state object is still available until the state is committed,
// getStateObject will return a non-nil account after Suicide.
func (db *DB) Suicide(addr common.Address) bool {
- stateObject := db.getStateObject(addr)
- if stateObject == nil {
+ Object := db.getStateObject(addr)
+ if Object == nil {
return false
}
db.journal.append(suicideChange{
account: &addr,
- prev: stateObject.suicided,
- prevbalance: new(big.Int).Set(stateObject.Balance()),
+ prev: Object.suicided,
+ prevbalance: new(big.Int).Set(Object.Balance()),
})
- stateObject.markSuicided()
- stateObject.data.Balance = new(big.Int)
+ Object.markSuicided()
+ Object.data.Balance = new(big.Int)
return true
}
+// SetTransientState sets transient storage for a given account. It
+// adds the change to the journal so that it can be rolled back
+// to its previous value if there is a revert.
+func (db *DB) SetTransientState(addr common.Address, key, value common.Hash) {
+ prev := db.GetTransientState(addr, key)
+ if prev == value {
+ return
+ }
+
+ db.journal.append(transientStorageChange{
+ account: &addr,
+ key: key,
+ prevalue: prev,
+ })
+
+ db.setTransientState(addr, key, value)
+}
+
+// setTransientState is a lower level setter for transient storage. It
+// is called during a revert to prevent modifications to the journal.
+func (db *DB) setTransientState(addr common.Address, key, value common.Hash) {
+ db.transientStorage.Set(addr, key, value)
+}
+
+// GetTransientState gets transient storage for a given account.
+func (db *DB) GetTransientState(addr common.Address, key common.Hash) common.Hash {
+ return db.transientStorage.Get(addr, key)
+}
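Because SetTransientState goes through the journal, transient writes revert with the rest of the state. A sketch, assuming the DB's existing Snapshot/RevertToSnapshot pair:

    func transientSketch(sdb *state.DB, addr common.Address) {
        key, val := common.HexToHash("0x01"), common.HexToHash("0x02")
        rev := sdb.Snapshot()
        sdb.SetTransientState(addr, key, val) // journalled write
        _ = sdb.GetTransientState(addr, key)  // == val
        sdb.RevertToSnapshot(rev)             // rolls the write back
        _ = sdb.GetTransientState(addr, key)  // == common.Hash{} again
    }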
+
//
// Setting, updating & deleting state object methods.
//
@@ -457,12 +566,17 @@ func (db *DB) updateStateObject(obj *Object) {
}
// Encode the account and update the account trie
addr := obj.Address()
+ if err := db.trie.TryUpdateAccount(addr, &obj.data); err != nil {
+ db.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
+ }
- data, err := rlp.EncodeToBytes(obj)
- if err != nil {
- panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
+ // If state snapshotting is active, cache the data until commit. Note, this
+ // update mechanism is not symmetric to the deletion, because whereas it is
+ // enough to track account updates at commit time, deletions need tracking
+ // at transaction boundary level to ensure we capture state clearing.
+ if db.snap != nil {
+ db.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
}
- db.setError(db.trie.TryUpdate(addr[:], data))
}
// deleteStateObject removes the given object from the state trie.
@@ -473,7 +587,9 @@ func (db *DB) deleteStateObject(obj *Object) {
}
// Delete the account from the trie
addr := obj.Address()
- db.setError(db.trie.TryDelete(addr[:]))
+ if err := db.trie.TryDeleteAccount(addr); err != nil {
+ db.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
+ }
}
// getStateObject retrieves a state object given by the address, returning nil if
@@ -495,23 +611,50 @@ func (db *DB) getDeletedStateObject(addr common.Address) *Object {
if obj := db.stateObjects[addr]; obj != nil {
return obj
}
- // Track the amount of time wasted on loading the object from the database
- if metrics.EnabledExpensive {
- defer func(start time.Time) { db.AccountReads += time.Since(start) }(time.Now())
- }
- // Load the object from the database
- enc, err := db.trie.TryGet(addr[:])
- if len(enc) == 0 {
- db.setError(err)
- return nil
+ // If no live objects are available, attempt to use snapshots
+ var data *types.StateAccount
+ if db.snap != nil {
+ start := time.Now()
+ acc, err := db.snap.Account(crypto.HashData(db.hasher, addr.Bytes()))
+ if metrics.EnabledExpensive {
+ db.SnapshotAccountReads += time.Since(start)
+ }
+ if err == nil {
+ if acc == nil {
+ return nil
+ }
+ data = &types.StateAccount{
+ Nonce: acc.Nonce,
+ Balance: acc.Balance,
+ CodeHash: acc.CodeHash,
+ Root: common.BytesToHash(acc.Root),
+ }
+ if len(data.CodeHash) == 0 {
+ data.CodeHash = types.EmptyCodeHash.Bytes()
+ }
+ if data.Root == (common.Hash{}) {
+ data.Root = types.EmptyRootHash
+ }
+ }
}
- var data Account
- if err := rlp.DecodeBytes(enc, &data); err != nil {
- log.Error("Failed to decode state object", "addr", addr, "err", err)
- return nil
+ // If snapshot unavailable or reading from it failed, load from the database
+ if data == nil {
+ start := time.Now()
+ var err error
+ data, err = db.trie.TryGetAccount(addr)
+ if metrics.EnabledExpensive {
+ db.AccountReads += time.Since(start)
+ }
+ if err != nil {
+ db.setError(fmt.Errorf("getDeleteStateObject (%x) error: %w", addr.Bytes(), err))
+ return nil
+ }
+ if data == nil {
+ return nil
+ }
}
// Insert into the live set
- obj := newObject(db, addr, data)
+ obj := newObject(db, addr, *data)
db.setStateObject(obj)
return obj
}
@@ -522,11 +665,11 @@ func (db *DB) setStateObject(object *Object) {
// GetOrNewStateObject retrieves a state object or create a new state object if nil.
func (db *DB) GetOrNewStateObject(addr common.Address) *Object {
- stateObject := db.getStateObject(addr)
- if stateObject == nil {
- stateObject, _ = db.createObject(addr)
+ Object := db.getStateObject(addr)
+ if Object == nil {
+ Object, _ = db.createObject(addr)
}
- return stateObject
+ return Object
}
// createObject creates a new state object. If there is an existing account with
@@ -534,12 +677,18 @@ func (db *DB) GetOrNewStateObject(addr common.Address) *Object {
func (db *DB) createObject(addr common.Address) (newobj, prev *Object) {
prev = db.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
- newobj = newObject(db, addr, Account{})
- newobj.setNonce(0) // sets the object to dirty
+ var prevdestruct bool
+ if prev != nil {
+ _, prevdestruct = db.stateObjectsDestruct[prev.address]
+ if !prevdestruct {
+ db.stateObjectsDestruct[prev.address] = struct{}{}
+ }
+ }
+ newobj = newObject(db, addr, types.StateAccount{})
if prev == nil {
db.journal.append(createObjectChange{account: &addr})
} else {
- db.journal.append(resetObjectChange{prev: prev})
+ db.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
}
db.setStateObject(newobj)
if prev != nil && !prev.deleted {
@@ -565,13 +714,16 @@ func (db *DB) CreateAccount(addr common.Address) {
}
}
-// ForEachStorage ...
func (db *DB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
so := db.getStateObject(addr)
if so == nil {
return nil
}
- it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil))
+ tr, err := so.getTrie(db.db)
+ if err != nil {
+ return err
+ }
+ it := trie.NewIterator(tr.NodeIterator(nil))
for it.Next() {
key := common.BytesToHash(db.trie.GetKey(it.Key))
@@ -600,17 +752,20 @@ func (db *DB) ForEachStorage(addr common.Address, cb func(key, value common.Hash
func (db *DB) Copy() *DB {
// Copy all the basic fields, initialize the memory ones
state := &DB{
- db: db.db,
- trie: db.db.CopyTrie(db.trie),
- stateObjects: make(map[common.Address]*Object, len(db.journal.dirties)),
- stateObjectsPending: make(map[common.Address]struct{}, len(db.stateObjectsPending)),
- stateObjectsDirty: make(map[common.Address]struct{}, len(db.journal.dirties)),
- stateValidators: make(map[common.Address]*stk.ValidatorWrapper),
- refund: db.refund,
- logs: make(map[common.Hash][]*types.Log, len(db.logs)),
- logSize: db.logSize,
- preimages: make(map[common.Hash][]byte),
- journal: newJournal(),
+ db: db.db,
+ trie: db.db.CopyTrie(db.trie),
+ originalRoot: db.originalRoot,
+ stateObjects: make(map[common.Address]*Object, len(db.journal.dirties)),
+ stateObjectsPending: make(map[common.Address]struct{}, len(db.stateObjectsPending)),
+ stateObjectsDirty: make(map[common.Address]struct{}, len(db.journal.dirties)),
+ stateObjectsDestruct: make(map[common.Address]struct{}, len(db.stateObjectsDestruct)),
+ stateValidators: make(map[common.Address]*stk.ValidatorWrapper),
+ refund: db.refund,
+ logs: make(map[common.Hash][]*types2.Log, len(db.logs)),
+ logSize: db.logSize,
+ preimages: make(map[common.Hash][]byte, len(db.preimages)),
+ journal: newJournal(),
+ hasher: crypto.NewKeccakState(),
}
// Copy the dirty states, logs, and preimages
for addr := range db.journal.dirties {
@@ -620,7 +775,7 @@ func (db *DB) Copy() *DB {
// nil
if object, exist := db.stateObjects[addr]; exist {
// Even though the original object is dirty, we are not copying the journal,
- // so we need to make sure that anyside effect the journal would have caused
+ // so we need to make sure that any side-effect the journal would have caused
// during a commit (or similar op) is already applied to the copy.
state.stateObjects[addr] = object.deepCopy(state)
@@ -628,9 +783,10 @@ func (db *DB) Copy() *DB {
state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
}
}
- // Above, we don't copy the actual journal. This means that if the copy is copied, the
- // loop above will be a no-op, since the copy's journal is empty.
- // Thus, here we iterate over stateObjects, to enable copies of copies
+ // Above, we don't copy the actual journal. This means that if the copy
+ // is copied, the loop above will be a no-op, since the copy's journal
+ // is empty. Thus, here we iterate over stateObjects, to enable copies
+ // of copies.
for addr := range db.stateObjectsPending {
if _, exist := state.stateObjects[addr]; !exist {
state.stateObjects[addr] = db.stateObjects[addr].deepCopy(state)
@@ -643,14 +799,18 @@ func (db *DB) Copy() *DB {
}
state.stateObjectsDirty[addr] = struct{}{}
}
+ // Deep copy the destruction flag.
+ for addr := range db.stateObjectsDestruct {
+ state.stateObjectsDestruct[addr] = struct{}{}
+ }
for addr, wrapper := range db.stateValidators {
copied := staketest.CopyValidatorWrapper(*wrapper)
state.stateValidators[addr] = &copied
}
for hash, logs := range db.logs {
- cpy := make([]*types.Log, len(logs))
+ cpy := make([]*types2.Log, len(logs))
for i, l := range logs {
- cpy[i] = new(types.Log)
+ cpy[i] = new(types2.Log)
*cpy[i] = *l
}
state.logs[hash] = cpy
@@ -658,13 +818,50 @@ func (db *DB) Copy() *DB {
for hash, preimage := range db.preimages {
state.preimages[hash] = preimage
}
+ // Do we need to copy the access list and transient storage?
+ // In practice: No. At the start of a transaction, these two lists are empty.
+ // We only ever copy state _between_ transactions/blocks, never in the middle
+ // of a transaction. However, it doesn't cost us much to copy empty lists, so
+ // we do it anyway to not blow up if we ever decide to copy them in the
+ // middle of a transaction.
+ state.accessList = db.accessList.Copy()
+ state.transientStorage = db.transientStorage.Copy()
+
+ // If there's a prefetcher running, make an inactive copy of it that can
+ // only access data but does not actively preload (since the user will not
+ // know that they need to explicitly terminate an active copy).
+ if db.prefetcher != nil {
+ state.prefetcher = db.prefetcher.copy()
+ }
+ if db.snaps != nil {
+ // In order for the miner to be able to use and make additions
+ // to the snapshot tree, we need to copy that as well.
+ // Otherwise, any block mined by ourselves will cause gaps in the tree,
+ // and force the miner to operate trie-backed only
+ state.snaps = db.snaps
+ state.snap = db.snap
+
+ // deep copy needed
+ state.snapAccounts = make(map[common.Hash][]byte)
+ for k, v := range db.snapAccounts {
+ state.snapAccounts[k] = v
+ }
+ state.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
+ for k, v := range db.snapStorage {
+ temp := make(map[common.Hash][]byte)
+ for kk, vv := range v {
+ temp[kk] = vv
+ }
+ state.snapStorage[k] = temp
+ }
+ }
return state
}
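The net effect is that a copy diverges independently, which TestCopy below exercises. In miniature:

    func copySketch(orig *state.DB, addr common.Address) {
        cpy := orig.Copy()
        cpy.AddBalance(addr, big.NewInt(1))
        // orig is untouched: objects, logs, access list, transient storage
        // and snapshot caches were all copied, not shared.
        _ = orig.GetBalance(addr)
    }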
// Snapshot returns an identifier for the current revision of the state.
func (db *DB) Snapshot() int {
- id := db.nextRevisionID
- db.nextRevisionID++
+ id := db.nextRevisionId
+ db.nextRevisionId++
db.validRevisions = append(db.validRevisions, revision{id, db.journal.length()})
return id
}
@@ -690,15 +887,16 @@ func (db *DB) GetRefund() uint64 {
return db.refund
}
-// Finalise finalises the state by removing the db destructed objects
-// and clears the journal as well as the refunds.
+// Finalise finalises the state by removing the destructed objects and clears
+// the journal as well as the refunds. Finalise, however, will not push any updates
+// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (db *DB) Finalise(deleteEmptyObjects bool) {
// Commit validator changes in cache to stateObjects
// TODO: remove validator cache after commit
for addr, wrapper := range db.stateValidators {
db.UpdateValidatorWrapper(addr, wrapper)
}
-
+ addressesToPrefetch := make([][]byte, 0, len(db.journal.dirties))
for addr := range db.journal.dirties {
obj, exist := db.stateObjects[addr]
if !exist {
@@ -706,17 +904,38 @@ func (db *DB) Finalise(deleteEmptyObjects bool) {
// That tx goes out of gas, and although the notion of 'touched' does not exist there, the
// touch-event will still be recorded in the journal. Since ripeMD is a special snowflake,
// it will persist in the journal even though the journal is reverted. In this special circumstance,
- // it may exist in `s.journal.dirties` but not in `s.stateObjects`.
+ // it may exist in `db.journal.dirties` but not in `db.stateObjects`.
// Thus, we can safely ignore it here
continue
}
if obj.suicided || (deleteEmptyObjects && obj.empty()) {
obj.deleted = true
+
+ // We need to maintain account deletions explicitly (will remain
+ // set indefinitely).
+ db.stateObjectsDestruct[obj.address] = struct{}{}
+
+ // If state snapshotting is active, also mark the destruction there.
+ // Note, we can't do this only at the end of a block because multiple
+ // transactions within the same block might self destruct and then
+ // resurrect an account; but the snapshotter needs both events.
+ if db.snap != nil {
+ delete(db.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(db.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
+ }
} else {
- obj.finalise()
+ obj.finalise(true) // Prefetch slots in the background
}
db.stateObjectsPending[addr] = struct{}{}
db.stateObjectsDirty[addr] = struct{}{}
+
+ // At this point, also ship the address off to the precacher. The precacher
+ // will start loading tries, and when the change is eventually committed,
+ // the commit-phase will be a lot faster
+ addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
+ }
+ if db.prefetcher != nil && len(addressesToPrefetch) > 0 {
+ db.prefetcher.prefetch(common.Hash{}, db.originalRoot, addressesToPrefetch)
}
// Invalidate journal because reverting across transactions is not allowed.
db.clearJournalAndRefund()
@@ -729,14 +948,51 @@ func (db *DB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// Finalise all the dirty storage states and write them into the tries
db.Finalise(deleteEmptyObjects)
+ // If there was a trie prefetcher operating, it gets aborted and irrevocably
+ // modified after we start retrieving tries. Remove it from the statedb after
+ // this round of use.
+ //
+ // This is weird pre-byzantium since the first tx runs with a prefetcher and
+ // the remainder without, but pre-byzantium even the initial prefetcher is
+ // useless, so no sleep lost.
+ prefetcher := db.prefetcher
+ if db.prefetcher != nil {
+ defer func() {
+ db.prefetcher.close()
+ db.prefetcher = nil
+ }()
+ }
+ // Although naively it makes sense to retrieve the account trie and then do
+ // the contract storage and account updates sequentially, that short circuits
+ // the account prefetcher. Instead, let's process all the storage updates
+ // first, giving the account prefetches just a few more milliseconds of time
+ // to pull useful data from disk.
for addr := range db.stateObjectsPending {
- obj := db.stateObjects[addr]
- if obj.deleted {
+ if obj := db.stateObjects[addr]; !obj.deleted {
+ obj.updateRoot(db.db)
+ }
+ }
+ // Now we're about to start to write changes to the trie. The trie is so far
+ // _untouched_. We can check with the prefetcher, if it can give us a trie
+ // which has the same root, but also has some content loaded into it.
+ if prefetcher != nil {
+ if trie := prefetcher.trie(common.Hash{}, db.originalRoot); trie != nil {
+ db.trie = trie
+ }
+ }
+ usedAddrs := make([][]byte, 0, len(db.stateObjectsPending))
+ for addr := range db.stateObjectsPending {
+ if obj := db.stateObjects[addr]; obj.deleted {
db.deleteStateObject(obj)
+ db.AccountDeleted += 1
} else {
- obj.updateRoot(db.db)
db.updateStateObject(obj)
+ db.AccountUpdated += 1
}
+ usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
+ }
+ if prefetcher != nil {
+ prefetcher.used(common.Hash{}, db.originalRoot, usedAddrs)
}
if len(db.stateObjectsPending) > 0 {
db.stateObjectsPending = make(map[common.Address]struct{})
@@ -756,8 +1012,12 @@ func (db *DB) Prepare(thash, bhash common.Hash, ti int) {
db.txIndex = ti
}
-func (db *DB) SetTxHashETH(ethTxHash common.Hash) {
- db.ethTxHash = ethTxHash
+// SetTxContext sets the current transaction hash and index which are
+// used when the EVM emits new state logs. It should be invoked before
+// transaction execution.
+func (db *DB) SetTxContext(thash common.Hash, ti int) {
+ db.thash = thash
+ db.txIndex = ti
}
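A sketch of the intended call pattern during block execution, so AddLog stamps each log with the right hash and index (txHashes stands in for the block's transactions):

    func applyTxsSketch(sdb *state.DB, txHashes []common.Hash) {
        for i, h := range txHashes {
            sdb.SetTxContext(h, i) // must precede execution of tx i
            // ... execute transaction i against sdb ...
        }
    }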
func (db *DB) clearJournalAndRefund() {
@@ -765,49 +1025,195 @@ func (db *DB) clearJournalAndRefund() {
db.journal = newJournal()
db.refund = 0
}
- db.validRevisions = db.validRevisions[:0] // Snapshots can be created without journal entires
+ db.validRevisions = db.validRevisions[:0] // Snapshots can be created without journal entries
+}
+
+func (db *DB) SetTxHashETH(ethTxHash common.Hash) {
+ db.ethTxHash = ethTxHash
}
// Commit writes the state to the underlying in-memory trie database.
-func (db *DB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
+func (db *DB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
+ if db.dbErr != nil {
+ return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", db.dbErr)
+ }
// Finalize any pending changes and merge everything into the tries
db.IntermediateRoot(deleteEmptyObjects)
// Commit objects to the trie, measuring the elapsed time
+ var (
+ accountTrieNodesUpdated int
+ accountTrieNodesDeleted int
+ storageTrieNodesUpdated int
+ storageTrieNodesDeleted int
+ nodes = trie.NewMergedNodeSet()
+ )
+ codeWriter := db.db.DiskDB().NewBatch()
for addr := range db.stateObjectsDirty {
if obj := db.stateObjects[addr]; !obj.deleted {
// Write any contract code associated with the state object
if obj.code != nil && obj.dirtyCode {
- db.db.TrieDB().InsertBlob(common.BytesToHash(obj.CodeHash()), obj.code)
+ if obj.validatorWrapper {
+ rawdb.WriteValidatorCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+ } else {
+ rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+ }
obj.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie
- if err := obj.CommitTrie(db.db); err != nil {
+ set, err := obj.commitTrie(db.db)
+ if err != nil {
return common.Hash{}, err
}
+ // Merge the dirty nodes of storage trie into global set
+ if set != nil {
+ if err := nodes.Merge(set); err != nil {
+ return common.Hash{}, err
+ }
+ updates, deleted := set.Size()
+ storageTrieNodesUpdated += updates
+ storageTrieNodesDeleted += deleted
+ }
}
+ // If the contract is destructed, the storage is still left in the
+ // database as dangling data. Theoretically it should be wiped from the
+ // database as well, but in the hash-based scheme it's extremely hard to
+ // determine whether the trie nodes are also referenced by other storage,
+ // and in the path-based scheme some technical challenges are still unsolved.
+ // Although it won't affect correctness, please fix it. TODO(rjl493456442)
}
if len(db.stateObjectsDirty) > 0 {
db.stateObjectsDirty = make(map[common.Address]struct{})
}
- // Write the account trie changes, measuing the amount of wasted time
+ if codeWriter.ValueSize() > 0 {
+ if err := codeWriter.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to commit dirty codes")
+ }
+ }
+ // Write the account trie changes, measuring the amount of wasted time
+ var start time.Time
if metrics.EnabledExpensive {
- defer func(start time.Time) { db.AccountCommits += time.Since(start) }(time.Now())
+ start = time.Now()
}
- return db.trie.Commit(func(leaf []byte, parent common.Hash) error {
- var account Account
- if err := rlp.DecodeBytes(leaf, &account); err != nil {
- return nil
+ root, set := db.trie.Commit(true)
+ // Merge the dirty nodes of account trie into global set
+ if set != nil {
+ if err := nodes.Merge(set); err != nil {
+ return common.Hash{}, err
}
- if account.Root != emptyRoot {
- db.db.TrieDB().Reference(account.Root, parent)
+ accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size()
+ }
+ if metrics.EnabledExpensive {
+ db.AccountCommits += time.Since(start)
+
+ accountUpdatedMeter.Mark(int64(db.AccountUpdated))
+ storageUpdatedMeter.Mark(int64(db.StorageUpdated))
+ accountDeletedMeter.Mark(int64(db.AccountDeleted))
+ storageDeletedMeter.Mark(int64(db.StorageDeleted))
+ accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
+ accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
+ storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
+ storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
+ db.AccountUpdated, db.AccountDeleted = 0, 0
+ db.StorageUpdated, db.StorageDeleted = 0, 0
+ }
+ // If snapshotting is enabled, update the snapshot tree with this new version
+ if db.snap != nil {
+ start := time.Now()
+ // Only update if there's a state transition (skip empty Clique blocks)
+ if parent := db.snap.Root(); parent != root {
+ if err := db.snaps.Update(root, parent, db.convertAccountSet(db.stateObjectsDestruct), db.snapAccounts, db.snapStorage); err != nil {
+ utils.Logger().Warn().Err(err).
+ Interface("from", parent).
+ Interface("to", root).
+ Msg("Failed to update snapshot tree")
+ }
+ // Keep 128 diff layers in memory; the persistent layer is the 129th.
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - head-127 layer (bottom-most diff layer) is paired with HEAD-127 state
+ if err := db.snaps.Cap(root, 128); err != nil {
+ utils.Logger().Warn().Err(err).
+ Interface("root", root).
+ Uint16("layers", 128).
+ Msg("Failed to cap snapshot tree")
+ }
}
- code := common.BytesToHash(account.CodeHash)
- if code != emptyCode {
- db.db.TrieDB().Reference(code, parent)
+ if metrics.EnabledExpensive {
+ db.SnapshotCommits += time.Since(start)
}
- return nil
- })
+ db.snap, db.snapAccounts, db.snapStorage = nil, nil, nil
+ }
+ if len(db.stateObjectsDestruct) > 0 {
+ db.stateObjectsDestruct = make(map[common.Address]struct{})
+ }
+ if root == (common.Hash{}) {
+ root = types.EmptyRootHash
+ }
+ origin := db.originalRoot
+ if origin == (common.Hash{}) {
+ origin = types.EmptyRootHash
+ }
+ if root != origin {
+ start := time.Now()
+ if err := db.db.TrieDB().Update(nodes); err != nil {
+ return common.Hash{}, err
+ }
+ db.originalRoot = root
+ if metrics.EnabledExpensive {
+ db.TrieDBCommits += time.Since(start)
+ }
+ }
+ return root, nil
+}
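From a caller's perspective the new Commit is still a single call: Finalise and IntermediateRoot are folded in, and on success the returned root has already been pushed to the trie database. A sketch:

    func sealSketch(sdb *state.DB) (common.Hash, error) {
        root, err := sdb.Commit(true) // deleteEmptyObjects=true per EIP-161
        if err != nil {
            return common.Hash{}, err // e.g. an earlier dbErr aborts the commit
        }
        return root, nil
    }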
+
+// AddAddressToAccessList adds the given address to the access list
+func (db *DB) AddAddressToAccessList(addr common.Address) {
+ if db.accessList.AddAddress(addr) {
+ db.journal.append(accessListAddAccountChange{&addr})
+ }
+}
+
+// AddSlotToAccessList adds the given (address, slot)-tuple to the access list
+func (db *DB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
+ addrMod, slotMod := db.accessList.AddSlot(addr, slot)
+ if addrMod {
+ // In practice, this should not happen, since there is no way to enter the
+ // scope of 'address' without 'address' having already been added to the
+ // access list (via call-variant, create, etc).
+ // Better safe than sorry, though
+ db.journal.append(accessListAddAccountChange{&addr})
+ }
+ if slotMod {
+ db.journal.append(accessListAddSlotChange{
+ address: &addr,
+ slot: &slot,
+ })
+ }
+}
+
+// AddressInAccessList returns true if the given address is in the access list.
+func (db *DB) AddressInAccessList(addr common.Address) bool {
+ return db.accessList.ContainsAddress(addr)
+}
+
+// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list.
+func (db *DB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
+ return db.accessList.Contains(addr, slot)
+}
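Together these four methods give the EVM its EIP-2929/2930 warm/cold bookkeeping, journalled for revert like everything else. A sketch:

    func accessListSketch(sdb *state.DB, addr common.Address, slot common.Hash) {
        sdb.AddAddressToAccessList(addr)
        sdb.AddSlotToAccessList(addr, slot)
        _ = sdb.AddressInAccessList(addr)       // true: addr is now warm
        _, _ = sdb.SlotInAccessList(addr, slot) // true, true
    }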
+
+// convertAccountSet converts a provided account set from address-keyed to hash-keyed.
+func (db *DB) convertAccountSet(set map[common.Address]struct{}) map[common.Hash]struct{} {
+ ret := make(map[common.Hash]struct{})
+ for addr := range set {
+ obj, exist := db.stateObjects[addr]
+ if !exist {
+ ret[crypto.Keccak256Hash(addr[:])] = struct{}{}
+ } else {
+ ret[obj.addrHash] = struct{}{}
+ }
+ }
+ return ret
}
var (
@@ -835,9 +1241,12 @@ func (db *DB) ValidatorWrapper(
return copyValidatorWrapperIfNeeded(cached, sendOriginal, copyDelegations), nil
}
- by := db.GetCode(addr)
+ by := db.GetCode(addr, true)
if len(by) == 0 {
- return nil, ErrAddressNotPresent
+ by = db.GetCode(addr, false)
+ if len(by) == 0 {
+ return nil, ErrAddressNotPresent
+ }
}
val := stk.ValidatorWrapper{}
if err := rlp.DecodeBytes(by, &val); err != nil {
@@ -883,7 +1292,7 @@ func (db *DB) UpdateValidatorWrapper(
return err
}
// has revert in-built for the code field
- db.SetCode(addr, by)
+ db.SetCode(addr, by, true)
// update cache
db.stateValidators[addr] = val
return nil
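The two-step read above keeps wrappers written before the validator/contract code split loadable. The lookup order, extracted as a sketch:

    func loadWrapperBytes(sdb *state.DB, addr common.Address) []byte {
        // New validator code space first, then the legacy contract space
        // for wrappers persisted before the split.
        if by := sdb.GetCode(addr, true); len(by) > 0 {
            return by
        }
        return sdb.GetCode(addr, false)
    }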
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 9581495d54..f4277809bd 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -29,19 +29,9 @@ import (
"testing"
"testing/quick"
- "github.com/ethereum/go-ethereum/core/rawdb"
-
"github.com/ethereum/go-ethereum/common"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/types"
-
- "github.com/harmony-one/harmony/crypto/bls"
- "github.com/harmony-one/harmony/crypto/hash"
- "github.com/harmony-one/harmony/numeric"
- stk "github.com/harmony-one/harmony/staking/types"
- staketest "github.com/harmony-one/harmony/staking/types/test"
-
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/harmony-one/harmony/common/denominations"
)
// Tests that updating a state trie does not leak any database writes prior to
@@ -49,7 +39,7 @@ import (
func TestUpdateLeaks(t *testing.T) {
// Create an empty state database
db := rawdb.NewMemoryDatabase()
- state, _ := New(common.Hash{}, NewDatabase(db))
+ state, _ := New(common.Hash{}, NewDatabase(db), nil)
// Update it with some accounts
for i := byte(0); i < 255; i++ {
@@ -60,7 +50,7 @@ func TestUpdateLeaks(t *testing.T) {
state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i}))
}
if i%3 == 0 {
- state.SetCode(addr, []byte{i, i, i, i, i})
+ state.SetCode(addr, []byte{i, i, i, i, i}, false)
}
}
@@ -70,7 +60,7 @@ func TestUpdateLeaks(t *testing.T) {
}
// Ensure that no data was leaked into the database
- it := db.NewIterator()
+ it := db.NewIterator(nil, nil)
for it.Next() {
t.Errorf("State leaked into database: %x -> %x", it.Key(), it.Value())
}
@@ -83,8 +73,8 @@ func TestIntermediateLeaks(t *testing.T) {
// Create two state databases, one transitioning to the final state, the other final from the beginning
transDb := rawdb.NewMemoryDatabase()
finalDb := rawdb.NewMemoryDatabase()
- transState, _ := New(common.Hash{}, NewDatabase(transDb))
- finalState, _ := New(common.Hash{}, NewDatabase(finalDb))
+ transState, _ := New(common.Hash{}, NewDatabase(transDb), nil)
+ finalState, _ := New(common.Hash{}, NewDatabase(finalDb), nil)
modify := func(state *DB, addr common.Address, i, tweak byte) {
state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak)))
@@ -94,7 +84,7 @@ func TestIntermediateLeaks(t *testing.T) {
state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak})
}
if i%3 == 0 {
- state.SetCode(addr, []byte{i, i, i, i, i, tweak})
+ state.SetCode(addr, []byte{i, i, i, i, i, tweak}, false)
}
}
@@ -128,7 +118,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex())
}
- it := finalDb.NewIterator()
+ it := finalDb.NewIterator(nil, nil)
for it.Next() {
key, fvalue := it.Key(), it.Value()
tvalue, err := transDb.Get(key)
@@ -136,12 +126,12 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("entry missing from the transition database: %x -> %x", key, fvalue)
}
if !bytes.Equal(fvalue, tvalue) {
- t.Errorf("the value associate key %x is mismatch,: %x in transition database ,%x in final database", key, tvalue, fvalue)
+ t.Errorf("value mismatch at key %x: %x in transition database, %x in final database", key, tvalue, fvalue)
}
}
it.Release()
- it = transDb.NewIterator()
+ it = transDb.NewIterator(nil, nil)
for it.Next() {
key, tvalue := it.Key(), it.Value()
fvalue, err := finalDb.Get(key)
@@ -149,29 +139,22 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("extra entry in the transition database: %x -> %x", key, it.Value())
}
if !bytes.Equal(fvalue, tvalue) {
- t.Errorf("the value associate key %x is mismatch,: %x in transition database ,%x in final database", key, tvalue, fvalue)
+ t.Errorf("value mismatch at key %x: %x in transition database, %x in final database", key, tvalue, fvalue)
}
}
}
-// TestCopy tests that copying a statedb object indeed makes the original and
+// TestCopy tests that copying a DB object indeed makes the original and
// the copy independent of each other. This test is a regression test against
// https://github.com/ethereum/go-ethereum/pull/15549.
func TestCopy(t *testing.T) {
// Create a random state test to copy and modify "independently"
- orig, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ orig, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
for i := byte(0); i < 255; i++ {
obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
obj.AddBalance(big.NewInt(int64(i)))
orig.updateStateObject(obj)
-
- validatorWrapper := makeValidValidatorWrapper(common.BytesToAddress([]byte{i}))
- validatorWrapper.Description.Name = "Original"
- err := orig.UpdateValidatorWrapper(common.BytesToAddress([]byte{i}), &validatorWrapper)
- if err != nil {
- t.Errorf("Couldn't update ValidatorWrapper %d with error %s", i, err)
- }
}
orig.Finalise(false)
@@ -194,85 +177,6 @@ func TestCopy(t *testing.T) {
orig.updateStateObject(origObj)
copy.updateStateObject(copyObj)
ccopy.updateStateObject(copyObj)
-
- origValWrap, err := orig.ValidatorWrapper(common.BytesToAddress([]byte{i}), false, true)
- if err != nil {
- t.Errorf("Couldn't get validatorWrapper with error: %s", err)
- }
- copyValWrap, err := copy.ValidatorWrapper(common.BytesToAddress([]byte{i}), false, true)
- if err != nil {
- t.Errorf("Couldn't get validatorWrapper with error: %s", err)
- }
- ccopyValWrap, err := ccopy.ValidatorWrapper(common.BytesToAddress([]byte{i}), false, true)
- if err != nil {
- t.Errorf("Couldn't get validatorWrapper with error: %s", err)
- }
-
- origValWrap.LastEpochInCommittee.SetInt64(1)
- copyValWrap.LastEpochInCommittee.SetInt64(2)
- ccopyValWrap.LastEpochInCommittee.SetInt64(3)
-
- origValWrap.MinSelfDelegation.Mul(big.NewInt(1e18), big.NewInt(10000))
- copyValWrap.MinSelfDelegation.Mul(big.NewInt(1e18), big.NewInt(20000))
- ccopyValWrap.MinSelfDelegation.Mul(big.NewInt(1e18), big.NewInt(30000))
-
- origValWrap.MaxTotalDelegation.Mul(big.NewInt(1e18), big.NewInt(10000))
- copyValWrap.MaxTotalDelegation.Mul(big.NewInt(1e18), big.NewInt(20000))
- ccopyValWrap.MaxTotalDelegation.Mul(big.NewInt(1e18), big.NewInt(30000))
-
- origValWrap.CreationHeight.SetInt64(1)
- copyValWrap.CreationHeight.SetInt64(2)
- ccopyValWrap.CreationHeight.SetInt64(3)
-
- origValWrap.UpdateHeight.SetInt64(1)
- copyValWrap.UpdateHeight.SetInt64(2)
- ccopyValWrap.UpdateHeight.SetInt64(3)
-
- origValWrap.Description.Name = "UpdatedOriginal" + string(i)
- copyValWrap.Description.Name = "UpdatedCopy" + string(i)
- ccopyValWrap.Description.Name = "UpdatedCCopy" + string(i)
-
- origValWrap.Delegations[0].Amount.SetInt64(1)
- copyValWrap.Delegations[0].Amount.SetInt64(2)
- ccopyValWrap.Delegations[0].Amount.SetInt64(3)
-
- origValWrap.Delegations[0].Reward.SetInt64(1)
- copyValWrap.Delegations[0].Reward.SetInt64(2)
- ccopyValWrap.Delegations[0].Reward.SetInt64(3)
-
- origValWrap.Delegations[0].Undelegations[0].Amount.SetInt64(1)
- copyValWrap.Delegations[0].Undelegations[0].Amount.SetInt64(2)
- ccopyValWrap.Delegations[0].Undelegations[0].Amount.SetInt64(3)
-
- origValWrap.Delegations[0].Undelegations[0].Epoch.SetInt64(1)
- copyValWrap.Delegations[0].Undelegations[0].Epoch.SetInt64(2)
- ccopyValWrap.Delegations[0].Undelegations[0].Epoch.SetInt64(3)
-
- origValWrap.Counters.NumBlocksToSign.SetInt64(1)
- copyValWrap.Counters.NumBlocksToSign.SetInt64(2)
- ccopyValWrap.Counters.NumBlocksToSign.SetInt64(3)
-
- origValWrap.Counters.NumBlocksSigned.SetInt64(1)
- copyValWrap.Counters.NumBlocksSigned.SetInt64(2)
- ccopyValWrap.Counters.NumBlocksSigned.SetInt64(3)
-
- origValWrap.BlockReward.SetInt64(1)
- copyValWrap.BlockReward.SetInt64(2)
- ccopyValWrap.BlockReward.SetInt64(3)
-
- err = orig.UpdateValidatorWrapper(common.BytesToAddress([]byte{i}), origValWrap)
- if err != nil {
- t.Errorf("Couldn't update ValidatorWrapper %d with error %s", i, err)
- }
- err = copy.UpdateValidatorWrapper(common.BytesToAddress([]byte{i}), copyValWrap)
- if err != nil {
- t.Errorf("Couldn't update ValidatorWrapper %d with error %s", i, err)
- }
- err = ccopy.UpdateValidatorWrapper(common.BytesToAddress([]byte{i}), ccopyValWrap)
- if err != nil {
- t.Errorf("Couldn't update ValidatorWrapper %d with error %s", i, err)
- }
-
}
// Finalise the changes on all concurrently
@@ -303,149 +207,6 @@ func TestCopy(t *testing.T) {
if want := big.NewInt(5 * int64(i)); ccopyObj.Balance().Cmp(want) != 0 {
t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, ccopyObj.Balance(), want)
}
-
- origValWrap, err := orig.ValidatorWrapper(common.BytesToAddress([]byte{i}), true, false)
- if err != nil {
- t.Errorf("Couldn't get validatorWrapper %d with error: %s", i, err)
- }
- copyValWrap, err := copy.ValidatorWrapper(common.BytesToAddress([]byte{i}), true, false)
- if err != nil {
- t.Errorf("Couldn't get validatorWrapper %d with error: %s", i, err)
- }
- ccopyValWrap, err := ccopy.ValidatorWrapper(common.BytesToAddress([]byte{i}), true, false)
- if err != nil {
- t.Errorf("Couldn't get validatorWrapper %d with error: %s", i, err)
- }
-
- if origValWrap.LastEpochInCommittee.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("LastEpochInCommittee %d: balance mismatch: have %v, want %v", i, origValWrap.LastEpochInCommittee, big.NewInt(1))
- }
- if copyValWrap.LastEpochInCommittee.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("LastEpochInCommittee %d: balance mismatch: have %v, want %v", i, copyValWrap.LastEpochInCommittee, big.NewInt(2))
- }
- if ccopyValWrap.LastEpochInCommittee.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("LastEpochInCommittee %d: balance mismatch: have %v, want %v", i, ccopyValWrap.LastEpochInCommittee, big.NewInt(3))
- }
-
- if want := new(big.Int).Mul(big.NewInt(1e18), big.NewInt(10000)); origValWrap.MinSelfDelegation.Cmp(want) != 0 {
- t.Errorf("MinSelfDelegation %d: balance mismatch: have %v, want %v", i, origValWrap.MinSelfDelegation, want)
- }
- if want := new(big.Int).Mul(big.NewInt(1e18), big.NewInt(20000)); copyValWrap.MinSelfDelegation.Cmp(want) != 0 {
- t.Errorf("MinSelfDelegation %d: balance mismatch: have %v, want %v", i, copyValWrap.MinSelfDelegation, want)
- }
- if want := new(big.Int).Mul(big.NewInt(1e18), big.NewInt(30000)); ccopyValWrap.MinSelfDelegation.Cmp(want) != 0 {
- t.Errorf("MinSelfDelegation %d: balance mismatch: have %v, want %v", i, ccopyValWrap.MinSelfDelegation, want)
- }
-
- if want := new(big.Int).Mul(big.NewInt(1e18), big.NewInt(10000)); origValWrap.MaxTotalDelegation.Cmp(want) != 0 {
- t.Errorf("MaxTotalDelegation %d: balance mismatch: have %v, want %v", i, origValWrap.MaxTotalDelegation, want)
- }
- if want := new(big.Int).Mul(big.NewInt(1e18), big.NewInt(20000)); copyValWrap.MaxTotalDelegation.Cmp(want) != 0 {
- t.Errorf("MaxTotalDelegation %d: balance mismatch: have %v, want %v", i, copyValWrap.MaxTotalDelegation, want)
- }
- if want := new(big.Int).Mul(big.NewInt(1e18), big.NewInt(30000)); ccopyValWrap.MaxTotalDelegation.Cmp(want) != 0 {
- t.Errorf("MaxTotalDelegation %d: balance mismatch: have %v, want %v", i, ccopyValWrap.MaxTotalDelegation, want)
- }
-
- if origValWrap.CreationHeight.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("CreationHeight %d: balance mismatch: have %v, want %v", i, origValWrap.CreationHeight, big.NewInt(1))
- }
- if copyValWrap.CreationHeight.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("CreationHeight %d: balance mismatch: have %v, want %v", i, copyValWrap.CreationHeight, big.NewInt(2))
- }
- if ccopyValWrap.CreationHeight.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("CreationHeight %d: balance mismatch: have %v, want %v", i, ccopyValWrap.CreationHeight, big.NewInt(3))
- }
-
- if origValWrap.UpdateHeight.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("UpdateHeight %d: balance mismatch: have %v, want %v", i, origValWrap.UpdateHeight, big.NewInt(1))
- }
- if copyValWrap.UpdateHeight.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("UpdateHeight %d: balance mismatch: have %v, want %v", i, copyValWrap.UpdateHeight, big.NewInt(2))
- }
- if ccopyValWrap.UpdateHeight.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("UpdateHeight %d: balance mismatch: have %v, want %v", i, ccopyValWrap.UpdateHeight, big.NewInt(3))
- }
-
- if want := "UpdatedOriginal" + string(i); origValWrap.Description.Name != want {
- t.Errorf("originalValWrap %d: Incorrect Name: have %s, want %s", i, origValWrap.Description.Name, want)
- }
- if want := "UpdatedCopy" + string(i); copyValWrap.Description.Name != want {
- t.Errorf("originalValWrap %d: Incorrect Name: have %s, want %s", i, copyValWrap.Description.Name, want)
- }
- if want := "UpdatedCCopy" + string(i); ccopyValWrap.Description.Name != want {
- t.Errorf("originalValWrap %d: Incorrect Name: have %s, want %s", i, ccopyValWrap.Description.Name, want)
- }
-
- if origValWrap.Delegations[0].Amount.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("Delegations[0].Amount %d: balance mismatch: have %v, want %v", i, origValWrap.Delegations[0].Amount, big.NewInt(1))
- }
- if copyValWrap.Delegations[0].Amount.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("Delegations[0].Amount %d: balance mismatch: have %v, want %v", i, copyValWrap.Delegations[0].Amount, big.NewInt(2))
- }
- if ccopyValWrap.Delegations[0].Amount.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("Delegations[0].Amount %d: balance mismatch: have %v, want %v", i, ccopyValWrap.Delegations[0].Amount, big.NewInt(3))
- }
-
- if origValWrap.Delegations[0].Reward.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("Delegations[0].Reward %d: balance mismatch: have %v, want %v", i, origValWrap.Delegations[0].Reward, big.NewInt(1))
- }
- if copyValWrap.Delegations[0].Reward.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("Delegations[0].Reward %d: balance mismatch: have %v, want %v", i, copyValWrap.Delegations[0].Reward, big.NewInt(2))
- }
- if ccopyValWrap.Delegations[0].Reward.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("Delegations[0].Reward %d: balance mismatch: have %v, want %v", i, ccopyValWrap.Delegations[0].Reward, big.NewInt(3))
- }
-
- if origValWrap.Delegations[0].Undelegations[0].Amount.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("Delegations[0].Undelegations[0].Amount %d: balance mismatch: have %v, want %v", i, origValWrap.Delegations[0].Undelegations[0].Amount, big.NewInt(1))
- }
- if copyValWrap.Delegations[0].Undelegations[0].Amount.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("Delegations[0].Undelegations[0].Amount %d: balance mismatch: have %v, want %v", i, copyValWrap.Delegations[0].Undelegations[0].Amount, big.NewInt(2))
- }
- if ccopyValWrap.Delegations[0].Undelegations[0].Amount.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("Delegations[0].Undelegations[0].Amount %d: balance mismatch: have %v, want %v", i, ccopyValWrap.Delegations[0].Undelegations[0].Amount, big.NewInt(3))
- }
-
- if origValWrap.Delegations[0].Undelegations[0].Epoch.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("CreationHeight %d: balance mismatch: have %v, want %v", i, origValWrap.Delegations[0].Undelegations[0].Epoch, big.NewInt(1))
- }
- if copyValWrap.Delegations[0].Undelegations[0].Epoch.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("CreationHeight %d: balance mismatch: have %v, want %v", i, copyValWrap.Delegations[0].Undelegations[0].Epoch, big.NewInt(2))
- }
- if ccopyValWrap.Delegations[0].Undelegations[0].Epoch.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("CreationHeight %d: balance mismatch: have %v, want %v", i, ccopyValWrap.Delegations[0].Undelegations[0].Epoch, big.NewInt(3))
- }
-
- if origValWrap.Counters.NumBlocksToSign.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("Counters.NumBlocksToSign %d: balance mismatch: have %v, want %v", i, origValWrap.Counters.NumBlocksToSign, big.NewInt(1))
- }
- if copyValWrap.Counters.NumBlocksToSign.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("Counters.NumBlocksToSign %d: balance mismatch: have %v, want %v", i, copyValWrap.Counters.NumBlocksToSign, big.NewInt(2))
- }
- if ccopyValWrap.Counters.NumBlocksToSign.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("Counters.NumBlocksToSign %d: balance mismatch: have %v, want %v", i, ccopyValWrap.Counters.NumBlocksToSign, big.NewInt(3))
- }
-
- if origValWrap.Counters.NumBlocksSigned.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("Counters.NumBlocksSigned %d: balance mismatch: have %v, want %v", i, origValWrap.Counters.NumBlocksSigned, big.NewInt(1))
- }
- if copyValWrap.Counters.NumBlocksSigned.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("Counters.NumBlocksSigned %d: balance mismatch: have %v, want %v", i, copyValWrap.Counters.NumBlocksSigned, big.NewInt(2))
- }
- if ccopyValWrap.Counters.NumBlocksSigned.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("Counters.NumBlocksSigned %d: balance mismatch: have %v, want %v", i, ccopyValWrap.Counters.NumBlocksSigned, big.NewInt(3))
- }
-
- if origValWrap.BlockReward.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("Block Reward %d: balance mismatch: have %v, want %v", i, origValWrap.BlockReward, big.NewInt(1))
- }
- if copyValWrap.BlockReward.Cmp(big.NewInt(2)) != 0 {
- t.Errorf("Block Reward %d: balance mismatch: have %v, want %v", i, copyValWrap.BlockReward, big.NewInt(2))
- }
- if ccopyValWrap.BlockReward.Cmp(big.NewInt(3)) != 0 {
- t.Errorf("Block Reward %d: balance mismatch: have %v, want %v", i, ccopyValWrap.BlockReward, big.NewInt(3))
- }
}
}
@@ -460,7 +221,7 @@ func TestSnapshotRandom(t *testing.T) {
}
}
-// A snapshotTest checks that reverting StateDB snapshots properly undoes all changes
+// A snapshotTest checks that reverting DB snapshots properly undoes all changes
// captured by the snapshot. Instances of this test with pseudorandom content are created
// by Generate.
//
@@ -525,7 +286,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
code := make([]byte, 16)
binary.BigEndian.PutUint64(code, uint64(a.args[0]))
binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
- s.SetCode(addr, code)
+ s.SetCode(addr, code, false)
},
args: make([]int64, 2),
},
@@ -567,6 +328,30 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
},
args: make([]int64, 1),
},
+ {
+ name: "AddAddressToAccessList",
+ fn: func(a testAction, s *DB) {
+ s.AddAddressToAccessList(addr)
+ },
+ },
+ {
+ name: "AddSlotToAccessList",
+ fn: func(a testAction, s *DB) {
+ s.AddSlotToAccessList(addr,
+ common.Hash{byte(a.args[0])})
+ },
+ args: make([]int64, 1),
+ },
+ {
+ name: "SetTransientState",
+ fn: func(a testAction, s *DB) {
+ var key, val common.Hash
+ binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
+ binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
+ s.SetTransientState(addr, key, val)
+ },
+ args: make([]int64, 2),
+ },
}
action := actions[r.Intn(len(actions))]
var nameargs []string
@@ -624,7 +409,7 @@ func (test *snapshotTest) String() string {
func (test *snapshotTest) run() bool {
// Run all actions and create snapshots.
var (
- state, _ = New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ state, _ = New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
snapshotRevs = make([]int, len(test.snapshots))
sindex = 0
)
@@ -638,7 +423,7 @@ func (test *snapshotTest) run() bool {
// Revert all snapshots in reverse order. Each revert must yield a state
// that is equivalent to fresh state with all actions up the snapshot applied.
for sindex--; sindex >= 0; sindex-- {
- checkstate, _ := New(common.Hash{}, state.Database())
+ checkstate, _ := New(common.Hash{}, state.Database(), nil)
for _, action := range test.actions[:test.snapshots[sindex]] {
action.fn(action, checkstate)
}
@@ -667,9 +452,9 @@ func (test *snapshotTest) checkEqual(state, checkstate *DB) error {
checkeq("HasSuicided", state.HasSuicided(addr), checkstate.HasSuicided(addr))
checkeq("GetBalance", state.GetBalance(addr), checkstate.GetBalance(addr))
checkeq("GetNonce", state.GetNonce(addr), checkstate.GetNonce(addr))
- checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr))
+ checkeq("GetCode", state.GetCode(addr, false), checkstate.GetCode(addr, false))
checkeq("GetCodeHash", state.GetCodeHash(addr), checkstate.GetCodeHash(addr))
- checkeq("GetCodeSize", state.GetCodeSize(addr), checkstate.GetCodeSize(addr))
+ checkeq("GetCodeSize", state.GetCodeSize(addr, false), checkstate.GetCodeSize(addr, false))
// Check storage.
if obj := state.getStateObject(addr); obj != nil {
state.ForEachStorage(addr, func(key, value common.Hash) bool {
@@ -688,9 +473,9 @@ func (test *snapshotTest) checkEqual(state, checkstate *DB) error {
return fmt.Errorf("got GetRefund() == %d, want GetRefund() == %d",
state.GetRefund(), checkstate.GetRefund())
}
- if !reflect.DeepEqual(state.GetLogs(common.Hash{}), checkstate.GetLogs(common.Hash{})) {
+ if !reflect.DeepEqual(state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{})) {
return fmt.Errorf("got GetLogs(common.Hash{}) == %v, want GetLogs(common.Hash{}) == %v",
- state.GetLogs(common.Hash{}), checkstate.GetLogs(common.Hash{}))
+ state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{}))
}
return nil
}
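
The checkEqual hunk above tracks an API change: GetLogs now takes the transaction hash plus the block number and block hash, so log records can be stamped with their block context at retrieval time. A minimal sketch of the post-change call shape (package paths follow the imports used elsewhere in this patch; the zero-value arguments are placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/rawdb"
	"github.com/harmony-one/harmony/core/state"
)

func main() {
	// Fresh in-memory state DB, constructed the same way the tests do.
	db, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)

	// Old shape: db.GetLogs(txHash). New shape: the caller supplies the block
	// number and block hash alongside the transaction hash.
	logs := db.GetLogs(common.Hash{}, 0, common.Hash{})
	fmt.Println("logs found:", len(logs))
}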
@@ -699,7 +484,7 @@ func TestTouchDelete(t *testing.T) {
s := newStateTest()
s.state.GetOrNewStateObject(common.Address{})
root, _ := s.state.Commit(false)
- s.state.Reset(root)
+ s.state, _ = New(root, s.state.db, s.state.snaps)
snapshot := s.state.Snapshot()
s.state.AddBalance(common.Address{}, new(big.Int))
@@ -716,7 +501,7 @@ func TestTouchDelete(t *testing.T) {
// TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
// See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
func TestCopyOfCopy(t *testing.T) {
- state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
addr := common.HexToAddress("aaaa")
state.SetBalance(addr, big.NewInt(42))
@@ -733,21 +518,21 @@ func TestCopyOfCopy(t *testing.T) {
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCommitCopy(t *testing.T) {
- state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
skey := common.HexToHash("aaa")
sval := common.HexToHash("bbb")
- state.SetBalance(addr, big.NewInt(42)) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
- state.SetState(addr, skey, sval) // Change the storage trie
+ state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+ state.SetCode(addr, []byte("hello"), false) // Change external metadata
+ state.SetState(addr, skey, sval) // Change the storage trie
if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
}
- if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := state.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := state.GetState(addr, skey); val != sval {
@@ -761,7 +546,7 @@ func TestCopyCommitCopy(t *testing.T) {
if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("first copy pre-commit balance mismatch: have %v, want %v", balance, 42)
}
- if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := copyOne.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("first copy pre-commit code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := copyOne.GetState(addr, skey); val != sval {
@@ -775,7 +560,7 @@ func TestCopyCommitCopy(t *testing.T) {
if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42)
}
- if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := copyOne.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := copyOne.GetState(addr, skey); val != sval {
@@ -789,7 +574,7 @@ func TestCopyCommitCopy(t *testing.T) {
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("second copy balance mismatch: have %v, want %v", balance, 42)
}
- if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := copyTwo.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("second copy code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := copyTwo.GetState(addr, skey); val != sval {
@@ -805,21 +590,21 @@ func TestCopyCommitCopy(t *testing.T) {
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCopyCommitCopy(t *testing.T) {
- state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
skey := common.HexToHash("aaa")
sval := common.HexToHash("bbb")
- state.SetBalance(addr, big.NewInt(42)) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
- state.SetState(addr, skey, sval) // Change the storage trie
+ state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+ state.SetCode(addr, []byte("hello"), false) // Change external metadata
+ state.SetState(addr, skey, sval) // Change the storage trie
if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
}
- if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := state.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := state.GetState(addr, skey); val != sval {
@@ -833,7 +618,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("first copy balance mismatch: have %v, want %v", balance, 42)
}
- if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := copyOne.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("first copy code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := copyOne.GetState(addr, skey); val != sval {
@@ -847,7 +632,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("second copy pre-commit balance mismatch: have %v, want %v", balance, 42)
}
- if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := copyTwo.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("second copy pre-commit code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := copyTwo.GetState(addr, skey); val != sval {
@@ -860,7 +645,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42)
}
- if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := copyTwo.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := copyTwo.GetState(addr, skey); val != sval {
@@ -874,7 +659,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("third copy balance mismatch: have %v, want %v", balance, 42)
}
- if code := copyThree.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ if code := copyThree.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
t.Fatalf("third copy code mismatch: have %x, want %x", code, []byte("hello"))
}
if val := copyThree.GetState(addr, skey); val != sval {
@@ -886,22 +671,22 @@ func TestCopyCopyCommitCopy(t *testing.T) {
}
// TestDeleteCreateRevert tests a weird state transition corner case that we hit
-// while changing the internals of statedb. The workflow is that a contract is
-// self destructed, then in a followup transaction (but same block) it's created
+// while changing the internals of DB. The workflow is that a contract is
+// self-destructed, then in a follow-up transaction (but same block) it's created
// again and the transaction reverted.
//
-// The original statedb implementation flushed dirty objects to the tries after
+// The original DB implementation flushed dirty objects to the tries after
// each transaction, so this works ok. The rework accumulated writes in memory
// first, but the journal wiped the entire state object on create-revert.
func TestDeleteCreateRevert(t *testing.T) {
// Create an initial state with a single contract
- state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, big.NewInt(1))
root, _ := state.Commit(false)
- state.Reset(root)
+ state, _ = New(root, state.db, state.snaps)
// Simulate self-destructing in one transaction, then create-reverting in another
state.Suicide(addr)
@@ -913,315 +698,303 @@ func TestDeleteCreateRevert(t *testing.T) {
// Commit the entire state and make sure we don't crash and have the correct state
root, _ = state.Commit(true)
- state.Reset(root)
+ state, _ = New(root, state.db, state.snaps)
if state.getStateObject(addr) != nil {
t.Fatalf("self-destructed contract came alive")
}
}
-func makeValidValidatorWrapper(addr common.Address) stk.ValidatorWrapper {
- cr := stk.CommissionRates{
- Rate: numeric.ZeroDec(),
- MaxRate: numeric.ZeroDec(),
- MaxChangeRate: numeric.ZeroDec(),
- }
- c := stk.Commission{CommissionRates: cr, UpdateHeight: big.NewInt(300)}
- d := stk.Description{
- Name: "Wayne",
- Identity: "wen",
- Website: "harmony.one.wen",
- Details: "best",
- }
-
- v := stk.Validator{
- Address: addr,
- SlotPubKeys: []bls.SerializedPublicKey{makeBLSPubSigPair().pub},
- LastEpochInCommittee: big.NewInt(20),
- MinSelfDelegation: new(big.Int).Mul(big.NewInt(10000), big.NewInt(1e18)),
- MaxTotalDelegation: new(big.Int).Mul(big.NewInt(12000), big.NewInt(1e18)),
- Commission: c,
- Description: d,
- CreationHeight: big.NewInt(12306),
- }
- ds := stk.Delegations{
- stk.Delegation{
- DelegatorAddress: v.Address,
- Amount: big.NewInt(0),
- Reward: big.NewInt(0),
- Undelegations: stk.Undelegations{
- stk.Undelegation{
- Amount: big.NewInt(0),
- Epoch: big.NewInt(0),
- },
- },
- },
+// TestMissingTrieNodes tests that if the DB fails to load parts of the trie,
+// the Commit operation fails with an error.
+// If we are missing trie nodes, we should not continue writing to the trie.
+func TestMissingTrieNodes(t *testing.T) {
+ // Create an initial state with a few accounts
+ memDb := rawdb.NewMemoryDatabase()
+ db := NewDatabase(memDb)
+ var root common.Hash
+ state, _ := New(common.Hash{}, db, nil)
+ addr := common.BytesToAddress([]byte("so"))
+ {
+ state.SetBalance(addr, big.NewInt(1))
+ state.SetCode(addr, []byte{1, 2, 3}, false)
+ a2 := common.BytesToAddress([]byte("another"))
+ state.SetBalance(a2, big.NewInt(100))
+ state.SetCode(a2, []byte{1, 2, 4}, false)
+ root, _ = state.Commit(false)
+ t.Logf("root: %x", root)
+ // force-flush
+ state.Database().TrieDB().Cap(0)
+ }
+ // Create a new state on the old root
+ state, _ = New(root, db, nil)
+ // Now we clear out the memdb
+ it := memDb.NewIterator(nil, nil)
+ for it.Next() {
+ k := it.Key()
+ // Leave the root intact
+ if !bytes.Equal(k, root[:]) {
+ t.Logf("key: %x", k)
+ memDb.Delete(k)
+ }
}
- br := big.NewInt(1)
-
- w := stk.ValidatorWrapper{
- Validator: v,
- Delegations: ds,
- BlockReward: br,
+ balance := state.GetBalance(addr)
+ // The removed element should lead to a zero balance being returned
+ if exp, got := uint64(0), balance.Uint64(); got != exp {
+ t.Errorf("expected %d, got %d", exp, got)
+ }
+ // Modify the state
+ state.SetBalance(addr, big.NewInt(2))
+ root, err := state.Commit(false)
+ if err == nil {
+ t.Fatalf("expected error, got root :%x", root)
}
- w.Counters.NumBlocksSigned = big.NewInt(0)
- w.Counters.NumBlocksToSign = big.NewInt(0)
- return w
}
-type blsPubSigPair struct {
- pub bls.SerializedPublicKey
- sig bls.SerializedSignature
-}
+func TestStateDBAccessList(t *testing.T) {
+ // Some helpers
+ addr := func(a string) common.Address {
+ return common.HexToAddress(a)
+ }
+ slot := func(a string) common.Hash {
+ return common.HexToHash(a)
+ }
+
+ memDb := rawdb.NewMemoryDatabase()
+ db := NewDatabase(memDb)
+ state, _ := New(common.Hash{}, db, nil)
+ state.accessList = newAccessList()
+
+ verifyAddrs := func(astrings ...string) {
+ t.Helper()
+ // convert to common.Address form
+ var addresses []common.Address
+ var addressMap = make(map[common.Address]struct{})
+ for _, astring := range astrings {
+ address := addr(astring)
+ addresses = append(addresses, address)
+ addressMap[address] = struct{}{}
+ }
+ // Check that the given addresses are in the access list
+ for _, address := range addresses {
+ if !state.AddressInAccessList(address) {
+ t.Fatalf("expected %x to be in access list", address)
+ }
+ }
+ // Check that only the expected addresses are present in the access list
+ for address := range state.accessList.addresses {
+ if _, exist := addressMap[address]; !exist {
+ t.Fatalf("extra address %x in access list", address)
+ }
+ }
+ }
+ verifySlots := func(addrString string, slotStrings ...string) {
+ if !state.AddressInAccessList(addr(addrString)) {
+ t.Fatalf("scope missing address/slots %v", addrString)
+ }
+ var address = addr(addrString)
+ // convert to common.Hash form
+ var slots []common.Hash
+ var slotMap = make(map[common.Hash]struct{})
+ for _, slotString := range slotStrings {
+ s := slot(slotString)
+ slots = append(slots, s)
+ slotMap[s] = struct{}{}
+ }
+ // Check that the expected items are in the access list
+ for i, s := range slots {
+ if _, slotPresent := state.SlotInAccessList(address, s); !slotPresent {
+ t.Fatalf("input %d: scope missing slot %v (address %v)", i, s, addrString)
+ }
+ }
+ // Check that no extra elements are in the access list
+ index := state.accessList.addresses[address]
+ if index >= 0 {
+ stateSlots := state.accessList.slots[index]
+ for s := range stateSlots {
+ if _, slotPresent := slotMap[s]; !slotPresent {
+ t.Fatalf("scope has extra slot %v (address %v)", s, addrString)
+ }
+ }
+ }
+ }
-func makeBLSPubSigPair() blsPubSigPair {
- blsPriv := bls.RandPrivateKey()
- blsPub := blsPriv.GetPublicKey()
- msgHash := hash.Keccak256([]byte(stk.BLSVerificationStr))
- sig := blsPriv.SignHash(msgHash)
+ state.AddAddressToAccessList(addr("aa")) // 1
+ state.AddSlotToAccessList(addr("bb"), slot("01")) // 2,3
+ state.AddSlotToAccessList(addr("bb"), slot("02")) // 4
+ verifyAddrs("aa", "bb")
+ verifySlots("bb", "01", "02")
- var shardPub bls.SerializedPublicKey
- copy(shardPub[:], blsPub.Serialize())
+ // Make a copy
+ stateCopy1 := state.Copy()
+ if exp, got := 4, state.journal.length(); exp != got {
+ t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
+ }
- var shardSig bls.SerializedSignature
- copy(shardSig[:], sig.Serialize())
+ // Adding the same entries again should cause no new journal entries
+ state.AddSlotToAccessList(addr("bb"), slot("01"))
+ state.AddSlotToAccessList(addr("bb"), slot("02"))
+ state.AddAddressToAccessList(addr("aa"))
+ if exp, got := 4, state.journal.length(); exp != got {
+ t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
+ }
+ // some new ones
+ state.AddSlotToAccessList(addr("bb"), slot("03")) // 5
+ state.AddSlotToAccessList(addr("aa"), slot("01")) // 6
+ state.AddSlotToAccessList(addr("cc"), slot("01")) // 7,8
+ state.AddAddressToAccessList(addr("cc"))
+ if exp, got := 8, state.journal.length(); exp != got {
+ t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
+ }
- return blsPubSigPair{shardPub, shardSig}
-}
+ verifyAddrs("aa", "bb", "cc")
+ verifySlots("aa", "01")
+ verifySlots("bb", "01", "02", "03")
+ verifySlots("cc", "01")
-func updateAndCheckValidator(t *testing.T, state *DB, wrapper stk.ValidatorWrapper) {
- // do not modify / link the original into the state object
- copied := staketest.CopyValidatorWrapper(wrapper)
- if err := state.UpdateValidatorWrapperWithRevert(copied.Address, &copied); err != nil {
- t.Fatalf("Could not update wrapper with revert %v\n", err)
+ // now start rolling back changes
+ state.journal.revert(state, 7)
+ if _, ok := state.SlotInAccessList(addr("cc"), slot("01")); ok {
+ t.Fatalf("slot present, expected missing")
}
+ verifyAddrs("aa", "bb", "cc")
+ verifySlots("aa", "01")
+ verifySlots("bb", "01", "02", "03")
- // load a copy here to be safe
- loadedWrapper, err := state.ValidatorWrapper(copied.Address, false, true)
- if err != nil {
- t.Fatalf("Could not load wrapper %v\n", err)
+ state.journal.revert(state, 6)
+ if state.AddressInAccessList(addr("cc")) {
+ t.Fatalf("addr present, expected missing")
}
+ verifyAddrs("aa", "bb")
+ verifySlots("aa", "01")
+ verifySlots("bb", "01", "02", "03")
- if err := staketest.CheckValidatorWrapperEqual(wrapper, *loadedWrapper); err != nil {
- t.Fatalf("Wrappers are unequal %v\n", err)
+ state.journal.revert(state, 5)
+ if _, ok := state.SlotInAccessList(addr("aa"), slot("01")); ok {
+ t.Fatalf("slot present, expected missing")
}
-}
+ verifyAddrs("aa", "bb")
+ verifySlots("bb", "01", "02", "03")
-func verifyValidatorWrapperRevert(
- t *testing.T,
- state *DB,
- snapshot int,
- wrapperAddress common.Address, // if expectedWrapper is nil, this is needed
- allowErrAddressNotPresent bool,
- expectedWrapper *stk.ValidatorWrapper,
- stateToCompare *DB,
- modifiedAddresses []common.Address,
-) {
- state.RevertToSnapshot(snapshot)
- loadedWrapper, err := state.ValidatorWrapper(wrapperAddress, true, false)
- if err != nil && !(err == ErrAddressNotPresent && allowErrAddressNotPresent) {
- t.Fatalf("Could not load wrapper %v\n", err)
- }
- if expectedWrapper != nil {
- if err := staketest.CheckValidatorWrapperEqual(*expectedWrapper, *loadedWrapper); err != nil {
- fmt.Printf("ExpectWrapper: %v\n", expectedWrapper)
- fmt.Printf("LoadedWrapper: %v\n", loadedWrapper)
- fmt.Printf("ExpectCounters: %v\n", expectedWrapper.Counters)
- fmt.Printf("LoadedCounters: %v\n", loadedWrapper.Counters)
- t.Fatalf("Loaded wrapper not equal to expected wrapper after revert %v\n", err)
- }
- } else if loadedWrapper != nil {
- t.Fatalf("Expected wrapper is nil but got loaded wrapper %v\n", loadedWrapper)
+ state.journal.revert(state, 4)
+ if _, ok := state.SlotInAccessList(addr("bb"), slot("03")); ok {
+ t.Fatalf("slot present, expected missing")
}
+ verifyAddrs("aa", "bb")
+ verifySlots("bb", "01", "02")
- st := &snapshotTest{addrs: modifiedAddresses}
- if err := st.checkEqual(state, stateToCompare); err != nil {
- t.Fatalf("State not as expected after revert %v\n", err)
+ state.journal.revert(state, 3)
+ if _, ok := state.SlotInAccessList(addr("bb"), slot("02")); ok {
+ t.Fatalf("slot present, expected missing")
}
-}
+ verifyAddrs("aa", "bb")
+ verifySlots("bb", "01")
-func TestValidatorCreationRevert(t *testing.T) {
- state, err := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
- emptyState := state.Copy()
- if err != nil {
- t.Fatalf("Could not instantiate state %v\n", err)
+ state.journal.revert(state, 2)
+ if _, ok := state.SlotInAccessList(addr("bb"), slot("01")); ok {
+ t.Fatalf("slot present, expected missing")
}
- snapshot := state.Snapshot()
- key, err := crypto.GenerateKey()
- if err != nil {
- t.Fatalf("Could not generate key %v\n", err)
- }
- wrapper := makeValidValidatorWrapper(crypto.PubkeyToAddress(key.PublicKey))
- // step 1 is adding the validator, and checking that is it successfully added
- updateAndCheckValidator(t, state, wrapper)
- // step 2 is the revert check, the meat of the test
- verifyValidatorWrapperRevert(t,
- state,
- snapshot,
- wrapper.Address,
- true,
- nil,
- emptyState,
- []common.Address{wrapper.Address},
- )
-}
+ verifyAddrs("aa", "bb")
-func TestValidatorAddDelegationRevert(t *testing.T) {
- state, err := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
- if err != nil {
- t.Fatalf("Could not instantiate state %v\n", err)
- }
- key, err := crypto.GenerateKey()
- if err != nil {
- t.Fatalf("Could not generate key %v\n", err)
+ state.journal.revert(state, 1)
+ if state.AddressInAccessList(addr("bb")) {
+ t.Fatalf("addr present, expected missing")
}
- delegatorKey, err := crypto.GenerateKey()
- if err != nil {
- t.Fatalf("Could not generate key %v\n", err)
- }
- wrapper := makeValidValidatorWrapper(crypto.PubkeyToAddress(key.PublicKey))
- // always, step 1 is adding the validator, and checking that is it successfully added
- updateAndCheckValidator(t, state, wrapper)
- wrapperWithoutDelegation := staketest.CopyValidatorWrapper(wrapper) // for comparison later
- stateWithoutDelegation := state.Copy()
- // we will revert to the state without the delegation
- snapshot := state.Snapshot()
- // which is added here
- wrapper.Delegations = append(wrapper.Delegations, stk.NewDelegation(
- crypto.PubkeyToAddress(delegatorKey.PublicKey),
- new(big.Int).Mul(big.NewInt(denominations.One), big.NewInt(100))),
- )
- // again, add and make sure added == sent
- updateAndCheckValidator(t, state, wrapper)
- // now the meat of the test
- verifyValidatorWrapperRevert(t,
- state,
- snapshot,
- wrapper.Address,
- false,
- &wrapperWithoutDelegation,
- stateWithoutDelegation,
- []common.Address{wrapper.Address, wrapper.Delegations[1].DelegatorAddress},
- )
-}
-
-type expectedRevertItem struct {
- snapshot int
- expectedWrapperAfterRevert *stk.ValidatorWrapper
- expectedStateAfterRevert *DB
- modifiedAddresses []common.Address
-}
+ verifyAddrs("aa")
-func makeExpectedRevertItem(state *DB,
- wrapper *stk.ValidatorWrapper,
- modifiedAddresses []common.Address,
-) expectedRevertItem {
- x := expectedRevertItem{
- snapshot: state.Snapshot(),
- expectedStateAfterRevert: state.Copy(),
- modifiedAddresses: modifiedAddresses,
+ state.journal.revert(state, 0)
+ if state.AddressInAccessList(addr("aa")) {
+ t.Fatalf("addr present, expected missing")
+ }
+ if got, exp := len(state.accessList.addresses), 0; got != exp {
+ t.Fatalf("expected empty, got %d", got)
+ }
+ if got, exp := len(state.accessList.slots), 0; got != exp {
+ t.Fatalf("expected empty, got %d", got)
}
- if wrapper != nil {
- copied := staketest.CopyValidatorWrapper(*wrapper)
- x.expectedWrapperAfterRevert = &copied
+ // Check that the copy made before the reverts still has its access list
+ state = stateCopy1
+ verifyAddrs("aa", "bb")
+ verifySlots("bb", "01", "02")
+ if got, exp := len(state.accessList.addresses), 2; got != exp {
+ t.Fatalf("expected empty, got %d", got)
+ }
+ if got, exp := len(state.accessList.slots), 1; got != exp {
+ t.Fatalf("expected empty, got %d", got)
}
- return x
}
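
The test above reverts the journal directly to exercise each entry; ordinary callers reach the same roll-back through Snapshot and RevertToSnapshot. A short sketch of that public path, under the same in-memory setup:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/rawdb"
	"github.com/harmony-one/harmony/core/state"
)

func main() {
	db, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)

	db.AddAddressToAccessList(common.HexToAddress("aa"))
	snap := db.Snapshot() // remember the journal position

	db.AddSlotToAccessList(common.HexToAddress("bb"), common.HexToHash("01"))
	db.RevertToSnapshot(snap) // undoes both the "bb" address and slot entries

	fmt.Println(db.AddressInAccessList(common.HexToAddress("bb"))) // false
	fmt.Println(db.AddressInAccessList(common.HexToAddress("aa"))) // true
}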
-func TestValidatorMultipleReverts(t *testing.T) {
- var expectedRevertItems []expectedRevertItem
-
- state, err := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
- if err != nil {
- t.Fatalf("Could not instantiate state %v\n", err)
+// Tests that account and storage tries are flushed in the correct order and that
+// no data loss occurs.
+func TestFlushOrderDataLoss(t *testing.T) {
+ // Create a state trie with many accounts and slots
+ var (
+ memdb = rawdb.NewMemoryDatabase()
+ statedb = NewDatabase(memdb)
+ state, _ = New(common.Hash{}, statedb, nil)
+ )
+ for a := byte(0); a < 10; a++ {
+ state.CreateAccount(common.Address{a})
+ for s := byte(0); s < 10; s++ {
+ state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
+ }
}
- key, err := crypto.GenerateKey()
+ root, err := state.Commit(false)
if err != nil {
- t.Fatalf("Could not generate key %v\n", err)
+ t.Fatalf("failed to commit state trie: %v", err)
}
- delegatorKey, err := crypto.GenerateKey()
+ statedb.TrieDB().Reference(root, common.Hash{})
+ if err := statedb.TrieDB().Cap(1024); err != nil {
+ t.Fatalf("failed to cap trie dirty cache: %v", err)
+ }
+ if err := statedb.TrieDB().Commit(root, false); err != nil {
+ t.Fatalf("failed to commit state trie: %v", err)
+ }
+ // Reopen the state trie from flushed disk and verify it
+ state, err = New(root, NewDatabase(memdb), nil)
if err != nil {
- t.Fatalf("Could not generate key %v\n", err)
- }
- validatorAddress := crypto.PubkeyToAddress(key.PublicKey)
- delegatorAddress := crypto.PubkeyToAddress(delegatorKey.PublicKey)
- modifiedAddresses := []common.Address{validatorAddress, delegatorAddress}
- // first we add a validator
- expectedRevertItems = append(expectedRevertItems,
- makeExpectedRevertItem(state, nil, modifiedAddresses))
- wrapper := makeValidValidatorWrapper(crypto.PubkeyToAddress(key.PublicKey))
- updateAndCheckValidator(t, state, wrapper)
- // then we add a delegation
- expectedRevertItems = append(expectedRevertItems,
- makeExpectedRevertItem(state, &wrapper, modifiedAddresses))
- wrapper.Delegations = append(wrapper.Delegations, stk.NewDelegation(
- crypto.PubkeyToAddress(delegatorKey.PublicKey),
- new(big.Int).Mul(big.NewInt(denominations.One), big.NewInt(100))),
- )
- updateAndCheckValidator(t, state, wrapper)
- // then we have it sign blocks
- wrapper.Counters.NumBlocksToSign.Add(
- wrapper.Counters.NumBlocksToSign, common.Big1,
- )
- wrapper.Counters.NumBlocksSigned.Add(
- wrapper.Counters.NumBlocksSigned, common.Big1,
- )
- updateAndCheckValidator(t, state, wrapper)
- // then modify the name and the block reward
- expectedRevertItems = append(expectedRevertItems,
- makeExpectedRevertItem(state, &wrapper, modifiedAddresses))
- wrapper.BlockReward.SetInt64(1)
- wrapper.Validator.Description.Name = "Name"
- for i := len(expectedRevertItems) - 1; i >= 0; i-- {
- item := expectedRevertItems[i]
- verifyValidatorWrapperRevert(t,
- state,
- item.snapshot,
- wrapper.Address,
- i == 0,
- item.expectedWrapperAfterRevert,
- item.expectedStateAfterRevert,
- []common.Address{wrapper.Address},
- )
+ t.Fatalf("failed to reopen state trie: %v", err)
+ }
+ for a := byte(0); a < 10; a++ {
+ for s := byte(0); s < 10; s++ {
+ if have := state.GetState(common.Address{a}, common.Hash{a, s}); have != (common.Hash{a, s}) {
+ t.Errorf("account %d: slot %d: state mismatch: have %x, want %x", a, s, have, common.Hash{a, s})
+ }
+ }
}
}
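
The ordering in TestFlushOrderDataLoss is the point of the test: the root must be referenced before Cap evicts dirty nodes, or parts of the account and storage tries could be dropped before they ever hit disk. Condensed into a helper (a sketch; flushRoot is not a function in this patch):

package stateutil

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/state"
)

// flushRoot pins, caps, and commits a state root in the order the test relies
// on: Reference first, so Cap cannot evict the root's subtree unwritten.
func flushRoot(db state.Database, root common.Hash) error {
	db.TrieDB().Reference(root, common.Hash{})
	if err := db.TrieDB().Cap(1024); err != nil {
		return err
	}
	return db.TrieDB().Commit(root, false)
}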
-func TestValidatorWrapperPanic(t *testing.T) {
- defer func() { recover() }()
+func TestStateDBTransientStorage(t *testing.T) {
+ memDb := rawdb.NewMemoryDatabase()
+ db := NewDatabase(memDb)
+ state, _ := New(common.Hash{}, db, nil)
- state, err := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
- if err != nil {
- t.Fatalf("Could not instantiate state %v\n", err)
+ key := common.Hash{0x01}
+ value := common.Hash{0x02}
+ addr := common.Address{}
+
+ state.SetTransientState(addr, key, value)
+ if exp, got := 1, state.journal.length(); exp != got {
+ t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
}
- key, err := crypto.GenerateKey()
- if err != nil {
- t.Fatalf("Could not generate key %v\n", err)
+ // the retrieved value should equal what was set
+ if got := state.GetTransientState(addr, key); got != value {
+ t.Fatalf("transient storage mismatch: have %x, want %x", got, value)
}
- validatorAddress := crypto.PubkeyToAddress(key.PublicKey)
- // will panic because we are asking for Original with copy of delegations
- _, _ = state.ValidatorWrapper(validatorAddress, true, true)
- t.Fatalf("Did not panic")
-}
-func TestValidatorWrapperGetCode(t *testing.T) {
- state, err := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
- if err != nil {
- t.Fatalf("Could not instantiate state %v\n", err)
+ // revert the transient state being set and then check that the
+ // value is now the empty hash
+ state.journal.revert(state, 0)
+ if got, exp := state.GetTransientState(addr, key), (common.Hash{}); exp != got {
+ t.Fatalf("transient storage mismatch: have %x, want %x", got, exp)
}
- key, err := crypto.GenerateKey()
- if err != nil {
- t.Fatalf("Could not generate key %v\n", err)
- }
- wrapper := makeValidValidatorWrapper(crypto.PubkeyToAddress(key.PublicKey))
- updateAndCheckValidator(t, state, wrapper)
- // delete it from the cache so we can force it to use GetCode
- delete(state.stateValidators, wrapper.Address)
- loadedWrapper, err := state.ValidatorWrapper(wrapper.Address, false, false)
- if err := staketest.CheckValidatorWrapperEqual(wrapper, *loadedWrapper); err != nil {
- fmt.Printf("ExpectWrapper: %v\n", wrapper)
- fmt.Printf("LoadedWrapper: %v\n", loadedWrapper)
- fmt.Printf("ExpectCounters: %v\n", wrapper.Counters)
- fmt.Printf("LoadedCounters: %v\n", loadedWrapper.Counters)
- t.Fatalf("Loaded wrapper not equal to expected wrapper%v\n", err)
+
+ // set transient state and then copy the statedb and ensure that
+ // the transient state is copied
+ state.SetTransientState(addr, key, value)
+ cpy := state.Copy()
+ if got := cpy.GetTransientState(addr, key); got != value {
+ t.Fatalf("transient storage mismatch: have %x, want %x", got, value)
}
}
diff --git a/core/state/sync.go b/core/state/sync.go
new file mode 100644
index 0000000000..75740e3e0d
--- /dev/null
+++ b/core/state/sync.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+// NewStateSync creates a new state trie download scheduler.
+func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync {
+ // Register the storage slot callback if the external callback is specified.
+ var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
+ if onLeaf != nil {
+ onSlot = func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
+ return onLeaf(keys, leaf)
+ }
+ }
+ // Register the account callback to connect the state trie and the storage
+ // trie belonging to the contract.
+ var syncer *trie.Sync
+ onAccount := func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
+ if onLeaf != nil {
+ if err := onLeaf(keys, leaf); err != nil {
+ return err
+ }
+ }
+ var obj Account
+ if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
+ return err
+ }
+ syncer.AddSubTrie(obj.Root, path, parent, parentPath, onSlot)
+ syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent, parentPath)
+ return nil
+ }
+ syncer = trie.NewSync(root, database, onAccount, scheme)
+ return syncer
+}
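
NewStateSync connects each downloaded account leaf to its storage trie and code entry via AddSubTrie/AddCodeEntry. A hedged sketch of constructing the scheduler (the "hash" scheme string is an assumption, and the Missing/Process/Commit fetch loop that drives the returned trie.Sync is omitted):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/rawdb"
	"github.com/harmony-one/harmony/core/state"
)

func main() {
	var root common.Hash // target state root learned from the network
	db := rawdb.NewMemoryDatabase()

	// onLeaf fires once per downloaded account or storage leaf.
	onLeaf := func(keys [][]byte, leaf []byte) error {
		fmt.Printf("leaf reached via %d key paths (%d bytes)\n", len(keys), len(leaf))
		return nil
	}
	sched := state.NewStateSync(root, db, onLeaf, "hash") // "hash": assumed node scheme
	_ = sched // drive with the trie.Sync Missing/Process/Commit loop
}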
diff --git a/core/state/tikv_clean.go b/core/state/tikv_clean.go
index a62d429014..1442c3a139 100644
--- a/core/state/tikv_clean.go
+++ b/core/state/tikv_clean.go
@@ -6,6 +6,7 @@ import (
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/internal/shardchain/tikv_manage"
@@ -36,7 +37,7 @@ func (s *DB) DiffAndCleanCache(shardId uint32, to *DB) (int, error) {
addrBytes := s.trie.GetKey(it.LeafKey())
addr := common.BytesToAddress(addrBytes)
- var fromAccount, toAccount Account
+ var fromAccount, toAccount types.StateAccount
if err := rlp.DecodeBytes(it.LeafBlob(), &fromAccount); err != nil {
continue
}
@@ -53,8 +54,14 @@ func (s *DB) DiffAndCleanCache(shardId uint32, to *DB) (int, error) {
}
// create account difference iterator
- fromAccountTrie := newObject(s, addr, fromAccount).getTrie(s.db)
- toAccountTrie := newObject(to, addr, toAccount).getTrie(to.db)
+ fromAccountTrie, errFromAcc := newObject(s, addr, fromAccount).getTrie(s.db)
+ if errFromAcc != nil {
+ continue
+ }
+ toAccountTrie, errToAcc := newObject(to, addr, toAccount).getTrie(to.db)
+ if errToAcc != nil {
+ continue
+ }
accountIt, _ := trie.NewDifferenceIterator(toAccountTrie.NodeIterator(nil), fromAccountTrie.NodeIterator(nil))
// parallel to delete data
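
The hunk above swaps the package-local Account type for types.StateAccount, geth's canonical RLP account record. Decoding a trie leaf into it looks like this (a standalone sketch; the leaf bytes are whatever the iterator yields):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// decodeAccount parses an RLP-encoded account leaf into the canonical
// StateAccount record (Nonce, Balance, storage Root, CodeHash).
func decodeAccount(leaf []byte) (*types.StateAccount, error) {
	var acc types.StateAccount
	if err := rlp.DecodeBytes(leaf, &acc); err != nil {
		return nil, err
	}
	return &acc, nil
}

func main() {
	if _, err := decodeAccount([]byte{0x80}); err != nil {
		fmt.Println("decode failed:", err) // expected for a dummy payload
	}
}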
diff --git a/core/state/transient_storage.go b/core/state/transient_storage.go
new file mode 100644
index 0000000000..66e563efa7
--- /dev/null
+++ b/core/state/transient_storage.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// transientStorage is a representation of EIP-1153 "Transient Storage".
+type transientStorage map[common.Address]Storage
+
+// newTransientStorage creates a new instance of a transientStorage.
+func newTransientStorage() transientStorage {
+ return make(transientStorage)
+}
+
+// Set sets the transient-storage `value` for `key` at the given `addr`.
+func (t transientStorage) Set(addr common.Address, key, value common.Hash) {
+ if _, ok := t[addr]; !ok {
+ t[addr] = make(Storage)
+ }
+ t[addr][key] = value
+}
+
+// Get gets the transient storage for `key` at the given `addr`.
+func (t transientStorage) Get(addr common.Address, key common.Hash) common.Hash {
+ val, ok := t[addr]
+ if !ok {
+ return common.Hash{}
+ }
+ return val[key]
+}
+
+// Copy does a deep copy of the transientStorage.
+func (t transientStorage) Copy() transientStorage {
+ storage := make(transientStorage)
+ for key, value := range t {
+ storage[key] = value.Copy()
+ }
+ return storage
+}
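
transientStorage is a plain nested map, so its semantics are easy to show in isolation; per EIP-1153 it is discarded at the end of every transaction rather than persisted. A minimal standalone sketch of the same behavior:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	tstore := map[common.Address]map[common.Hash]common.Hash{}
	addr, key := common.Address{0x01}, common.Hash{0x02}

	// Set: allocate the per-account map lazily, as transientStorage.Set does.
	if _, ok := tstore[addr]; !ok {
		tstore[addr] = map[common.Hash]common.Hash{}
	}
	tstore[addr][key] = common.Hash{0xff}

	fmt.Println(tstore[addr][key])               // 0xff00...
	fmt.Println(tstore[addr][common.Hash{0x03}]) // zero hash: unset keys default,
	// matching TLOAD on untouched slots
}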
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
new file mode 100644
index 0000000000..0fac2dafbf
--- /dev/null
+++ b/core/state/trie_prefetcher.go
@@ -0,0 +1,354 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/harmony-one/harmony/internal/utils"
+)
+
+var (
+ // triePrefetchMetricsPrefix is the prefix under which to publish the metrics.
+ triePrefetchMetricsPrefix = "trie/prefetch/"
+)
+
+// triePrefetcher is an active prefetcher, which receives accounts or storage
+// items and does trie-loading of them. The goal is to get as much useful content
+// into the caches as possible.
+//
+// Note, the prefetcher's API is not thread safe.
+type triePrefetcher struct {
+ db Database // Database to fetch trie nodes through
+ root common.Hash // Root hash of the account trie for metrics
+ fetches map[string]Trie // Partially or fully fetched tries
+ fetchers map[string]*subfetcher // Subfetchers for each trie
+
+ deliveryMissMeter metrics.Meter
+ accountLoadMeter metrics.Meter
+ accountDupMeter metrics.Meter
+ accountSkipMeter metrics.Meter
+ accountWasteMeter metrics.Meter
+ storageLoadMeter metrics.Meter
+ storageDupMeter metrics.Meter
+ storageSkipMeter metrics.Meter
+ storageWasteMeter metrics.Meter
+}
+
+func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher {
+ prefix := triePrefetchMetricsPrefix + namespace
+ p := &triePrefetcher{
+ db: db,
+ root: root,
+ fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map
+
+ deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil),
+ accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil),
+ accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil),
+ accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil),
+ accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil),
+ storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil),
+ storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil),
+ storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil),
+ storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil),
+ }
+ return p
+}
+
+// close iterates over all the subfetchers, aborts any that were left spinning
+// and reports the stats to the metrics subsystem.
+func (p *triePrefetcher) close() {
+ for _, fetcher := range p.fetchers {
+ fetcher.abort() // safe to do multiple times
+
+ if metrics.Enabled {
+ if fetcher.root == p.root {
+ p.accountLoadMeter.Mark(int64(len(fetcher.seen)))
+ p.accountDupMeter.Mark(int64(fetcher.dups))
+ p.accountSkipMeter.Mark(int64(len(fetcher.tasks)))
+
+ for _, key := range fetcher.used {
+ delete(fetcher.seen, string(key))
+ }
+ p.accountWasteMeter.Mark(int64(len(fetcher.seen)))
+ } else {
+ p.storageLoadMeter.Mark(int64(len(fetcher.seen)))
+ p.storageDupMeter.Mark(int64(fetcher.dups))
+ p.storageSkipMeter.Mark(int64(len(fetcher.tasks)))
+
+ for _, key := range fetcher.used {
+ delete(fetcher.seen, string(key))
+ }
+ p.storageWasteMeter.Mark(int64(len(fetcher.seen)))
+ }
+ }
+ }
+ // Clear out all fetchers (will crash on a second call, deliberate)
+ p.fetchers = nil
+}
+
+// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data
+// already loaded will be copied over, but no goroutines will be started. This
+// is mostly used in the miner, which creates a copy of its actively mutated
+// state to be sealed while it may further mutate the state.
+func (p *triePrefetcher) copy() *triePrefetcher {
+ copy := &triePrefetcher{
+ db: p.db,
+ root: p.root,
+ fetches: make(map[string]Trie), // Active prefetchers use the fetches map
+
+ deliveryMissMeter: p.deliveryMissMeter,
+ accountLoadMeter: p.accountLoadMeter,
+ accountDupMeter: p.accountDupMeter,
+ accountSkipMeter: p.accountSkipMeter,
+ accountWasteMeter: p.accountWasteMeter,
+ storageLoadMeter: p.storageLoadMeter,
+ storageDupMeter: p.storageDupMeter,
+ storageSkipMeter: p.storageSkipMeter,
+ storageWasteMeter: p.storageWasteMeter,
+ }
+ // If the prefetcher is already a copy, duplicate the data
+ if p.fetches != nil {
+ for root, fetch := range p.fetches {
+ if fetch == nil {
+ continue
+ }
+ copy.fetches[root] = p.db.CopyTrie(fetch)
+ }
+ return copy
+ }
+ // Otherwise we're copying an active fetcher, retrieve the current states
+ for id, fetcher := range p.fetchers {
+ copy.fetches[id] = fetcher.peek()
+ }
+ return copy
+}
+
+// prefetch schedules a batch of trie items to prefetch.
+func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]byte) {
+ // If the prefetcher is an inactive one, bail out
+ if p.fetches != nil {
+ return
+ }
+ // Active fetcher, schedule the retrievals
+ id := p.trieID(owner, root)
+ fetcher := p.fetchers[id]
+ if fetcher == nil {
+ fetcher = newSubfetcher(p.db, p.root, owner, root)
+ p.fetchers[id] = fetcher
+ }
+ fetcher.schedule(keys)
+}
+
+// trie returns the trie matching the root hash, or nil if the prefetcher doesn't
+// have it.
+func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
+ // If the prefetcher is inactive, return from existing deep copies
+ id := p.trieID(owner, root)
+ if p.fetches != nil {
+ trie := p.fetches[id]
+ if trie == nil {
+ p.deliveryMissMeter.Mark(1)
+ return nil
+ }
+ return p.db.CopyTrie(trie)
+ }
+ // Otherwise the prefetcher is active, bail if no trie was prefetched for this root
+ fetcher := p.fetchers[id]
+ if fetcher == nil {
+ p.deliveryMissMeter.Mark(1)
+ return nil
+ }
+ // Interrupt the prefetcher if it's by any chance still running and return
+ // a copy of any pre-loaded trie.
+ fetcher.abort() // safe to do multiple times
+
+ trie := fetcher.peek()
+ if trie == nil {
+ p.deliveryMissMeter.Mark(1)
+ return nil
+ }
+ return trie
+}
+
+// used marks a batch of state items as used, allowing statistics to be
+// gathered about how useful or wasteful the prefetcher is.
+func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) {
+ if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil {
+ fetcher.used = used
+ }
+}
+
+// trieID returns a unique trie identifier consisting of the trie owner and root hash.
+func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
+ return string(append(owner.Bytes(), root.Bytes()...))
+}
+
+// subfetcher is a trie fetcher goroutine responsible for pulling entries for a
+// single trie. It is spawned when a new root is encountered and lives until the
+// main prefetcher is paused and either all requested items are processed or
+// the trie being worked on is retrieved from the prefetcher.
+type subfetcher struct {
+ db Database // Database to load trie nodes through
+ state common.Hash // Root hash of the state to prefetch
+ owner common.Hash // Owner of the trie, usually account hash
+ root common.Hash // Root hash of the trie to prefetch
+ trie Trie // Trie being populated with nodes
+
+ tasks [][]byte // Items queued up for retrieval
+ lock sync.Mutex // Lock protecting the task queue
+
+ wake chan struct{} // Wake channel if a new task is scheduled
+ stop chan struct{} // Channel to interrupt processing
+ term chan struct{} // Channel to signal interruption
+ copy chan chan Trie // Channel to request a copy of the current trie
+
+ seen map[string]struct{} // Tracks the entries already loaded
+ dups int // Number of duplicate preload tasks
+ used [][]byte // Tracks the entries used in the end
+}
+
+// newSubfetcher creates a goroutine to prefetch state items belonging to a
+// particular root hash.
+func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash) *subfetcher {
+ sf := &subfetcher{
+ db: db,
+ state: state,
+ owner: owner,
+ root: root,
+ wake: make(chan struct{}, 1),
+ stop: make(chan struct{}),
+ term: make(chan struct{}),
+ copy: make(chan chan Trie),
+ seen: make(map[string]struct{}),
+ }
+ go sf.loop()
+ return sf
+}
+
+// schedule adds a batch of trie keys to the queue to prefetch.
+func (sf *subfetcher) schedule(keys [][]byte) {
+ // Append the tasks to the current queue
+ sf.lock.Lock()
+ sf.tasks = append(sf.tasks, keys...)
+ sf.lock.Unlock()
+
+ // Notify the prefetcher, it's fine if it's already terminated
+ select {
+ case sf.wake <- struct{}{}:
+ default:
+ }
+}
+
+// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it
+// is currently.
+func (sf *subfetcher) peek() Trie {
+ ch := make(chan Trie)
+ select {
+ case sf.copy <- ch:
+ // Subfetcher still alive, return copy from it
+ return <-ch
+
+ case <-sf.term:
+ // Subfetcher already terminated, return a copy directly
+ if sf.trie == nil {
+ return nil
+ }
+ return sf.db.CopyTrie(sf.trie)
+ }
+}
+
+// abort interrupts the subfetcher immediately. It is safe to call abort multiple
+// times but it is not thread safe.
+func (sf *subfetcher) abort() {
+ select {
+ case <-sf.stop:
+ default:
+ close(sf.stop)
+ }
+ <-sf.term
+}
+
+// loop waits for new tasks to be scheduled and keeps loading them until it runs
+// out of tasks or its underlying trie is retrieved for committing.
+func (sf *subfetcher) loop() {
+ // No matter how the loop stops, signal anyone waiting that it's terminated
+ defer close(sf.term)
+
+ // Start by opening the trie and stop processing if it fails
+ if sf.owner == (common.Hash{}) {
+ trie, err := sf.db.OpenTrie(sf.root)
+ if err != nil {
+ utils.Logger().Warn().Err(err).Interface("root", sf.root).Msg("Trie prefetcher failed opening trie")
+ return
+ }
+ sf.trie = trie
+ } else {
+ trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root)
+ if err != nil {
+ utils.Logger().Warn().Err(err).Interface("root", sf.root).Msg("Trie prefetcher failed opening trie")
+ return
+ }
+ sf.trie = trie
+ }
+ // Trie opened successfully, keep prefetching items
+ for {
+ select {
+ case <-sf.wake:
+ // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock
+ sf.lock.Lock()
+ tasks := sf.tasks
+ sf.tasks = nil
+ sf.lock.Unlock()
+
+ // Prefetch any tasks until the loop is interrupted
+ for i, task := range tasks {
+ select {
+ case <-sf.stop:
+ // If termination is requested, add any leftover back and return
+ sf.lock.Lock()
+ sf.tasks = append(sf.tasks, tasks[i:]...)
+ sf.lock.Unlock()
+ return
+
+ case ch := <-sf.copy:
+ // Somebody wants a copy of the current trie, grant them
+ ch <- sf.db.CopyTrie(sf.trie)
+
+ default:
+ // No termination request yet, prefetch the next entry
+ if _, ok := sf.seen[string(task)]; ok {
+ sf.dups++
+ } else {
+ sf.trie.TryGet(task)
+ sf.seen[string(task)] = struct{}{}
+ }
+ }
+ }
+
+ case ch := <-sf.copy:
+ // Somebody wants a copy of the current trie, grant them
+ ch <- sf.db.CopyTrie(sf.trie)
+
+ case <-sf.stop:
+ // Termination is requested, abort and leave remaining tasks
+ return
+ }
+ }
+}
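
For reviewers, a minimal sketch (not part of the patch) of the subfetcher lifecycle implemented above; db, stateRoot, owner, root and key are stand-ins for values the triePrefetcher already holds:

	sf := newSubfetcher(db, stateRoot, owner, root)

	// schedule never blocks: it appends to the task queue and tickles the
	// buffered wake channel so the loop picks the batch up.
	sf.schedule([][]byte{key.Bytes()})
	sf.schedule([][]byte{key.Bytes()}) // duplicate, counted in sf.dups

	// peek hands back a deep copy of the trie in whatever state it is,
	// whether the loop is still running or has already terminated.
	if trie := sf.peek(); trie != nil {
		_ = trie // possibly partially warmed trie copy
	}

	// abort is idempotent; it closes the stop channel once and then blocks
	// on term until the loop has drained and returned.
	sf.abort()
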
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
new file mode 100644
index 0000000000..d5774d38fd
--- /dev/null
+++ b/core/state/trie_prefetcher_test.go
@@ -0,0 +1,110 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/harmony-one/harmony/core/rawdb"
+)
+
+func filledStateDB() *DB {
+ state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+
+ // Create an account and check if the retrieved balance is correct
+ addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
+ skey := common.HexToHash("aaa")
+ sval := common.HexToHash("bbb")
+
+ state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+ state.SetCode(addr, []byte("hello"), false) // Change the external metadata
+ state.SetState(addr, skey, sval) // Change the storage trie
+ for i := 0; i < 100; i++ {
+ sk := common.BigToHash(big.NewInt(int64(i)))
+ state.SetState(addr, sk, sk) // Change the storage trie
+ }
+ return state
+}
+
+func TestCopyAndClose(t *testing.T) {
+ db := filledStateDB()
+ prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
+ skey := common.HexToHash("aaa")
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ time.Sleep(1 * time.Second)
+ a := prefetcher.trie(common.Hash{}, db.originalRoot)
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ b := prefetcher.trie(common.Hash{}, db.originalRoot)
+ cpy := prefetcher.copy()
+ cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ c := cpy.trie(common.Hash{}, db.originalRoot)
+ prefetcher.close()
+ cpy2 := cpy.copy()
+ cpy2.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ d := cpy2.trie(common.Hash{}, db.originalRoot)
+ cpy.close()
+ cpy2.close()
+ if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() {
+ t.Fatalf("Invalid trie, hashes should be equal: %v %v %v %v", a.Hash(), b.Hash(), c.Hash(), d.Hash())
+ }
+}
+
+func TestUseAfterClose(t *testing.T) {
+ db := filledStateDB()
+ prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
+ skey := common.HexToHash("aaa")
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ a := prefetcher.trie(common.Hash{}, db.originalRoot)
+ prefetcher.close()
+ b := prefetcher.trie(common.Hash{}, db.originalRoot)
+ if a == nil {
+ t.Fatal("Prefetching before close should not return nil")
+ }
+ if b != nil {
+ t.Fatal("Trie after close should return nil")
+ }
+}
+
+func TestCopyClose(t *testing.T) {
+ db := filledStateDB()
+ prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
+ skey := common.HexToHash("aaa")
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ cpy := prefetcher.copy()
+ a := prefetcher.trie(common.Hash{}, db.originalRoot)
+ b := cpy.trie(common.Hash{}, db.originalRoot)
+ prefetcher.close()
+ c := prefetcher.trie(common.Hash{}, db.originalRoot)
+ d := cpy.trie(common.Hash{}, db.originalRoot)
+ if a == nil {
+ t.Fatal("Prefetching before close should not return nil")
+ }
+ if b == nil {
+ t.Fatal("Copy trie should return nil")
+ }
+ if c != nil {
+ t.Fatal("Trie after close should return nil")
+ }
+ if d == nil {
+ t.Fatal("Copy trie should not return nil")
+ }
+}
diff --git a/core/state_processor.go b/core/state_processor.go
index 23c7e35389..fe7eeffd12 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -308,7 +308,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// Set the receipt logs and create a bloom for filtering
if config.IsReceiptLog(header.Epoch()) {
- receipt.Logs = statedb.GetLogs(tx.Hash())
+ receipt.Logs = statedb.GetLogs(tx.Hash(), header.Number().Uint64(), header.Hash())
}
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
@@ -384,7 +384,7 @@ func ApplyStakingTransaction(
receipt.GasUsed = gas
if config.IsReceiptLog(header.Epoch()) {
- receipt.Logs = statedb.GetLogs(tx.Hash())
+ receipt.Logs = statedb.GetLogs(tx.Hash(), header.Number().Uint64(), header.Hash())
utils.Logger().Info().Interface("CollectReward", receipt.Logs)
}
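
The new GetLogs signature mirrors go-ethereum v1.11, where the statedb no longer derives block metadata itself; the caller supplies the number and hash to stamp onto each log. A sketch of the call shape, with header being the block header already in scope at these call sites:

	logs := statedb.GetLogs(
		tx.Hash(),                // transaction the logs belong to
		header.Number().Uint64(), // block number written into each log
		header.Hash(),            // block hash written into each log
	)
	receipt.Logs = logs
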
diff --git a/core/state_transition.go b/core/state_transition.go
index c3ad10d496..9684812cbb 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -25,6 +25,7 @@ import (
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/utils"
+ "github.com/harmony-one/harmony/numeric"
"github.com/harmony-one/harmony/shard"
stakingTypes "github.com/harmony-one/harmony/staking/types"
"github.com/pkg/errors"
@@ -281,14 +282,28 @@ func (st *StateTransition) refundGas() {
}
func (st *StateTransition) collectGas() {
- // Burn Txn Fees after staking epoch
if config := st.evm.ChainConfig(); !config.IsStaking(st.evm.EpochNumber) {
- txFee := new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)
+ // Before staking epoch, add the fees to the block producer
+ txFee := new(big.Int).Mul(
+ new(big.Int).SetUint64(st.gasUsed()),
+ st.gasPrice,
+ )
st.state.AddBalance(st.evm.Coinbase, txFee)
- } else if config.IsFeeCollectEpoch(st.evm.EpochNumber) { // collect Txn Fees to community-managed account.
- txFee := new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)
- txFeeCollector := shard.Schedule.InstanceForEpoch(st.evm.EpochNumber).FeeCollector()
- st.state.AddBalance(txFeeCollector, txFee)
+ } else if feeCollectors := shard.Schedule.InstanceForEpoch(
+ st.evm.EpochNumber,
+ ).FeeCollectors(); len(feeCollectors) > 0 {
+ // The caller must ensure that the feeCollectors are accurately set
+ // at the appropriate epochs
+ txFee := numeric.NewDecFromBigInt(
+ new(big.Int).Mul(
+ new(big.Int).SetUint64(st.gasUsed()),
+ st.gasPrice,
+ ),
+ )
+ for address, percent := range feeCollectors {
+ collectedFee := percent.Mul(txFee)
+ st.state.AddBalance(address, collectedFee.TruncateInt())
+ }
}
}
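
Because each collector's cut is computed independently with a truncating decimal multiply, any sub-wei remainder is dropped rather than redistributed. A self-contained sketch of the arithmetic, assuming a 50/50 split and assuming the MustNewDecFromStr constructor from the cosmos-style Dec API this package derives from:

	package main

	import (
		"fmt"
		"math/big"

		"github.com/harmony-one/harmony/numeric"
	)

	func main() {
		txFee := numeric.NewDecFromBigInt(big.NewInt(5)) // 5 wei total fee
		share := numeric.MustNewDecFromStr("0.5")        // one collector's ratio

		// Each of two collectors receives floor(5 * 0.5) = 2 wei; 1 wei is dropped.
		fmt.Println(share.Mul(txFee).TruncateInt()) // 2
	}
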
diff --git a/core/state_transition_test.go b/core/state_transition_test.go
index 43b592c0d8..2a335ef779 100644
--- a/core/state_transition_test.go
+++ b/core/state_transition_test.go
@@ -4,11 +4,16 @@ import (
"crypto/ecdsa"
"fmt"
"math"
+ "math/big"
"testing"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm"
+ shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
"github.com/harmony-one/harmony/internal/params"
+ "github.com/harmony-one/harmony/shard"
staking "github.com/harmony-one/harmony/staking/types"
"github.com/pkg/errors"
)
@@ -81,3 +86,129 @@ func testApplyStakingMessage(test applyStakingMessageTest, t *testing.T) {
}
})
}
+
+func TestCollectGas(t *testing.T) {
+ key, _ := crypto.GenerateKey()
+ chain, db, header, _ := getTestEnvironment(*key)
+ header.SetEpoch(new(big.Int).Set(params.LocalnetChainConfig.FeeCollectEpoch))
+
+ // set the shard schedule so that fee collectors are available
+ shard.Schedule = shardingconfig.LocalnetSchedule
+ feeCollectors := shard.Schedule.InstanceForEpoch(header.Epoch()).FeeCollectors()
+ if len(feeCollectors) == 0 {
+ t.Fatal("No fee collectors set")
+ }
+
+ tx := types.NewTransaction(
+ 0, // nonce
+ common.BytesToAddress([]byte("to")),
+ 0, // shardid
+ big.NewInt(1e18), // amount, 1 ONE
+ 50000, // gasLimit, intentionally higher than the 21000 required
+ big.NewInt(100e9), // gasPrice
+ []byte{}, // payload, intentionally empty
+ )
+ from, _ := tx.SenderAddress()
+ initialBalance := big.NewInt(2e18)
+ db.AddBalance(from, initialBalance)
+ msg, _ := tx.AsMessage(types.NewEIP155Signer(common.Big2))
+ ctx := NewEVMContext(msg, header, chain, nil /* coinbase is nil, no block reward */)
+ ctx.TxType = types.SameShardTx
+
+ vmenv := vm.NewEVM(ctx, db, params.TestChainConfig, vm.Config{})
+ gasPool := new(GasPool).AddGas(math.MaxUint64)
+ _, err := ApplyMessage(vmenv, msg, gasPool)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // check that sender got the exact expected balance
+ balance := db.GetBalance(from)
+ fee := new(big.Int).Mul(tx.GasPrice(), new(big.Int).
+ SetUint64(
+ 21000, // base fee for normal transfers
+ ))
+ feePlusAmount := new(big.Int).Add(fee, tx.Value())
+ expectedBalance := new(big.Int).Sub(initialBalance, feePlusAmount)
+ if balance.Cmp(expectedBalance) != 0 {
+ t.Errorf("Balance mismatch for sender: got %v, expected %v", balance, expectedBalance)
+ }
+
+ // check that the fee collectors got half of the fees each
+ expectedFeePerCollector := new(big.Int).Mul(tx.GasPrice(),
+ new(big.Int).SetUint64(21000/2))
+ for collector := range feeCollectors {
+ balance := db.GetBalance(collector)
+ if balance.Cmp(expectedFeePerCollector) != 0 {
+ t.Errorf("Balance mismatch for collector %v: got %v, expected %v",
+ collector, balance, expectedFeePerCollector)
+ }
+ }
+
+ // lastly, check the receiver's balance
+ balance = db.GetBalance(*tx.To())
+ if balance.Cmp(tx.Value()) != 0 {
+ t.Errorf("Balance mismatch for receiver: got %v, expected %v", balance, tx.Value())
+ }
+}
+
+func TestCollectGasRounding(t *testing.T) {
+ // We want to test that the fee collectors get the correct amount of fees
+ // even if the total fee is not a multiple of the fee ratio.
+ // For example, if the fee ratio is 1:1, and the total fee is 1e9, then
+ // the fee collectors should get 0.5e9 each.
+ // If the fee is 1e9 + 1, then the fee collectors should get 0.5e9 each,
+ // with the extra 1 being dropped. This test checks for that.
+ // Such a situation can potentially occur, but is not an immediate concern
+ // since we require transactions to have a minimum gas price of 100 gwei
+ // which is always even (in wei) and can be divided across two collectors.
+ // Hypothetically, a gas price of 1 wei * gas used of (21,000 + an odd number)
+ // could result in such a case, which is handled correctly.
+ key, _ := crypto.GenerateKey()
+ chain, db, header, _ := getTestEnvironment(*key)
+ header.SetEpoch(new(big.Int).Set(params.LocalnetChainConfig.FeeCollectEpoch))
+
+ // set the shard schedule so that fee collectors are available
+ shard.Schedule = shardingconfig.LocalnetSchedule
+ feeCollectors := shard.Schedule.InstanceForEpoch(header.Epoch()).FeeCollectors()
+ if len(feeCollectors) == 0 {
+ t.Fatal("No fee collectors set")
+ }
+
+ tx := types.NewTransaction(
+ 0, // nonce
+ common.BytesToAddress([]byte("to")),
+ 0, // shardid
+ big.NewInt(1e18), // amount, 1 ONE
+ 5, // gasLimit
+ big.NewInt(1), // gasPrice
+ []byte{}, // payload, intentionally empty
+ )
+ from, _ := tx.SenderAddress()
+ initialBalance := big.NewInt(2e18)
+ db.AddBalance(from, initialBalance)
+ msg, _ := tx.AsMessage(types.NewEIP155Signer(common.Big2))
+ ctx := NewEVMContext(msg, header, chain, nil /* coinbase is nil, no block reward */)
+ ctx.TxType = types.SameShardTx
+
+ vmenv := vm.NewEVM(ctx, db, params.TestChainConfig, vm.Config{})
+ gasPool := new(GasPool).AddGas(math.MaxUint64)
+ st := NewStateTransition(vmenv, msg, gasPool, nil)
+ // buy gas to set initial gas to 5: gasLimit * gasPrice
+ if err := st.buyGas(); err != nil {
+ t.Fatal(err)
+ }
+ // set leftover gas to 0, so gasUsed is 5
+ st.gas = 0
+ st.collectGas()
+
+ // check that the fee collectors got the fees in the provided ratio
+ expectedFeePerCollector := big.NewInt(2) // floor(5 / 2) = 2
+ for collector := range feeCollectors {
+ balance := db.GetBalance(collector)
+ if balance.Cmp(expectedFeePerCollector) != 0 {
+ t.Errorf("Balance mismatch for collector %v: got %v, expected %v",
+ collector, balance, expectedFeePerCollector)
+ }
+ }
+}
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 2efaf3be01..d918490223 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -185,12 +185,12 @@ var DefaultTxPoolConfig = TxPoolConfig{
PriceLimit: 100e9, // 100 Gwei/Nano
PriceBump: 1, // PriceBump is percent, 1% is enough
- AccountSlots: 16,
- GlobalSlots: 4096,
- AccountQueue: 64,
- GlobalQueue: 1024,
+ AccountSlots: 16, // --txpool.accountslots
+ GlobalSlots: 4096, // --txpool.globalslots
+ AccountQueue: 64, // --txpool.accountqueue
+ GlobalQueue: 5120, // --txpool.globalqueue
- Lifetime: 30 * time.Minute,
+ Lifetime: 30 * time.Minute, // --txpool.lifetime
Blacklist: map[common.Address]struct{}{},
AllowedTxs: map[common.Address]AllowedTxData{},
@@ -243,6 +243,27 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig {
Msg("Sanitizing invalid txpool global slots")
conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
}
+ if conf.AccountQueue == 0 {
+ utils.Logger().Warn().
+ Uint64("provided", conf.AccountQueue).
+ Uint64("updated", DefaultTxPoolConfig.AccountQueue).
+ Msg("Sanitizing invalid txpool account queue")
+ conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
+ }
+ if conf.GlobalQueue == 0 {
+ utils.Logger().Warn().
+ Uint64("provided", conf.GlobalQueue).
+ Uint64("updated", DefaultTxPoolConfig.GlobalQueue).
+ Msg("Sanitizing invalid txpool account queue")
+ conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
+ }
+ if conf.Lifetime == 0 {
+ utils.Logger().Warn().
+ Dur("provided", conf.Lifetime).
+ Dur("updated", DefaultTxPoolConfig.Lifetime).
+ Msg("Sanitizing invalid txpool lifetime")
+ conf.Lifetime = DefaultTxPoolConfig.Lifetime
+ }
return conf
}
@@ -1415,7 +1436,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
if pending > pool.config.GlobalSlots {
pendingBeforeCap := pending
// Assemble a spam order to penalize large transactors first
- spammers := prque.New(nil)
+ spammers := prque.New[int64, common.Address](nil)
for addr, list := range pool.pending {
// Only evict transactions from high rollers
if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
@@ -1427,12 +1448,12 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
for pending > pool.config.GlobalSlots && !spammers.Empty() {
// Retrieve the next offender if not local address
offender, _ := spammers.Pop()
- offenders = append(offenders, offender.(common.Address))
+ offenders = append(offenders, offender)
// Equalize balances until all the same or below threshold
if len(offenders) > 1 {
// Calculate the equalization threshold for all current offenders
- threshold := pool.pending[offender.(common.Address)].Len()
+ threshold := pool.pending[offender].Len()
// Iteratively reduce all offenders until below limit or threshold reached
for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
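
The prque change tracks go-ethereum v1.11's generic priority queue, which removes the interface{} round-trips seen in the deleted lines. A standalone sketch of that API as used above (the values and priorities here are arbitrary):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/common/prque"
	)

	func main() {
		// int64 priorities, common.Address values, no index-tracking callback.
		spammers := prque.New[int64, common.Address](nil)
		spammers.Push(common.HexToAddress("0x01"), 10)
		spammers.Push(common.HexToAddress("0x02"), 25)

		// Pop returns the typed value directly; no .(common.Address) assertion.
		offender, prio := spammers.Pop()
		fmt.Println(offender.Hex(), prio) // highest priority first
	}
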
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index fa3af1c972..9b126eac90 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -27,7 +27,7 @@ import (
"testing"
"time"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/crypto/bls"
@@ -167,7 +167,7 @@ func createBlockChain() *BlockChainImpl {
func setupTxPool(chain blockChain) (*TxPool, *ecdsa.PrivateKey) {
if chain == nil {
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
chain = &testBlockChain{statedb, 1e18, new(event.Feed)}
}
@@ -225,7 +225,7 @@ func (c *testChain) State() (*state.DB, error) {
// a state change between those fetches.
stdb := c.statedb
if *c.trigger {
- c.statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ c.statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
// simulate that the new head block included tx0 and tx1
c.statedb.SetNonce(c.address, 2)
c.statedb.SetBalance(c.address, new(big.Int).SetUint64(denominations.One))
@@ -243,7 +243,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) {
var (
key, _ = crypto.GenerateKey()
address = crypto.PubkeyToAddress(key.PublicKey)
- statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
trigger = false
)
@@ -579,7 +579,7 @@ func TestTransactionChainFork(t *testing.T) {
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.AddBalance(addr, big.NewInt(9000000000000000000))
pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
@@ -605,7 +605,7 @@ func TestTransactionDoubleNonce(t *testing.T) {
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.AddBalance(addr, big.NewInt(1000000000000000000))
pool, _ := setupTxPool(&testBlockChain{statedb, 1000000, new(event.Feed)})
defer pool.Stop()
@@ -799,7 +799,7 @@ func TestTransactionPostponing(t *testing.T) {
t.Parallel()
// Create the pool to test the postponing with
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain, dummyErrorSink)
@@ -961,7 +961,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@@ -1051,7 +1051,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
evictionInterval = time.Second
// Create the pool to test the non-expiration enforcement
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@@ -1086,7 +1086,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Wait a bit for eviction to run and clean up any leftovers, and ensure only the local remains
- time.Sleep(2 * config.Lifetime)
+ time.Sleep(4 * config.Lifetime)
pending, queued = pool.Stats()
if pending != 0 {
@@ -1166,7 +1166,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@@ -1212,7 +1212,7 @@ func TestTransactionCapClearsFromAll(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@@ -1246,7 +1246,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@@ -1291,7 +1291,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain, dummyErrorSink)
@@ -1365,7 +1365,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
os.Remove(journal)
// Create the original pool to inject transaction into the journal
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@@ -1463,7 +1463,7 @@ func TestTransactionStatusCheck(t *testing.T) {
t.Parallel()
// Create the pool to test the status retrievals with
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain, dummyErrorSink)
diff --git a/core/types/bloom9_test.go b/core/types/bloom9_test.go
index 718a2078d2..4a4fcd5215 100644
--- a/core/types/bloom9_test.go
+++ b/core/types/bloom9_test.go
@@ -34,19 +34,19 @@ func TestBloom(t *testing.T) {
"tes",
"lo",
}
-
var bloom ethtypes.Bloom
for _, data := range positive {
- bloom.Add(new(big.Int).SetBytes([]byte(data)))
+ b := new(big.Int).SetBytes([]byte(data)).Bytes()
+ bloom.Add(b)
}
for _, data := range positive {
- if !bloom.TestBytes([]byte(data)) {
+ if !bloom.Test([]byte(data)) {
t.Error("expected", data, "to test true")
}
}
for _, data := range negative {
- if bloom.TestBytes([]byte(data)) {
+ if bloom.Test([]byte(data)) {
t.Error("did not expect", data, "to test true")
}
}
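
For reference, the v1.11 bloom filter API the rewritten test targets takes raw bytes for both insertion and membership checks; a short sketch:

	package main

	import (
		"fmt"

		ethtypes "github.com/ethereum/go-ethereum/core/types"
	)

	func main() {
		var bloom ethtypes.Bloom
		bloom.Add([]byte("testtest")) // Add now accepts []byte, not *big.Int

		fmt.Println(bloom.Test([]byte("testtest"))) // true
		fmt.Println(bloom.Test([]byte("missing")))  // false, barring a false positive
	}
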
diff --git a/core/vm/contracts_write.go b/core/vm/contracts_write.go
index 46ba7fe923..7e24eb9a27 100644
--- a/core/vm/contracts_write.go
+++ b/core/vm/contracts_write.go
@@ -242,7 +242,7 @@ func (c *crossShardXferPrecompile) RunWriteCapable(
return nil, err
}
// validate not a contract (toAddress can still be a contract)
- if len(evm.StateDB.GetCode(fromAddress)) > 0 && !evm.IsValidator(evm.StateDB, fromAddress) {
+ if len(evm.StateDB.GetCode(fromAddress, false)) > 0 && !evm.IsValidator(evm.StateDB, fromAddress) {
return nil, errors.New("cross shard xfer not yet implemented for contracts")
}
// can't have too many shards
diff --git a/core/vm/contracts_write_test.go b/core/vm/contracts_write_test.go
index 649bbc6169..479bf420fc 100644
--- a/core/vm/contracts_write_test.go
+++ b/core/vm/contracts_write_test.go
@@ -7,8 +7,8 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/params"
@@ -230,12 +230,12 @@ func testCrossShardXferPrecompile(test writeCapablePrecompileTest, t *testing.T)
db.Close()
}
}()
- if db, err = rawdb.NewLevelDBDatabase("/tmp/harmony_shard_0", 256, 1024, ""); err != nil {
+ if db, err = rawdb.NewLevelDBDatabase("/tmp/harmony_shard_0", 256, 1024, "", false); err != nil {
db = nil
t.Fatalf("Could not initialize db %s", err)
}
stateCache := state.NewDatabase(db)
- state, err := state.New(common.Hash{}, stateCache)
+ state, err := state.New(common.Hash{}, stateCache, nil)
if err != nil {
t.Fatalf("Error while initializing state %s", err)
}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 49857255fa..53da390dba 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -336,7 +336,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value, txType)
codeHash := evm.StateDB.GetCodeHash(addr)
- code := evm.StateDB.GetCode(addr)
+ code := evm.StateDB.GetCode(addr, false)
// If address is a validator address, then it's not a smart contract address
// we don't use its code and codeHash fields
if evm.Context.IsValidator(evm.StateDB, addr) {
@@ -402,7 +402,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
// EVM. The contract is a scoped environment for this execution context
// only.
contract := NewContract(caller, to, value, gas)
- contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
+ contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr, false))
ret, err = run(evm, contract, input, false)
if err != nil {
@@ -435,7 +435,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
// Initialise a new contract and make initialise the delegate values
contract := NewContract(caller, to, nil, gas).AsDelegate()
- contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
+ contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr, false))
ret, err = run(evm, contract, input, false)
if err != nil {
@@ -468,7 +468,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
// EVM. The contract is a scoped environment for this execution context
// only.
contract := NewContract(caller, to, new(big.Int), gas)
- contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
+ contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr, false))
// We do an AddBalance of zero here, just in order to trigger a touch.
// This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium,
@@ -553,7 +553,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
if err == nil && !maxCodeSizeExceeded {
createDataGas := uint64(len(ret)) * params.CreateDataGas
if contract.UseGas(createDataGas) {
- evm.StateDB.SetCode(address, ret)
+ evm.StateDB.SetCode(address, ret, false)
} else {
err = ErrCodeStoreOutOfGas
}
diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go
index 48aa846a0c..65973e832c 100644
--- a/core/vm/gas_table_test.go
+++ b/core/vm/gas_table_test.go
@@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
)
@@ -83,9 +83,9 @@ func TestEIP2200(t *testing.T) {
for i, tt := range eip2200Tests {
address := common.BytesToAddress([]byte("contract"))
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.CreateAccount(address)
- statedb.SetCode(address, hexutil.MustDecode(tt.input))
+ statedb.SetCode(address, hexutil.MustDecode(tt.input), false)
statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original}))
statedb.Finalise(true) // Push the state into the "original" slot
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 091ba28ff6..73f826a63f 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -488,7 +488,7 @@ func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, contract *Contract,
slot.SetUint64(0)
return nil, nil
}
- slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(common.BigToAddress(slot))))
+ slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(common.BigToAddress(slot), false)))
return nil, nil
}
@@ -528,7 +528,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, contract *Contract,
// for EOAs that are not validators, statedb returns nil
code = nil
} else {
- code = interpreter.evm.StateDB.GetCode(addr)
+ code = interpreter.evm.StateDB.GetCode(addr, false)
}
codeCopy := getDataBig(code, codeOffset, length)
memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
diff --git a/core/vm/interface.go b/core/vm/interface.go
index 3e3994eda9..3b481fd69e 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -38,9 +38,9 @@ type StateDB interface {
SetNonce(common.Address, uint64)
GetCodeHash(common.Address) common.Hash
- GetCode(common.Address) []byte
- SetCode(common.Address, []byte)
- GetCodeSize(common.Address) int
+ GetCode(common.Address, bool) []byte
+ SetCode(common.Address, []byte, bool)
+ GetCodeSize(common.Address, bool) int
ValidatorWrapper(common.Address, bool, bool) (*staking.ValidatorWrapper, error)
UpdateValidatorWrapper(common.Address, *staking.ValidatorWrapper) error
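
The boolean threaded through GetCode, SetCode, and GetCodeSize appears to flag validator-wrapper code as opposed to plain contract bytecode, which is why every EVM call site in this patch passes false. A hedged sketch against the widened interface, where statedb is any implementation of vm.StateDB:

	// Illustration only; reading the flag as "isValidatorCode" is an assumption.
	code := statedb.GetCode(addr, false)     // ordinary contract bytecode
	size := statedb.GetCodeSize(addr, false) // its length, via the same flag
	statedb.SetCode(addr, code, false)       // store as contract, not validator, code
	_ = size
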
diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go
index 1fb44a94fd..191733eed2 100644
--- a/core/vm/logger_test.go
+++ b/core/vm/logger_test.go
@@ -21,7 +21,6 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/internal/params"
)
@@ -29,10 +28,10 @@ type dummyContractRef struct {
calledForEach bool
}
-func (dummyContractRef) ReturnGas(*big.Int) {}
-func (dummyContractRef) Address() common.Address { return common.Address{} }
-func (dummyContractRef) Value() *big.Int { return new(big.Int) }
-func (dummyContractRef) SetCode(common.Hash, []byte) {}
+func (dummyContractRef) ReturnGas(*big.Int) {}
+func (dummyContractRef) Address() common.Address { return common.Address{} }
+func (dummyContractRef) Value() *big.Int { return new(big.Int) }
+func (dummyContractRef) SetCode(common.Hash, []byte, bool) {}
func (d *dummyContractRef) ForEachStorage(callback func(key, value common.Hash) bool) {
d.calledForEach = true
}
@@ -43,7 +42,7 @@ func (d *dummyContractRef) SetNonce(uint64) {}
func (d *dummyContractRef) Balance() *big.Int { return new(big.Int) }
type dummyStatedb struct {
- state.DB
+ StateDB
}
func (*dummyStatedb) GetRefund() uint64 { return 1337 }
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 8ebba4030c..d60883a970 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -22,8 +22,8 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/params"
@@ -103,7 +103,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.DB, error) {
setDefaults(cfg)
if cfg.State == nil {
- cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
}
var (
address = common.BytesToAddress([]byte("contract"))
@@ -112,7 +112,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.DB, error) {
)
cfg.State.CreateAccount(address)
// set the receiver's (the executing contract) code for execution.
- cfg.State.SetCode(address, code)
+ cfg.State.SetCode(address, code, false)
// Call the code with the given configuration.
ret, _, err := vmenv.Call(
sender,
@@ -133,7 +133,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
setDefaults(cfg)
if cfg.State == nil {
- cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
}
var (
vmenv = NewEnv(cfg)
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 39d655fddc..1ad8d19767 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -21,7 +21,7 @@ import (
"strings"
"testing"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
@@ -96,7 +96,7 @@ func TestExecute(t *testing.T) {
}
func TestCall(t *testing.T) {
- state, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ state, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
address := common.HexToAddress("0x0a")
state.SetCode(address, []byte{
byte(vm.PUSH1), 10,
@@ -105,7 +105,7 @@ func TestCall(t *testing.T) {
byte(vm.PUSH1), 32,
byte(vm.PUSH1), 0,
byte(vm.RETURN),
- })
+ }, false)
ret, _, err := Call(address, nil, &Config{State: state})
if err != nil {
@@ -152,13 +152,13 @@ func BenchmarkCall(b *testing.B) {
}
func benchmarkEVMCreate(bench *testing.B, code string) {
var (
- statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
)
statedb.CreateAccount(sender)
- statedb.SetCode(receiver, common.FromHex(code))
+ statedb.SetCode(receiver, common.FromHex(code), false)
runtimeConfig := Config{
Origin: sender,
State: statedb,
diff --git a/go.mod b/go.mod
index 18e16a0c05..4dd98958b3 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.19
require (
github.com/RoaringBitmap/roaring v1.2.3
- github.com/VictoriaMetrics/fastcache v1.5.7
+ github.com/VictoriaMetrics/fastcache v1.12.1
github.com/Workiva/go-datastructures v1.0.50
github.com/allegro/bigcache v1.2.1
github.com/aws/aws-sdk-go v1.34.0
@@ -13,9 +13,8 @@ require (
github.com/cespare/cp v1.1.1
github.com/coinbase/rosetta-sdk-go v0.7.0
github.com/davecgh/go-spew v1.1.1
- github.com/deckarep/golang-set v1.7.1
- github.com/ethereum/go-ethereum v1.9.25
- github.com/fjl/memsize v0.0.0-20180929194037-2a09253e352a // indirect
+ github.com/deckarep/golang-set v1.8.0
+ github.com/ethereum/go-ethereum v1.11.2
github.com/go-redis/redis/v8 v8.11.5
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
@@ -44,7 +43,7 @@ require (
github.com/rjeczalik/notify v0.9.2
github.com/rs/cors v1.7.0
github.com/rs/zerolog v1.18.0
- github.com/spf13/cobra v0.0.5
+ github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.14.0
github.com/stretchr/testify v1.8.1
@@ -53,7 +52,7 @@ require (
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee
go.uber.org/ratelimit v0.1.0
go.uber.org/zap v1.24.0
- golang.org/x/crypto v0.4.0
+ golang.org/x/crypto v0.6.0
golang.org/x/net v0.7.0 // indirect
golang.org/x/sync v0.1.0
golang.org/x/sys v0.5.0 // indirect
@@ -69,27 +68,35 @@ require (
require (
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
+ github.com/holiman/bloomfilter/v2 v2.0.3
github.com/ledgerwatch/erigon-lib v0.0.0-20221218022306-0f8fdd40c2db
github.com/ledgerwatch/log/v3 v3.6.0
github.com/libp2p/go-libp2p-core v0.20.1
+ github.com/olekukonko/tablewriter v0.0.5
)
require (
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
- github.com/BurntSushi/toml v0.3.1 // indirect
+ github.com/BurntSushi/toml v1.2.0 // indirect
+ github.com/DataDog/zstd v1.5.2 // indirect
github.com/OpenPeeDeeP/depguard v1.0.1 // indirect
github.com/VictoriaMetrics/metrics v1.23.0 // indirect
- github.com/aristanetworks/goarista v0.0.0-20190607111240-52c2a7864a08 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.2.2 // indirect
github.com/bombsimon/wsl/v2 v2.0.0 // indirect
- github.com/btcsuite/btcd v0.21.0-beta // indirect
+ github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
+ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cockroachdb/errors v1.9.1 // indirect
+ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
+ github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3 // indirect
+ github.com/cockroachdb/redact v1.1.3 // indirect
github.com/containerd/cgroups v1.0.4 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
+ github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/dgraph-io/badger v1.6.2 // indirect
github.com/dgraph-io/ristretto v0.0.3 // indirect
@@ -103,8 +110,10 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
+ github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-critic/go-critic v0.4.0 // indirect
github.com/go-lintpack/lintpack v0.5.2 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/go-toolsmith/astcast v1.0.0 // indirect
@@ -142,6 +151,8 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect
+ github.com/holiman/uint256 v1.2.1 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/ipfs/go-cid v0.3.2 // indirect
@@ -154,13 +165,12 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
- github.com/jmespath/go-jmespath v0.3.0 // indirect
- github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kisielk/gotool v1.0.0 // indirect
- github.com/klauspost/compress v1.15.12 // indirect
+ github.com/klauspost/compress v1.16.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.1 // indirect
github.com/koron/go-ssdp v0.0.3 // indirect
- github.com/kr/pretty v0.3.0 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
@@ -183,7 +193,7 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-pointer v0.0.1 // indirect
- github.com/mattn/go-runewidth v0.0.4 // indirect
+ github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.50 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
@@ -204,7 +214,6 @@ require (
github.com/multiformats/go-multistream v0.3.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect
- github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c // indirect
github.com/onsi/ginkgo/v2 v2.5.1 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
@@ -217,42 +226,44 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
- github.com/prometheus/tsdb v0.7.1 // indirect
+ github.com/prometheus/common v0.41.0 // indirect
+ github.com/prometheus/procfs v0.9.0 // indirect
+ github.com/prometheus/tsdb v0.10.0 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
- github.com/rogpeppe/go-internal v1.6.1 // indirect
+ github.com/rivo/uniseg v0.4.4 // indirect
+ github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d // indirect
- github.com/sirupsen/logrus v1.8.1 // indirect
+ github.com/shirou/gopsutil v3.21.11+incompatible // indirect
+ github.com/sirupsen/logrus v1.9.0 // indirect
github.com/sourcegraph/go-diff v0.5.1 // indirect
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.9.2 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
- github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
- github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 // indirect
- github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect
+ github.com/status-im/keycard-go v0.2.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201 // indirect
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e // indirect
+ github.com/tklauser/go-sysconf v0.3.11 // indirect
+ github.com/tklauser/numcpus v0.6.0 // indirect
github.com/tommy-muehle/go-mnd v1.1.1 // indirect
github.com/torquem-ch/mdbx-go v0.27.0 // indirect
- github.com/tyler-smith/go-bip39 v1.0.2 // indirect
+ github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ultraware/funlen v0.0.2 // indirect
github.com/ultraware/whitespace v0.0.4 // indirect
github.com/uudashr/gocognit v1.0.1 // indirect
github.com/valyala/fastrand v1.1.0 // indirect
github.com/valyala/histogram v1.2.0 // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
- github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 // indirect
+ github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.opencensus.io v0.24.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/dig v1.15.0 // indirect
go.uber.org/fx v1.18.2 // indirect
go.uber.org/multierr v1.8.0 // indirect
- golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
+ golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
@@ -268,4 +279,4 @@ require (
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 // indirect
)
-replace github.com/ethereum/go-ethereum => github.com/ethereum/go-ethereum v1.9.9
+replace github.com/ethereum/go-ethereum => github.com/ethereum/go-ethereum v1.11.2
diff --git a/go.sum b/go.sum
index 2ebb5e94a2..8b86a286c2 100644
--- a/go.sum
+++ b/go.sum
@@ -3,12 +3,14 @@ cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
@@ -19,14 +21,20 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -37,49 +45,57 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
-github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
+github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
+github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
+github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
+github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
+github.com/CloudyKit/jet/v6 v6.1.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
+github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
+github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
+github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
-github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY=
github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
+github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
+github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
-github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw=
-github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8=
+github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
+github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
+github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/VictoriaMetrics/metrics v1.23.0 h1:WzfqyzCaxUZip+OBbg1+lV33WChDSu4ssYII3nxtpeA=
github.com/VictoriaMetrics/metrics v1.23.0/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
github.com/Workiva/go-datastructures v1.0.50 h1:slDmfW6KCHcC7U+LP3DDBbm4fqTwZGn1beOFPfGaLvo=
github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
+github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -88,15 +104,29 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
-github.com/aristanetworks/goarista v0.0.0-20190607111240-52c2a7864a08 h1:UxoB3EYChE92EDNqRCS5vuE2ta4L/oKpeFaCK73KGvI=
-github.com/aristanetworks/goarista v0.0.0-20190607111240-52c2a7864a08/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
+github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.34.0 h1:brux2dRrlwCF5JhTL7MUT3WUwo9zfDHZZp3+g3Mvlmo=
github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo=
+github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y=
+github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8=
+github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4=
+github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM=
+github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw=
github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -106,18 +136,27 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bits-and-blooms/bitset v1.2.2 h1:J5gbX05GpMdBjCvQ9MteIg2KKDExr7DrgK+Yc15FvIk=
github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
github.com/bombsimon/wsl/v2 v2.0.0 h1:+Vjcn+/T5lSrO8Bjzhk4v14Un/2UyCA1E3V5j9nwTkQ=
github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M=
github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
+github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
+github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
+github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 h1:KdUfX2zKommPRa+PD0sWZUyXe9w277ABlgELO7H04IM=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@@ -132,6 +171,7 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY=
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
@@ -141,17 +181,17 @@ github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
+github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -159,8 +199,29 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4=
+github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
+github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM=
+github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac=
+github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
+github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
+github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM=
+github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3 h1:S4re5MXHfznkOlgkgUfh9ptgaG2esdH95IuJWwP0fM0=
+github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3/go.mod h1:9lRMC4XN3/BLPtIp6kAKwIaHu369NOf2rMucPzipz50=
+github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
+github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
+github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
+github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
+github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/coinbase/rosetta-sdk-go v0.7.0 h1:lmTO/JEpCvZgpbkOITL95rA80CPKb5CtMzLaqF2mCNg=
github.com/coinbase/rosetta-sdk-go v0.7.0/go.mod h1:7nD3oBPIiHqhRprqvMgPoGxe/nyq3yftRmpsy29coWE=
+github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c/go.mod h1:CkbdF9hbRidRJYMRzmfX8TMOr95I2pYXRHF18MzRrvA=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
@@ -176,49 +237,75 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
-github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
-github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
-github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
+github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
+github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
+github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
+github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
+github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
+github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
+github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=
github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
+github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk=
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI=
github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
+github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
+github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
+github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
+github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
+github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 h1:kgvzE5wLsLa7XKfV85VZl40QXaMCaeFtHpPwJ8fhotY=
+github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOGre+TwBsqPV0IH0Pk73e4PXJOeNDboGs=
+github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
+github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
-github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
+github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
@@ -230,34 +317,59 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ethereum/go-ethereum v1.9.9 h1:jnoBvjH8aMH++iH14XmiJdAsnRcmZUM+B5fsnEZBVE0=
-github.com/ethereum/go-ethereum v1.9.9/go.mod h1:a9TqabFudpDu1nucId+k9S8R9whYaHnGBLKFouA5EAo=
-github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
+github.com/ethereum/go-ethereum v1.11.2 h1:z/luyejbevDCAMUUiu0rc80dxJxOnpoG58k5o0tSawc=
+github.com/ethereum/go-ethereum v1.11.2/go.mod h1:DuefStAgaxoaYGLR0FueVcVbehmn5n9QUcVrMCuOvuc=
+github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
-github.com/fjl/memsize v0.0.0-20180929194037-2a09253e352a h1:1znxn4+q2MrEdTk1eCk6KIV3muTYVclBIB6CTVR/zBc=
-github.com/fjl/memsize v0.0.0-20180929194037-2a09253e352a/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
+github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
+github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
+github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8=
+github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
+github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I=
+github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
+github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
+github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
+github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
+github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
+github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
+github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
+github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
github.com/go-critic/go-critic v0.4.0 h1:sXD3pix0wDemuPuSlrXpJNNYXlUiKiysLrtPVQmxkzI=
github.com/go-critic/go-critic v0.4.0/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -266,6 +378,7 @@ github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-lintpack/lintpack v0.5.2 h1:DI5mA3+eKdWeJ40nU4d6Wc26qmdG8RCi/btYq0TuRN0=
github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@@ -274,9 +387,22 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
+github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
+github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
@@ -306,6 +432,14 @@ github.com/go-toolsmith/typep v1.0.0 h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2X
github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
+github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
+github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
+github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
@@ -313,11 +447,22 @@ github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
+github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
+github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
+github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -332,11 +477,11 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
@@ -350,10 +495,13 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
@@ -375,6 +523,7 @@ github.com/golangci/golangci-lint v1.22.2 h1:iaihss3Tf6NvZVjun3lHimKSgofPV1+FqE/
github.com/golangci/golangci-lint v1.22.2/go.mod h1:2Bj42k6hPQFTRxkDb7S3TQ+EsnumZXOmIYNqlQrp0FI=
github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI=
github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
+github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
@@ -387,10 +536,12 @@ github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1
github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -402,10 +553,15 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
@@ -422,10 +578,13 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM=
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -436,18 +595,21 @@ github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE0
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
+github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
+github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
+github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
@@ -466,32 +628,67 @@ github.com/harmony-one/taggedrlp v0.1.4 h1:RZ+qy0VCzT+d/mTfq23gH3an5tSvxOhg6AddL
github.com/harmony-one/taggedrlp v0.1.4/go.mod h1:osO5TRXLKdgCP+oj2J9qfqhywMOOA+4nP5q+o8nDSYA=
github.com/harmony-one/vdf v0.0.0-20190924175951-620379da8849 h1:rMY4jLAen3pMTq9KO7kSXzuMaicnOHP5n1MgpA1T6G4=
github.com/harmony-one/vdf v0.0.0-20190924175951-620379da8849/go.mod h1:EgNU7X5HLNBBho+OqCm1A1NrpD6xb1SHfi9pMCYaKKw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
+github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
+github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU=
+github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
+github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
+github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/holiman/uint256 v1.2.1 h1:XRtyuda/zw2l+Bq/38n5XUoEF72aSOu/77Thd9pPp2o=
+github.com/holiman/uint256 v1.2.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
+github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
+github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
+github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
+github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
+github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
+github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
+github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
+github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
@@ -514,7 +711,15 @@ github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04=
github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4=
-github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
+github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
+github.com/iris-contrib/httpexpect/v2 v2.3.1/go.mod h1:ICTf89VBKSD3KB0fsyyHviKF8G8hyepP0dOXJPWz3T0=
+github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
+github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
+github.com/iris-contrib/jade v1.1.4/go.mod h1:EDqR+ur9piDl6DUgs6qRrlfzmlx/D5UybogqrXvJTBE=
+github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
+github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
+github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
@@ -522,28 +727,58 @@ github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABo
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
+github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
+github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw=
-github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
+github.com/kataras/blocks v0.0.6/go.mod h1:UK+Iwk0Oxpc0GdoJja7sEildotAUKK1LYeYcVF0COWc=
+github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
+github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
+github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
+github.com/kataras/golog v0.1.7/go.mod h1:jOSQ+C5fUqsNSwurB/oAHq1IFSb0KI3l6GMa7xB6dZA=
+github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
+github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
+github.com/kataras/iris/v12 v12.2.0-beta5/go.mod h1:q26aoWJ0Knx/00iPKg5iizDK7oQQSPjbD8np0XDh6dc=
+github.com/kataras/jwt v0.1.8/go.mod h1:Q5j2IkcIHnfwy+oNY3TVWuEBJNw0ADgCcXK9CaZwV4o=
+github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
+github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
+github.com/kataras/neffos v0.0.20/go.mod h1:srdvC/Uo8mgrApWW0AYtiiLgMbyNPf69qPsd2FhE6MQ=
+github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
+github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
+github.com/kataras/pio v0.0.10/go.mod h1:gS3ui9xSD+lAUpbYnjOGiQyY7sUMJO+EHpiRzhtZ5no=
+github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI=
+github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
+github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4=
+github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -552,14 +787,27 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
-github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
+github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
+github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.1 h1:U33DW0aiEj633gHYw3LoDNfkDiYnE5Q8M/TKJn2f2jI=
github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
@@ -570,19 +818,30 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
+github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
+github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
+github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
+github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
+github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/ledgerwatch/erigon-lib v0.0.0-20221218022306-0f8fdd40c2db h1:wV9YkkYQArbUdTdlPxXi5BW6H9ovYbyUT8Af7foetvQ=
github.com/ledgerwatch/erigon-lib v0.0.0-20221218022306-0f8fdd40c2db/go.mod h1:5GCPOzxAshLF7f0wrMZu2Bdq0qqIiMcIubM9n+25gGo=
github.com/ledgerwatch/log/v3 v3.6.0 h1:JBUSK1epPyutUrz7KYDTcJtQLEHnehECRpKbM1ugy5M=
github.com/ledgerwatch/log/v3 v3.6.0/go.mod h1:L+Sp+ma/h205EdCjviZECjGEvYUYEyXSdiuHNZzg+xQ=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
@@ -623,12 +882,18 @@ github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQ
github.com/lucas-clemente/quic-go v0.31.0 h1:MfNp3fk0wjWRajw6quMFA3ap1AVtlU+2mtwmbVogB2M=
github.com/lucas-clemente/quic-go v0.31.0/go.mod h1:0wFbizLgYzqHqtlyxyCaJKlE7bYgE6JQ+54TLd/Dq2g=
github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailgun/raymond/v2 v2.0.46/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE=
github.com/marten-seemann/qtls-go1-18 v0.1.3 h1:R4H2Ks8P6pAtUagjFty2p7BVHn3XiwDAl7TTQf5h7TI=
github.com/marten-seemann/qtls-go1-18 v0.1.3/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
@@ -639,16 +904,23 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs
github.com/marten-seemann/webtransport-go v0.4.1 h1:8Ir7OoAvtp79yxQpa3foTKIPuoH+0eKpisHObJyu9Sk=
github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb h1:RHba4YImhrUVQDHUCe2BNSOz4tVy2yGyXhvYDvxGgeE=
github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
-github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
+github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
@@ -656,13 +928,25 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0=
github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
-github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
+github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
+github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
+github.com/mediocregopher/radix/v3 v3.8.0/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
+github.com/microcosm-cc/bluemonday v1.0.20/go.mod h1:yfBmMi8mxvaZut3Yytv+jTXRY8mxyjJ0/kQBTElld50=
+github.com/microcosm-cc/bluemonday v1.0.21/go.mod h1:ytNkv4RrDrLJ2pqlsSI46O6IVXmZOBBD4SaJyDwwTkM=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
@@ -673,16 +957,28 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
+github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
+github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -690,11 +986,14 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
+github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
@@ -729,22 +1028,37 @@ github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
+github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
+github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4=
+github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nats.go v1.15.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/neilotoole/errgroup v0.1.5/go.mod h1:Q2nLGf+594h0CLBs/Mbg6qOr7GtqDK7C2S41udRnToE=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk=
-github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
@@ -762,21 +1076,29 @@ github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5h
github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
-github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
+github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
github.com/pingcap/check v0.0.0-20211026125417-57bd13f7b5f0 h1:HVl5539r48eA+uDuX/ziBmQCxzT1pGrzWbKuXT46Bq0=
github.com/pingcap/check v0.0.0-20211026125417-57bd13f7b5f0/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
@@ -793,21 +1115,27 @@ github.com/pingcap/kvproto v0.0.0-20220106070556-3fa8fa04f898/go.mod h1:IOdRDPLy
github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee h1:VO2t6IBpfvW34TdtD/G10VvnGqjLic1jzOuHjUb5VqM=
github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls=
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
@@ -815,17 +1143,21 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
+github.com/prometheus/common v0.41.0 h1:npo01n6vUlRViIj5fgwiK8vlNIh8bnoxqh3gypKsyAw=
+github.com/prometheus/common v0.41.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -833,47 +1165,67 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
+github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
+github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
+github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
+github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=
github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
-github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
-github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.18.0 h1:CbAm3kP2Tptby1i9sYy2MGRg0uxIN9cyDb59Ys7W8z8=
github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d h1:BzRvVq1EHuIjxpijCEKpAxzKUUMurOQ4sknehIATRh8=
github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do=
github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY=
+github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
+github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
+github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ=
+github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041 h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
@@ -884,6 +1236,7 @@ github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9A
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
@@ -893,17 +1246,22 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1l
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs=
+github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/go-diff v0.5.1 h1:gO6i5zugwzo1RVTvgvfwCOSVegNuvnNi6bAD1QCmkHs=
@@ -912,17 +1270,22 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
-github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -932,20 +1295,19 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
-github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
-github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
-github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
-github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
-github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
-github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
+github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
+github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -953,16 +1315,22 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
-github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
+github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/tdewolff/minify/v2 v2.12.1/go.mod h1:p5pwbvNs1ghbFED/ZW1towGsnnWwzvM8iz8l0eURi9g=
+github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk=
+github.com/tdewolff/parse/v2 v2.6.3/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
+github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
+github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/tidwall/gjson v1.6.7/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI=
github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -973,57 +1341,99 @@ github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201 h1:7h/Oi4Zw6eGCeXh4
github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201/go.mod h1:fEvI5fhAuJn1Fn87VJF8ByE9Vc16EzWGoePZB21/nL8=
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q=
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
+github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
+github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
+github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
+github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
+github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
+github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
+github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tommy-muehle/go-mnd v1.1.1 h1:4D0wuPKjOTiK2garzuPGGvm4zZ/wLYDOH8TJSABC7KU=
github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
github.com/torquem-ch/mdbx-go v0.27.0 h1:FquhRvKL2zweMdk1R6UdOx3h6DiHgJ0+P9yQvSouURI=
github.com/torquem-ch/mdbx-go v0.27.0/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E=
github.com/twmb/murmur3 v1.1.3/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
-github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
-github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8=
github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
+github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
+github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ultraware/funlen v0.0.2 h1:Av96YVBwwNSe4MLR7iI/BIa3VyI7/djnto/pK3Uxbdo=
github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg=
github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
+github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
+github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
+github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
github.com/uudashr/gocognit v1.0.1 h1:MoG2fZ0b/Eo7NXoIwCVFLG5JED3qgQz5/NEE+rOsjPs=
github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
+github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
+github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8=
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
+github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/vmihailenco/msgpack/v5 v5.1.4/go.mod h1:C5gboKD0TJPqWDTVTtrQNfRbiBwHZGo8UTqP/9/XvLI=
+github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
-github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk=
-github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
+github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
+github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
+github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/ybbus/jsonrpc v2.1.2+incompatible/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE=
+github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0=
+github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
+github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
+github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
+github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -1032,6 +1442,7 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
@@ -1078,6 +1489,7 @@ go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -1086,19 +1498,38 @@ golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
-golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
@@ -1108,8 +1539,12 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20221205204356-47842c84f3db h1:D/cFflL63o2KSLJIwjlcIt8PR064j/xsmdEJL/YvY/o=
-golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
+golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
+golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1136,22 +1571,30 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -1161,6 +1604,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1180,19 +1624,34 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1206,8 +1665,12 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1220,17 +1683,22 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1244,9 +1712,10 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1255,6 +1724,7 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1275,17 +1745,27 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1294,16 +1774,40 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1315,12 +1819,19 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE=
golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1330,8 +1841,10 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -1339,6 +1852,7 @@ golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -1360,14 +1874,17 @@ golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113232020-e2727e816f5a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191126055441-b0650ceb63d9/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1398,14 +1915,26 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
@@ -1428,6 +1957,9 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1437,6 +1969,7 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -1446,6 +1979,7 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -1454,6 +1988,7 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
@@ -1479,10 +2014,17 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y=
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -1506,6 +2048,7 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
@@ -1522,33 +2065,38 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
-gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
-gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1560,11 +2108,11 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1578,6 +2126,7 @@ honnef.co/go/tools v0.0.1-2020.1.5 h1:nI5egYTGJakVyOryqLs1cQO5dO0ksin5XXs2pspk75
honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
+moul.io/http2curl v1.0.0/go.mod h1:f6cULg+e4Md/oW1cYmwW4IWQOVl2lGbmCNGOHvzX2kE=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
@@ -1585,8 +2134,10 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jC
mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f h1:Cq7MalBHYACRd6EesksG1Q8EoIAKOsiZviGKbOLIej4=
mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c=
diff --git a/hmy/blockchain.go b/hmy/blockchain.go
index f72be5d830..45bd099f1f 100644
--- a/hmy/blockchain.go
+++ b/hmy/blockchain.go
@@ -211,11 +211,21 @@ func (hmy *Harmony) CurrentBlock() *types.Block {
return types.NewBlockWithHeader(hmy.BlockChain.CurrentHeader())
}
-// GetBlock ...
+// CurrentHeader returns the current header from the local chain.
+func (hmy *Harmony) CurrentHeader() *block.Header {
+ return hmy.BlockChain.CurrentHeader()
+}
+
+// GetBlock returns the block with the given hash.
func (hmy *Harmony) GetBlock(ctx context.Context, hash common.Hash) (*types.Block, error) {
return hmy.BlockChain.GetBlockByHash(hash), nil
}
+// GetHeader returns the header with the given hash.
+func (hmy *Harmony) GetHeader(ctx context.Context, hash common.Hash) (*block.Header, error) {
+ return hmy.BlockChain.GetHeaderByHash(hash), nil
+}
+
// GetCurrentBadBlocks ..
func (hmy *Harmony) GetCurrentBadBlocks() []core.BadBlock {
return hmy.BlockChain.BadBlocks()
@@ -362,11 +372,11 @@ func (hmy *Harmony) GetLogs(ctx context.Context, blockHash common.Hash, isEth bo
return nil, errors.New("Missing block data")
}
txns := block.Transactions()
- for i, _ := range receipts {
+ for i := range receipts {
if i < len(txns) {
ethHash := txns[i].ConvertToEth().Hash()
receipts[i].TxHash = ethHash
- for j, _ := range receipts[i].Logs {
+ for j := range receipts[i].Logs {
// Override log txHash with receipt's
receipts[i].Logs[j].TxHash = ethHash
}
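
The loop rewrite above is the standard gofmt -s simplification: when only the index is needed, `for i := range xs` replaces `for i, _ := range xs`. A minimal standalone sketch of the same shape, using a toy receipt type rather than the real ones:

    package main

    import "fmt"

    type receipt struct{ txHash string }

    func main() {
        receipts := []receipt{{"0xaa"}, {"0xbb"}}
        txns := []string{"0xAA", "0xBB"}
        // Index-only iteration: no blank identifier needed.
        for i := range receipts {
            if i < len(txns) {
                receipts[i].txHash = txns[i]
            }
        }
        fmt.Println(receipts) // [{0xAA} {0xBB}]
    }
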
diff --git a/hmy/bloombits.go b/hmy/bloombits.go
index aaef781cac..461372de23 100644
--- a/hmy/bloombits.go
+++ b/hmy/bloombits.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core/bloombits"
- ethRawDB "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/core"
@@ -72,7 +71,7 @@ func NewBloomIndexer(db core.Chain, size, confirms uint64) *core.ChainIndexer {
db: db.ChainDb(),
size: size,
}
- table := ethRawDB.NewTable(db.ChainDb(), string(rawdb.BloomBitsIndexPrefix))
+ table := rawdb.NewTable(db.ChainDb(), string(rawdb.BloomBitsIndexPrefix))
return core.NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits")
}
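
rawdb.NewTable wraps the chain database so that every bloombits-index key is namespaced under BloomBitsIndexPrefix; switching to the harmony-local rawdb removes the last direct go-ethereum rawdb import from this file. A rough sketch of the prefixing idea, with a toy map-backed store standing in for the real implementation:

    package main

    import "fmt"

    // table namespaces every key of a shared store under a fixed prefix,
    // so several indexes can live in one database without key collisions.
    type table struct {
        prefix string
        db     map[string][]byte
    }

    func (t *table) Put(key, val []byte) { t.db[t.prefix+string(key)] = val }

    func (t *table) Get(key []byte) []byte { return t.db[t.prefix+string(key)] }

    func main() {
        db := map[string][]byte{}
        bloom := &table{prefix: "iB", db: db} // "iB" stands in for BloomBitsIndexPrefix
        bloom.Put([]byte("section-0"), []byte{0x01})
        fmt.Println(bloom.Get([]byte("section-0"))) // [1]
    }
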
diff --git a/hmy/hmy.go b/hmy/hmy.go
index 58e8d59edf..fd01f123c6 100644
--- a/hmy/hmy.go
+++ b/hmy/hmy.go
@@ -187,16 +187,20 @@ func (hmy *Harmony) ProtocolVersion() int {
return proto.ProtocolVersion
}
-// IsLeader exposes if node is currently leader
+// IsLeader reports whether this node is currently the leader.
func (hmy *Harmony) IsLeader() bool {
return hmy.NodeAPI.IsCurrentlyLeader()
}
-// GetNodeMetadata ..
+// GetNodeMetadata returns the node metadata.
func (hmy *Harmony) GetNodeMetadata() commonRPC.NodeMetadata {
- header := hmy.CurrentBlock().Header()
- cfg := nodeconfig.GetShardConfig(header.ShardID())
- var blockEpoch *uint64
+ var (
+ header = hmy.CurrentHeader()
+ cfg = nodeconfig.GetShardConfig(header.ShardID())
+ blockEpoch *uint64
+ blsKeys []string
+ c = commonRPC.C{}
+ )
if header.ShardID() == shard.BeaconChainShardID {
sched := shard.Schedule.InstanceForEpoch(header.Epoch())
@@ -204,13 +208,12 @@ func (hmy *Harmony) GetNodeMetadata() commonRPC.NodeMetadata {
blockEpoch = &b
}
- blsKeys := []string{}
if cfg.ConsensusPriKey != nil {
for _, key := range cfg.ConsensusPriKey {
blsKeys = append(blsKeys, key.Pub.Bytes.Hex())
}
}
- c := commonRPC.C{}
+
c.TotalKnownPeers, c.Connected, c.NotConnected = hmy.NodeAPI.PeerConnectivity()
syncPeers := hmy.NodeAPI.SyncPeers()
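
Besides grouping the declarations, GetNodeMetadata now reads hmy.CurrentHeader() directly instead of hmy.CurrentBlock().Header(); per the blockchain.go hunk above, CurrentBlock wraps the current header in a fresh block via types.NewBlockWithHeader, so the old path allocated a throwaway wrapper just to read header fields. A toy analogue with hypothetical types, not the real block/header:

    package main

    import "fmt"

    type header struct{ shardID uint32 }

    type blockWrap struct{ h *header }

    func (b *blockWrap) Header() *header { return b.h }

    func currentHeader() *header   { return &header{shardID: 2} }
    func currentBlock() *blockWrap { return &blockWrap{h: currentHeader()} }

    func main() {
        before := currentBlock().Header().shardID // extra wrapper allocation
        after := currentHeader().shardID          // direct read
        fmt.Println(before, after)
    }
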
diff --git a/hmy/tracer.go b/hmy/tracer.go
index 699f784297..2529e7a2f6 100644
--- a/hmy/tracer.go
+++ b/hmy/tracer.go
@@ -122,7 +122,7 @@ func (hmy *Harmony) TraceChain(ctx context.Context, start, end *types.Block, con
}
}
- statedb, err := state.New(start.Root(), database)
+ statedb, err := state.New(start.Root(), database, nil)
if err != nil {
// If the starting state is missing, allow some number of blocks to be executed
reexec := defaultTraceReexec
@@ -135,7 +135,7 @@ func (hmy *Harmony) TraceChain(ctx context.Context, start, end *types.Block, con
if start == nil {
break
}
- if statedb, err = state.New(start.Root(), database); err == nil {
+ if statedb, err = state.New(start.Root(), database, nil); err == nil {
break
}
}
@@ -644,7 +644,7 @@ func (hmy *Harmony) ComputeStateDB(block *types.Block, reexec uint64) (*state.DB
if block == nil {
break
}
- if statedb, err = state.New(block.Root(), database); err == nil {
+ if statedb, err = state.New(block.Root(), database, nil); err == nil {
break
}
}
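
state.New gains a third argument at every call site, matching the upstream go-ethereum signature in which a snapshot tree may be supplied to speed up state reads; passing nil opts out. That reading of the parameter is an assumption, since the patch only shows the extra nil. A stubbed sketch of the call shape:

    package main

    import "fmt"

    type hash [32]byte

    type database struct{}

    type snapshotTree struct{}

    type stateDB struct{ snaps *snapshotTree }

    // newState mirrors the widened constructor shape state.New(root, db, snaps);
    // the real implementation lives in harmony's core/state package.
    func newState(root hash, db *database, snaps *snapshotTree) (*stateDB, error) {
        return &stateDB{snaps: snaps}, nil // nil snaps: plain trie-backed reads
    }

    func main() {
        statedb, err := newState(hash{}, &database{}, nil)
        fmt.Println(statedb.snaps == nil, err) // true <nil>
    }
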
diff --git a/hmy/tracers/block_tracer.go b/hmy/tracers/block_tracer.go
index 12391c6fb9..daaf4171d0 100644
--- a/hmy/tracers/block_tracer.go
+++ b/hmy/tracers/block_tracer.go
@@ -353,7 +353,7 @@ func (jst *ParityBlockTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode,
ret := stackPeek(0)
if ret.Sign() != 0 {
call.to = common.BigToAddress(ret)
- call.output = env.StateDB.GetCode(call.to)
+ call.output = env.StateDB.GetCode(call.to, false)
} else if call.err == nil {
call.err = errors.New("internal failure")
}
diff --git a/hmy/tracers/tracer.go b/hmy/tracers/tracer.go
index bc349a5147..69d52e3fdc 100644
--- a/hmy/tracers/tracer.go
+++ b/hmy/tracers/tracer.go
@@ -210,7 +210,7 @@ func (dw *dbWrapper) pushObject(vm *duktape.Context) {
// Push the wrapper for statedb.GetCode
vm.PushGoFunction(func(ctx *duktape.Context) int {
- code := dw.db.GetCode(common.BytesToAddress(popSlice(ctx)))
+ code := dw.db.GetCode(common.BytesToAddress(popSlice(ctx)), false)
ptr := ctx.PushFixedBuffer(len(code))
copy(makeSlice(ptr, uint(len(code))), code)
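
The added boolean on StateDB.GetCode is a fork-specific extension; judging by the ValidatorCodeFix work elsewhere in this diff, it appears to distinguish validator wrapper code from plain contract bytecode, with false preserving the old behaviour for tracers. The parameter name below is an assumption, not taken from the source:

// Assumed shape of the fork's accessor:
//   GetCode(addr common.Address, isValidatorCode bool) []byte
// Tracers want the EVM-visible bytecode, hence the hardcoded false.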
diff --git a/internal/chain/engine_test.go b/internal/chain/engine_test.go
index 463a0afdb6..5a842088c8 100644
--- a/internal/chain/engine_test.go
+++ b/internal/chain/engine_test.go
@@ -19,7 +19,7 @@ import (
types2 "github.com/harmony-one/harmony/staking/types"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/params"
@@ -228,7 +228,7 @@ func (maker *shardSlotMaker) makeSlot() shard.Slot {
func makeTestStateDB() *state.DB {
db := state.NewDatabase(rawdb.NewMemoryDatabase())
- sdb, err := state.New(common.Hash{}, db)
+ sdb, err := state.New(common.Hash{}, db, nil)
if err != nil {
panic(err)
}
diff --git a/internal/configs/harmony/harmony.go b/internal/configs/harmony/harmony.go
index d4e8df4b00..32161d1deb 100644
--- a/internal/configs/harmony/harmony.go
+++ b/internal/configs/harmony/harmony.go
@@ -1,6 +1,7 @@
package harmony
import (
+ "fmt"
"reflect"
"strings"
"time"
@@ -181,8 +182,13 @@ type TxPoolConfig struct {
AllowedTxsFile string
RosettaFixFile string
AccountSlots uint64
+ AccountQueue uint64
+ GlobalQueue uint64
LocalAccountsFile string
GlobalSlots uint64
+ Lifetime time.Duration
+ PriceLimit PriceLimit
+ PriceBump uint64
}
type PprofConfig struct {
@@ -318,3 +324,26 @@ type StagedSyncConfig struct {
UseMemDB bool // it uses memory by default. set it to false to use disk
LogProgress bool // log the full sync progress in console
}
+
+type PriceLimit int64
+
+func (s *PriceLimit) UnmarshalTOML(data interface{}) error {
+ switch v := data.(type) {
+ case float64:
+ *s = PriceLimit(v)
+ case int64:
+ *s = PriceLimit(v)
+ case PriceLimit:
+ *s = v
+ default:
+ return fmt.Errorf("PriceLimit.UnmarshalTOML: unsupported type %T", data)
+ }
+ return nil
+}
+
+func (s PriceLimit) MarshalTOML() ([]byte, error) {
+ if s > 1_000_000_000 {
+ return []byte(fmt.Sprintf("%de9", s/1_000_000_000)), nil
+ }
+ return []byte(fmt.Sprintf("%d", s)), nil
+}
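
One caveat in MarshalTOML above: the division is integer division, so a PriceLimit above 1e9 that is not a whole multiple of 1e9 truncates on write and does not round-trip. A self-contained illustration:

package main

import "fmt"

func main() {
	s := int64(1_500_000_000) // 1.5e9
	// Same arithmetic as PriceLimit.MarshalTOML: integer division yields 1,
	// so the value serializes as "1e9" and reads back as 1_000_000_000.
	fmt.Printf("%de9\n", s/1_000_000_000) // prints: 1e9
}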
diff --git a/internal/configs/harmony/harmony_test.go b/internal/configs/harmony/harmony_test.go
index fef7cac9df..f51e2ef2f7 100644
--- a/internal/configs/harmony/harmony_test.go
+++ b/internal/configs/harmony/harmony_test.go
@@ -6,7 +6,9 @@ import (
"time"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
+ "github.com/pelletier/go-toml"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestToRPCServerConfig(t *testing.T) {
@@ -79,3 +81,38 @@ func TestToRPCServerConfig(t *testing.T) {
})
}
}
+
+var data = `big = 100e9
+small = 100
+zero = 0
+`
+
+func TestPriceLimit_UnmarshalTOML(t *testing.T) {
+ type V struct {
+ Big PriceLimit `toml:"big"`
+ Small PriceLimit `toml:"small"`
+ Zero PriceLimit `toml:"zero"`
+ }
+ var v V
+ require.NoError(t, toml.Unmarshal([]byte(data), &v))
+
+ require.Equal(t, PriceLimit(100e9), v.Big)
+ require.Equal(t, PriceLimit(100), v.Small)
+ require.Equal(t, PriceLimit(0), v.Zero)
+}
+
+func TestPriceLimit_MarshalTOML(t *testing.T) {
+ type V struct {
+ Big PriceLimit `toml:"big"`
+ Small PriceLimit `toml:"small"`
+ Zero PriceLimit `toml:"zero"`
+ }
+ v := V{
+ Big: PriceLimit(100e9),
+ Small: PriceLimit(100),
+ Zero: PriceLimit(0),
+ }
+ e, err := toml.Marshal(v)
+ require.NoError(t, err)
+ require.Equal(t, data, string(e))
+}
diff --git a/internal/configs/sharding/instance.go b/internal/configs/sharding/instance.go
index cf5919a685..90076e595b 100644
--- a/internal/configs/sharding/instance.go
+++ b/internal/configs/sharding/instance.go
@@ -36,9 +36,11 @@ type instance struct {
blocksPerEpoch uint64
slotsLimit int // HIP-16: The absolute number of maximum effective slots per shard limit for each validator. 0 means no limit.
allowlist Allowlist
- feeCollector ethCommon.Address
+ feeCollectors FeeCollectors
}
+type FeeCollectors map[ethCommon.Address]numeric.Dec
+
// NewInstance creates and validates a new sharding configuration based
// upon given parameters.
func NewInstance(
@@ -46,7 +48,7 @@ func NewInstance(
hmyAccounts []genesis.DeployAccount,
fnAccounts []genesis.DeployAccount,
allowlist Allowlist,
- feeCollector ethCommon.Address,
+ feeCollectors FeeCollectors,
reshardingEpoch []*big.Int, blocksE uint64,
) (Instance, error) {
if numShards < 1 {
@@ -81,6 +83,17 @@ func NewInstance(
"total voting power of harmony nodes should be within [0, 1]",
)
}
+ if len(feeCollectors) > 0 {
+ total := numeric.ZeroDec() // ZeroDec returns a fresh copy, safe to accumulate into
+ for _, v := range feeCollectors {
+ total = total.Add(v)
+ }
+ if !total.Equal(numeric.OneDec()) {
+ return nil, errors.Errorf(
+ "total fee collection percentage should be 1, but got %v", total,
+ )
+ }
+ }
return instance{
numShards: numShards,
@@ -94,7 +107,7 @@ func NewInstance(
reshardingEpoch: reshardingEpoch,
blocksPerEpoch: blocksE,
slotsLimit: slotsLimit,
- feeCollector: feeCollector,
+ feeCollectors: feeCollectors,
}, nil
}
@@ -108,13 +121,13 @@ func MustNewInstance(
hmyAccounts []genesis.DeployAccount,
fnAccounts []genesis.DeployAccount,
allowlist Allowlist,
- feeCollector ethCommon.Address,
+ feeCollectors FeeCollectors,
reshardingEpoch []*big.Int, blocksPerEpoch uint64,
) Instance {
slotsLimit := int(float32(numNodesPerShard-numHarmonyOperatedNodesPerShard) * slotsLimitPercent)
sc, err := NewInstance(
numShards, numNodesPerShard, numHarmonyOperatedNodesPerShard, slotsLimit, harmonyVotePercent,
- hmyAccounts, fnAccounts, allowlist, feeCollector, reshardingEpoch, blocksPerEpoch,
+ hmyAccounts, fnAccounts, allowlist, feeCollectors, reshardingEpoch, blocksPerEpoch,
)
if err != nil {
panic(err)
@@ -137,9 +150,9 @@ func (sc instance) SlotsLimit() int {
return sc.slotsLimit
}
-// FeeCollector returns a address to receive txn fees
-func (sc instance) FeeCollector() ethCommon.Address {
- return sc.feeCollector
+// FeeCollectors returns a mapping from collector address to its decimal share of the fee.
+func (sc instance) FeeCollectors() FeeCollectors {
+ return sc.feeCollectors
}
// HarmonyVotePercent returns total percentage of voting power harmony nodes possess.
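
Any split is accepted as long as the Dec shares sum to exactly one; otherwise NewInstance errors and MustNewInstance panics. A minimal sketch reusing addresses that appear later in this diff:

// Shares must add up to exactly numeric.OneDec().
collectors := FeeCollectors{
	mustAddress("0xb728AEaBF60fD01816ee9e756c18bc01dC91ba5D"): numeric.MustNewDecFromStr("0.75"),
	mustAddress("0xb41B6B8d9e68fD44caC8342BC2EEf4D59531d7d7"): numeric.MustNewDecFromStr("0.25"),
}
total := numeric.ZeroDec()
for _, share := range collectors {
	total = total.Add(share)
}
// total.Equal(numeric.OneDec()) holds, so validation passes.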
diff --git a/internal/configs/sharding/localnet.go b/internal/configs/sharding/localnet.go
index a31c66f0da..00ea1a7ac2 100644
--- a/internal/configs/sharding/localnet.go
+++ b/internal/configs/sharding/localnet.go
@@ -14,8 +14,12 @@ import (
// configuration schedule.
var LocalnetSchedule localnetSchedule
-// privatekey: 0x1111111111111111111111111111111111111111111111111111111111111111
-var feeCollectorLocalnet = mustAddress("0x19E7E376E7C213B7E7e7e46cc70A5dD086DAff2A")
+var feeCollectorsLocalnet = FeeCollectors{
+ // pk: 0x1111111111111111111111111111111111111111111111111111111111111111
+ mustAddress("0x19E7E376E7C213B7E7e7e46cc70A5dD086DAff2A"): numeric.MustNewDecFromStr("0.5"),
+ // pk: 0x2222222222222222222222222222222222222222222222222222222222222222
+ mustAddress("0x1563915e194D8CfBA1943570603F7606A3115508"): numeric.MustNewDecFromStr("0.5"),
+}
type localnetSchedule struct{}
@@ -152,10 +156,10 @@ var (
big.NewInt(0), big.NewInt(localnetV1Epoch), params.LocalnetChainConfig.StakingEpoch, params.LocalnetChainConfig.TwoSecondsEpoch,
}
// Number of shards, how many slots on each, how many slots owned by Harmony
- localnetV0 = MustNewInstance(2, 7, 5, 0, numeric.OneDec(), genesis.LocalHarmonyAccounts, genesis.LocalFnAccounts, emptyAllowlist, emptyAddress, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpochOld())
- localnetV1 = MustNewInstance(2, 8, 5, 0, numeric.OneDec(), genesis.LocalHarmonyAccountsV1, genesis.LocalFnAccountsV1, emptyAllowlist, emptyAddress, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpochOld())
- localnetV2 = MustNewInstance(2, 9, 6, 0, numeric.MustNewDecFromStr("0.68"), genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, emptyAllowlist, emptyAddress, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpochOld())
- localnetV3 = MustNewInstance(2, 9, 6, 0, numeric.MustNewDecFromStr("0.68"), genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, emptyAllowlist, emptyAddress, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpoch())
- localnetV3_1 = MustNewInstance(2, 9, 6, 0, numeric.MustNewDecFromStr("0.68"), genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, emptyAllowlist, emptyAddress, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpoch())
- localnetV3_2 = MustNewInstance(2, 9, 6, 0, numeric.MustNewDecFromStr("0.68"), genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, emptyAllowlist, feeCollectorLocalnet, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpoch())
+ localnetV0 = MustNewInstance(2, 7, 5, 0, numeric.OneDec(), genesis.LocalHarmonyAccounts, genesis.LocalFnAccounts, emptyAllowlist, nil, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpochOld())
+ localnetV1 = MustNewInstance(2, 8, 5, 0, numeric.OneDec(), genesis.LocalHarmonyAccountsV1, genesis.LocalFnAccountsV1, emptyAllowlist, nil, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpochOld())
+ localnetV2 = MustNewInstance(2, 9, 6, 0, numeric.MustNewDecFromStr("0.68"), genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, emptyAllowlist, nil, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpochOld())
+ localnetV3 = MustNewInstance(2, 9, 6, 0, numeric.MustNewDecFromStr("0.68"), genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, emptyAllowlist, nil, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpoch())
+ localnetV3_1 = MustNewInstance(2, 9, 6, 0, numeric.MustNewDecFromStr("0.68"), genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, emptyAllowlist, nil, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpoch())
+ localnetV3_2 = MustNewInstance(2, 9, 6, 0, numeric.MustNewDecFromStr("0.68"), genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, emptyAllowlist, feeCollectorsLocalnet, localnetReshardingEpoch, LocalnetSchedule.BlocksPerEpoch())
)
diff --git a/internal/configs/sharding/mainnet.go b/internal/configs/sharding/mainnet.go
index 9e73a6f12b..fb6b0f13bc 100644
--- a/internal/configs/sharding/mainnet.go
+++ b/internal/configs/sharding/mainnet.go
@@ -47,9 +47,12 @@ var (
3: []*big.Int{big.NewInt(183), big.NewInt(184), big.NewInt(185)},
}
- emptyAddress = ethCommon.Address{}
- // TODO: set a valid address
- feeCollector ethCommon.Address // = mustAddress("0xXXX or onexxx")
+ feeCollectorsMainnet = FeeCollectors{
+ // Infrastructure
+ mustAddress("0xa0c395A83503ad89613E43397e9fE1f8E93B6384"): numeric.MustNewDecFromStr("0.5"),
+ // Community
+ mustAddress("0xbdFeE8587d347Cd8df002E6154763325265Fa84c"): numeric.MustNewDecFromStr("0.5"),
+ }
)
func mustAddress(addrStr string) ethCommon.Address {
@@ -220,23 +223,23 @@ func (ms mainnetSchedule) IsSkippedEpoch(shardID uint32, epoch *big.Int) bool {
var mainnetReshardingEpoch = []*big.Int{big.NewInt(0), big.NewInt(mainnetV0_1Epoch), big.NewInt(mainnetV0_2Epoch), big.NewInt(mainnetV0_3Epoch), big.NewInt(mainnetV0_4Epoch), big.NewInt(mainnetV1Epoch), big.NewInt(mainnetV1_1Epoch), big.NewInt(mainnetV1_2Epoch), big.NewInt(mainnetV1_3Epoch), big.NewInt(mainnetV1_4Epoch), big.NewInt(mainnetV1_5Epoch), big.NewInt(mainnetV2_0Epoch), big.NewInt(mainnetV2_1Epoch), big.NewInt(mainnetV2_2Epoch), params.MainnetChainConfig.TwoSecondsEpoch, params.MainnetChainConfig.SixtyPercentEpoch, params.MainnetChainConfig.HIP6And8Epoch}
var (
- mainnetV0 = MustNewInstance(4, 150, 112, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccounts, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV0_1 = MustNewInstance(4, 152, 112, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV0_1, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV0_2 = MustNewInstance(4, 200, 148, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV0_2, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV0_3 = MustNewInstance(4, 210, 148, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV0_3, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV0_4 = MustNewInstance(4, 216, 148, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV0_4, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV1 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV1_1 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_1, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV1_2 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_2, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV1_3 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_3, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV1_4 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_4, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV1_5 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV2_0 = MustNewInstance(4, 250, 170, 0, numeric.MustNewDecFromStr("0.68"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV2_1 = MustNewInstance(4, 250, 130, 0, numeric.MustNewDecFromStr("0.68"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV2_2 = MustNewInstance(4, 250, 90, 0, numeric.MustNewDecFromStr("0.68"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
- mainnetV3 = MustNewInstance(4, 250, 90, 0, numeric.MustNewDecFromStr("0.68"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
- mainnetV3_1 = MustNewInstance(4, 250, 50, 0, numeric.MustNewDecFromStr("0.60"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
- mainnetV3_2 = MustNewInstance(4, 250, 25, 0, numeric.MustNewDecFromStr("0.49"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
- mainnetV3_3 = MustNewInstance(4, 250, 25, 0.06, numeric.MustNewDecFromStr("0.49"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, emptyAddress, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
- mainnetV3_4 = MustNewInstance(4, 250, 25, 0.06, numeric.MustNewDecFromStr("0.49"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, feeCollector, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
+ mainnetV0 = MustNewInstance(4, 150, 112, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccounts, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV0_1 = MustNewInstance(4, 152, 112, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV0_1, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV0_2 = MustNewInstance(4, 200, 148, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV0_2, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV0_3 = MustNewInstance(4, 210, 148, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV0_3, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV0_4 = MustNewInstance(4, 216, 148, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV0_4, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV1 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV1_1 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_1, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV1_2 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_2, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV1_3 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_3, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV1_4 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_4, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV1_5 = MustNewInstance(4, 250, 170, 0, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV2_0 = MustNewInstance(4, 250, 170, 0, numeric.MustNewDecFromStr("0.68"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV2_1 = MustNewInstance(4, 250, 130, 0, numeric.MustNewDecFromStr("0.68"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV2_2 = MustNewInstance(4, 250, 90, 0, numeric.MustNewDecFromStr("0.68"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpochOld())
+ mainnetV3 = MustNewInstance(4, 250, 90, 0, numeric.MustNewDecFromStr("0.68"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
+ mainnetV3_1 = MustNewInstance(4, 250, 50, 0, numeric.MustNewDecFromStr("0.60"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
+ mainnetV3_2 = MustNewInstance(4, 250, 25, 0, numeric.MustNewDecFromStr("0.49"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
+ mainnetV3_3 = MustNewInstance(4, 250, 25, 0.06, numeric.MustNewDecFromStr("0.49"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, nil, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
+ mainnetV3_4 = MustNewInstance(4, 250, 25, 0.06, numeric.MustNewDecFromStr("0.49"), genesis.HarmonyAccounts, genesis.FoundationalNodeAccountsV1_5, emptyAllowlist, feeCollectorsMainnet, mainnetReshardingEpoch, MainnetSchedule.BlocksPerEpoch())
)
diff --git a/internal/configs/sharding/pangaea.go b/internal/configs/sharding/pangaea.go
index a5542296c5..12ffc7fe59 100644
--- a/internal/configs/sharding/pangaea.go
+++ b/internal/configs/sharding/pangaea.go
@@ -75,5 +75,5 @@ var pangaeaReshardingEpoch = []*big.Int{
params.PangaeaChainConfig.StakingEpoch,
}
-var pangaeaV0 = MustNewInstance(4, 30, 30, 0, numeric.OneDec(), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, pangaeaReshardingEpoch, PangaeaSchedule.BlocksPerEpoch())
-var pangaeaV1 = MustNewInstance(4, 110, 30, 0, numeric.MustNewDecFromStr("0.68"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, pangaeaReshardingEpoch, PangaeaSchedule.BlocksPerEpoch())
+var pangaeaV0 = MustNewInstance(4, 30, 30, 0, numeric.OneDec(), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, pangaeaReshardingEpoch, PangaeaSchedule.BlocksPerEpoch())
+var pangaeaV1 = MustNewInstance(4, 110, 30, 0, numeric.MustNewDecFromStr("0.68"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, pangaeaReshardingEpoch, PangaeaSchedule.BlocksPerEpoch())
diff --git a/internal/configs/sharding/partner.go b/internal/configs/sharding/partner.go
index 96dd584953..9cd6f69305 100644
--- a/internal/configs/sharding/partner.go
+++ b/internal/configs/sharding/partner.go
@@ -13,7 +13,17 @@ import (
// configuration schedule.
var PartnerSchedule partnerSchedule
-var feeCollectorDevnet = mustAddress("0xb728AEaBF60fD01816ee9e756c18bc01dC91ba5D")
+var feeCollectEpochV1 = big.NewInt(574)
+
+var feeCollectorsDevnet = []FeeCollectors{
+ FeeCollectors{
+ mustAddress("0xb728AEaBF60fD01816ee9e756c18bc01dC91ba5D"): numeric.OneDec(),
+ },
+ FeeCollectors{
+ mustAddress("0xb728AEaBF60fD01816ee9e756c18bc01dC91ba5D"): numeric.MustNewDecFromStr("0.5"),
+ mustAddress("0xb41B6B8d9e68fD44caC8342BC2EEf4D59531d7d7"): numeric.MustNewDecFromStr("0.5"),
+ },
+}
type partnerSchedule struct{}
@@ -32,6 +42,8 @@ const (
func (ps partnerSchedule) InstanceForEpoch(epoch *big.Int) Instance {
switch {
case params.PartnerChainConfig.IsFeeCollectEpoch(epoch):
+ return partnerV3
+ case epoch.Cmp(feeCollectEpochV1) >= 0:
return partnerV2
case epoch.Cmp(params.PartnerChainConfig.StakingEpoch) >= 0:
return partnerV1
@@ -80,6 +92,7 @@ var partnerReshardingEpoch = []*big.Int{
params.PartnerChainConfig.StakingEpoch,
}
-var partnerV0 = MustNewInstance(2, 5, 5, 0, numeric.OneDec(), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch())
-var partnerV1 = MustNewInstance(2, 5, 4, 0, numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch())
-var partnerV2 = MustNewInstance(2, 5, 4, 0, numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, feeCollectorDevnet, partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch())
+var partnerV0 = MustNewInstance(2, 5, 5, 0, numeric.OneDec(), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch())
+var partnerV1 = MustNewInstance(2, 5, 4, 0, numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch())
+var partnerV2 = MustNewInstance(2, 5, 4, 0, numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, feeCollectorsDevnet[0], partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch())
+var partnerV3 = MustNewInstance(2, 5, 4, 0, numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, feeCollectorsDevnet[1], partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch())
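
Note the newest-first ordering in InstanceForEpoch above: Go takes the first true switch case, so the FeeCollectEpoch branch must sit above the older feeCollectEpochV1 branch. Condensed dispatch:

// First true case wins, so later forks are listed first:
//   epoch >= FeeCollectEpoch   -> partnerV3 (fees split 50/50 across two collectors)
//   epoch >= feeCollectEpochV1 -> partnerV2 (single collector, 100%)
//   epoch >= StakingEpoch      -> partnerV1
//   otherwise                  -> partnerV0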
diff --git a/internal/configs/sharding/shardingconfig.go b/internal/configs/sharding/shardingconfig.go
index 30b4dca4c3..577839dfc1 100644
--- a/internal/configs/sharding/shardingconfig.go
+++ b/internal/configs/sharding/shardingconfig.go
@@ -9,7 +9,6 @@ import (
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/numeric"
- ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/harmony/internal/genesis"
)
@@ -72,8 +71,9 @@ type Instance interface {
// ReshardingEpoch returns a list of Epoch while off-chain resharding happens
ReshardingEpoch() []*big.Int
- // Count of blocks per epoch
+ // BlocksPerEpoch returns the number of blocks per epoch.
BlocksPerEpoch() uint64
+
// HIP-16: The absolute number of maximum effective slots per shard limit for each validator. 0 means no limit.
SlotsLimit() int
@@ -83,8 +83,8 @@ type Instance interface {
// ExternalAllowlistLimit returns the maximum number of external leader keys on each shard(HIP18)
ExternalAllowlistLimit() int
- // FeeCollector returns a address to receive txn fees
- FeeCollector() ethCommon.Address
+ // FeeCollectors returns a mapping from collector address to its decimal share of the fee.
+ FeeCollectors() FeeCollectors
}
// genShardingStructure return sharding structure, given shard number and its patterns.
diff --git a/internal/configs/sharding/stress.go b/internal/configs/sharding/stress.go
index e4e511fd65..70c294c637 100644
--- a/internal/configs/sharding/stress.go
+++ b/internal/configs/sharding/stress.go
@@ -78,6 +78,6 @@ var stressnetReshardingEpoch = []*big.Int{
params.StressnetChainConfig.StakingEpoch,
}
-var stressnetV0 = MustNewInstance(2, 10, 10, 0, numeric.OneDec(), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, stressnetReshardingEpoch, StressNetSchedule.BlocksPerEpoch())
-var stressnetV1 = MustNewInstance(2, 30, 10, 0, numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, stressnetReshardingEpoch, StressNetSchedule.BlocksPerEpoch())
-var stressnetV2 = MustNewInstance(2, 30, 10, 0, numeric.MustNewDecFromStr("0.6"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, stressnetReshardingEpoch, StressNetSchedule.BlocksPerEpoch())
+var stressnetV0 = MustNewInstance(2, 10, 10, 0, numeric.OneDec(), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, stressnetReshardingEpoch, StressNetSchedule.BlocksPerEpoch())
+var stressnetV1 = MustNewInstance(2, 30, 10, 0, numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, stressnetReshardingEpoch, StressNetSchedule.BlocksPerEpoch())
+var stressnetV2 = MustNewInstance(2, 30, 10, 0, numeric.MustNewDecFromStr("0.6"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, stressnetReshardingEpoch, StressNetSchedule.BlocksPerEpoch())
diff --git a/internal/configs/sharding/testnet.go b/internal/configs/sharding/testnet.go
index 5892748c84..6ef55775e9 100644
--- a/internal/configs/sharding/testnet.go
+++ b/internal/configs/sharding/testnet.go
@@ -12,8 +12,13 @@ import (
// configuration schedule.
var TestnetSchedule testnetSchedule
-var TestnetNinetyPercentEpoch = big.NewInt(399)
-var ShardReductionEpoch = big.NewInt(486)
+var ninetyPercentEpoch = big.NewInt(399)
+var shardReductionEpoch = big.NewInt(486)
+
+var feeCollectorsTestnet = FeeCollectors{
+ mustAddress("0xb728AEaBF60fD01816ee9e756c18bc01dC91ba5D"): numeric.MustNewDecFromStr("0.5"),
+ mustAddress("0xb41B6B8d9e68fD44caC8342BC2EEf4D59531d7d7"): numeric.MustNewDecFromStr("0.5"),
+}
type testnetSchedule struct{}
@@ -34,9 +39,11 @@ const (
func (ts testnetSchedule) InstanceForEpoch(epoch *big.Int) Instance {
switch {
- case epoch.Cmp(ShardReductionEpoch) >= 0:
+ case params.TestnetChainConfig.IsFeeCollectEpoch(epoch):
+ return testnetV4
+ case epoch.Cmp(shardReductionEpoch) >= 0:
return testnetV3
- case epoch.Cmp(TestnetNinetyPercentEpoch) >= 0:
+ case epoch.Cmp(ninetyPercentEpoch) >= 0:
return testnetV2
case params.TestnetChainConfig.IsStaking(epoch):
return testnetV1
@@ -114,8 +121,9 @@ var testnetReshardingEpoch = []*big.Int{
}
var (
- testnetV0 = MustNewInstance(4, 8, 8, 0, numeric.OneDec(), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
- testnetV1 = MustNewInstance(4, 30, 8, 0.15, numeric.MustNewDecFromStr("0.70"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
- testnetV2 = MustNewInstance(4, 30, 8, 0.15, numeric.MustNewDecFromStr("0.90"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
- testnetV3 = MustNewInstance(2, 30, 8, 0.15, numeric.MustNewDecFromStr("0.90"), genesis.TNHarmonyAccountsV1, genesis.TNFoundationalAccounts, emptyAllowlist, emptyAddress, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
+ testnetV0 = MustNewInstance(4, 8, 8, 0, numeric.OneDec(), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
+ testnetV1 = MustNewInstance(4, 30, 8, 0.15, numeric.MustNewDecFromStr("0.70"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
+ testnetV2 = MustNewInstance(4, 30, 8, 0.15, numeric.MustNewDecFromStr("0.90"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, nil, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
+ testnetV3 = MustNewInstance(2, 30, 8, 0.15, numeric.MustNewDecFromStr("0.90"), genesis.TNHarmonyAccountsV1, genesis.TNFoundationalAccounts, emptyAllowlist, nil, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
+ testnetV4 = MustNewInstance(2, 30, 8, 0.15, numeric.MustNewDecFromStr("0.90"), genesis.TNHarmonyAccountsV1, genesis.TNFoundationalAccounts, emptyAllowlist, feeCollectorsTestnet, testnetReshardingEpoch, TestnetSchedule.BlocksPerEpoch())
)
diff --git a/internal/params/config.go b/internal/params/config.go
index d900386303..43e538243c 100644
--- a/internal/params/config.go
+++ b/internal/params/config.go
@@ -36,250 +36,250 @@ var once sync.Once
var (
// MainnetChainConfig is the chain parameters to run a node on the main network.
MainnetChainConfig = &ChainConfig{
- ChainID: MainnetChainID,
- EthCompatibleChainID: EthMainnetShard0ChainID,
- EthCompatibleShard0ChainID: EthMainnetShard0ChainID,
- EthCompatibleEpoch: big.NewInt(442), // Around Thursday Feb 4th 2020, 10AM PST
- CrossTxEpoch: big.NewInt(28),
- CrossLinkEpoch: big.NewInt(186),
- AggregatedRewardEpoch: big.NewInt(689), // Around Wed Sept 15th 2021 with 3.5s block time
- StakingEpoch: big.NewInt(186),
- PreStakingEpoch: big.NewInt(185),
- QuickUnlockEpoch: big.NewInt(191),
- FiveSecondsEpoch: big.NewInt(230),
- TwoSecondsEpoch: big.NewInt(366), // Around Tuesday Dec 8th 2020, 8AM PST
- SixtyPercentEpoch: big.NewInt(530), // Around Monday Apr 12th 2021, 22:30 UTC
- RedelegationEpoch: big.NewInt(290),
- NoEarlyUnlockEpoch: big.NewInt(530), // Around Monday Apr 12th 2021, 22:30 UTC
- VRFEpoch: big.NewInt(631), // Around Wed July 7th 2021
- PrevVRFEpoch: big.NewInt(689), // Around Wed Sept 15th 2021 with 3.5s block time
- MinDelegation100Epoch: big.NewInt(631), // Around Wed July 7th 2021
- MinCommissionRateEpoch: big.NewInt(631), // Around Wed July 7th 2021
- MinCommissionPromoPeriod: big.NewInt(100),
- EPoSBound35Epoch: big.NewInt(631), // Around Wed July 7th 2021
- EIP155Epoch: big.NewInt(28),
- S3Epoch: big.NewInt(28),
- DataCopyFixEpoch: big.NewInt(689), // Around Wed Sept 15th 2021 with 3.5s block time
- IstanbulEpoch: big.NewInt(314),
- ReceiptLogEpoch: big.NewInt(101),
- SHA3Epoch: big.NewInt(725), // Around Mon Oct 11 2021, 19:00 UTC
- HIP6And8Epoch: big.NewInt(725), // Around Mon Oct 11 2021, 19:00 UTC
- StakingPrecompileEpoch: big.NewInt(871), // Around Tue Feb 11 2022
- ChainIdFixEpoch: big.NewInt(1323), // Around Wed 8 Feb 11:30PM UTC
- SlotsLimitedEpoch: big.NewInt(999), // Around Fri, 27 May 2022 09:41:02 UTC with 2s block time
- CrossShardXferPrecompileEpoch: big.NewInt(1323), // Around Wed 8 Feb 11:30PM UTC
- AllowlistEpoch: EpochTBD,
- FeeCollectEpoch: EpochTBD,
- LeaderRotationEpoch: EpochTBD,
- LeaderRotationBlocksCount: 64,
- ValidatorCodeFixEpoch: EpochTBD,
+ ChainID: MainnetChainID,
+ EthCompatibleChainID: EthMainnetShard0ChainID,
+ EthCompatibleShard0ChainID: EthMainnetShard0ChainID,
+ EthCompatibleEpoch: big.NewInt(442), // Around Thursday Feb 4th 2021, 10AM PST
+ CrossTxEpoch: big.NewInt(28),
+ CrossLinkEpoch: big.NewInt(186),
+ AggregatedRewardEpoch: big.NewInt(689), // Around Wed Sept 15th 2021 with 3.5s block time
+ StakingEpoch: big.NewInt(186),
+ PreStakingEpoch: big.NewInt(185),
+ QuickUnlockEpoch: big.NewInt(191),
+ FiveSecondsEpoch: big.NewInt(230),
+ TwoSecondsEpoch: big.NewInt(366), // Around Tuesday Dec 8th 2020, 8AM PST
+ SixtyPercentEpoch: big.NewInt(530), // Around Monday Apr 12th 2021, 22:30 UTC
+ RedelegationEpoch: big.NewInt(290),
+ NoEarlyUnlockEpoch: big.NewInt(530), // Around Monday Apr 12th 2021, 22:30 UTC
+ VRFEpoch: big.NewInt(631), // Around Wed July 7th 2021
+ PrevVRFEpoch: big.NewInt(689), // Around Wed Sept 15th 2021 with 3.5s block time
+ MinDelegation100Epoch: big.NewInt(631), // Around Wed July 7th 2021
+ MinCommissionRateEpoch: big.NewInt(631), // Around Wed July 7th 2021
+ MinCommissionPromoPeriod: big.NewInt(100),
+ EPoSBound35Epoch: big.NewInt(631), // Around Wed July 7th 2021
+ EIP155Epoch: big.NewInt(28),
+ S3Epoch: big.NewInt(28),
+ DataCopyFixEpoch: big.NewInt(689), // Around Wed Sept 15th 2021 with 3.5s block time
+ IstanbulEpoch: big.NewInt(314),
+ ReceiptLogEpoch: big.NewInt(101),
+ SHA3Epoch: big.NewInt(725), // Around Mon Oct 11 2021, 19:00 UTC
+ HIP6And8Epoch: big.NewInt(725), // Around Mon Oct 11 2021, 19:00 UTC
+ StakingPrecompileEpoch: big.NewInt(871), // Around Tue Feb 11 2022
+ ChainIdFixEpoch: big.NewInt(1323), // Around Wed 8 Feb 11:30PM UTC
+ SlotsLimitedEpoch: big.NewInt(999), // Around Fri, 27 May 2022 09:41:02 UTC with 2s block time
+ CrossShardXferPrecompileEpoch: big.NewInt(1323), // Around Wed 8 Feb 11:30PM UTC
+ AllowlistEpoch: EpochTBD,
+ LeaderRotationExternalNonBeaconLeaders: EpochTBD,
+ LeaderRotationExternalBeaconLeaders: EpochTBD,
+ FeeCollectEpoch: big.NewInt(1451), // 2023-05-17 04:02:00+00:00
+ ValidatorCodeFixEpoch: big.NewInt(1451),
}
// TestnetChainConfig contains the chain parameters to run a node on the harmony test network.
TestnetChainConfig = &ChainConfig{
- ChainID: TestnetChainID,
- EthCompatibleChainID: EthTestnetShard0ChainID,
- EthCompatibleShard0ChainID: EthTestnetShard0ChainID,
- EthCompatibleEpoch: big.NewInt(0),
- CrossTxEpoch: big.NewInt(0),
- CrossLinkEpoch: big.NewInt(2),
- AggregatedRewardEpoch: big.NewInt(2),
- StakingEpoch: big.NewInt(2),
- PreStakingEpoch: big.NewInt(1),
- QuickUnlockEpoch: big.NewInt(0),
- FiveSecondsEpoch: big.NewInt(0),
- TwoSecondsEpoch: big.NewInt(2),
- SixtyPercentEpoch: big.NewInt(2),
- RedelegationEpoch: big.NewInt(2),
- NoEarlyUnlockEpoch: big.NewInt(2),
- VRFEpoch: big.NewInt(2),
- PrevVRFEpoch: big.NewInt(2),
- MinDelegation100Epoch: big.NewInt(2),
- MinCommissionRateEpoch: big.NewInt(2),
- MinCommissionPromoPeriod: big.NewInt(2),
- EPoSBound35Epoch: big.NewInt(2),
- EIP155Epoch: big.NewInt(0),
- S3Epoch: big.NewInt(0),
- DataCopyFixEpoch: big.NewInt(0),
- IstanbulEpoch: big.NewInt(0),
- ReceiptLogEpoch: big.NewInt(0),
- SHA3Epoch: big.NewInt(0),
- HIP6And8Epoch: big.NewInt(2),
- StakingPrecompileEpoch: big.NewInt(2),
- SlotsLimitedEpoch: big.NewInt(2),
- ChainIdFixEpoch: big.NewInt(0),
- CrossShardXferPrecompileEpoch: big.NewInt(2),
- AllowlistEpoch: big.NewInt(2),
- LeaderRotationEpoch: EpochTBD,
- LeaderRotationBlocksCount: 64,
- FeeCollectEpoch: EpochTBD,
- ValidatorCodeFixEpoch: EpochTBD,
+ ChainID: TestnetChainID,
+ EthCompatibleChainID: EthTestnetShard0ChainID,
+ EthCompatibleShard0ChainID: EthTestnetShard0ChainID,
+ EthCompatibleEpoch: big.NewInt(0),
+ CrossTxEpoch: big.NewInt(0),
+ CrossLinkEpoch: big.NewInt(2),
+ AggregatedRewardEpoch: big.NewInt(2),
+ StakingEpoch: big.NewInt(2),
+ PreStakingEpoch: big.NewInt(1),
+ QuickUnlockEpoch: big.NewInt(0),
+ FiveSecondsEpoch: big.NewInt(0),
+ TwoSecondsEpoch: big.NewInt(2),
+ SixtyPercentEpoch: big.NewInt(2),
+ RedelegationEpoch: big.NewInt(2),
+ NoEarlyUnlockEpoch: big.NewInt(2),
+ VRFEpoch: big.NewInt(2),
+ PrevVRFEpoch: big.NewInt(2),
+ MinDelegation100Epoch: big.NewInt(2),
+ MinCommissionRateEpoch: big.NewInt(2),
+ MinCommissionPromoPeriod: big.NewInt(2),
+ EPoSBound35Epoch: big.NewInt(2),
+ EIP155Epoch: big.NewInt(0),
+ S3Epoch: big.NewInt(0),
+ DataCopyFixEpoch: big.NewInt(0),
+ IstanbulEpoch: big.NewInt(0),
+ ReceiptLogEpoch: big.NewInt(0),
+ SHA3Epoch: big.NewInt(0),
+ HIP6And8Epoch: big.NewInt(2),
+ StakingPrecompileEpoch: big.NewInt(2),
+ SlotsLimitedEpoch: big.NewInt(2),
+ ChainIdFixEpoch: big.NewInt(0),
+ CrossShardXferPrecompileEpoch: big.NewInt(2),
+ AllowlistEpoch: big.NewInt(2),
+ LeaderRotationExternalNonBeaconLeaders: EpochTBD,
+ LeaderRotationExternalBeaconLeaders: EpochTBD,
+ FeeCollectEpoch: big.NewInt(1296), // 2023-04-28 07:14:20+00:00
+ ValidatorCodeFixEpoch: big.NewInt(1296), // 2023-04-28 07:14:20+00:00
}
// PangaeaChainConfig contains the chain parameters for the Pangaea network.
// All features except for CrossLink are enabled at launch.
PangaeaChainConfig = &ChainConfig{
- ChainID: PangaeaChainID,
- EthCompatibleChainID: EthPangaeaShard0ChainID,
- EthCompatibleShard0ChainID: EthPangaeaShard0ChainID,
- EthCompatibleEpoch: big.NewInt(0),
- CrossTxEpoch: big.NewInt(0),
- CrossLinkEpoch: big.NewInt(2),
- AggregatedRewardEpoch: big.NewInt(3),
- StakingEpoch: big.NewInt(2),
- PreStakingEpoch: big.NewInt(1),
- QuickUnlockEpoch: big.NewInt(0),
- FiveSecondsEpoch: big.NewInt(0),
- TwoSecondsEpoch: big.NewInt(0),
- SixtyPercentEpoch: big.NewInt(0),
- RedelegationEpoch: big.NewInt(0),
- NoEarlyUnlockEpoch: big.NewInt(0),
- VRFEpoch: big.NewInt(0),
- PrevVRFEpoch: big.NewInt(0),
- MinDelegation100Epoch: big.NewInt(0),
- MinCommissionRateEpoch: big.NewInt(0),
- MinCommissionPromoPeriod: big.NewInt(10),
- EPoSBound35Epoch: big.NewInt(0),
- EIP155Epoch: big.NewInt(0),
- S3Epoch: big.NewInt(0),
- DataCopyFixEpoch: big.NewInt(0),
- IstanbulEpoch: big.NewInt(0),
- ReceiptLogEpoch: big.NewInt(0),
- SHA3Epoch: big.NewInt(0),
- HIP6And8Epoch: big.NewInt(0),
- StakingPrecompileEpoch: big.NewInt(2), // same as staking
- ChainIdFixEpoch: big.NewInt(0),
- SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
- CrossShardXferPrecompileEpoch: big.NewInt(1),
- AllowlistEpoch: EpochTBD,
- LeaderRotationEpoch: EpochTBD,
- LeaderRotationBlocksCount: 64,
- FeeCollectEpoch: EpochTBD,
- ValidatorCodeFixEpoch: EpochTBD,
+ ChainID: PangaeaChainID,
+ EthCompatibleChainID: EthPangaeaShard0ChainID,
+ EthCompatibleShard0ChainID: EthPangaeaShard0ChainID,
+ EthCompatibleEpoch: big.NewInt(0),
+ CrossTxEpoch: big.NewInt(0),
+ CrossLinkEpoch: big.NewInt(2),
+ AggregatedRewardEpoch: big.NewInt(3),
+ StakingEpoch: big.NewInt(2),
+ PreStakingEpoch: big.NewInt(1),
+ QuickUnlockEpoch: big.NewInt(0),
+ FiveSecondsEpoch: big.NewInt(0),
+ TwoSecondsEpoch: big.NewInt(0),
+ SixtyPercentEpoch: big.NewInt(0),
+ RedelegationEpoch: big.NewInt(0),
+ NoEarlyUnlockEpoch: big.NewInt(0),
+ VRFEpoch: big.NewInt(0),
+ PrevVRFEpoch: big.NewInt(0),
+ MinDelegation100Epoch: big.NewInt(0),
+ MinCommissionRateEpoch: big.NewInt(0),
+ MinCommissionPromoPeriod: big.NewInt(10),
+ EPoSBound35Epoch: big.NewInt(0),
+ EIP155Epoch: big.NewInt(0),
+ S3Epoch: big.NewInt(0),
+ DataCopyFixEpoch: big.NewInt(0),
+ IstanbulEpoch: big.NewInt(0),
+ ReceiptLogEpoch: big.NewInt(0),
+ SHA3Epoch: big.NewInt(0),
+ HIP6And8Epoch: big.NewInt(0),
+ StakingPrecompileEpoch: big.NewInt(2), // same as staking
+ ChainIdFixEpoch: big.NewInt(0),
+ SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
+ CrossShardXferPrecompileEpoch: big.NewInt(1),
+ AllowlistEpoch: EpochTBD,
+ LeaderRotationExternalNonBeaconLeaders: EpochTBD,
+ LeaderRotationExternalBeaconLeaders: EpochTBD,
+ FeeCollectEpoch: EpochTBD,
+ ValidatorCodeFixEpoch: EpochTBD,
}
// PartnerChainConfig contains the chain parameters for the Partner network.
// This is the Devnet config
PartnerChainConfig = &ChainConfig{
- ChainID: PartnerChainID,
- EthCompatibleChainID: EthPartnerShard0ChainID,
- EthCompatibleShard0ChainID: EthPartnerShard0ChainID,
- EthCompatibleEpoch: big.NewInt(0),
- CrossTxEpoch: big.NewInt(0),
- CrossLinkEpoch: big.NewInt(2),
- AggregatedRewardEpoch: big.NewInt(3),
- StakingEpoch: big.NewInt(2),
- PreStakingEpoch: big.NewInt(1),
- QuickUnlockEpoch: big.NewInt(0),
- FiveSecondsEpoch: big.NewInt(0),
- TwoSecondsEpoch: big.NewInt(0),
- SixtyPercentEpoch: big.NewInt(4),
- RedelegationEpoch: big.NewInt(0),
- NoEarlyUnlockEpoch: big.NewInt(0),
- VRFEpoch: big.NewInt(0),
- PrevVRFEpoch: big.NewInt(0),
- MinDelegation100Epoch: big.NewInt(0),
- MinCommissionRateEpoch: big.NewInt(0),
- MinCommissionPromoPeriod: big.NewInt(10),
- EPoSBound35Epoch: big.NewInt(0),
- EIP155Epoch: big.NewInt(0),
- S3Epoch: big.NewInt(0),
- DataCopyFixEpoch: big.NewInt(0),
- IstanbulEpoch: big.NewInt(0),
- ReceiptLogEpoch: big.NewInt(0),
- SHA3Epoch: big.NewInt(0),
- HIP6And8Epoch: big.NewInt(0),
- StakingPrecompileEpoch: big.NewInt(2),
- ChainIdFixEpoch: big.NewInt(0),
- SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
- CrossShardXferPrecompileEpoch: big.NewInt(1),
- AllowlistEpoch: EpochTBD,
- FeeCollectEpoch: big.NewInt(574),
- LeaderRotationEpoch: EpochTBD,
- LeaderRotationBlocksCount: 64,
- ValidatorCodeFixEpoch: EpochTBD,
+ ChainID: PartnerChainID,
+ EthCompatibleChainID: EthPartnerShard0ChainID,
+ EthCompatibleShard0ChainID: EthPartnerShard0ChainID,
+ EthCompatibleEpoch: big.NewInt(0),
+ CrossTxEpoch: big.NewInt(0),
+ CrossLinkEpoch: big.NewInt(2),
+ AggregatedRewardEpoch: big.NewInt(3),
+ StakingEpoch: big.NewInt(2),
+ PreStakingEpoch: big.NewInt(1),
+ QuickUnlockEpoch: big.NewInt(0),
+ FiveSecondsEpoch: big.NewInt(0),
+ TwoSecondsEpoch: big.NewInt(0),
+ SixtyPercentEpoch: big.NewInt(4),
+ RedelegationEpoch: big.NewInt(0),
+ NoEarlyUnlockEpoch: big.NewInt(0),
+ VRFEpoch: big.NewInt(0),
+ PrevVRFEpoch: big.NewInt(0),
+ MinDelegation100Epoch: big.NewInt(0),
+ MinCommissionRateEpoch: big.NewInt(0),
+ MinCommissionPromoPeriod: big.NewInt(10),
+ EPoSBound35Epoch: big.NewInt(0),
+ EIP155Epoch: big.NewInt(0),
+ S3Epoch: big.NewInt(0),
+ DataCopyFixEpoch: big.NewInt(0),
+ IstanbulEpoch: big.NewInt(0),
+ ReceiptLogEpoch: big.NewInt(0),
+ SHA3Epoch: big.NewInt(0),
+ HIP6And8Epoch: big.NewInt(0),
+ StakingPrecompileEpoch: big.NewInt(2),
+ ChainIdFixEpoch: big.NewInt(0),
+ SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
+ CrossShardXferPrecompileEpoch: big.NewInt(1),
+ AllowlistEpoch: EpochTBD,
+ LeaderRotationExternalNonBeaconLeaders: EpochTBD,
+ LeaderRotationExternalBeaconLeaders: EpochTBD,
+ FeeCollectEpoch: big.NewInt(848), // 2023-04-28 04:33:33+00:00
+ ValidatorCodeFixEpoch: big.NewInt(848),
}
// StressnetChainConfig contains the chain parameters for the Stress test network.
// All features except for CrossLink are enabled at launch.
StressnetChainConfig = &ChainConfig{
- ChainID: StressnetChainID,
- EthCompatibleChainID: EthStressnetShard0ChainID,
- EthCompatibleShard0ChainID: EthStressnetShard0ChainID,
- EthCompatibleEpoch: big.NewInt(0),
- CrossTxEpoch: big.NewInt(0),
- CrossLinkEpoch: big.NewInt(2),
- AggregatedRewardEpoch: big.NewInt(3),
- StakingEpoch: big.NewInt(2),
- PreStakingEpoch: big.NewInt(1),
- QuickUnlockEpoch: big.NewInt(0),
- FiveSecondsEpoch: big.NewInt(0),
- TwoSecondsEpoch: big.NewInt(0),
- SixtyPercentEpoch: big.NewInt(10),
- RedelegationEpoch: big.NewInt(0),
- NoEarlyUnlockEpoch: big.NewInt(0),
- VRFEpoch: big.NewInt(0),
- PrevVRFEpoch: big.NewInt(0),
- MinDelegation100Epoch: big.NewInt(0),
- MinCommissionRateEpoch: big.NewInt(0),
- MinCommissionPromoPeriod: big.NewInt(10),
- EPoSBound35Epoch: big.NewInt(0),
- EIP155Epoch: big.NewInt(0),
- S3Epoch: big.NewInt(0),
- DataCopyFixEpoch: big.NewInt(0),
- IstanbulEpoch: big.NewInt(0),
- ReceiptLogEpoch: big.NewInt(0),
- SHA3Epoch: big.NewInt(0),
- HIP6And8Epoch: big.NewInt(0),
- StakingPrecompileEpoch: big.NewInt(2),
- ChainIdFixEpoch: big.NewInt(0),
- SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
- CrossShardXferPrecompileEpoch: big.NewInt(1),
- AllowlistEpoch: EpochTBD,
- FeeCollectEpoch: EpochTBD,
- LeaderRotationEpoch: EpochTBD,
- LeaderRotationBlocksCount: 64,
- ValidatorCodeFixEpoch: EpochTBD,
+ ChainID: StressnetChainID,
+ EthCompatibleChainID: EthStressnetShard0ChainID,
+ EthCompatibleShard0ChainID: EthStressnetShard0ChainID,
+ EthCompatibleEpoch: big.NewInt(0),
+ CrossTxEpoch: big.NewInt(0),
+ CrossLinkEpoch: big.NewInt(2),
+ AggregatedRewardEpoch: big.NewInt(3),
+ StakingEpoch: big.NewInt(2),
+ PreStakingEpoch: big.NewInt(1),
+ QuickUnlockEpoch: big.NewInt(0),
+ FiveSecondsEpoch: big.NewInt(0),
+ TwoSecondsEpoch: big.NewInt(0),
+ SixtyPercentEpoch: big.NewInt(10),
+ RedelegationEpoch: big.NewInt(0),
+ NoEarlyUnlockEpoch: big.NewInt(0),
+ VRFEpoch: big.NewInt(0),
+ PrevVRFEpoch: big.NewInt(0),
+ MinDelegation100Epoch: big.NewInt(0),
+ MinCommissionRateEpoch: big.NewInt(0),
+ MinCommissionPromoPeriod: big.NewInt(10),
+ EPoSBound35Epoch: big.NewInt(0),
+ EIP155Epoch: big.NewInt(0),
+ S3Epoch: big.NewInt(0),
+ DataCopyFixEpoch: big.NewInt(0),
+ IstanbulEpoch: big.NewInt(0),
+ ReceiptLogEpoch: big.NewInt(0),
+ SHA3Epoch: big.NewInt(0),
+ HIP6And8Epoch: big.NewInt(0),
+ StakingPrecompileEpoch: big.NewInt(2),
+ ChainIdFixEpoch: big.NewInt(0),
+ SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
+ CrossShardXferPrecompileEpoch: big.NewInt(1),
+ AllowlistEpoch: EpochTBD,
+ FeeCollectEpoch: EpochTBD,
+ LeaderRotationExternalNonBeaconLeaders: EpochTBD,
+ LeaderRotationExternalBeaconLeaders: EpochTBD,
+ ValidatorCodeFixEpoch: EpochTBD,
}
// LocalnetChainConfig contains the chain parameters to run for local development.
LocalnetChainConfig = &ChainConfig{
- ChainID: TestnetChainID,
- EthCompatibleChainID: EthTestnetShard0ChainID,
- EthCompatibleShard0ChainID: EthTestnetShard0ChainID,
- EthCompatibleEpoch: big.NewInt(0),
- CrossTxEpoch: big.NewInt(0),
- CrossLinkEpoch: big.NewInt(2),
- AggregatedRewardEpoch: big.NewInt(3),
- StakingEpoch: big.NewInt(2),
- PreStakingEpoch: big.NewInt(0),
- QuickUnlockEpoch: big.NewInt(0),
- FiveSecondsEpoch: big.NewInt(0),
- TwoSecondsEpoch: big.NewInt(0),
- SixtyPercentEpoch: EpochTBD, // Never enable it for localnet as localnet has no external validator setup
- RedelegationEpoch: big.NewInt(0),
- NoEarlyUnlockEpoch: big.NewInt(0),
- VRFEpoch: big.NewInt(0),
- PrevVRFEpoch: big.NewInt(0),
- MinDelegation100Epoch: big.NewInt(0),
- MinCommissionRateEpoch: big.NewInt(0),
- MinCommissionPromoPeriod: big.NewInt(10),
- EPoSBound35Epoch: big.NewInt(0),
- EIP155Epoch: big.NewInt(0),
- S3Epoch: big.NewInt(0),
- DataCopyFixEpoch: big.NewInt(0),
- IstanbulEpoch: big.NewInt(0),
- ReceiptLogEpoch: big.NewInt(0),
- SHA3Epoch: big.NewInt(0),
- HIP6And8Epoch: EpochTBD, // Never enable it for localnet as localnet has no external validator setup
- StakingPrecompileEpoch: big.NewInt(2),
- ChainIdFixEpoch: big.NewInt(0),
- SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
- CrossShardXferPrecompileEpoch: big.NewInt(1),
- AllowlistEpoch: EpochTBD,
- LeaderRotationEpoch: EpochTBD,
- LeaderRotationBlocksCount: 5,
- FeeCollectEpoch: big.NewInt(5),
- ValidatorCodeFixEpoch: EpochTBD,
+ ChainID: TestnetChainID,
+ EthCompatibleChainID: EthTestnetShard0ChainID,
+ EthCompatibleShard0ChainID: EthTestnetShard0ChainID,
+ EthCompatibleEpoch: big.NewInt(0),
+ CrossTxEpoch: big.NewInt(0),
+ CrossLinkEpoch: big.NewInt(2),
+ AggregatedRewardEpoch: big.NewInt(3),
+ StakingEpoch: big.NewInt(2),
+ PreStakingEpoch: big.NewInt(0),
+ QuickUnlockEpoch: big.NewInt(0),
+ FiveSecondsEpoch: big.NewInt(0),
+ TwoSecondsEpoch: big.NewInt(0),
+ SixtyPercentEpoch: EpochTBD, // Never enable it for localnet as localnet has no external validator setup
+ RedelegationEpoch: big.NewInt(0),
+ NoEarlyUnlockEpoch: big.NewInt(0),
+ VRFEpoch: big.NewInt(0),
+ PrevVRFEpoch: big.NewInt(0),
+ MinDelegation100Epoch: big.NewInt(0),
+ MinCommissionRateEpoch: big.NewInt(0),
+ MinCommissionPromoPeriod: big.NewInt(10),
+ EPoSBound35Epoch: big.NewInt(0),
+ EIP155Epoch: big.NewInt(0),
+ S3Epoch: big.NewInt(0),
+ DataCopyFixEpoch: big.NewInt(0),
+ IstanbulEpoch: big.NewInt(0),
+ ReceiptLogEpoch: big.NewInt(0),
+ SHA3Epoch: big.NewInt(0),
+ HIP6And8Epoch: EpochTBD, // Never enable it for localnet as localnet has no external validator setup
+ StakingPrecompileEpoch: big.NewInt(2),
+ ChainIdFixEpoch: big.NewInt(0),
+ SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
+ CrossShardXferPrecompileEpoch: big.NewInt(1),
+ AllowlistEpoch: EpochTBD,
+ LeaderRotationExternalNonBeaconLeaders: big.NewInt(5),
+ LeaderRotationExternalBeaconLeaders: big.NewInt(6),
+ FeeCollectEpoch: big.NewInt(2),
+ ValidatorCodeFixEpoch: big.NewInt(2),
}
// AllProtocolChanges ...
@@ -319,8 +319,8 @@ var (
big.NewInt(0), // SlotsLimitedEpoch
big.NewInt(1), // CrossShardXferPrecompileEpoch
big.NewInt(0), // AllowlistEpoch
- big.NewInt(1), // LeaderRotationEpoch
- 64, // LeaderRotationBlocksCount
+ big.NewInt(1), // LeaderRotationExternalNonBeaconLeaders
+ big.NewInt(1), // LeaderRotationExternalBeaconLeaders
big.NewInt(0), // FeeCollectEpoch
big.NewInt(0), // ValidatorCodeFixEpoch
}
@@ -362,8 +362,8 @@ var (
big.NewInt(0), // SlotsLimitedEpoch
big.NewInt(1), // CrossShardXferPrecompileEpoch
big.NewInt(0), // AllowlistEpoch
- big.NewInt(1), // LeaderRotationEpoch
- 64, // LeaderRotationBlocksCount
+ big.NewInt(1), // LeaderRotationExternalNonBeaconLeaders
+ big.NewInt(1), // LeaderRotationExternalBeaconLeaders
big.NewInt(0), // FeeCollectEpoch
big.NewInt(0), // ValidatorCodeFixEpoch
}
@@ -505,9 +505,9 @@ type ChainConfig struct {
// AllowlistEpoch is the first epoch to support allowlist of HIP18
AllowlistEpoch *big.Int
- LeaderRotationEpoch *big.Int `json:"leader-rotation-epoch,omitempty"`
+ LeaderRotationExternalNonBeaconLeaders *big.Int `json:"leader-rotation-external-non-beacon-leaders,omitempty"`
- LeaderRotationBlocksCount int `json:"leader-rotation-blocks-count,omitempty"`
+ LeaderRotationExternalBeaconLeaders *big.Int `json:"leader-rotation-external-beacon-leaders,omitempty"`
// FeeCollectEpoch is the first epoch that enables txn fees to be collected into the community-managed account.
// It should >= StakingEpoch.
@@ -548,16 +548,25 @@ func (c *ChainConfig) mustValid() {
panic(err)
}
}
+ // before staking epoch, fees were sent to coinbase
require(c.FeeCollectEpoch.Cmp(c.StakingEpoch) >= 0,
"must satisfy: FeeCollectEpoch >= StakingEpoch")
+ // pre-staking necessarily precedes staking
require(c.PreStakingEpoch.Cmp(c.StakingEpoch) < 0,
"must satisfy: PreStakingEpoch < StakingEpoch")
+ // delegations can be made starting at PreStakingEpoch
require(c.StakingPrecompileEpoch.Cmp(c.PreStakingEpoch) >= 0,
"must satisfy: StakingPrecompileEpoch >= PreStakingEpoch")
+ // main functionality must come before the precompile
+ // see AcceptsCrossTx for why > and not >=
require(c.CrossShardXferPrecompileEpoch.Cmp(c.CrossTxEpoch) > 0,
"must satisfy: CrossShardXferPrecompileEpoch > CrossTxEpoch")
+ // the fix is applied only on the Solidity level, so you need eth compat
require(c.ValidatorCodeFixEpoch.Cmp(c.EthCompatibleEpoch) >= 0,
"must satisfy: ValidatorCodeFixEpoch >= EthCompatibleEpoch")
+ // we accept validator creation transactions starting at PreStakingEpoch
+ require(c.ValidatorCodeFixEpoch.Cmp(c.PreStakingEpoch) >= 0,
+ "must satisfy: ValidatorCodeFixEpoch >= PreStakingEpoch")
}
// IsEIP155 returns whether epoch is either equal to the EIP155 fork epoch or greater.
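
Taken together, the checks in mustValid pin down this partial order on fork epochs (read directly off the requires above):

// PreStakingEpoch < StakingEpoch <= FeeCollectEpoch
// PreStakingEpoch <= StakingPrecompileEpoch
// CrossTxEpoch    <  CrossShardXferPrecompileEpoch
// max(EthCompatibleEpoch, PreStakingEpoch) <= ValidatorCodeFixEpoch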
@@ -725,7 +734,17 @@ func (c *ChainConfig) IsAllowlistEpoch(epoch *big.Int) bool {
}
func (c *ChainConfig) IsLeaderRotation(epoch *big.Int) bool {
- return isForked(c.LeaderRotationEpoch, epoch)
+ return isForked(c.LeaderRotationExternalNonBeaconLeaders, epoch)
+}
+
+func (c *ChainConfig) IsLeaderRotationExternalValidatorsAllowed(epoch *big.Int, shardID uint32) bool {
+ if !c.IsLeaderRotation(epoch) {
+ return false
+ }
+ if shardID == 0 {
+ return isForked(c.LeaderRotationExternalBeaconLeaders, epoch)
+ }
+ return true
}
// IsFeeCollectEpoch determines whether Txn Fees will be collected into the community-managed account.
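
Spelled out, IsLeaderRotationExternalValidatorsAllowed behaves as follows (shard 0 is the beacon chain):

// rotation fork not reached                               -> false on every shard
// rotation reached, shard != 0                            -> true
// rotation reached, shard == 0, beacon fork not reached   -> false
// rotation reached, shard == 0, beacon fork reached       -> true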
diff --git a/internal/registry/registry.go b/internal/registry/registry.go
index 025b652a69..98b69a3ef5 100644
--- a/internal/registry/registry.go
+++ b/internal/registry/registry.go
@@ -10,6 +10,7 @@ import (
type Registry struct {
mu sync.Mutex
blockchain core.BlockChain
+ txPool *core.TxPool
}
// New creates a new registry.
@@ -33,3 +34,20 @@ func (r *Registry) GetBlockchain() core.BlockChain {
return r.blockchain
}
+
+// SetTxPool sets the txpool to registry.
+func (r *Registry) SetTxPool(txPool *core.TxPool) *Registry {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.txPool = txPool
+ return r
+}
+
+// GetTxPool gets the txpool from registry.
+func (r *Registry) GetTxPool() *core.TxPool {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ return r.txPool
+}
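
SetTxPool returns the registry, so wiring composes fluently. A hypothetical call site (SetBlockchain assumed to exist alongside the GetBlockchain shown above):

reg := registry.New().SetBlockchain(chain).SetTxPool(pool)
if pool := reg.GetTxPool(); pool != nil {
	// consumers fetch shared components lazily and nil-check them
}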
diff --git a/internal/shardchain/dbfactory.go b/internal/shardchain/dbfactory.go
index 762ac54106..6d8657e787 100644
--- a/internal/shardchain/dbfactory.go
+++ b/internal/shardchain/dbfactory.go
@@ -9,7 +9,7 @@ import (
"github.com/harmony-one/harmony/internal/shardchain/leveldb_shard"
"github.com/harmony-one/harmony/internal/shardchain/local_cache"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
)
@@ -34,7 +34,7 @@ type LDBFactory struct {
// NewChainDB returns a new LDB for the blockchain for given shard.
func (f *LDBFactory) NewChainDB(shardID uint32) (ethdb.Database, error) {
dir := path.Join(f.RootDir, fmt.Sprintf("%s_%d", LDBDirPrefix, shardID))
- return rawdb.NewLevelDBDatabase(dir, 256, 1024, "")
+ return rawdb.NewLevelDBDatabase(dir, 256, 1024, "", false)
}
// MemDBFactory is a memory-backed blockchain database factory.
diff --git a/internal/shardchain/dbfactory_tikv.go b/internal/shardchain/dbfactory_tikv.go
index 6651cd5c36..80b0117eda 100644
--- a/internal/shardchain/dbfactory_tikv.go
+++ b/internal/shardchain/dbfactory_tikv.go
@@ -5,7 +5,7 @@ import (
"io"
"sync"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/internal/tikv"
tikvCommon "github.com/harmony-one/harmony/internal/tikv/common"
"github.com/harmony-one/harmony/internal/tikv/prefix"
diff --git a/internal/shardchain/leveldb_shard/shard.go b/internal/shardchain/leveldb_shard/shard.go
index 3f2339c427..607996ff29 100644
--- a/internal/shardchain/leveldb_shard/shard.go
+++ b/internal/shardchain/leveldb_shard/shard.go
@@ -96,6 +96,14 @@ func NewLeveldbShard(savePath string, diskCount int, diskShards int) (shard *Lev
return shard, err
}
+func (l *LeveldbShard) NewBatchWithSize(size int) ethdb.Batch {
+ return nil
+}
+
+func (l *LeveldbShard) NewSnapshot() (ethdb.Snapshot, error) {
+ return nil, nil
+}
+
func (l *LeveldbShard) mapDB(key []byte) *leveldb.DB {
return l.dbs[mapDBIndex(key, l.dbCount)]
}
@@ -128,7 +136,7 @@ func (l *LeveldbShard) NewBatch() ethdb.Batch {
// NewIterator creates a binary-alphabetical iterator over the entire keyspace
// contained within the key-value database.
-func (l *LeveldbShard) NewIterator() ethdb.Iterator {
+func (l *LeveldbShard) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
return l.iterator(nil)
}
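
NewBatchWithSize and NewSnapshot here are interface-satisfying stubs: the updated ethdb interfaces grew these methods, but this backend does not support them, so callers must treat the nil results as "unsupported". Illustrative guard at a call site:

batch := shard.NewBatchWithSize(1024)
if batch == nil {
	batch = shard.NewBatch() // fall back to the unsized batch
}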
diff --git a/internal/tikv/common/tikv_store.go b/internal/tikv/common/tikv_store.go
index 2ad342ad8f..2a60972b84 100644
--- a/internal/tikv/common/tikv_store.go
+++ b/internal/tikv/common/tikv_store.go
@@ -1,6 +1,7 @@
package common
import (
+ "errors"
"io"
"github.com/ethereum/go-ethereum/ethdb"
@@ -23,7 +24,7 @@ type TiKVStoreWrapper struct {
TiKVStore
}
-func (t *TiKVStoreWrapper) NewIterator() ethdb.Iterator {
+func (t *TiKVStoreWrapper) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
return t.TiKVStore.NewIterator(nil, nil)
}
@@ -36,6 +37,10 @@ func (t *TiKVStoreWrapper) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
return t.TiKVStore.NewIterator(bytesPrefix.Start, bytesPrefix.Limit)
}
+func (t *TiKVStoreWrapper) NewSnapshot() (ethdb.Snapshot, error) {
+ return nil, errors.New("not supported")
+}
+
func ToEthKeyValueStore(store TiKVStore) ethdb.KeyValueStore {
return &TiKVStoreWrapper{TiKVStore: store}
}
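Both iterator wrappers in this PR accept the new `(prefix, start)` parameters but drop them: this one passes `nil, nil`, and `LeveldbShard.NewIterator` above calls `l.iterator(nil)`. Callers that expect prefixed iteration will silently scan the full keyspace. A sketch that honors the arguments, assuming `TiKVStore.NewIterator(start, limit)` takes a raw key range as `NewIteratorWithPrefix` suggests:

```go
// Sketch: mirror go-ethereum's leveldb wrapper, which iterates the
// range [prefix+start, upper-bound-of-prefix).
func (t *TiKVStoreWrapper) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
	r := util.BytesPrefix(prefix) // goleveldb's util package, already used above
	r.Start = append(r.Start, start...)
	return t.TiKVStore.NewIterator(r.Start, r.Limit)
}
```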
diff --git a/internal/tikv/prefix/prefix_database.go b/internal/tikv/prefix/prefix_database.go
index 7bfe9d9451..7a04a0ab35 100644
--- a/internal/tikv/prefix/prefix_database.go
+++ b/internal/tikv/prefix/prefix_database.go
@@ -21,6 +21,15 @@ func NewPrefixDatabase(prefix []byte, db common.TiKVStore) *PrefixDatabase {
}
}
+func (p *PrefixDatabase) AncientDatadir() (string, error) {
+ return "", nil
+}
+
+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer (stubbed here: returns nil).
+func (p *PrefixDatabase) NewBatchWithSize(size int) ethdb.Batch {
+ return nil
+}
+
// makeKey use to create a key with prefix, keysPool can reduce gc pressure
func (p *PrefixDatabase) makeKey(keys []byte) []byte {
prefixLen := len(p.prefix)
diff --git a/internal/tikv/remote/remote_database.go b/internal/tikv/remote/remote_database.go
index 915e123754..cf33c719d8 100644
--- a/internal/tikv/remote/remote_database.go
+++ b/internal/tikv/remote/remote_database.go
@@ -39,6 +39,14 @@ func (d *RemoteDatabase) ReadOnly() {
d.readOnly = true
}
+func (d *RemoteDatabase) AncientDatadir() (string, error) {
+ return "", nil
+}
+
+func (d *RemoteDatabase) NewBatchWithSize(size int) ethdb.Batch {
+ return nil
+}
+
// Has retrieves if a key is present in the key-value data store.
func (d *RemoteDatabase) Has(key []byte) (bool, error) {
data, err := d.Get(key)
diff --git a/internal/tikv/statedb_cache/statedb_cache_database.go b/internal/tikv/statedb_cache/statedb_cache_database.go
index 9757298130..b4c95b9bc9 100644
--- a/internal/tikv/statedb_cache/statedb_cache_database.go
+++ b/internal/tikv/statedb_cache/statedb_cache_database.go
@@ -139,6 +139,14 @@ func NewStateDBCacheDatabase(remoteDB common.TiKVStore, config StateDBCacheConfi
return db, nil
}
+func (c *StateDBCacheDatabase) AncientDatadir() (string, error) {
+ return "", nil
+}
+
+func (c *StateDBCacheDatabase) NewBatchWithSize(size int) ethdb.Batch {
+ return nil
+}
+
// Has retrieves if a key is present in the key-value data store.
func (c *StateDBCacheDatabase) Has(key []byte) (bool, error) {
return c.remoteDB.Has(key)
diff --git a/internal/utils/lrucache/lrucache.go b/internal/utils/lrucache/lrucache.go
new file mode 100644
index 0000000000..4859811b51
--- /dev/null
+++ b/internal/utils/lrucache/lrucache.go
@@ -0,0 +1,27 @@
+package lrucache
+
+import lru "github.com/hashicorp/golang-lru"
+
+type Cache[K comparable, V any] struct {
+ cache *lru.Cache
+}
+
+func NewCache[K comparable, V any](size int) *Cache[K, V] {
+ c, _ := lru.New(size)
+ return &Cache[K, V]{
+ cache: c,
+ }
+}
+
+func (c *Cache[K, V]) Get(key K) (V, bool) {
+ v, ok := c.cache.Get(key)
+ if !ok {
+ var out V
+ return out, false
+ }
+ return v.(V), true
+}
+
+func (c *Cache[K, V]) Set(key K, value V) {
+ c.cache.Add(key, value)
+}
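The new file wraps `hashicorp/golang-lru` in a typed, generic facade: misses return the zero value of `V` plus `false`, so call sites avoid interface assertions. (`lru.New` only errors for non-positive sizes, which is why its error is discarded.) A usage sketch matching how `node.go` uses it below:

```go
// Epoch -> bls-key-to-address map, as in node.go.
c := lrucache.NewCache[uint64, map[string]common.Address](10)
c.Set(5, map[string]common.Address{"blskey1": {}})

if m, ok := c.Get(5); ok {
	fmt.Println("hit:", len(m)) // hit: 1
}
if m, ok := c.Get(6); !ok {
	fmt.Println("miss returns zero value:", m == nil) // true for map types
}
```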
diff --git a/internal/utils/timer.go b/internal/utils/timer.go
index 2e8a77667b..d355d5c719 100644
--- a/internal/utils/timer.go
+++ b/internal/utils/timer.go
@@ -39,9 +39,9 @@ func (timeout *Timeout) Stop() {
timeout.start = time.Now()
}
-// CheckExpire checks whether the timeout is reached/expired
-func (timeout *Timeout) CheckExpire() bool {
- if timeout.state == Active && time.Since(timeout.start) > timeout.d {
+// Expired checks whether the timeout is reached/expired
+func (timeout *Timeout) Expired(now time.Time) bool {
+ if timeout.state == Active && now.Sub(timeout.start) > timeout.d {
timeout.state = Expired
}
if timeout.state == Expired {
diff --git a/internal/utils/timer_test.go b/internal/utils/timer_test.go
index 8b3061ddb6..cdd0dbd37f 100644
--- a/internal/utils/timer_test.go
+++ b/internal/utils/timer_test.go
@@ -15,17 +15,27 @@ func TestNewTimeout(t *testing.T) {
func TestCheckExpire(t *testing.T) {
timer := NewTimeout(time.Second)
timer.Start()
- time.Sleep(2 * time.Second)
- if timer.CheckExpire() == false {
- t.Fatalf("CheckExpire should be true")
+ now := time.Now()
+ if timer.Expired(now) {
+ t.Fatalf("Timer shouldn't be expired")
}
+ if !timer.Expired(now.Add(2 * time.Second)) {
+ t.Fatalf("Timer should be expired")
+ }
+ // start again
timer.Start()
- if timer.CheckExpire() == true {
- t.Fatalf("CheckExpire should be false")
+ if timer.Expired(now) {
+ t.Fatalf("Timer shouldn't be expired")
+ }
+ if !timer.Expired(now.Add(2 * time.Second)) {
+ t.Fatalf("Timer should be expired")
}
+ // stop
timer.Stop()
- if timer.CheckExpire() == true {
- t.Fatalf("CheckExpire should be false")
+ if timer.Expired(now) {
+ t.Fatalf("Timer shouldn't be expired because it is stopped")
+ }
+ if timer.Expired(now.Add(2 * time.Second)) {
+ t.Fatalf("Timer shouldn't be expired because it is stopped")
}
-
}
diff --git a/node/node.go b/node/node.go
index 13988e2ad3..710751776f 100644
--- a/node/node.go
+++ b/node/node.go
@@ -17,6 +17,7 @@ import (
"github.com/harmony-one/harmony/internal/shardchain/tikv_manage"
"github.com/harmony-one/harmony/internal/tikv"
"github.com/harmony-one/harmony/internal/tikv/redis_helper"
+ "github.com/harmony-one/harmony/internal/utils/lrucache"
"github.com/ethereum/go-ethereum/rlp"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
@@ -99,12 +100,11 @@ type ISync interface {
// Node represents a protocol-participating node in the network
type Node struct {
- Consensus *consensus.Consensus // Consensus object containing all Consensus related data (e.g. committee members, signatures, commits)
- ConfirmedBlockChannel chan *types.Block // The channel to send confirmed blocks
- BeaconBlockChannel chan *types.Block // The channel to send beacon blocks for non-beaconchain nodes
- pendingCXReceipts map[string]*types.CXReceiptsProof // All the receipts received but not yet processed for Consensus
- pendingCXMutex sync.Mutex
- crosslinks *crosslinks.Crosslinks // Memory storage for crosslink processing.
+ Consensus *consensus.Consensus // Consensus object containing all Consensus related data (e.g. committee members, signatures, commits)
+ BeaconBlockChannel chan *types.Block // The channel to send beacon blocks for non-beaconchain nodes
+ pendingCXReceipts map[string]*types.CXReceiptsProof // All the receipts received but not yet processed for Consensus
+ pendingCXMutex sync.Mutex
+ crosslinks *crosslinks.Crosslinks // Memory storage for crosslink processing.
// Shard databases
shardChains shardchain.Collection
SelfPeer p2p.Peer
@@ -133,8 +133,7 @@ type Node struct {
chainConfig params.ChainConfig
unixTimeAtNodeStart int64
// KeysToAddrs holds the addresses of bls keys run by the node
- KeysToAddrs map[string]common.Address
- keysToAddrsEpoch *big.Int
+ keysToAddrs *lrucache.Cache[uint64, map[string]common.Address]
keysToAddrsMutex sync.Mutex
// TransactionErrorSink contains error messages for any failed transaction, in memory only
TransactionErrorSink *types.TransactionErrorSink
@@ -142,7 +141,6 @@ type Node struct {
BroadcastInvalidTx bool
// InSync flag indicates the node is in-sync or not
IsSynchronized *abool.AtomicBool
- proposedBlock map[uint64]*types.Block
deciderCache *lru.Cache
committeeCache *lru.Cache
@@ -227,8 +225,8 @@ func (node *Node) tryBroadcast(tx *types.Transaction) {
utils.Logger().Info().Str("shardGroupID", string(shardGroupID)).Msg("tryBroadcast")
for attempt := 0; attempt < NumTryBroadCast; attempt++ {
- if err := node.host.SendMessageToGroups([]nodeconfig.GroupID{shardGroupID},
- p2p.ConstructMessage(msg)); err != nil && attempt < NumTryBroadCast {
+ err := node.host.SendMessageToGroups([]nodeconfig.GroupID{shardGroupID}, p2p.ConstructMessage(msg))
+ if err != nil {
utils.Logger().Error().Int("attempt", attempt).Msg("Error when trying to broadcast tx")
} else {
break
@@ -246,7 +244,7 @@ func (node *Node) tryBroadcastStaking(stakingTx *staking.StakingTransaction) {
for attempt := 0; attempt < NumTryBroadCast; attempt++ {
if err := node.host.SendMessageToGroups([]nodeconfig.GroupID{shardGroupID},
- p2p.ConstructMessage(msg)); err != nil && attempt < NumTryBroadCast {
+ p2p.ConstructMessage(msg)); err != nil {
utils.Logger().Error().Int("attempt", attempt).Msg("Error when trying to broadcast staking tx")
} else {
break
@@ -255,31 +253,27 @@ func (node *Node) tryBroadcastStaking(stakingTx *staking.StakingTransaction) {
}
// Add new transactions to the pending transaction list.
-func (node *Node) addPendingTransactions(newTxs types.Transactions) []error {
- // if inSync, _, _ := node.SyncStatus(node.Blockchain().ShardID()); !inSync && node.NodeConfig.GetNetworkType() == nodeconfig.Mainnet {
- // utils.Logger().Debug().
- // Int("length of newTxs", len(newTxs)).
- // Msg("[addPendingTransactions] Node out of sync, ignoring transactions")
- // return nil
- // }
-
- poolTxs := types.PoolTransactions{}
- errs := []error{}
- acceptCx := node.Blockchain().Config().AcceptsCrossTx(node.Blockchain().CurrentHeader().Epoch())
+func addPendingTransactions(registry *registry.Registry, newTxs types.Transactions) []error {
+ var (
+ errs []error
+ bc = registry.GetBlockchain()
+ txPool = registry.GetTxPool()
+ poolTxs = types.PoolTransactions{}
+ acceptCx = bc.Config().AcceptsCrossTx(bc.CurrentHeader().Epoch())
+ )
for _, tx := range newTxs {
if tx.ShardID() != tx.ToShardID() && !acceptCx {
errs = append(errs, errors.WithMessage(errInvalidEpoch, "cross-shard tx not accepted yet"))
continue
}
- if tx.IsEthCompatible() && !node.Blockchain().Config().IsEthCompatible(node.Blockchain().CurrentBlock().Epoch()) {
+ if tx.IsEthCompatible() && !bc.Config().IsEthCompatible(bc.CurrentBlock().Epoch()) {
errs = append(errs, errors.WithMessage(errInvalidEpoch, "ethereum tx not accepted yet"))
continue
}
poolTxs = append(poolTxs, tx)
}
- errs = append(errs, node.TxPool.AddRemotes(poolTxs)...)
-
- pendingCount, queueCount := node.TxPool.Stats()
+ errs = append(errs, registry.GetTxPool().AddRemotes(poolTxs)...)
+ pendingCount, queueCount := txPool.Stats()
utils.Logger().Debug().
Interface("err", errs).
Int("length of newTxs", len(newTxs)).
@@ -346,7 +340,7 @@ func (node *Node) AddPendingStakingTransaction(
// This is only called from SDK.
func (node *Node) AddPendingTransaction(newTx *types.Transaction) error {
if newTx.ShardID() == node.NodeConfig.ShardID {
- errs := node.addPendingTransactions(types.Transactions{newTx})
+ errs := addPendingTransactions(node.registry, types.Transactions{newTx})
var err error
for i := range errs {
if errs[i] != nil {
@@ -544,11 +538,11 @@ func (node *Node) validateNodeMessage(ctx context.Context, payload []byte) (
// validate shardID
// validate public key size
// verify message signature
-func (node *Node) validateShardBoundMessage(
- ctx context.Context, payload []byte,
+func validateShardBoundMessage(consensus *consensus.Consensus, nodeConfig *nodeconfig.ConfigType, payload []byte,
) (*msg_pb.Message, *bls.SerializedPublicKey, bool, error) {
var (
m msg_pb.Message
)
if err := protobuf.Unmarshal(payload, &m); err != nil {
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "invalid_unmarshal"}).Inc()
@@ -556,7 +550,7 @@ func (node *Node) validateShardBoundMessage(
}
// ignore messages not intended for explorer
- if node.NodeConfig.Role() == nodeconfig.ExplorerNode {
+ if nodeConfig.Role() == nodeconfig.ExplorerNode {
switch m.Type {
case
msg_pb.MessageType_ANNOUNCE,
@@ -573,7 +567,7 @@ func (node *Node) validateShardBoundMessage(
// in order to avoid possible trap forever but drop PREPARE and COMMIT
// which are message types specifically for a node acting as leader
// so we just ignore those messages
- if node.Consensus.IsViewChangingMode() {
+ if consensus.IsViewChangingMode() {
switch m.Type {
case msg_pb.MessageType_PREPARE, msg_pb.MessageType_COMMIT:
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "ignored"}).Inc()
@@ -589,7 +583,7 @@ func (node *Node) validateShardBoundMessage(
}
// ignore message not intended for leader, but still forward them to the network
- if node.Consensus.IsLeader() {
+ if consensus.IsLeader() {
switch m.Type {
case msg_pb.MessageType_ANNOUNCE, msg_pb.MessageType_PREPARED, msg_pb.MessageType_COMMITTED:
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "ignored"}).Inc()
@@ -602,7 +596,7 @@ func (node *Node) validateShardBoundMessage(
senderBitmap := []byte{}
if maybeCon != nil {
- if maybeCon.ShardId != node.Consensus.ShardID {
+ if maybeCon.ShardId != consensus.ShardID {
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "invalid_shard"}).Inc()
return nil, nil, true, errors.WithStack(errWrongShardID)
}
@@ -612,17 +606,17 @@ func (node *Node) validateShardBoundMessage(
senderBitmap = maybeCon.SenderPubkeyBitmap
}
// If the viewID is too old, reject the message.
- if maybeCon.ViewId+5 < node.Consensus.GetCurBlockViewID() {
+ if maybeCon.ViewId+5 < consensus.GetCurBlockViewID() {
return nil, nil, true, errors.WithStack(errViewIDTooOld)
}
} else if maybeVC != nil {
- if maybeVC.ShardId != node.Consensus.ShardID {
+ if maybeVC.ShardId != consensus.ShardID {
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "invalid_shard"}).Inc()
return nil, nil, true, errors.WithStack(errWrongShardID)
}
senderKey = maybeVC.SenderPubkey
// If the viewID is too old, reject the message.
- if maybeVC.ViewId+5 < node.Consensus.GetViewChangingID() {
+ if maybeVC.ViewId+5 < consensus.GetViewChangingID() {
return nil, nil, true, errors.WithStack(errViewIDTooOld)
}
} else {
@@ -632,7 +626,7 @@ func (node *Node) validateShardBoundMessage(
// ignore message not intended for validator
// but still forward them to the network
- if !node.Consensus.IsLeader() {
+ if !consensus.IsLeader() {
switch m.Type {
case msg_pb.MessageType_PREPARE, msg_pb.MessageType_COMMIT:
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "ignored"}).Inc()
@@ -648,12 +642,12 @@ func (node *Node) validateShardBoundMessage(
}
copy(serializedKey[:], senderKey)
- if !node.Consensus.IsValidatorInCommittee(serializedKey) {
+ if !consensus.IsValidatorInCommittee(serializedKey) {
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "invalid_committee"}).Inc()
return nil, nil, true, errors.WithStack(shard.ErrValidNotInCommittee)
}
} else {
- count := node.Consensus.Decider.ParticipantsCount()
+ count := consensus.Decider.ParticipantsCount()
if (count+7)>>3 != int64(len(senderBitmap)) {
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "invalid_participant_count"}).Inc()
return nil, nil, true, errors.WithStack(errWrongSizeOfBitmap)
@@ -667,7 +661,6 @@ func (node *Node) validateShardBoundMessage(
}
var (
- errMsgHadNoHMYPayLoadAssumption = errors.New("did not have sufficient size for hmy msg")
errConsensusMessageOnUnexpectedTopic = errors.New("received consensus on wrong topic")
)
@@ -794,8 +787,8 @@ func (node *Node) StartPubSub() error {
nodeP2PMessageCounterVec.With(prometheus.Labels{"type": "consensus_total"}).Inc()
// validate consensus message
- validMsg, senderPubKey, ignore, err := node.validateShardBoundMessage(
- context.TODO(), openBox[proto.MessageCategoryBytes:],
+ validMsg, senderPubKey, ignore, err := validateShardBoundMessage(
+ node.Consensus, node.NodeConfig, openBox[proto.MessageCategoryBytes:],
)
if err != nil {
@@ -1030,14 +1023,13 @@ func New(
TransactionErrorSink: types.NewTransactionErrorSink(),
crosslinks: crosslinks.New(),
syncID: GenerateSyncID(),
+ keysToAddrs: lrucache.NewCache[uint64, map[string]common.Address](10),
}
-
- // Get the node config that's created in the harmony.go program.
- if consensusObj != nil {
- node.NodeConfig = nodeconfig.GetShardConfig(consensusObj.ShardID)
- } else {
- node.NodeConfig = nodeconfig.GetDefaultConfig()
+ if consensusObj == nil {
+ panic("consensusObj is nil")
}
+ // Get the node config that's created in the harmony.go program.
+ node.NodeConfig = nodeconfig.GetShardConfig(consensusObj.ShardID)
node.HarmonyConfig = harmonyconfig
if host != nil {
@@ -1051,7 +1043,7 @@ func New(
node.shardChains = collection
node.IsSynchronized = abool.NewBool(false)
- if host != nil && consensusObj != nil {
+ if host != nil {
// Consensus and associated channel to communicate blocks
node.Consensus = consensusObj
@@ -1080,19 +1072,23 @@ func New(
}
}
- node.ConfirmedBlockChannel = make(chan *types.Block)
node.BeaconBlockChannel = make(chan *types.Block)
txPoolConfig := core.DefaultTxPoolConfig
- // Temporarily not updating other networks to make the rpc tests pass
- if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet && node.NodeConfig.GetNetworkType() != nodeconfig.Testnet {
- txPoolConfig.PriceLimit = 1e9
- txPoolConfig.PriceBump = 10
- }
if harmonyconfig != nil {
txPoolConfig.AccountSlots = harmonyconfig.TxPool.AccountSlots
txPoolConfig.GlobalSlots = harmonyconfig.TxPool.GlobalSlots
txPoolConfig.Locals = append(txPoolConfig.Locals, localAccounts...)
+ txPoolConfig.AccountQueue = harmonyconfig.TxPool.AccountQueue
+ txPoolConfig.GlobalQueue = harmonyconfig.TxPool.GlobalQueue
+ txPoolConfig.Lifetime = harmonyconfig.TxPool.Lifetime
+ txPoolConfig.PriceLimit = uint64(harmonyconfig.TxPool.PriceLimit)
+ txPoolConfig.PriceBump = harmonyconfig.TxPool.PriceBump
+ }
+ // Temporarily not updating other networks to make the rpc tests pass
+ if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet && node.NodeConfig.GetNetworkType() != nodeconfig.Testnet {
+ txPoolConfig.PriceLimit = 1e9
+ txPoolConfig.PriceBump = 10
}
txPoolConfig.Blacklist = blacklist
@@ -1109,6 +1105,7 @@ func New(
}
node.TxPool = core.NewTxPool(txPoolConfig, node.Blockchain().Config(), blockchain, node.TransactionErrorSink)
+ node.registry.SetTxPool(node.TxPool)
node.CxPool = core.NewCxPool(core.CxPoolSize)
node.Worker = worker.New(node.Blockchain().Config(), blockchain, beaconChain, engine)
@@ -1116,7 +1113,6 @@ func New(
node.committeeCache, _ = lru.New(16)
node.pendingCXReceipts = map[string]*types.CXReceiptsProof{}
- node.proposedBlock = map[uint64]*types.Block{}
node.Consensus.VerifiedNewBlock = make(chan *types.Block, 1)
// the sequence number is the next block number to be added in consensus protocol, which is
// always one more than current chain header block
@@ -1325,10 +1321,6 @@ func (node *Node) ShutDown() {
}
func (node *Node) populateSelfAddresses(epoch *big.Int) {
- // reset the self addresses
- node.KeysToAddrs = map[string]common.Address{}
- node.keysToAddrsEpoch = epoch
-
shardID := node.Consensus.ShardID
shardState, err := node.Consensus.Blockchain().ReadShardState(epoch)
if err != nil {
@@ -1347,7 +1339,7 @@ func (node *Node) populateSelfAddresses(epoch *big.Int) {
Msg("[PopulateSelfAddresses] failed to find shard committee")
return
}
-
+ keysToAddrs := map[string]common.Address{}
for _, blskey := range node.Consensus.GetPublicKeys() {
blsStr := blskey.Bytes.Hex()
shardkey := bls.FromLibBLSPublicKeyUnsafe(blskey.Object)
@@ -1368,7 +1360,7 @@ func (node *Node) populateSelfAddresses(epoch *big.Int) {
Msg("[PopulateSelfAddresses] could not find address")
return
}
- node.KeysToAddrs[blsStr] = *addr
+ keysToAddrs[blsStr] = *addr
utils.Logger().Debug().
Int64("epoch", epoch.Int64()).
Uint32("shard-id", shardID).
@@ -1376,34 +1368,27 @@ func (node *Node) populateSelfAddresses(epoch *big.Int) {
Str("address", common2.MustAddressToBech32(*addr)).
Msg("[PopulateSelfAddresses]")
}
+ node.keysToAddrs.Set(epoch.Uint64(), keysToAddrs)
}
// GetAddressForBLSKey retrieves the ECDSA address associated with bls key for epoch
func (node *Node) GetAddressForBLSKey(blskey *bls_core.PublicKey, epoch *big.Int) common.Address {
- // populate if first time setting or new epoch
- node.keysToAddrsMutex.Lock()
- defer node.keysToAddrsMutex.Unlock()
- if node.keysToAddrsEpoch == nil || epoch.Cmp(node.keysToAddrsEpoch) != 0 {
- node.populateSelfAddresses(epoch)
- }
- blsStr := blskey.SerializeToHexStr()
- addr, ok := node.KeysToAddrs[blsStr]
- if !ok {
- return common.Address{}
- }
- return addr
+ return node.GetAddresses(epoch)[blskey.SerializeToHexStr()]
}
// GetAddresses retrieves all ECDSA addresses of the bls keys for epoch
func (node *Node) GetAddresses(epoch *big.Int) map[string]common.Address {
- // populate if first time setting or new epoch
+ // populate if new epoch
+ if rs, ok := node.keysToAddrs.Get(epoch.Uint64()); ok {
+ return rs
+ }
node.keysToAddrsMutex.Lock()
- defer node.keysToAddrsMutex.Unlock()
- if node.keysToAddrsEpoch == nil || epoch.Cmp(node.keysToAddrsEpoch) != 0 {
- node.populateSelfAddresses(epoch)
+ node.populateSelfAddresses(epoch)
+ node.keysToAddrsMutex.Unlock()
+ if rs, ok := node.keysToAddrs.Get(epoch.Uint64()); ok {
+ return rs
}
- // self addresses map can never be nil
- return node.KeysToAddrs
+ return make(map[string]common.Address)
}
// IsRunningBeaconChain returns whether the node is running on beacon chain.
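The rewritten `GetAddresses` is a check/populate/re-check pattern: a lock-free read of the per-epoch LRU entry, a locked `populateSelfAddresses`, then a second read, with an empty (never nil) map as the fallback. Two goroutines can both miss the first check and populate the same epoch twice, but they write identical values, so the race is benign, just redundant work. If that duplication ever mattered, a `singleflight` guard would collapse it; a hypothetical sketch (the `group` variable and helper are not in this diff):

```go
import (
	"fmt"
	"math/big"

	"golang.org/x/sync/singleflight"
)

// Hypothetical: deduplicate concurrent population of one epoch.
var group singleflight.Group

func (node *Node) addressesForEpoch(epoch uint64) map[string]common.Address {
	v, _, _ := group.Do(fmt.Sprintf("epoch-%d", epoch), func() (interface{}, error) {
		node.populateSelfAddresses(new(big.Int).SetUint64(epoch))
		rs, _ := node.keysToAddrs.Get(epoch)
		return rs, nil
	})
	if m, ok := v.(map[string]common.Address); ok && m != nil {
		return m
	}
	return make(map[string]common.Address)
}
```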
diff --git a/node/node_handler.go b/node/node_handler.go
index 3589d19660..acc4afc266 100644
--- a/node/node_handler.go
+++ b/node/node_handler.go
@@ -116,7 +116,7 @@ func (node *Node) transactionMessageHandler(msgPayload []byte) {
Msg("Failed to deserialize transaction list")
return
}
- node.addPendingTransactions(txs)
+ addPendingTransactions(node.registry, txs)
}
}
diff --git a/node/node_newblock.go b/node/node_newblock.go
index 5050e4d6a7..27c0d843bd 100644
--- a/node/node_newblock.go
+++ b/node/node_newblock.go
@@ -7,7 +7,6 @@ import (
"time"
"github.com/harmony-one/harmony/consensus"
-
"github.com/harmony-one/harmony/crypto/bls"
staking "github.com/harmony-one/harmony/staking/types"
@@ -27,7 +26,7 @@ const (
// WaitForConsensusReadyV2 listen for the readiness signal from consensus and generate new block for consensus.
// only leader will receive the ready signal
-func (node *Node) WaitForConsensusReadyV2(readySignal chan consensus.ProposalType, commitSigsChan chan []byte, stopChan chan struct{}, stoppedChan chan struct{}) {
+func (node *Node) WaitForConsensusReadyV2(cs *consensus.Consensus, stopChan chan struct{}, stoppedChan chan struct{}) {
go func() {
// Setup stoppedChan
defer close(stoppedChan)
@@ -47,12 +46,11 @@ func (node *Node) WaitForConsensusReadyV2(readySignal chan consensus.ProposalTyp
utils.Logger().Warn().
Msg("Consensus new block proposal: STOPPED!")
return
- case proposalType := <-readySignal:
- retryCount := 0
- for node.Consensus != nil && node.Consensus.IsLeader() {
+ case proposalType := <-cs.GetReadySignal():
+ for retryCount := 0; retryCount < 3 && cs.IsLeader(); retryCount++ {
time.Sleep(SleepPeriod)
utils.Logger().Info().
- Uint64("blockNum", node.Blockchain().CurrentBlock().NumberU64()+1).
+ Uint64("blockNum", cs.Blockchain().CurrentBlock().NumberU64()+1).
Bool("asyncProposal", proposalType == consensus.AsyncProposal).
Msg("PROPOSING NEW BLOCK ------------------------------------------------")
@@ -71,14 +69,14 @@ func (node *Node) WaitForConsensusReadyV2(readySignal chan consensus.ProposalTyp
} else {
utils.Logger().Info().Msg("[ProposeNewBlock] Timeout waiting for commit sigs, reading directly from DB")
}
- sigs, err := node.Consensus.BlockCommitSigs(node.Blockchain().CurrentBlock().NumberU64())
+ sigs, err := cs.BlockCommitSigs(cs.Blockchain().CurrentBlock().NumberU64())
if err != nil {
utils.Logger().Error().Err(err).Msg("[ProposeNewBlock] Cannot get commit signatures from last block")
} else {
newCommitSigsChan <- sigs
}
- case commitSigs := <-commitSigsChan:
+ case commitSigs := <-cs.CommitSigChannel():
utils.Logger().Info().Msg("[ProposeNewBlock] received commit sigs asynchronously")
if len(commitSigs) > bls.BLSSignatureSizeInBytes {
newCommitSigsChan <- commitSigs
@@ -86,12 +84,7 @@ func (node *Node) WaitForConsensusReadyV2(readySignal chan consensus.ProposalTyp
}
}()
newBlock, err := node.ProposeNewBlock(newCommitSigsChan)
-
if err == nil {
- if blk, ok := node.proposedBlock[newBlock.NumberU64()]; ok {
- utils.Logger().Info().Uint64("blockNum", newBlock.NumberU64()).Str("blockHash", blk.Hash().Hex()).
- Msg("Block with the same number was already proposed, abort.")
- }
utils.Logger().Info().
Uint64("blockNum", newBlock.NumberU64()).
Uint64("epoch", newBlock.Epoch().Uint64()).
@@ -102,18 +95,11 @@ func (node *Node) WaitForConsensusReadyV2(readySignal chan consensus.ProposalTyp
Msg("=========Successfully Proposed New Block==========")
// Send the new block to Consensus so it can be confirmed.
- node.proposedBlock[newBlock.NumberU64()] = newBlock
- delete(node.proposedBlock, newBlock.NumberU64()-10)
- node.Consensus.BlockChannel(newBlock)
+ cs.BlockChannel(newBlock)
break
} else {
- retryCount++
utils.Logger().Err(err).Int("retryCount", retryCount).
Msg("!!!!!!!!!Failed Proposing New Block!!!!!!!!!")
- if retryCount > 3 {
- // break to avoid repeated failures
- break
- }
continue
}
}
diff --git a/node/service_setup.go b/node/service_setup.go
index a2518110c1..94a34d4f1c 100644
--- a/node/service_setup.go
+++ b/node/service_setup.go
@@ -19,7 +19,7 @@ func (node *Node) RegisterValidatorServices() {
// Register new block service.
node.serviceManager.Register(
service.BlockProposal,
- blockproposal.New(node.Consensus.ReadySignal, node.Consensus.CommitSigChannel, node.WaitForConsensusReadyV2),
+ blockproposal.New(node.Consensus, node.WaitForConsensusReadyV2),
)
}
diff --git a/node/worker/worker_test.go b/node/worker/worker_test.go
index e6ffdfddad..37d61816cc 100644
--- a/node/worker/worker_test.go
+++ b/node/worker/worker_test.go
@@ -7,7 +7,7 @@ import (
"github.com/harmony-one/harmony/core/state"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
diff --git a/numeric/decimal.go b/numeric/decimal.go
index e4236daa46..f80c9eca14 100644
--- a/numeric/decimal.go
+++ b/numeric/decimal.go
@@ -202,6 +202,12 @@ func (d Dec) Copy() Dec {
}
}
+func (d Dec) Div(d2 Dec) Dec {
+ return Dec{
+ new(big.Int).Div(d.Int, d2.Int),
+ }
+}
+
// IsNil ...
func (d Dec) IsNil() bool { return d.Int == nil } // is decimal nil
// IsZero ...
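A caution on the new `Div`: `Dec` stores values as integers scaled by 10^18, and `Div` divides those raw integers with truncation and no rescaling. `NewDec(3).Div(NewDec(2))` computes `3e18 / 2e18 = 1`, which as a `Dec` is `0.000000000000000001`, not `1.5`; `Quo` is the method that rescales and rounds, which is presumably why `TestDiv` below exercises `Quo`. A worked contrast:

```go
a, b := numeric.NewDec(3), numeric.NewDec(2) // raw ints: 3e18 and 2e18

raw := a.Div(b) // big.Int division of the raw values: 3e18/2e18 = 1,
                // i.e. the Dec 0.000000000000000001
q := a.Quo(b)   // rescaled, rounded division: 1.5

fmt.Println(raw, q) // 0.000000000000000001 1.500000000000000000
```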
diff --git a/numeric/decimal_test.go b/numeric/decimal_test.go
index a7937d9a70..7b790f20de 100644
--- a/numeric/decimal_test.go
+++ b/numeric/decimal_test.go
@@ -368,3 +368,27 @@ func TestDecCeil(t *testing.T) {
require.Equal(t, tc.expected, res, "unexpected result for test case %d, input: %v", i, tc.input)
}
}
+
+func TestDiv(t *testing.T) {
+ tests := []struct {
+ d1, d2, exp Dec
+ }{
+ {mustNewDecFromStr(t, "0"), mustNewDecFromStr(t, "1"), ZeroDec()},
+ {mustNewDecFromStr(t, "1"), mustNewDecFromStr(t, "1"), NewDec(1)},
+ {mustNewDecFromStr(t, "1"), mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "0.5")},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "1"), NewDec(2)},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "3"), mustNewDecFromStr(t, "0.666666666666666667")},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "4"), mustNewDecFromStr(t, "0.5")},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "5"), mustNewDecFromStr(t, "0.4")},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "6"), mustNewDecFromStr(t, "0.333333333333333333")},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "7"), mustNewDecFromStr(t, "0.285714285714285714")},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "8"), mustNewDecFromStr(t, "0.25")},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "9"), mustNewDecFromStr(t, "0.222222222222222222")},
+ {mustNewDecFromStr(t, "2"), mustNewDecFromStr(t, "10"), mustNewDecFromStr(t, "0.2")},
+ }
+ for i, tc := range tests {
+ res := tc.d1.Quo(tc.d2)
+ require.True(t, res.Equal(tc.exp), "unexpected result for test case %d, input: %s %s %s", i, tc.d1, tc.d2, tc.exp)
+ }
+
+}
diff --git a/rosetta/infra/harmony-mainnet.conf b/rosetta/infra/harmony-mainnet.conf
index 8d51609cb1..6deea22942 100644
--- a/rosetta/infra/harmony-mainnet.conf
+++ b/rosetta/infra/harmony-mainnet.conf
@@ -133,6 +133,12 @@ Version = "2.5.13"
- GlobalSlots = 5120
LocalAccountsFile = "./.hmy/locals.txt"
RosettaFixFile = "./rosetta_local_fix.csv"
+ GlobalSlots = 4096
+ GlobalQueue = 5120
+ AccountQueue = 64
+ Lifetime = "30m"
+ PriceBump = 1
+ PriceLimit = 100e9
[WS]
AuthPort = 9801
diff --git a/rosetta/infra/harmony-pstn.conf b/rosetta/infra/harmony-pstn.conf
index 1bb865c1a5..49dbe01c10 100644
--- a/rosetta/infra/harmony-pstn.conf
+++ b/rosetta/infra/harmony-pstn.conf
@@ -133,6 +133,12 @@ Version = "2.5.13"
- GlobalSlots = 5120
LocalAccountsFile = "./.hmy/locals.txt"
RosettaFixFile = ""
+ GlobalSlots = 4096
+ GlobalQueue = 5120
+ AccountQueue = 64
+ Lifetime = "30m"
+ PriceBump = 1
+ PriceLimit = 100e9
[WS]
AuthPort = 9801
diff --git a/rosetta/services/block.go b/rosetta/services/block.go
index c488c5ff95..8a939b1081 100644
--- a/rosetta/services/block.go
+++ b/rosetta/services/block.go
@@ -178,11 +178,11 @@ func (s *BlockAPI) BlockTransaction(
// check for contract related operations, if it is a plain transaction.
if txInfo.tx.To() != nil {
// possible call to existing contract so fetch relevant data
- contractInfo.ContractCode = state.GetCode(*txInfo.tx.To())
+ contractInfo.ContractCode = state.GetCode(*txInfo.tx.To(), false)
contractInfo.ContractAddress = txInfo.tx.To()
} else {
// contract creation, so address is in receipt
- contractInfo.ContractCode = state.GetCode(txInfo.receipt.ContractAddress)
+ contractInfo.ContractCode = state.GetCode(txInfo.receipt.ContractAddress, false)
contractInfo.ContractAddress = &txInfo.receipt.ContractAddress
}
contractInfo.ExecutionResult, rosettaError = s.getTransactionTrace(ctx, blk, txInfo)
diff --git a/rosetta/services/construction_check.go b/rosetta/services/construction_check.go
index c842770ab7..d08ed6e8f3 100644
--- a/rosetta/services/construction_check.go
+++ b/rosetta/services/construction_check.go
@@ -289,7 +289,7 @@ func (s *ConstructAPI) ConstructionMetadata(
GasPrice: sugNativePrice,
GasLimit: estGasUsed,
Transaction: options.TransactionMetadata,
- ContractCode: state.GetCode(contractAddress),
+ ContractCode: state.GetCode(contractAddress, false),
EvmErrorMessage: evmErrorMsg,
EvmReturn: evmReturn,
})
diff --git a/rpc/blockchain.go b/rpc/blockchain.go
index 46e495944d..ae588e7500 100644
--- a/rpc/blockchain.go
+++ b/rpc/blockchain.go
@@ -807,7 +807,10 @@ func (s *PublicBlockchainService) GetProof(
return nil, err
}
- storageTrie := state.StorageTrie(address)
+ storageTrie, errTr := state.StorageTrie(address)
+ if errTr != nil {
+ return
+ }
storageHash := types.EmptyRootHash
codeHash := state.GetCodeHash(address)
storageProof := make([]StorageResult, len(storageKeys))
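One nit on the hunk above: the bare `return` only compiles if `GetProof` declares named results, and even then it silently drops `errTr`. An explicit form avoids both problems (a sketch assuming the conventional `(map[string]interface{}, error)` result shape):

```go
storageTrie, errTr := state.StorageTrie(address)
if errTr != nil {
	return nil, errTr // surface the trie-open failure to the caller
}
```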
diff --git a/rpc/contract.go b/rpc/contract.go
index abcb4f9418..daed35edd7 100644
--- a/rpc/contract.go
+++ b/rpc/contract.go
@@ -123,7 +123,7 @@ func (s *PublicContractService) GetCode(
DoMetricRPCQueryInfo(GetCode, FailedNumber)
return nil, err
}
- code := state.GetCode(address)
+ code := state.GetCode(address, false)
// Response output is the same for all versions
return code, state.Error()
diff --git a/rpc/transaction.go b/rpc/transaction.go
index 92e9746954..8ea211d6a9 100644
--- a/rpc/transaction.go
+++ b/rpc/transaction.go
@@ -203,7 +203,7 @@ func (s *PublicTransactionService) GetTransactionByHash(
DoMetricRPCQueryInfo(GetTransactionByHash, FailedNumber)
return nil, nil
}
- block, err := s.hmy.GetBlock(ctx, blockHash)
+ block, err := s.hmy.GetHeader(ctx, blockHash)
if err != nil {
utils.Logger().Debug().
Err(err).
diff --git a/staking/availability/measure_test.go b/staking/availability/measure_test.go
index 8f0f502b1e..9a35ad80eb 100644
--- a/staking/availability/measure_test.go
+++ b/staking/availability/measure_test.go
@@ -434,8 +434,8 @@ func (ctx *incStateTestCtx) checkAddrIncStateByType(addr common.Address, typeInc
// checkHmyNodeStateChangeByAddr checks the state change for hmy nodes. Since hmy nodes does not
// have wrapper, it is supposed to be unchanged in code field
func (ctx *incStateTestCtx) checkHmyNodeStateChangeByAddr(addr common.Address) error {
- snapCode := ctx.snapState.GetCode(addr)
- curCode := ctx.state.GetCode(addr)
+ snapCode := ctx.snapState.GetCode(addr, false)
+ curCode := ctx.state.GetCode(addr, false)
if !reflect.DeepEqual(snapCode, curCode) {
return errors.New("code not expected")
}
@@ -618,7 +618,7 @@ func (state testStateDB) UpdateValidatorWrapper(addr common.Address, wrapper *st
return nil
}
-func (state testStateDB) GetCode(addr common.Address) []byte {
+func (state testStateDB) GetCode(addr common.Address, isValidatorCode bool) []byte {
wrapper, ok := state[addr]
if !ok {
return nil
diff --git a/staking/slash/double-sign_test.go b/staking/slash/double-sign_test.go
index 7dbd3ed707..470a8ae1d8 100644
--- a/staking/slash/double-sign_test.go
+++ b/staking/slash/double-sign_test.go
@@ -8,7 +8,7 @@ import (
"strings"
"testing"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/crypto/bls"
@@ -1171,7 +1171,7 @@ func defaultTestStateDB() *state.DB {
func makeTestStateDB() *state.DB {
db := state.NewDatabase(rawdb.NewMemoryDatabase())
- sdb, err := state.New(common.Hash{}, db)
+ sdb, err := state.New(common.Hash{}, db, nil)
if err != nil {
panic(err)
}
diff --git a/test/chain/chain/chain_makers.go b/test/chain/chain/chain_makers.go
index b4ca9ecd7c..a902fc8441 100644
--- a/test/chain/chain/chain_makers.go
+++ b/test/chain/chain/chain_makers.go
@@ -216,7 +216,7 @@ func GenerateChain(
return nil, nil
}
for i := 0; i < n; i++ {
- statedb, err := state.New(parent.Root(), state.NewDatabase(db))
+ statedb, err := state.New(parent.Root(), state.NewDatabase(db), nil)
if err != nil {
panic(err)
}
diff --git a/test/chain/main.go b/test/chain/main.go
index e78b519356..fd1a74e737 100644
--- a/test/chain/main.go
+++ b/test/chain/main.go
@@ -6,7 +6,7 @@ import (
"log"
"math/big"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
chain2 "github.com/harmony-one/harmony/test/chain/chain"
"github.com/ethereum/go-ethereum/common"
diff --git a/test/chain/reward/main.go b/test/chain/reward/main.go
index 3848697b66..5cfe9425a1 100644
--- a/test/chain/reward/main.go
+++ b/test/chain/reward/main.go
@@ -6,7 +6,7 @@ import (
"math/rand"
"time"
- "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/harmony-one/harmony/core/rawdb"
msg_pb "github.com/harmony-one/harmony/api/proto/message"
"github.com/harmony-one/harmony/crypto/bls"
@@ -110,7 +110,7 @@ func main() {
_ = genesis
engine := chain.NewEngine()
bc, _ := core.NewBlockChain(database, state.NewDatabase(database), nil, nil, gspec.Config, engine, vm.Config{})
- statedb, _ := state.New(common2.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+ statedb, _ := state.New(common2.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
msg := createValidator()
statedb.AddBalance(msg.ValidatorAddress, new(big.Int).Mul(big.NewInt(5e18), big.NewInt(2000)))
validator, err := core.VerifyAndCreateValidatorFromMsg(