From 10f7ad1dc3acb963b73fda8f8e99f730e9186b01 Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Fri, 24 Mar 2023 09:52:19 -0400 Subject: [PATCH 1/8] Update Cortina gas limit to 15m and move shanghai changes to DUpgrade (#1237) --- Dockerfile | 2 +- accounts/abi/bind/bind_test.go | 2 +- core/state_processor_test.go | 3 +- core/state_transition.go | 4 +-- core/tx_pool.go | 8 +++--- core/vm/interpreter.go | 4 +-- core/vm/jump_table.go | 4 +-- eth/tracers/api.go | 4 +++ params/config.go | 51 +++++++++++++++++++++++----------- plugin/evm/vm_test.go | 2 +- tests/init.go | 19 +++++++++++++ 11 files changed, 73 insertions(+), 30 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1757bf3edd..d6b8578fd0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,7 +19,7 @@ WORKDIR $GOPATH/src/github.com/ava-labs/avalanchego RUN go mod download # Replace the coreth dependency RUN go mod edit -replace github.com/ava-labs/coreth=../coreth -RUN go mod download && go mod tidy -compat=1.20 +RUN go mod download && go mod tidy -compat=1.19 # Build the AvalancheGo binary with local version of coreth. 
RUN ./scripts/build_avalanche.sh diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index b614021970..b06e6bba65 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -2151,7 +2151,7 @@ func golangBindings(t *testing.T, overload bool) { if out, err := replacer.CombinedOutput(); err != nil { t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) } - tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.20") + tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.19") tidier.Dir = pkg if out, err := tidier.CombinedOutput(); err != nil { t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index a42d086b7e..04f79b554c 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -308,7 +308,7 @@ func TestStateProcessorErrors(t *testing.T) { } } - // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (Cortina/EIP-3860) enabled. + // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (DUpgrade/EIP-3860) enabled. 
{ var ( db = rawdb.NewMemoryDatabase() @@ -337,6 +337,7 @@ func TestStateProcessorErrors(t *testing.T) { ApricotPhasePost6BlockTimestamp: big.NewInt(0), BanffBlockTimestamp: big.NewInt(0), CortinaBlockTimestamp: big.NewInt(0), + DUpgradeBlockTimestamp: big.NewInt(0), }, Alloc: GenesisAlloc{ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ diff --git a/core/state_transition.go b/core/state_transition.go index e3a4b1381f..8cd5587f3e 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -338,7 +338,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { ) // Check clauses 4-5, subtract intrinsic gas if everything is correct - gas, err := IntrinsicGas(st.data, st.msg.AccessList(), contractCreation, rules.IsHomestead, rules.IsIstanbul, rules.IsCortina) + gas, err := IntrinsicGas(st.data, st.msg.AccessList(), contractCreation, rules.IsHomestead, rules.IsIstanbul, rules.IsDUpgrade) if err != nil { return nil, err } @@ -353,7 +353,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } // Check whether the init code size has been exceeded. - if rules.IsCortina && contractCreation && len(st.data) > params.MaxInitCodeSize { + if rules.IsDUpgrade && contractCreation && len(st.data) > params.MaxInitCodeSize { return nil, fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(st.data), params.MaxInitCodeSize) } diff --git a/core/tx_pool.go b/core/tx_pool.go index f9fd96533e..a069b62d19 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -261,7 +261,7 @@ type TxPool struct { istanbul bool // Fork indicator whether we are in the istanbul stage. eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions. - cortina bool // Fork indicator whether cortina is activated. 
(equivalent to Shanghai in go-ethereum) + eip3860 bool // Fork indicator whether EIP-3860 is activated. (activated in Shanghai Upgrade in Ethereum) currentHead *types.Header // [currentState] is the state of the blockchain head. It is reset whenever @@ -678,7 +678,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, txSize, txMaxSize) } // Check whether the init code size has been exceeded. - if pool.cortina && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { + if pool.eip3860 && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { return fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) } // Transactions can't be negative. This may never happen using RLP decoded @@ -722,7 +722,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { // Transactor should have enough funds to cover the costs // Ensure the transaction has more gas than the basic tx fee. 
- intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.cortina) + intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.eip3860) if err != nil { return err } @@ -1416,7 +1416,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { timestamp := new(big.Int).SetUint64(newHead.Time) pool.eip2718 = pool.chainconfig.IsApricotPhase2(timestamp) pool.eip1559 = pool.chainconfig.IsApricotPhase3(timestamp) - pool.cortina = pool.chainconfig.IsCortina(timestamp) + pool.eip3860 = pool.chainconfig.IsDUpgrade(timestamp) } // promoteExecutables moves transactions that have become processable from the diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 8f9f7ab6fc..b9ce3f46c8 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -90,8 +90,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { // If jump table was not initialised we set the default one. if cfg.JumpTable == nil { switch { - case evm.chainRules.IsCortina: - cfg.JumpTable = &cortinaInstructionSet + case evm.chainRules.IsDUpgrade: + cfg.JumpTable = &dUpgradeInstructionSet case evm.chainRules.IsApricotPhase3: cfg.JumpTable = &apricotPhase3InstructionSet case evm.chainRules.IsApricotPhase2: diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index b9dd7c3869..6e51d9b4a9 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -65,7 +65,7 @@ var ( apricotPhase1InstructionSet = newApricotPhase1InstructionSet() apricotPhase2InstructionSet = newApricotPhase2InstructionSet() apricotPhase3InstructionSet = newApricotPhase3InstructionSet() - cortinaInstructionSet = newCortinaInstructionSet() + dUpgradeInstructionSet = newDUpgradeInstructionSet() ) // JumpTable contains the EVM opcodes supported at a given fork. 
@@ -89,7 +89,7 @@ func validate(jt JumpTable) JumpTable { return jt } -func newCortinaInstructionSet() JumpTable { +func newDUpgradeInstructionSet() JumpTable { instructionSet := newApricotPhase3InstructionSet() enable3855(&instructionSet) // PUSH0 instruction enable3860(&instructionSet) // Limit and meter initcode diff --git a/eth/tracers/api.go b/eth/tracers/api.go index f3d2da4841..d912c5307a 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1013,6 +1013,10 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) copy.CortinaBlockTimestamp = timestamp canon = false } + if timestamp := override.DUpgradeBlockTimestamp; timestamp != nil { + copy.DUpgradeBlockTimestamp = timestamp + canon = false + } return copy, canon } diff --git a/params/config.go b/params/config.go index 634dad40ad..79c201f655 100644 --- a/params/config.go +++ b/params/config.go @@ -75,6 +75,7 @@ var ( ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 7, 3, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC).Unix()), // TODO Add Cortina timestamp + // TODO Add DUpgrade timestamp } // AvalancheFujiChainConfig is the configuration for the Fuji Test Network @@ -102,6 +103,7 @@ var ( ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 7, 6, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2022, time.October, 3, 14, 0, 0, 0, time.UTC).Unix()), // TODO add Cortina timestamp + // TODO Add DUpgrade timestamp } // AvalancheLocalChainConfig is the configuration for the Avalanche Local Network @@ -128,20 +130,22 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(0), ApricotPhasePost6BlockTimestamp: big.NewInt(0), BanffBlockTimestamp: big.NewInt(0), - } - - TestChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), 
big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} - TestLaunchConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase1Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase2Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase3Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase4Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil} - TestApricotPhase5Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), 
big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil} - TestApricotPhasePre6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil} - TestApricotPhase6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil} - TestApricotPhasePost6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil} - TestBanffChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil} - TestCortinaChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), 
big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} + CortinaBlockTimestamp: big.NewInt(0), + } + + TestChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} + TestLaunchConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase1Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase2Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase3Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase4Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), 
big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase5Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil} + TestApricotPhasePre6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil} + TestApricotPhase6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil} + TestApricotPhasePost6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil} + TestBanffChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), 
big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil} + TestCortinaChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil} + TestDUpgradeChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} TestRules = TestChainConfig.AvalancheRules(new(big.Int), new(big.Int)) ) @@ -190,10 +194,12 @@ type ChainConfig struct { ApricotPhase6BlockTimestamp *big.Int `json:"apricotPhase6BlockTimestamp,omitempty"` // Apricot Phase Post-6 deprecates the NativeAssetCall precompile (soft). (nil = no fork, 0 = already activated) ApricotPhasePost6BlockTimestamp *big.Int `json:"apricotPhasePost6BlockTimestamp,omitempty"` - // Banff TODO comment. (nil = no fork, 0 = already activated) + // Banff restricts import/export transactions to AVAX. (nil = no fork, 0 = already activated) BanffBlockTimestamp *big.Int `json:"banffBlockTimestamp,omitempty"` - // Cortina TODO comment. (nil = no fork, 0 = already activated) + // Cortina increases the block gas limit to 15M. (nil = no fork, 0 = already activated) CortinaBlockTimestamp *big.Int `json:"cortinaBlockTimestamp,omitempty"` + // DUpgrade activates the Shanghai upgrade from Ethereum. 
(nil = no fork, 0 = already activated) + DUpgradeBlockTimestamp *big.Int `json:"dUpgradeBlockTimestamp,omitempty"` } // AvalancheContext provides Avalanche specific context directly into the EVM. @@ -236,6 +242,7 @@ func (c *ChainConfig) String() string { banner += fmt.Sprintf(" - Apricot Phase Post-6 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.8.0\n", c.ApricotPhasePost6BlockTimestamp) banner += fmt.Sprintf(" - Banff Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0)\n", c.BanffBlockTimestamp) banner += fmt.Sprintf(" - Cortina Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0)\n", c.CortinaBlockTimestamp) + banner += fmt.Sprintf(" - DUpgrade Timestamp %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0)\n", c.DUpgradeBlockTimestamp) banner += "\n" return banner } @@ -354,6 +361,12 @@ func (c *ChainConfig) IsCortina(blockTimestamp *big.Int) bool { return utils.IsForked(c.CortinaBlockTimestamp, blockTimestamp) } +// IsDUpgrade returns whether [blockTimestamp] represents a block +// with a timestamp after the DUpgrade upgrade time. +func (c *ChainConfig) IsDUpgrade(blockTimestamp *big.Int) bool { + return utils.IsForked(c.DUpgradeBlockTimestamp, blockTimestamp) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. 
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, timestamp uint64) *ConfigCompatError { @@ -434,6 +447,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "apricotPhasePost6BlockTimestamp", block: c.ApricotPhasePost6BlockTimestamp}, {name: "banffBlockTimestamp", block: c.BanffBlockTimestamp}, {name: "cortinaBlockTimestamp", block: c.CortinaBlockTimestamp}, + {name: "dUpgradeBlockTimestamp", block: c.DUpgradeBlockTimestamp}, } { if lastFork.name != "" { // Next one must be higher number @@ -531,6 +545,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, lastHeight *big.Int, if isForkIncompatible(c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp, lastTimestamp) { return newCompatError("Cortina fork block timestamp", c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp) } + if isForkIncompatible(c.DUpgradeBlockTimestamp, newcfg.DUpgradeBlockTimestamp, lastTimestamp) { + return newCompatError("DUpgrade fork block timestamp", c.DUpgradeBlockTimestamp, newcfg.DUpgradeBlockTimestamp) + } return nil } @@ -596,6 +613,7 @@ type Rules struct { IsApricotPhasePre6, IsApricotPhase6, IsApricotPhasePost6 bool IsBanff bool IsCortina bool + IsDUpgrade bool // Precompiles maps addresses to stateful precompiled contracts that are enabled // for this rule set. @@ -638,6 +656,7 @@ func (c *ChainConfig) AvalancheRules(blockNum, blockTimestamp *big.Int) Rules { rules.IsApricotPhasePost6 = c.IsApricotPhasePost6(blockTimestamp) rules.IsBanff = c.IsBanff(blockTimestamp) rules.IsCortina = c.IsCortina(blockTimestamp) + rules.IsDUpgrade = c.IsDUpgrade(blockTimestamp) // Initialize the stateful precompiles that should be enabled at [blockTimestamp]. 
rules.Precompiles = make(map[common.Address]precompile.StatefulPrecompiledContract) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 31116ea8a7..73d99b3315 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -91,7 +91,7 @@ var ( genesisJSONBanff = "{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0,\"apricotPhase5BlockTimestamp\":0,\"apricotPhasePre6BlockTimestamp\":0,\"apricotPhase6BlockTimestamp\":0,\"apricotPhasePost6BlockTimestamp\":0,\"banffBlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000
000000000000000\"}" genesisJSONCortina = "{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0,\"apricotPhase5BlockTimestamp\":0,\"apricotPhasePre6BlockTimestamp\":0,\"apricotPhase6BlockTimestamp\":0,\"apricotPhasePost6BlockTimestamp\":0,\"banffBlockTimestamp\":0,\"cortinaBlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" - genesisJSONLatest = genesisJSONBanff // TODO: update to Cortina + genesisJSONLatest = genesisJSONCortina apricotRulesPhase0 = params.Rules{} apricotRulesPhase1 = 
params.Rules{IsApricotPhase1: true} diff --git a/tests/init.go b/tests/init.go index 6ae8d0591a..eeb5005098 100644 --- a/tests/init.go +++ b/tests/init.go @@ -259,6 +259,25 @@ var Forks = map[string]*params.ChainConfig{ BanffBlockTimestamp: big.NewInt(0), CortinaBlockTimestamp: big.NewInt(0), }, + "DUpgrade": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase2BlockTimestamp: big.NewInt(0), + ApricotPhase3BlockTimestamp: big.NewInt(0), + ApricotPhase4BlockTimestamp: big.NewInt(0), + ApricotPhase5BlockTimestamp: big.NewInt(0), + BanffBlockTimestamp: big.NewInt(0), + CortinaBlockTimestamp: big.NewInt(0), + DUpgradeBlockTimestamp: big.NewInt(0), + }, } // Returns the set of defined fork names From 10b19a64b9dc1f19c8f81edb519f91a445d58fa5 Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Mon, 3 Apr 2023 14:32:05 -0400 Subject: [PATCH 2/8] Add Cortina activation timestamp for April 6 (#241) --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index 79c201f655..c543fd11c4 100644 --- a/params/config.go +++ b/params/config.go @@ -102,7 +102,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC).Unix()), ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 7, 6, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2022, time.October, 3, 14, 0, 0, 0, time.UTC).Unix()), - // TODO add Cortina timestamp + CortinaBlockTimestamp: big.NewInt(time.Date(2023, time.April, 6, 15, 0, 0, 0, time.UTC).Unix()), // TODO Add DUpgrade timestamp } From 4f5029257a832c9f44dfc7b9896e06dd4b98e299 Mon Sep 17 00:00:00 2001 From: aaronbuchwald 
Date: Mon, 3 Apr 2023 18:15:42 -0400 Subject: [PATCH 3/8] Bump version to v0.12.0 and bump ago dep to v1.9.16 (#242) * Bump version to v0.12.0 and bump ago dep to v1.9.16 * update gh actions --- .github/workflows/ci.yml | 24 ++++++++++++------------ go.mod | 2 +- go.sum | 4 ++-- plugin/evm/version.go | 2 +- scripts/versions.sh | 4 ++-- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3d55b89bbb..ee9296c606 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,16 +20,16 @@ jobs: name: Lint runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: check out ${{ github.event.inputs.avalanchegoRepo }} ${{ github.event.inputs.avalanchegoBranch }} if: ${{ github.event_name == 'workflow_dispatch' }} - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: ${{ github.event.inputs.avalanchegoRepo }} ref: ${{ github.event.inputs.avalanchegoBranch }} path: avalanchego token: ${{ secrets.AVALANCHE_PAT }} - - uses: actions/setup-go@v1 + - uses: actions/setup-go@v3 with: go-version: "1.20" - name: change avalanchego dep @@ -54,16 +54,16 @@ jobs: go: ['1.20'] os: [macos-11.0, ubuntu-20.04, windows-latest] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: check out ${{ github.event.inputs.avalanchegoRepo }} ${{ github.event.inputs.avalanchegoBranch }} if: ${{ github.event_name == 'workflow_dispatch' }} - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: ${{ github.event.inputs.avalanchegoRepo }} ref: ${{ github.event.inputs.avalanchegoBranch }} path: avalanchego token: ${{ secrets.AVALANCHE_PAT }} - - uses: actions/setup-go@v1 + - uses: actions/setup-go@v3 with: go-version: ${{ matrix.go }} - name: change avalanchego dep @@ -87,16 +87,16 @@ jobs: go: ['1.20'] os: [ubuntu-20.04] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: check out ${{ 
github.event.inputs.avalanchegoRepo }} ${{ github.event.inputs.avalanchegoBranch }} if: ${{ github.event_name == 'workflow_dispatch' }} - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: ${{ github.event.inputs.avalanchegoRepo }} ref: ${{ github.event.inputs.avalanchegoBranch }} path: avalanchego token: ${{ secrets.AVALANCHE_PAT }} - - uses: actions/setup-go@v1 + - uses: actions/setup-go@v3 with: go-version: ${{ matrix.go }} - name: change avalanchego dep @@ -118,16 +118,16 @@ jobs: go: [ '1.20' ] os: [ ubuntu-20.04 ] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: check out ${{ github.event.inputs.avalanchegoRepo }} ${{ github.event.inputs.avalanchegoBranch }} if: ${{ github.event_name == 'workflow_dispatch' }} - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: ${{ github.event.inputs.avalanchegoRepo }} ref: ${{ github.event.inputs.avalanchegoBranch }} path: avalanchego token: ${{ secrets.AVALANCHE_PAT }} - - uses: actions/setup-go@v1 + - uses: actions/setup-go@v3 with: go-version: ${{ matrix.go }} - name: change avalanchego dep diff --git a/go.mod b/go.mod index e983013307..f1e0f0b3d4 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.18 require ( github.com/VictoriaMetrics/fastcache v1.10.0 - github.com/ava-labs/avalanchego v1.9.11 + github.com/ava-labs/avalanchego v1.9.16 github.com/cespare/cp v0.1.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 diff --git a/go.sum b/go.sum index fc1fdbe2eb..9712f71304 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= 
-github.com/ava-labs/avalanchego v1.9.11 h1:5hXHJMvErfaolWD7Hw9gZaVylck2shBaV/2NTHA0BfA= -github.com/ava-labs/avalanchego v1.9.11/go.mod h1:nNc+4JCIJMaEt2xRmeMVAUyQwDIap7RvnMrfWD2Tpo8= +github.com/ava-labs/avalanchego v1.9.16 h1:JarxIn7gy4V9f1dBgUxubRRO6CrqY2MprOLGqEmk+Vg= +github.com/ava-labs/avalanchego v1.9.16/go.mod h1:Unm7ruhAvLSRP+7gIfwyHNf+wEehWLsFhY9yp10nDbw= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= diff --git a/plugin/evm/version.go b/plugin/evm/version.go index 398f0f1057..5fdcddd874 100644 --- a/plugin/evm/version.go +++ b/plugin/evm/version.go @@ -11,7 +11,7 @@ var ( // GitCommit is set by the build script GitCommit string // Version is the version of Coreth - Version string = "v0.11.9" + Version string = "v0.12.0" ) func init() { diff --git a/scripts/versions.sh b/scripts/versions.sh index edd99b1a2f..f2a7c6cf72 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Set up the versions to be used -coreth_version=${CORETH_VERSION:-'v0.11.9'} +coreth_version=${CORETH_VERSION:-'v0.12.0'} # Don't export them as they're used in the context of other calls -avalanche_version=${AVALANCHE_VERSION:-'v1.9.11'} +avalanche_version=${AVALANCHE_VERSION:-'v1.9.16'} From 0e82b37c8eec12a9958da90c791d997c9c792b07 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Mon, 27 Mar 2023 13:33:36 -0700 Subject: [PATCH 4/8] Geth upgrade (takes commits v1.26.10..b628d72) (#1235) --- accounts/abi/abi_test.go | 12 +- accounts/abi/bind/backends/simulated.go | 9 +- accounts/abi/bind/backends/simulated_test.go | 72 +++---- accounts/abi/bind/base.go | 4 +- accounts/abi/reflect.go | 22 +- accounts/abi/utils.go | 5 +- consensus/misc/dao.go | 8 +- core/bench_test.go | 14 +- core/blockchain.go | 129 
++++++------ core/blockchain_repair_test.go | 17 +- core/blockchain_snapshot_test.go | 45 ++-- core/blockchain_test.go | 122 +++++------ core/chain_makers.go | 14 ++ core/chain_makers_test.go | 5 +- core/dao_test.go | 72 ++++--- core/error.go | 2 +- core/genesis.go | 30 ++- core/genesis_test.go | 14 +- core/headerchain_test.go | 11 +- core/mkalloc.go | 8 +- core/rlp_test.go | 10 +- core/state/database.go | 12 +- core/state/snapshot/difflayer.go | 2 +- core/state/snapshot/generate_test.go | 24 +-- core/state/statedb.go | 2 +- core/state/trie_prefetcher.go | 3 + core/state_processor_test.go | 23 +- core/state_transition.go | 50 +++-- core/state_transition_test.go | 4 +- core/test_blockchain.go | 210 ++++++------------- core/tx_noncer.go | 2 +- core/vm/contracts.go | 9 +- core/vm/gas_table.go | 65 +++--- core/vm/instructions.go | 37 ++-- eth/api.go | 4 +- eth/api_backend.go | 7 +- eth/backend.go | 26 +-- eth/filters/filter_system_test.go | 18 +- eth/filters/filter_test.go | 34 ++- eth/gasprice/gasprice_test.go | 16 +- eth/state_accessor.go | 107 ++++++---- eth/tracers/api.go | 204 ++++++++++-------- eth/tracers/api_test.go | 127 +++++++++-- eth/tracers/js/goja.go | 43 ++-- eth/tracers/native/tracer.go | 34 ++- ethclient/ethclient.go | 4 +- internal/ethapi/api.go | 54 ++++- params/version.go | 8 +- plugin/evm/vm.go | 1 + rpc/client.go | 44 +++- rpc/client_opt.go | 116 ++++++++++ rpc/client_opt_test.go | 34 +++ rpc/client_test.go | 56 +---- rpc/errors.go | 18 +- rpc/handler.go | 7 +- rpc/http.go | 54 +++-- rpc/json.go | 15 +- rpc/server_test.go | 2 +- rpc/service.go | 3 +- rpc/subscription_test.go | 2 +- rpc/testdata/internal-error.js | 7 + rpc/testdata/invalid-badversion.js | 19 ++ rpc/testservice_test.go | 14 ++ rpc/websocket.go | 78 +++++-- sync/handlers/handler.go | 4 +- sync/statesync/sync_test.go | 157 +++++++------- sync/statesync/test_sync.go | 14 +- trie/committer.go | 18 +- trie/database.go | 5 - trie/proof.go | 2 +- trie/secure_trie.go | 101 ++++----- 
trie/stacktrie.go | 3 +- trie/test_trie.go | 2 +- trie/trie.go | 12 +- trie/util_test.go | 4 +- 75 files changed, 1437 insertions(+), 1109 deletions(-) create mode 100644 rpc/client_opt.go create mode 100644 rpc/client_opt_test.go create mode 100644 rpc/testdata/internal-error.js create mode 100644 rpc/testdata/invalid-badversion.js diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index 73c68d923b..d156d82c17 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -737,12 +737,12 @@ func TestBareEvents(t *testing.T) { // TestUnpackEvent is based on this contract: // // contract T { -// event received(address sender, uint amount, bytes memo); -// event receivedAddr(address sender); -// function receive(bytes memo) external payable { -// received(msg.sender, msg.value, memo); -// receivedAddr(msg.sender); -// } +// event received(address sender, uint amount, bytes memo); +// event receivedAddr(address sender); +// function receive(bytes memo) external payable { +// received(msg.sender, msg.value, memo); +// receivedAddr(msg.sender); +// } // } // // When receive("X") is called with sender 0x00... 
and value 1, it produces this tx receipt: diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 38aa5ad26a..cc1e2f1b1c 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -109,10 +109,13 @@ type SimulatedBackend struct { func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { cpcfg := params.TestChainConfig cpcfg.ChainID = big.NewInt(1337) - genesis := core.Genesis{Config: cpcfg, GasLimit: gasLimit, Alloc: alloc} - genesis.MustCommit(database) + genesis := core.Genesis{ + Config: cpcfg, + GasLimit: gasLimit, + Alloc: alloc, + } cacheConfig := &core.CacheConfig{} - blockchain, _ := core.NewBlockChain(database, cacheConfig, genesis.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ := core.NewBlockChain(database, cacheConfig, &genesis, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) backend := &SimulatedBackend{ database: database, diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 6bcc7a9752..9c44b249ad 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -104,7 +104,8 @@ func TestSimulatedBackend(t *testing.T) { var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") -// the following is based on this contract: +// the following is based on this contract: +// // contract T { // event received(address sender, uint amount, bytes memo); // event receivedAddr(address sender); @@ -112,7 +113,7 @@ var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d // function receive(bytes calldata memo) external payable returns (string memory res) { // emit received(msg.sender, msg.value, memo); // emit receivedAddr(msg.sender); -// return "hello world"; +// return "hello world"; // } // } const abiJSON = `[ 
{ "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]` @@ -433,12 +434,13 @@ func TestEstimateGas(t *testing.T) { /* pragma solidity ^0.6.4; contract GasEstimation { - function PureRevert() public { revert(); } - function Revert() public { revert("revert reason");} - function OOG() public { for (uint i = 0; ; i++) {}} - function Assert() public { assert(false);} - function Valid() public {} - }*/ + function PureRevert() public { revert(); } + function Revert() public { revert("revert reason");} + function OOG() public { for (uint i = 0; ; i++) {}} + function Assert() public { assert(false);} + function Valid() public {} + } + */ const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" const contractBin = 
"0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033" @@ -1079,27 +1081,27 @@ func TestPendingAndCallContract(t *testing.T) { // This test is based on the following contract: /* contract Reverter { - function revertString() public pure{ - require(false, "some error"); - } - function revertNoString() public pure { - require(false, ""); - } - function revertASM() public pure { - assembly { - revert(0x0, 0x0) - } - } - function noRevert() public pure { - assembly { - // Assembles something that looks like require(false, "some error") but is not reverted - mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000) - mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020) - mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a) - mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000) - return(0x0, 0x64) - } - } + function revertString() public pure{ + require(false, "some error"); + } + function revertNoString() public pure { + require(false, ""); + } + function revertASM() public pure { + assembly { + revert(0x0, 0x0) + } + } + function noRevert() public pure { + assembly { + // Assembles something that looks like require(false, 
"some error") but is not reverted + mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000) + mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020) + mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a) + mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000) + return(0x0, 0x64) + } + } }*/ func TestCallContractRevert(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) @@ -1226,11 +1228,11 @@ func TestFork(t *testing.T) { /* Example contract to test event emission: -pragma solidity >=0.7.0 <0.9.0; -contract Callable { - event Called(); - function Call() public { emit Called(); } -} + pragma solidity >=0.7.0 <0.9.0; + contract Callable { + event Called(); + function Call() public { emit Called(); } + } */ // The fork tests are commented out because transactions are not indexed in coreth until they are marked // as accepted, which breaks the logic of these tests. @@ -1250,7 +1252,7 @@ contract Callable { // // 7. Mine two blocks to trigger a reorg. // // 8. Check that the event was removed. // // 9. Re-send the transaction and mine a block. -// // 10. Check that the event was reborn. +// // 10. Check that the event was reborn. 
// func TestForkLogsReborn(t *testing.T) { // testAddr := crypto.PubkeyToAddress(testKey.PublicKey) // sim := simTestBackend(testAddr) diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 91eb43ecad..7c6df08396 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -43,6 +43,8 @@ import ( "github.com/ethereum/go-ethereum/event" ) +const basefeeWiggleMultiplier = 2 + var ( ErrNilAssetAmount = errors.New("cannot specify nil asset amount for native asset call") errNativeAssetDeployContract = errors.New("cannot specify native asset params while deploying a contract") @@ -316,7 +318,7 @@ func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Add if gasFeeCap == nil { gasFeeCap = new(big.Int).Add( gasTipCap, - new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + new(big.Int).Mul(head.BaseFee, big.NewInt(basefeeWiggleMultiplier)), ) } if gasFeeCap.Cmp(gasTipCap) < 0 { diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index 9fc91872e0..9f7a07a0c3 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -35,9 +35,9 @@ import ( ) // ConvertType converts an interface of a runtime type into a interface of the -// given type -// e.g. turn -// var fields []reflect.StructField +// given type, e.g. turn this code: +// +// var fields []reflect.StructField // // fields = append(fields, reflect.StructField{ // Name: "X", @@ -45,8 +45,9 @@ import ( // Tag: reflect.StructTag("json:\"" + "x" + "\""), // } // -// into -// type TupleT struct { X *big.Int } +// into: +// +// type TupleT struct { X *big.Int } func ConvertType(in interface{}, proto interface{}) interface{} { protoType := reflect.TypeOf(proto) if reflect.TypeOf(in).ConvertibleTo(protoType) { @@ -182,11 +183,12 @@ func setStruct(dst, src reflect.Value) error { } // mapArgNamesToStructFields maps a slice of argument names to struct fields. 
-// first round: for each Exportable field that contains a `abi:""` tag -// and this field name exists in the given argument name list, pair them together. -// second round: for each argument name that has not been already linked, -// find what variable is expected to be mapped into, if it exists and has not been -// used, pair them. +// +// first round: for each Exportable field that contains a `abi:""` tag and this field name +// exists in the given argument name list, pair them together. +// +// second round: for each argument name that has not been already linked, find what +// variable is expected to be mapped into, if it exists and has not been used, pair them. // // Note this function assumes the given value is a struct value. func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) { diff --git a/accounts/abi/utils.go b/accounts/abi/utils.go index dd28ec21c2..62b5fcd4e9 100644 --- a/accounts/abi/utils.go +++ b/accounts/abi/utils.go @@ -37,9 +37,8 @@ import "fmt" // and struct definition) name will be converted to camelcase style which // may eventually lead to name conflicts. // -// Name conflicts are mostly resolved by adding number suffix. -// e.g. if the abi contains Methods send, send1 -// ResolveNameConflict would return send2 for input send. +// Name conflicts are mostly resolved by adding number suffix. e.g. if the abi contains +// Methods "send" and "send1", ResolveNameConflict would return "send2" for input "send". 
func ResolveNameConflict(rawName string, used func(string) bool) string { name := rawName ok := used(name) diff --git a/consensus/misc/dao.go b/consensus/misc/dao.go index 9292509d81..95a3095e07 100644 --- a/consensus/misc/dao.go +++ b/consensus/misc/dao.go @@ -51,10 +51,10 @@ var ( // // DAO hard-fork extension to the header validity: // -// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range -// with the fork specific extra-data set -// b) if the node is pro-fork, require blocks in the specific range to have the -// unique extra-data set. +// - if the node is no-fork, do not accept blocks in the [fork, fork+10) range +// with the fork specific extra-data set. +// - if the node is pro-fork, require blocks in the specific range to have the +// unique extra-data set. func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error { // Short circuit validation if the node doesn't care about the DAO fork if config.DAOForkBlock == nil { diff --git a/core/bench_test.go b/core/bench_test.go index 6a6b3e408a..0bacc6f60f 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -156,16 +156,15 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { // Generate a chain of b.N blocks using the supplied block // generator function. - gspec := Genesis{ + gspec := &Genesis{ Config: params.TestChainConfig, Alloc: GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}}, } - genesis := gspec.MustCommit(db) - chain, _, _ := GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, b.N, 10, gen) + _, chain, _, _ := GenerateChainWithGenesis(gspec, dummy.NewFaker(), b.N, 10, gen) // Time the insertion of the new chain. // State and blocks are stored in the same DB. 
- chainman, _ := NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + chainman, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) defer chainman.Stop() b.ReportAllocs() b.ResetTimer() @@ -230,6 +229,11 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { rawdb.WriteHeader(db, header) rawdb.WriteCanonicalHash(db, hash, n) + if n == 0 { + rawdb.WriteChainConfig(db, hash, params.TestChainConfig) + } + rawdb.WriteHeadHeaderHash(db, hash) + if full || n == 0 { block := types.NewBlockWithHeader(header) rawdb.WriteBody(db, hash, n, block.Body()) @@ -268,7 +272,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } - chain, err := NewBlockChain(db, DefaultCacheConfig, params.TestChainConfig, dummy.NewFaker(), vm.Config{}, common.Hash{}) + chain, err := NewBlockChain(db, DefaultCacheConfig, nil, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) if err != nil { b.Fatalf("error creating chain: %v", err) } diff --git a/core/blockchain.go b/core/blockchain.go index 10dfad7599..b102ce27d9 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -34,6 +34,7 @@ import ( "io" "math/big" "runtime" + "strings" "sync" "sync/atomic" "time" @@ -280,8 +281,8 @@ type BlockChain struct { // available in the database. It initialises the default Ethereum Validator and // Processor. 
func NewBlockChain( - db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, - vmConfig vm.Config, lastAcceptedHash common.Hash, + db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, engine consensus.Engine, + vmConfig vm.Config, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool, ) (*BlockChain, error) { if cacheConfig == nil { return nil, errCacheConfigNotSpecified @@ -292,6 +293,24 @@ func NewBlockChain( txLookupCache, _ := lru.New(txLookupCacheLimit) badBlocks, _ := lru.New(badBlockLimit) + // Setup the genesis block, commit the provided genesis specification + // to database if the genesis block is not present yet, or load the + // stored one from database. + // Note: In go-ethereum, the code rewinds the chain on an incompatible config upgrade. + // We don't do this and expect the node operator to always update their node's configuration + // before network upgrades take effect. + chainConfig, _, err := SetupGenesisBlock(db, genesis, lastAcceptedHash, skipChainConfigCheckCompatible) + if err != nil { + return nil, err + } + log.Info("") + log.Info(strings.Repeat("-", 153)) + for _, line := range strings.Split(chainConfig.String(), "\n") { + log.Info(line) + } + log.Info(strings.Repeat("-", 153)) + log.Info("") + bc := &BlockChain{ chainConfig: chainConfig, cacheConfig: cacheConfig, @@ -318,7 +337,6 @@ func NewBlockChain( bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) bc.processor = NewStateProcessor(chainConfig, bc, engine) - var err error bc.hc, err = NewHeaderChain(db, chainConfig, cacheConfig, engine) if err != nil { return nil, err @@ -1211,23 +1229,6 @@ func (bc *BlockChain) InsertBlockManual(block *types.Block, writes bool) error { return err } -// gatherBlockLogs fetches logs from a previously inserted block. 
-func (bc *BlockChain) gatherBlockLogs(hash common.Hash, number uint64, removed bool) []*types.Log { - receipts := rawdb.ReadReceipts(bc.db, hash, number, bc.chainConfig) - var logs []*types.Log - for _, receipt := range receipts { - for _, log := range receipt.Logs { - l := *log - if removed { - l.Removed = true - } - logs = append(logs, &l) - } - } - - return logs -} - func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { start := time.Now() bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())), block.Transactions()) @@ -1382,22 +1383,19 @@ func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log { if number == nil { return nil } - return bc.gatherBlockLogs(hash, *number, removed) -} + receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) -// mergeLogs returns a merged log slice with specified sort order. -func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log { - var ret []*types.Log - if reverse { - for i := len(logs) - 1; i >= 0; i-- { - ret = append(ret, logs[i]...) - } - } else { - for i := 0; i < len(logs); i++ { - ret = append(ret, logs[i]...) 
+ var logs []*types.Log + for _, receipt := range receipts { + for _, log := range receipt.Logs { + l := *log + if removed { + l.Removed = true + } + logs = append(logs, &l) } } - return ret + return logs } // reorg takes two blocks, an old chain and a new chain and will reconstruct the @@ -1411,20 +1409,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { newChain types.Blocks oldChain types.Blocks commonBlock *types.Block - - deletedLogs [][]*types.Log - rebirthLogs [][]*types.Log ) // Reduce the longer chain to the same number as the shorter one if oldBlock.NumberU64() > newBlock.NumberU64() { // Old chain is longer, gather all transactions and logs as deleted ones for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { oldChain = append(oldChain, oldBlock) - // Collect deleted logs for notification - logs := bc.collectLogs(oldBlock.Hash(), true) - if len(logs) > 0 { - deletedLogs = append(deletedLogs, logs) - } } } else { // New chain is longer, stash all blocks away for subsequent insertion @@ -1448,12 +1438,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } // Remove an old block as well as stash away a new block oldChain = append(oldChain, oldBlock) - // Collect deleted logs for notification - logs := bc.collectLogs(oldBlock.Hash(), true) - if len(logs) > 0 { - deletedLogs = append(deletedLogs, logs) - } - newChain = append(newChain, newBlock) // Step back with both chains @@ -1492,13 +1476,8 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { for i := len(newChain) - 1; i >= 1; i-- { // Insert the block in the canonical way, re-writing history bc.writeHeadBlock(newChain[i]) - - // Collect reborn logs due to chain reorg - logs := bc.collectLogs(newChain[i].Hash(), false) - if len(logs) > 0 { - rebirthLogs = append(rebirthLogs, logs) - } } + // Delete any canonical number assignments above the new head 
indexesBatch := bc.db.NewBatch() @@ -1517,20 +1496,42 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { log.Crit("Failed to delete useless indexes", "err", err) } - // If any logs need to be fired, do it now. In theory we could avoid creating - // this goroutine if there are no events to fire, but realistcally that only - // ever happens if we're reorging empty blocks, which will only happen on idle - // networks where performance is not an issue either way. - if len(deletedLogs) > 0 { - bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)}) + // Send out events for logs from the old canon chain, and 'reborn' + // logs from the new canon chain. The number of logs can be very + // high, so the events are sent in batches of size around 512. + + // Deleted logs + blocks: + var deletedLogs []*types.Log + for i := len(oldChain) - 1; i >= 0; i-- { + // Also send event for blocks removed from the canon chain. + bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) + + // Collect deleted logs for notification + if logs := bc.collectLogs(oldChain[i].Hash(), true); len(logs) > 0 { + deletedLogs = append(deletedLogs, logs...) + } + if len(deletedLogs) > 512 { + bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) + deletedLogs = nil + } } - if len(rebirthLogs) > 0 { - bc.logsFeed.Send(mergeLogs(rebirthLogs, false)) + if len(deletedLogs) > 0 { + bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) } - if len(oldChain) > 0 { - for i := len(oldChain) - 1; i >= 0; i-- { - bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) + + // New logs: + var rebirthLogs []*types.Log + for i := len(newChain) - 1; i >= 1; i-- { + if logs := bc.collectLogs(newChain[i].Hash(), false); len(logs) > 0 { + rebirthLogs = append(rebirthLogs, logs...) 
} + if len(rebirthLogs) > 512 { + bc.logsFeed.Send(rebirthLogs) + rebirthLogs = nil + } + } + if len(rebirthLogs) > 0 { + bc.logsFeed.Send(rebirthLogs) } return nil } diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 0f5eaf8d36..add53826ea 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -516,9 +516,12 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // Initialize a fresh chain var ( - genesis = (&Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee)}).MustCommit(db) - engine = dummy.NewFullFaker() - config = &CacheConfig{ + gspec = &Genesis{ + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + Config: params.TestChainConfig, + } + engine = dummy.NewFullFaker() + config = &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, SnapshotLimit: 0, // Disable snapshot by default @@ -528,7 +531,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { if snapshots { config.SnapshotLimit = 256 } - chain, err := NewBlockChain(db, config, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) + chain, err := NewBlockChain(db, config, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -537,14 +540,14 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // If sidechain blocks are needed, make a light chain and import it var sideblocks types.Blocks if tt.sidechainBlocks > 0 { - sideblocks, _, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, 10, func(i int, b *BlockGen) { + sideblocks, _, _ = GenerateChain(gspec.Config, gspec.ToBlock(nil), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x01}) }) if _, err := chain.InsertChain(sideblocks); err != nil { t.Fatalf("Failed to import side chain: %v", err) } } - canonblocks, _, _ := 
GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, 10, func(i int, b *BlockGen) { + canonblocks, _, _ := GenerateChain(gspec.Config, gspec.ToBlock(nil), engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x02}) b.SetDifficulty(big.NewInt(1000000)) }) @@ -576,7 +579,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } defer db.Close() - newChain, err := NewBlockChain(db, config, params.TestChainConfig, engine, vm.Config{}, lastAcceptedHash) + newChain, err := NewBlockChain(db, config, gspec, engine, vm.Config{}, lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index aae31bafa3..98c7456a81 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -59,8 +59,9 @@ type snapshotTestBasic struct { // share fields, set in runtime datadir string db ethdb.Database - gendb ethdb.Database + genDb ethdb.Database engine consensus.Engine + gspec *Genesis lastAcceptedHash common.Hash } @@ -75,20 +76,22 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo } // Initialize a fresh chain var ( - genesis = (&Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee)}).MustCommit(db) - engine = dummy.NewFullFaker() - gendb = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + Config: params.TestChainConfig, + } + engine = dummy.NewFullFaker() // Snapshot is enabled, the first snapshot is created from the Genesis. // The snapshot memory allowance is 256MB, it means no snapshot flush // will happen during the block insertion. 
cacheConfig = DefaultCacheConfig ) - chain, err := NewBlockChain(db, cacheConfig, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) + chain, err := NewBlockChain(db, cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("Failed to create chain: %v", err) } - blocks, _, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, 10, func(i int, b *BlockGen) {}) + genDb, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, basic.chainBlocks, 10, func(i int, b *BlockGen) {}) // genesis as last accepted basic.lastAcceptedHash = chain.GetBlockByNumber(0).Hash() @@ -126,8 +129,9 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo // Set runtime fields basic.datadir = datadir basic.db = db - basic.gendb = gendb + basic.genDb = genDb basic.engine = engine + basic.gspec = gspec return chain, blocks } @@ -146,11 +150,11 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [ // Check the disk layer, ensure they are matched block := chain.GetBlockByNumber(basic.expSnapshotBottom) if block == nil { - t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) + t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) { t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot()) } else if len(chain.snaps.Snapshots(block.Hash(), -1, false)) != 1 { - t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) + t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) } // Check the snapshot, ensure it's integrated @@ -203,7 +207,7 @@ func (basic *snapshotTestBasic) dump() string { func (basic *snapshotTestBasic) teardown() { basic.db.Close() - basic.gendb.Close() + 
basic.genDb.Close() os.RemoveAll(basic.datadir) } @@ -221,7 +225,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { // Restart the chain normally chain.Stop() - newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -257,13 +261,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { // the crash, we do restart twice here: one after the crash and one // after the normal stop. It's used to ensure the broken snapshot // can be detected all the time. - newchain, err := NewBlockChain(newdb, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err := NewBlockChain(newdb, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } newchain.Stop() - newchain, err = NewBlockChain(newdb, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err = NewBlockChain(newdb, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -290,7 +294,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { // Insert blocks without enabling snapshot if gapping is required. 
chain.Stop() - gappedBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, 10, func(i int, b *BlockGen) {}) + gappedBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, 10, func(i int, b *BlockGen) {}) // Insert a few more blocks without enabling snapshot var cacheConfig = &CacheConfig{ @@ -300,7 +304,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { Pruning: true, CommitInterval: 4096, } - newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -308,7 +312,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { newchain.Stop() // Restart the chain with enabling the snapshot - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -344,11 +348,11 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { Pruning: true, CommitInterval: 4096, } - newchain, err := NewBlockChain(snaptest.db, config, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } - newBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, 10, func(i int, b 
*BlockGen) {}) + newBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, 10, func(i int, b *BlockGen) {}) newchain.InsertChain(newBlocks) newchain.Stop() @@ -360,13 +364,12 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { Pruning: true, CommitInterval: 4096, } - _, err = NewBlockChain(snaptest.db, config, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + _, err = NewBlockChain(snaptest.db, config, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } // Simulate the blockchain crash. - - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 9ddf447ea4..615c1757ae 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -49,24 +49,25 @@ var ( func createBlockChain( db ethdb.Database, cacheConfig *CacheConfig, - chainConfig *params.ChainConfig, + gspec *Genesis, lastAcceptedHash common.Hash, ) (*BlockChain, error) { // Import the chain. This runs all block validation rules. 
blockchain, err := NewBlockChain( db, cacheConfig, - chainConfig, + gspec, dummy.NewDummyEngine(&TestCallbacks), vm.Config{}, lastAcceptedHash, + false, ) return blockchain, err } func TestArchiveBlockChain(t *testing.T) { - createArchiveBlockChain := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, archiveConfig, chainConfig, lastAcceptedHash) + createArchiveBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, archiveConfig, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -119,11 +120,11 @@ func TestTrieCleanJournal(t *testing.T) { }() require.NoError(trieCleanJournalWatcher.Add(trieCleanJournal)) - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { config := *archiveConfig config.TrieCleanJournal = trieCleanJournal config.TrieCleanRejournal = 100 * time.Millisecond - return createBlockChain(db, &config, chainConfig, lastAcceptedHash) + return createBlockChain(db, &config, gspec, lastAcceptedHash) } var ( @@ -131,9 +132,6 @@ func TestTrieCleanJournal(t *testing.T) { key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -143,18 +141,14 @@ func TestTrieCleanJournal(t *testing.T) { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) require.NoError(err) defer blockchain.Stop() // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -177,7 +171,7 @@ func TestTrieCleanJournal(t *testing.T) { } func TestArchiveBlockChainSnapsDisabled(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { return createBlockChain( db, &CacheConfig{ @@ -188,7 +182,7 @@ func TestArchiveBlockChainSnapsDisabled(t *testing.T) { SnapshotLimit: 0, // Disable snapshots AcceptorQueueLimit: 64, }, - chainConfig, + gspec, lastAcceptedHash, ) } @@ -200,8 +194,8 @@ func TestArchiveBlockChainSnapsDisabled(t *testing.T) { } func TestPruningBlockChain(t *testing.T) { - createPruningBlockChain := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + 
createPruningBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -211,7 +205,7 @@ func TestPruningBlockChain(t *testing.T) { } func TestPruningBlockChainSnapsDisabled(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { return createBlockChain( db, &CacheConfig{ @@ -223,7 +217,7 @@ func TestPruningBlockChainSnapsDisabled(t *testing.T) { SnapshotLimit: 0, // Disable snapshots AcceptorQueueLimit: 64, }, - chainConfig, + gspec, lastAcceptedHash, ) } @@ -241,8 +235,8 @@ type wrappedStateManager struct { func (w *wrappedStateManager) Shutdown() error { return nil } func TestPruningBlockChainUngracefulShutdown(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { - blockchain, err := createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + blockchain, err := createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) if err != nil { return nil, err } @@ -260,7 +254,7 @@ func TestPruningBlockChainUngracefulShutdown(t *testing.T) { } func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { blockchain, err := createBlockChain( db, &CacheConfig{ @@ -272,7 +266,7 @@ func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { SnapshotLimit: 0, // 
Disable snapshots AcceptorQueueLimit: 64, }, - chainConfig, + gspec, lastAcceptedHash, ) if err != nil { @@ -294,7 +288,7 @@ func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { func TestEnableSnapshots(t *testing.T) { // Set snapshots to be disabled the first time, and then enable them on the restart snapLimit := 0 - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { // Import the chain. This runs all block validation rules. blockchain, err := createBlockChain( db, @@ -307,7 +301,7 @@ func TestEnableSnapshots(t *testing.T) { SnapshotLimit: snapLimit, AcceptorQueueLimit: 64, }, - chainConfig, + gspec, lastAcceptedHash, ) if err != nil { @@ -325,13 +319,13 @@ func TestEnableSnapshots(t *testing.T) { } func TestCorruptSnapshots(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { // Delete the snapshot block hash and state root to ensure that if we die in between writing a snapshot // diff layer to disk at any point, we can still recover on restart. 
rawdb.DeleteSnapshotBlockHash(db) rawdb.DeleteSnapshotRoot(db) - return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -341,9 +335,9 @@ func TestCorruptSnapshots(t *testing.T) { } func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { // Import the chain. This runs all block validation rules. - blockchain, err := createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + blockchain, err := createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) if err != nil { return nil, err } @@ -370,7 +364,7 @@ func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { return nil, fmt.Errorf("failed to prune blockchain with target root: %s due to: %w", targetRoot, err) } // Re-initialize the blockchain after pruning - return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -387,11 +381,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() - chainDB = rawdb.NewMemoryDatabase() - lastAcceptedHash common.Hash + chainDB = rawdb.NewMemoryDatabase() ) // Ensure that key1 has some funds in the genesis block. 
@@ -400,10 +390,8 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := createBlockChain(chainDB, pruningConfig, gspec.Config, lastAcceptedHash) + blockchain, err := createBlockChain(chainDB, pruningConfig, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -411,9 +399,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 10, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 10, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -431,10 +417,10 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { } blockchain.DrainAcceptorQueue() - lastAcceptedHash = blockchain.LastConsensusAcceptedBlock().Hash() + lastAcceptedHash := blockchain.LastConsensusAcceptedBlock().Hash() blockchain.Stop() - blockchain, err = createBlockChain(chainDB, pruningConfig, gspec.Config, lastAcceptedHash) + blockchain, err = createBlockChain(chainDB, pruningConfig, gspec, lastAcceptedHash) if err != nil { t.Fatal(err) } @@ -461,7 +447,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { PopulateMissingTriesParallelism: parallelism, AcceptorQueueLimit: 64, }, - gspec.Config, + gspec, lastAcceptedHash, ) if err != nil { @@ -484,7 +470,7 @@ func TestRepopulateMissingTries(t *testing.T) { func TestUngracefulAsyncShutdown(t 
*testing.T) { var ( - create = func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create = func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { blockchain, err := createBlockChain(db, &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, @@ -494,7 +480,7 @@ func TestUngracefulAsyncShutdown(t *testing.T) { SnapshotLimit: 256, SkipSnapshotRebuild: true, // Ensure the test errors if snapshot initialization fails AcceptorQueueLimit: 1000, // ensure channel doesn't block - }, chainConfig, lastAcceptedHash) + }, gspec, lastAcceptedHash) if err != nil { return nil, err } @@ -505,9 +491,6 @@ func TestUngracefulAsyncShutdown(t *testing.T) { key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -517,10 +500,8 @@ func TestUngracefulAsyncShutdown(t *testing.T) { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -528,9 +509,7 @@ func TestUngracefulAsyncShutdown(t *testing.T) { // This call generates a chain of 10 blocks. signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. 
- chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 10, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 10, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -669,28 +648,25 @@ func TestCanonicalHashMarker(t *testing.T) { } for _, c := range cases { var ( - db = rawdb.NewMemoryDatabase() gspec = &Genesis{ Config: params.TestChainConfig, Alloc: GenesisAlloc{}, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), } - genesis = gspec.MustCommit(db) - engine = dummy.NewFaker() + engine = dummy.NewFaker() ) - forkA, _, err := GenerateChain(params.TestChainConfig, genesis, engine, db, c.forkA, 10, func(i int, gen *BlockGen) {}) + _, forkA, _, err := GenerateChainWithGenesis(gspec, engine, c.forkA, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } - forkB, _, err := GenerateChain(params.TestChainConfig, genesis, engine, db, c.forkB, 10, func(i int, gen *BlockGen) {}) + _, forkB, _, err := GenerateChainWithGenesis(gspec, engine, c.forkB, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } // Initialize test chain diskdb := rawdb.NewMemoryDatabase() - gspec.MustCommit(diskdb) - chain, err := NewBlockChain(diskdb, DefaultCacheConfig, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) + chain, err := NewBlockChain(diskdb, DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -748,7 +724,6 @@ func TestTransactionIndices(t *testing.T) { // Configure and generate a sample block chain require := require.New(t) var ( - gendb = rawdb.NewMemoryDatabase() key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = 
crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -758,18 +733,17 @@ func TestTransactionIndices(t *testing.T) { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: funds}}, } - genesis = gspec.MustCommit(gendb) - signer = types.LatestSigner(gspec.Config) + signer = types.LatestSigner(gspec.Config) ) height := uint64(128) - blocks, _, err := GenerateChain(gspec.Config, genesis, dummy.NewDummyEngine(&TestCallbacks), gendb, int(height), 10, func(i int, block *BlockGen) { + genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewDummyEngine(&TestCallbacks), int(height), 10, func(i int, block *BlockGen) { tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) require.NoError(err) block.AddTx(tx) }) require.NoError(err) - blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewDummyEngine(&TestCallbacks), gendb, 10, 10, nil) + blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewDummyEngine(&TestCallbacks), genDb, 10, 10, nil) require.NoError(err) check := func(tail *uint64, chain *BlockChain) { @@ -815,9 +789,7 @@ func TestTransactionIndices(t *testing.T) { // Init block chain and check all needed indices has been indexed. 
chainDB := rawdb.NewMemoryDatabase() - gspec.MustCommit(chainDB) - - chain, err := createBlockChain(chainDB, conf, gspec.Config, common.Hash{}) + chain, err := createBlockChain(chainDB, conf, gspec, common.Hash{}) require.NoError(err) _, err = chain.InsertChain(blocks) @@ -841,7 +813,7 @@ func TestTransactionIndices(t *testing.T) { for i, l := range limit { conf.TxLookupLimit = l - chain, err := createBlockChain(chainDB, conf, gspec.Config, lastAcceptedHash) + chain, err := createBlockChain(chainDB, conf, gspec, lastAcceptedHash) require.NoError(err) newBlks := blocks2[i : i+1] @@ -873,8 +845,8 @@ func TestTxLookupBlockChain(t *testing.T) { AcceptorQueueLimit: 64, // ensure channel doesn't block TxLookupLimit: 5, } - createTxLookupBlockChain := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, cacheConf, chainConfig, lastAcceptedHash) + createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { diff --git a/core/chain_makers.go b/core/chain_makers.go index bf97fb427b..a5753c33be 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -33,6 +33,7 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/consensus/misc" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" @@ -280,6 +281,19 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse return blocks, receipts, nil } +// GenerateChainWithGenesis is a wrapper of GenerateChain which will initialize +// genesis block to database first according to the provided genesis specification +// then generate chain on top. 
+func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gap uint64, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts, error) { + db := rawdb.NewMemoryDatabase() + _, err := genesis.Commit(db) + if err != nil { + return nil, nil, nil, err + } + blocks, receipts, err := GenerateChain(genesis.Config, genesis.ToBlock(nil), engine, db, n, gap, gen) + return db, blocks, receipts, err +} + func makeHeader(chain consensus.ChainReader, config *params.ChainConfig, parent *types.Block, gap uint64, state *state.StateDB, engine consensus.Engine) *types.Header { var time uint64 if parent.Time() == 0 { diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index cd003932e7..a2e1bf2e9f 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -55,13 +55,12 @@ func ExampleGenerateChain() { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, } - genesis := gspec.MustCommit(db) // This call generates a chain of 3 blocks. The function runs for // each block and adds different features to gen based on the // block index. signer := types.HomesteadSigner{} - chain, _, err := GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 3, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, dummy.NewFaker(), 3, 10, func(i int, gen *BlockGen) { switch i { case 0: // In block 1, addr1 sends addr2 some ether. @@ -82,7 +81,7 @@ func ExampleGenerateChain() { } // Import the chain. This runs all block validation rules. 
- blockchain, _ := NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) defer blockchain.Stop() if i, err := blockchain.InsertChain(chain); err != nil { diff --git a/core/dao_test.go b/core/dao_test.go index 60c6c507e0..c349f29cad 100644 --- a/core/dao_test.go +++ b/core/dao_test.go @@ -37,39 +37,51 @@ import ( "github.com/ethereum/go-ethereum/common" ) +// setDAOForkBlock makes a copy of [cfg] and assigns the DAO fork block to [forkBlock]. +// This is necessary for testing since coreth restricts the DAO fork to be enabled at +// genesis only. +func setDAOForkBlock(cfg *params.ChainConfig, forkBlock *big.Int) *params.ChainConfig { + config := *cfg + config.DAOForkBlock = forkBlock + return &config +} + // Tests that DAO-fork enabled clients can properly filter out fork-commencing // blocks based on their extradata fields. func TestDAOForkRangeExtradata(t *testing.T) { forkBlock := big.NewInt(32) + chainConfig := *params.TestApricotPhase2Config + chainConfig.DAOForkBlock = nil // Generate a common prefix for both pro-forkers and non-forkers - db := rawdb.NewMemoryDatabase() gspec := &Genesis{ BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: params.TestApricotPhase2Config, + Config: &chainConfig, } - genesis := gspec.MustCommit(db) - prefix, _, _ := GenerateChain(params.TestApricotPhase2Config, genesis, dummy.NewFaker(), db, int(forkBlock.Int64()-1), 10, func(i int, gen *BlockGen) {}) + genDb, prefix, _, _ := GenerateChainWithGenesis(gspec, dummy.NewFaker(), int(forkBlock.Int64()-1), 10, func(i int, gen *BlockGen) {}) // Create the concurrent, conflicting two nodes proDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(proDb) - proConf := *params.TestApricotPhase2Config - proConf.DAOForkBlock = forkBlock proConf.DAOForkSupport = true - proBc, _ := NewBlockChain(proDb, DefaultCacheConfig, 
&proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + progspec := &Genesis{ + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + Config: &proConf, + } + proBc, _ := NewBlockChain(proDb, DefaultCacheConfig, progspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + proBc.chainConfig = setDAOForkBlock(proBc.chainConfig, forkBlock) defer proBc.Stop() conDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(conDb) - conConf := *params.TestApricotPhase2Config - conConf.DAOForkBlock = forkBlock conConf.DAOForkSupport = false - - conBc, _ := NewBlockChain(conDb, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + congspec := &Genesis{ + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + Config: &conConf, + } + conBc, _ := NewBlockChain(conDb, DefaultCacheConfig, congspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + conBc.chainConfig = setDAOForkBlock(conBc.chainConfig, forkBlock) defer conBc.Stop() if _, err := proBc.InsertChain(prefix); err != nil { @@ -81,9 +93,8 @@ func TestDAOForkRangeExtradata(t *testing.T) { // Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ { // Create a pro-fork block, and try to feed into the no-fork chain - db = rawdb.NewMemoryDatabase() - gspec.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, congspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + bc.chainConfig = setDAOForkBlock(bc.chainConfig, forkBlock) defer bc.Stop() blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) @@ -96,19 +107,17 @@ func TestDAOForkRangeExtradata(t *testing.T) { if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { t.Fatalf("failed to commit contra-fork head for expansion: %v", 
err) } - blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) if _, err := conBc.InsertChain(blocks); err != nil { t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0]) } // Create a proper no-fork block for the contra-forker - blocks, _, _ = GenerateChain(&conConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + blocks, _, _ = GenerateChain(&conConf, conBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) if _, err := conBc.InsertChain(blocks); err != nil { t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err) } // Create a no-fork block, and try to feed into the pro-fork chain - db = rawdb.NewMemoryDatabase() - gspec.MustCommit(db) - bc, _ = NewBlockChain(db, DefaultCacheConfig, &proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, progspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) defer bc.Stop() blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) @@ -121,20 +130,18 @@ func TestDAOForkRangeExtradata(t *testing.T) { if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { t.Fatalf("failed to commit pro-fork head for expansion: %v", err) } - blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) if _, err := proBc.InsertChain(blocks); err != nil { t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0]) } // Create a proper pro-fork block for the pro-forker - blocks, _, _ = GenerateChain(&proConf, 
proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + blocks, _, _ = GenerateChain(&proConf, proBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) if _, err := proBc.InsertChain(blocks); err != nil { t.Fatalf("pro-fork chain didn't accepted pro-fork block: %v", err) } } // Verify that contra-forkers accept pro-fork extra-datas after forking finishes - db = rawdb.NewMemoryDatabase() - gspec.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, congspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) defer bc.Stop() blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) @@ -147,14 +154,12 @@ func TestDAOForkRangeExtradata(t *testing.T) { if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { t.Fatalf("failed to commit contra-fork head for expansion: %v", err) } - blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) if _, err := conBc.InsertChain(blocks); err != nil { t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err) } // Verify that pro-forkers accept contra-fork extra-datas after forking finishes - db = rawdb.NewMemoryDatabase() - gspec.MustCommit(db) - bc, _ = NewBlockChain(db, DefaultCacheConfig, &proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, progspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) defer bc.Stop() blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) @@ -167,7 +172,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { 
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { t.Fatalf("failed to commit pro-fork head for expansion: %v", err) } - blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) if _, err := proBc.InsertChain(blocks); err != nil { t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err) } @@ -185,11 +190,10 @@ func TestDAOForkSupportPostApricotPhase3(t *testing.T) { BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), Config: &conf, } - genesis := gspec.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, &conf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + bc, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) defer bc.Stop() - blocks, _, _ := GenerateChain(&conf, genesis, dummy.NewFaker(), db, 32, 10, func(i int, gen *BlockGen) {}) + _, blocks, _, _ := GenerateChainWithGenesis(gspec, dummy.NewFaker(), 32, 10, func(i int, gen *BlockGen) {}) if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import blocks: %v", err) diff --git a/core/error.go b/core/error.go index a90475fe2e..955049eefe 100644 --- a/core/error.go +++ b/core/error.go @@ -96,7 +96,7 @@ var ( ErrFeeCapVeryHigh = errors.New("max fee per gas higher than 2^256-1") // ErrFeeCapTooLow is returned if the transaction fee cap is less than the - // the base fee of the block. + // base fee of the block. ErrFeeCapTooLow = errors.New("max fee per gas less than block base fee") // ErrSenderNoEOA is returned if the sender of a transaction is a contract. 
diff --git a/core/genesis.go b/core/genesis.go index efc4f2a874..54da8dff95 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -131,7 +131,6 @@ func (h *storageJSON) UnmarshalText(text []byte) error { } offset := len(h) - len(text)/2 // pad on the left if _, err := hex.Decode(h[offset:], text); err != nil { - fmt.Println(err) return fmt.Errorf("invalid hex storage key/value %q", text) } return nil @@ -164,22 +163,22 @@ func (e *GenesisMismatchError) Error() string { // error is a *params.ConfigCompatError and the new, unwritten config is returned. func SetupGenesisBlock( db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool, -) (*params.ChainConfig, error) { +) (*params.ChainConfig, common.Hash, error) { if genesis == nil { - return nil, ErrNoGenesis + return nil, common.Hash{}, ErrNoGenesis } if genesis.Config == nil { - return nil, errGenesisNoConfig + return nil, common.Hash{}, errGenesisNoConfig } // Just commit the new block if there is no stored genesis block. stored := rawdb.ReadCanonicalHash(db, 0) if (stored == common.Hash{}) { log.Info("Writing genesis to database") - _, err := genesis.Commit(db) + block, err := genesis.Commit(db) if err != nil { - return genesis.Config, err + return genesis.Config, common.Hash{}, err } - return genesis.Config, nil + return genesis.Config, block.Hash(), nil } // We have the genesis block in database but the corresponding state is missing. header := rawdb.ReadHeader(db, stored, 0) @@ -187,28 +186,27 @@ func SetupGenesisBlock( // Ensure the stored genesis matches with the given one. hash := genesis.ToBlock(nil).Hash() if hash != stored { - return genesis.Config, &GenesisMismatchError{stored, hash} + return genesis.Config, common.Hash{}, &GenesisMismatchError{stored, hash} } _, err := genesis.Commit(db) - return genesis.Config, err + return genesis.Config, common.Hash{}, err } // Check whether the genesis block is already written. 
hash := genesis.ToBlock(nil).Hash() if hash != stored { - return genesis.Config, &GenesisMismatchError{stored, hash} + return genesis.Config, common.Hash{}, &GenesisMismatchError{stored, hash} } // Get the existing chain configuration. newcfg := genesis.Config if err := newcfg.CheckConfigForkOrder(); err != nil { - return newcfg, err + return newcfg, common.Hash{}, err } storedcfg := rawdb.ReadChainConfig(db, stored) if storedcfg == nil { log.Warn("Found genesis block without chain config") rawdb.WriteChainConfig(db, stored, newcfg) - return newcfg, nil + return newcfg, stored, nil } - // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. // we use last accepted block for cfg compatibility check. Note this allows @@ -219,7 +217,7 @@ func SetupGenesisBlock( // when we start syncing from scratch, the last accepted block // will be genesis block if lastBlock == nil { - return newcfg, fmt.Errorf("missing last accepted block") + return newcfg, common.Hash{}, fmt.Errorf("missing last accepted block") } height := lastBlock.NumberU64() timestamp := lastBlock.Time() @@ -228,11 +226,11 @@ func SetupGenesisBlock( } else { compatErr := storedcfg.CheckCompatible(newcfg, height, timestamp) if compatErr != nil && height != 0 && compatErr.RewindTo != 0 { - return newcfg, compatErr + return newcfg, stored, compatErr } } rawdb.WriteChainConfig(db, stored, newcfg) - return newcfg, nil + return newcfg, stored, nil } // ToBlock creates the genesis block and writes state of a genesis specification diff --git a/core/genesis_test.go b/core/genesis_test.go index fa2af0dd54..ebfa92f57b 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -43,9 +43,7 @@ import ( ) func setupGenesisBlock(db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash) (*params.ChainConfig, common.Hash, error) { - conf, err := SetupGenesisBlock(db, genesis, lastAcceptedHash, false) - stored := 
rawdb.ReadCanonicalHash(db, 0) - return conf, stored, err + return SetupGenesisBlock(db, genesis, lastAcceptedHash, false) } func TestGenesisBlockForTesting(t *testing.T) { @@ -103,7 +101,6 @@ func TestSetupGenesis(t *testing.T) { return setupGenesisBlock(db, nil, common.Hash{}) }, wantErr: ErrNoGenesis, - wantHash: customghash, wantConfig: nil, }, { @@ -122,12 +119,12 @@ func TestSetupGenesis(t *testing.T) { // Advance to block #4, past the ApricotPhase1 transition block of customg. genesis := oldcustomg.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, oldcustomg.Config, dummy.NewFullFaker(), vm.Config{}, common.Hash{}) + bc, _ := NewBlockChain(db, DefaultCacheConfig, &oldcustomg, dummy.NewFullFaker(), vm.Config{}, genesis.Hash(), false) defer bc.Stop() blocks, _, _ := GenerateChain(oldcustomg.Config, genesis, dummy.NewFullFaker(), db, 4, 25, nil) bc.InsertChain(blocks) - bc.CurrentBlock() + for _, block := range blocks { if err := bc.Accept(block); err != nil { t.Fatal(err) @@ -183,12 +180,11 @@ func TestNetworkUpgradeBetweenHeadAndAcceptedBlock(t *testing.T) { {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, }, } - genesis := customg.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, customg.Config, dummy.NewFullFaker(), vm.Config{}, common.Hash{}) + bc, _ := NewBlockChain(db, DefaultCacheConfig, &customg, dummy.NewFullFaker(), vm.Config{}, common.Hash{}, false) defer bc.Stop() // Advance header to block #4, past the ApricotPhase2 timestamp. 
- blocks, _, _ := GenerateChain(customg.Config, genesis, dummy.NewFullFaker(), db, 4, 25, nil) + _, blocks, _, _ := GenerateChainWithGenesis(&customg, dummy.NewFullFaker(), 4, 25, nil) require := require.New(t) _, err := bc.InsertChain(blocks) diff --git a/core/headerchain_test.go b/core/headerchain_test.go index 8c8b598524..2170dded4a 100644 --- a/core/headerchain_test.go +++ b/core/headerchain_test.go @@ -39,7 +39,6 @@ import ( "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" ) func verifyUnbrokenCanonchain(bc *BlockChain) error { @@ -74,13 +73,14 @@ func testInsert(t *testing.T, bc *BlockChain, chain []*types.Block, wantErr erro // This test checks status reporting of InsertHeaderChain. func TestHeaderInsertion(t *testing.T) { var ( - db = rawdb.NewMemoryDatabase() - genesis = (&Genesis{ + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), Config: params.TestChainConfig, - }).MustCommit(db) + } ) - chain, err := NewBlockChain(db, DefaultCacheConfig, params.TestChainConfig, dummy.NewFaker(), vm.Config{}, common.Hash{}) + genesis := gspec.ToBlock(nil) + chain, err := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) if err != nil { t.Fatal(err) } @@ -92,7 +92,6 @@ func TestHeaderInsertion(t *testing.T) { chainB, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(chainA[0].Header()), dummy.NewFaker(), db, 128, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0: byte(10), 19: byte(i)}) }) - log.Root().SetHandler(log.StdoutHandler) // Inserting 64 headers on an empty chain testInsert(t, chain, chainA[:64], nil) diff --git a/core/mkalloc.go b/core/mkalloc.go index 76978a547f..e7bdf8f1c0 100644 --- a/core/mkalloc.go +++ b/core/mkalloc.go @@ -28,12 +28,10 @@ // +build none /* +The mkalloc tool creates the genesis allocation constants 
in genesis_alloc.go +It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples. - The mkalloc tool creates the genesis allocation constants in genesis_alloc.go - It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples. - - go run mkalloc.go genesis.json - + go run mkalloc.go genesis.json */ package main diff --git a/core/rlp_test.go b/core/rlp_test.go index b6588ca6cb..9c9614f800 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -32,7 +32,6 @@ import ( "testing" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" @@ -43,10 +42,9 @@ import ( func getBlock(transactions int, uncles int, dataSize int) *types.Block { var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") - // Generate a canonical chain to act as the main dataset + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") engine = dummy.NewFaker() - db = rawdb.NewMemoryDatabase() + // A sender who makes transactions, has some funds key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") address = crypto.PubkeyToAddress(key.PublicKey) @@ -55,11 +53,9 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block { Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}, } - genesis = gspec.MustCommit(db) ) - // We need to generate as many blocks +1 as uncles - blocks, _, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, uncles+1, 10, + _, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, uncles+1, 10, func(n int, b *BlockGen) { if n == uncles { // Add transactions and stuff on the last block diff --git a/core/state/database.go b/core/state/database.go index e861234d28..181fbec48f 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ 
-64,6 +64,9 @@ type Database interface { // ContractCodeSize retrieves a particular contracts code's size. ContractCodeSize(addrHash, codeHash common.Hash) (int, error) + // DiskDB returns the underlying key-value disk database. + DiskDB() ethdb.KeyValueStore + // TrieDB retrieves the low level trie database used for data storage. TrieDB() *trie.Database } @@ -140,6 +143,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { csc, _ := lru.New(codeSizeCacheSize) return &cachingDB{ db: trie.NewDatabaseWithConfig(db, config), + disk: db, codeSizeCache: csc, codeCache: fastcache.New(codeCacheSize), } @@ -147,6 +151,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { type cachingDB struct { db *trie.Database + disk ethdb.KeyValueStore codeSizeCache *lru.Cache codeCache *fastcache.Cache } @@ -184,7 +189,7 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { return code, nil } - code := rawdb.ReadCode(db.db.DiskDB(), codeHash) + code := rawdb.ReadCode(db.disk, codeHash) if len(code) > 0 { db.codeCache.Set(codeHash.Bytes(), code) db.codeSizeCache.Add(codeHash, len(code)) @@ -202,6 +207,11 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro return len(code), err } +// DiskDB returns the underlying key-value disk database. +func (db *cachingDB) DiskDB() ethdb.KeyValueStore { + return db.disk +} + // TrieDB retrieves any intermediate trie-node caching layer. 
func (db *cachingDB) TrieDB() *trie.Database { return db.db diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index d96289ca66..6b897a140b 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -78,7 +78,7 @@ var ( bloomFuncs = math.Round((bloomSize / float64(aggregatorItemLimit)) * math.Log(2)) // the bloom offsets are runtime constants which determines which part of the - // the account/storage hash the hasher functions looks at, to determine the + // account/storage hash the hasher functions looks at, to determine the // bloom key for an account/slot. This is randomized at init(), so that the // global population of nodes do not all display the exact same behaviour with // regards to bloom content diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index 7130580ef7..973276f92c 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -501,12 +501,12 @@ func TestGenerateWithExtraAccounts(t *testing.T) { // Identical in the snap key := hashData([]byte("acc-1")) - rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-1")), []byte("val-1")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-2")), []byte("val-2")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-3")), []byte("val-3")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-4")), []byte("val-4")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-5")), []byte("val-5")) + rawdb.WriteAccountSnapshot(helper.diskdb, key, val) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-1")), []byte("val-1")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-2")), []byte("val-2")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, 
hashData([]byte("key-3")), []byte("val-3")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-4")), []byte("val-4")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-5")), []byte("val-5")) } { // Account two exists only in the snapshot @@ -518,15 +518,15 @@ func TestGenerateWithExtraAccounts(t *testing.T) { acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte("acc-2")) - rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-1")), []byte("b-val-1")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-2")), []byte("b-val-2")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-3")), []byte("b-val-3")) + rawdb.WriteAccountSnapshot(helper.diskdb, key, val) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-1")), []byte("b-val-1")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3")) } root := helper.Commit() // To verify the test: If we now inspect the snap db, there should exist extraneous storage items - if data := rawdb.ReadStorageSnapshot(helper.triedb.DiskDB(), hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil { + if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil { t.Fatalf("expected snap storage to exist") } snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil) @@ -544,7 +544,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { snap.genAbort <- stop <-stop // If we now inspect the snap db, there should exist no extraneous storage items - if data := rawdb.ReadStorageSnapshot(helper.triedb.DiskDB(), 
hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil { + if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil { t.Fatalf("expected slot to be removed, got %v", string(data)) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index e4e4211bcc..9af59926e5 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -988,7 +988,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas storageTrieNodes int nodes = trie.NewMergedNodeSet() ) - codeWriter := s.db.TrieDB().DiskDB().NewBatch() + codeWriter := s.db.DiskDB().NewBatch() for addr := range s.stateObjectsDirty { if obj := s.stateObjects[addr]; !obj.deleted { // Write any contract code associated with the state object diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 9db135768b..c058214ab7 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -145,6 +145,9 @@ func (p *triePrefetcher) copy() *triePrefetcher { // If the prefetcher is already a copy, duplicate the data if p.fetches != nil { for root, fetch := range p.fetches { + if fetch == nil { + continue + } copy.fetches[root] = p.db.CopyTrie(fetch) } return copy diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 04f79b554c..05e90592a4 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -102,8 +102,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.CortinaGasLimit, } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) @@ -202,7 +201,7 @@ func 
TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000", }, } { - block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -241,8 +240,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.ApricotPhase1GasLimit, } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() for i, tt := range []struct { @@ -256,7 +254,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: transaction type not supported", }, } { - block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -282,8 +280,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.CortinaGasLimit, } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() for i, tt := range []struct { @@ -297,7 +294,7 @@ func 
TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1", }, } { - block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -347,8 +344,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.CortinaGasLimit, } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} smallInitCode = [320]byte{} ) @@ -371,7 +367,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300", }, } { - block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -430,8 +426,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr } func CostOfUsingGasLimitEachBlock(gspec *Genesis) { - db := rawdb.NewMemoryDatabase() - genesis := gspec.MustCommit(db) + genesis := gspec.ToBlock(nil) totalPaid := big.NewInt(0) parent := genesis.Header() gasLimit := new(big.Int).SetUint64(gspec.GasLimit) diff --git a/core/state_transition.go b/core/state_transition.go index 8cd5587f3e..e488df4a94 100644 --- 
a/core/state_transition.go +++ b/core/state_transition.go @@ -42,25 +42,26 @@ import ( var emptyCodeHash = crypto.Keccak256Hash(nil) -/* -The State Transitioning Model - -A state transition is a change made when a transaction is applied to the current world state -The state transitioning model does all the necessary work to work out a valid new state root. - -1) Nonce handling -2) Pre pay gas -3) Create a new state object if the recipient is \0*32 -4) Value transfer -== If contract creation == - - 4a) Attempt to run transaction data - 4b) If valid, use result as code for the new state object - -== end == -5) Run Script section -6) Derive new state root -*/ +// The State Transitioning Model +// +// A state transition is a change made when a transaction is applied to the current world +// state. The state transitioning model does all the necessary work to work out a valid new +// state root. +// +// 1. Nonce handling +// 2. Pre pay gas +// 3. Create a new state object if the recipient is \0*32 +// 4. Value transfer +// +// == If contract creation == +// +// 4a. Attempt to run transaction data +// 4b. If valid, use result as code for the new state object +// +// == end == +// +// 5. Run Script section +// 6. Derive new state root type StateTransition struct { gp *GasPool msg Message @@ -297,13 +298,10 @@ func (st *StateTransition) preCheck() error { // TransitionDb will transition the state by applying the current message and // returning the evm execution result with following fields. // -// - used gas: -// total gas used (including gas being refunded) -// - returndata: -// the returned data from evm -// - concrete execution error: -// various **EVM** error which aborts the execution, -// e.g. ErrOutOfGas, ErrExecutionReverted +// - used gas: total gas used (including gas being refunded) +// - returndata: the returned data from evm +// - concrete execution error: various EVM errors which abort the execution, e.g. 
+// ErrOutOfGas, ErrExecutionReverted // // However if any consensus issue encountered, return the error directly with // nil evm execution result. diff --git a/core/state_transition_test.go b/core/state_transition_test.go index 410f5c195a..028a8f09d6 100644 --- a/core/state_transition_test.go +++ b/core/state_transition_test.go @@ -89,9 +89,9 @@ func executeStateTransitionTest(t *testing.T, st stateTransitionTest) { }, GasLimit: params.ApricotPhase1GasLimit, } - genesis = gspec.MustCommit(db) + genesis = gspec.ToBlock(nil) engine = dummy.NewFaker() - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, engine, vm.Config{}, common.Hash{}) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() diff --git a/core/test_blockchain.go b/core/test_blockchain.go index f530f8ff2b..10a7887a50 100644 --- a/core/test_blockchain.go +++ b/core/test_blockchain.go @@ -34,7 +34,7 @@ type ChainTest struct { Name string testFunc func( t *testing.T, - create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error), + create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error), ) } @@ -106,13 +106,12 @@ func copyMemDB(db ethdb.Database) (ethdb.Database, error) { func checkBlockChainState( t *testing.T, bc *BlockChain, - genesis *Genesis, + gspec *Genesis, originalDB ethdb.Database, - create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error), + create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error), checkState func(sdb *state.StateDB) error, ) (*BlockChain, *BlockChain, *BlockChain) { var ( - chainConfig = bc.Config() lastAcceptedBlock = bc.LastConsensusAcceptedBlock() newDB = rawdb.NewMemoryDatabase() ) @@ -125,9 +124,7 @@ func checkBlockChainState( t.Fatalf("Check state failed for original blockchain due to: 
%s", err) } - _ = genesis.MustCommit(newDB) - - newBlockChain, err := create(newDB, chainConfig, common.Hash{}) + newBlockChain, err := create(newDB, gspec, common.Hash{}) if err != nil { t.Fatalf("Failed to create new blockchain instance: %s", err) } @@ -165,7 +162,7 @@ func checkBlockChainState( if err != nil { t.Fatal(err) } - restartedChain, err := create(originalDB, chainConfig, lastAcceptedBlock.Hash()) + restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash()) if err != nil { t.Fatal(err) } @@ -189,15 +186,12 @@ func checkBlockChainState( return bc, newBlockChain, restartedChain } -func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -207,10 +201,8 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -218,9 +210,7 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas // This call generates a chain of 3 blocks. 
signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -266,15 +256,12 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -284,10 +271,8 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chai Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -295,9 +280,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chai numBlocks := 129 signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -307,7 +290,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chai } // Generate the forked chain to be longer than the original chain to check for a regression where // a longer chain can trigger a reorg. - chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks+1, 10, func(i int, gen *BlockGen) { + _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks+1, 10, func(i int, gen *BlockGen) { // Generate a transaction with a different amount to ensure [chain2] is different than [chain1]. 
tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -436,7 +419,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chai checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -444,7 +427,6 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch addr2 = crypto.PubkeyToAddress(key2.PublicKey) // We use two separate databases since GenerateChain commits the state roots to its underlying // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -454,10 +436,8 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -465,9 +445,7 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch numBlocks := 3 signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. 
- chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -475,7 +453,7 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch if err != nil { t.Fatal(err) } - chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction with a different amount to create a chain of blocks different from [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -550,15 +528,12 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -568,10 +543,8 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainC Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -579,9 +552,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainC numBlocks := 3 signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -687,7 +658,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainC checkBlockChainState(t, blockchain, gspec, chainDB, create, checkUpdatedState) } -func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -695,9 +666,6 @@ func TestBuildOnVariousStages(t *testing.T, create 
func(db ethdb.Database, chain addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) addr3 = crypto.PubkeyToAddress(key3.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -710,10 +678,8 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chain addr3: {Balance: genesisBalance}, }, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -721,9 +687,7 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chain // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 20, 10, func(i int, gen *BlockGen) { + genDB, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 20, 10, func(i int, gen *BlockGen) { // Send all funds back and forth between the two accounts if i%2 == 0 { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, genesisBalance, params.TxGas, nil, nil), signer, key1) @@ -860,31 +824,22 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chain checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { - var ( - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() - chainDB = rawdb.NewMemoryDatabase() - ) +func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { + chainDB := rawdb.NewMemoryDatabase() // Ensure that key1 has some funds in the genesis block. gspec := &Genesis{ Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } defer blockchain.Stop() - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) {}) + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } @@ -908,7 +863,7 @@ func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, chainConfig *p checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -926,10 +881,9 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, chainConfig Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = 
gspec.MustCommit(chainDB) + genesis := gspec.ToBlock(nil) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1005,26 +959,25 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, chainConfig checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -//nolint:goimports // Insert two different chains that result in the identical state root. // Once we accept one of the chains, we insert and accept A3 on top of the shared // state root -// G (genesis) -// / \ -// A1 B1 -// | | -// A2 B2 (A2 and B2 represent two different paths to the identical state trie) -// | -// A3 -func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +// +// G (genesis) +// / \ +// A1 B1 +// | | +// A2 B2 (A2 and B2 represent two different paths to the identical state trie) +// | +// A3 +// +//nolint:goimports +func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1034,19 +987,15 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } defer blockchain.Stop() signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { + _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { if i < 2 { // Send half the funds from addr1 to addr2 in one transaction per each of the two blocks in [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(500000000), params.TxGas, nil, nil), signer, key1) @@ -1057,7 +1006,7 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa if err != nil { t.Fatal(err) } - chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 2, 10, func(i int, gen *BlockGen) { + _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. 
if i == 0 { @@ -1152,27 +1101,26 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -//nolint:goimports // Insert two different chains that result in the identical state root. // Once we insert both of the chains, we restart, insert both the chains again, // and then we accept one of the chains and accept A3 on top of the shared state // root -// G (genesis) -// / \ -// A1 B1 -// | | -// A2 B2 (A2 and B2 represent two different paths to the identical state trie) -// | -// A3 -func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +// +// G (genesis) +// / \ +// A1 B1 +// | | +// A2 B2 (A2 and B2 represent two different paths to the identical state trie) +// | +// A3 +// +//nolint:goimports +func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1182,18 +1130,14 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { + _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { if i < 2 { // Send half the funds from addr1 to addr2 in one transaction per each of the two blocks in [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(500000000), params.TxGas, nil, nil), signer, key1) @@ -1204,7 +1148,7 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth if err != nil { t.Fatal(err) } - chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 2, 10, func(i int, gen *BlockGen) { + _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. 
if i == 0 { @@ -1242,7 +1186,8 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth blockchain.Stop() - blockchain, err = create(chainDB, gspec.Config, common.Hash{}) + chainDB = rawdb.NewMemoryDatabase() + blockchain, err = create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1323,15 +1268,12 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1341,10 +1283,8 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1352,9 +1292,7 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas // This call generates a chain of 3 blocks. 
signer := types.LatestSigner(params.TestChainConfig) - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - _, _, err = GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 0, func(i int, gen *BlockGen) { + _, _, _, err = GenerateChainWithGenesis(gspec, blockchain.engine, 3, 0, func(i int, gen *BlockGen) { tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1379,15 +1317,12 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas } } -func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1397,10 +1332,8 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1408,10 +1341,8 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, // This call generates a chain of 3 blocks. 
signer := types.LatestSigner(params.TestChainConfig) - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. eng := dummy.NewComplexETHFaker(&TestCallbacks) - chain, _, err := GenerateChain(params.TestChainConfig, genesis, eng, genDB, 3, 0, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, eng, 3, 0, func(i int, gen *BlockGen) { tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1440,7 +1371,7 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, } } -func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -1448,7 +1379,6 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, c addr2 = crypto.PubkeyToAddress(key2.PublicKey) // We use two separate databases since GenerateChain commits the state roots to its underlying // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1458,10 +1388,8 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, c Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1469,11 +1397,9 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, c // This call generates a chain of 3 blocks. signer := types.LatestSigner(params.TestChainConfig) - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. tip := big.NewInt(50000 * params.GWei) transfer := big.NewInt(10000) - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 0, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 0, func(i int, gen *BlockGen) { feeCap := new(big.Int).Add(gen.BaseFee(), tip) tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, diff --git a/core/tx_noncer.go b/core/tx_noncer.go index 0dcd31c41a..948453cf35 100644 --- a/core/tx_noncer.go +++ b/core/tx_noncer.go @@ -74,7 +74,7 @@ func (txn *txNoncer) set(addr common.Address, nonce uint64) { } // setIfLower updates a new virtual nonce into the virtual state database if the -// the new one is lower. +// new one is lower. 
func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) { txn.lock.Lock() defer txn.lock.Unlock() diff --git a/core/vm/contracts.go b/core/vm/contracts.go index ed927adeb9..578bf0c519 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -369,11 +369,10 @@ var ( // modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198 // -// def mult_complexity(x): -// -// if x <= 64: return x ** 2 -// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 -// else: return x ** 2 // 16 + 480 * x - 199680 +// def mult_complexity(x): +// if x <= 64: return x ** 2 +// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 +// else: return x ** 2 // 16 + 480 * x - 199680 // // where is x is max(length_of_MODULUS, length_of_BASE) func modexpMultComplexity(x *big.Int) *big.Int { diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 31224d2a72..86e6fdef13 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -128,20 +128,21 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi return params.SstoreResetGas, nil } } + // The new gas metering is based on net gas costs (EIP-1283): // - // 1. If current value equals new value (this is a no-op), 200 gas is deducted. - // 2. If current value does not equal new value - // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context) - // 2.1.1. If original value is 0, 20000 gas is deducted. - // 2.1.2. Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter. - // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. - // 2.2.1. If original value is not 0 - // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. - // 2.2.1.2. 
If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. - // 2.2.2. If original value equals new value (this storage slot is reset) - // 2.2.2.1. If original value is 0, add 19800 gas to refund counter. - // 2.2.2.2. Otherwise, add 4800 gas to refund counter. + // (1.) If current value equals new value (this is a no-op), 200 gas is deducted. + // (2.) If current value does not equal new value + // (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context) + // (2.1.1.) If original value is 0, 20000 gas is deducted. + // (2.1.2.) Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter. + // (2.2.) If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. + // (2.2.1.) If original value is not 0 + // (2.2.1.1.) If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. + // (2.2.1.2.) If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. + // (2.2.2.) If original value equals new value (this storage slot is reset) + // (2.2.2.1.) If original value is 0, add 19800 gas to refund counter. + // (2.2.2.2.) Otherwise, add 4800 gas to refund counter. value := common.Hash(y.Bytes32()) if current == value { // noop (1) return params.NetSstoreNoopGas, nil @@ -173,20 +174,21 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi return params.NetSstoreDirtyGas, nil } -//nolint:goimports -// 0. If *gasleft* is less than or equal to 2300, fail the current call. -// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. -// 2. If current value does not equal new value: -// 2.1. 
If original value equals current value (this storage slot has not been changed by the current execution context): -// 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. -// 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. -// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: -// 2.2.1. If original value is not 0: -// 2.2.1.1. If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter. -// 2.2.1.2. If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter. -// 2.2.2. If original value equals new value (this storage slot is reset): -// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. -// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. +// Here come the EIP220 rules: +// +// (0.) If *gasleft* is less than or equal to 2300, fail the current call. +// (1.) If current value equals new value (this is a no-op), SLOAD_GAS is deducted. +// (2.) If current value does not equal new value: +// (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context): +// (2.1.1.) If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. +// (2.1.2.) Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. +// (2.2.) If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: +// (2.2.1.) If original value is not 0: +// (2.2.1.1.) If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter. +// (2.2.1.2.) 
If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter. +// (2.2.2.) If original value equals new value (this storage slot is reset): +// (2.2.2.1.) If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. +// (2.2.2.2.) Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { @@ -229,16 +231,15 @@ func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m return params.SloadGasEIP2200, nil // dirty update (2.2) } -//nolint:goimports // gasSStoreAP1 simplifies the dynamic gas cost of SSTORE by removing all refund logic // -// 0. If *gasleft* is less than or equal to 2300, fail the current call. -// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. -// 2. If current value does not equal new value: -// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): +// 0. If *gasleft* is less than or equal to 2300, fail the current call. +// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. +// 2. If current value does not equal new value: +// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): // 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. // 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. -// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: +// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. 
Apply both of the following clauses: func gasSStoreAP1(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 245838fd72..e79f059d1a 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -416,35 +416,28 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // There are several cases when the function is called, while we can relay everything // to `state.GetCodeHash` function to ensure the correctness. // -// (1) Caller tries to get the code hash of a normal contract account, state +// 1. Caller tries to get the code hash of a normal contract account, state +// should return the relative code hash and set it as the result. // -// should return the relative code hash and set it as the result. +// 2. Caller tries to get the code hash of a non-existent account, state should +// return common.Hash{} and zero will be set as the result. // -// (2) Caller tries to get the code hash of a non-existent account, state should +// 3. Caller tries to get the code hash for an account without contract code, state +// should return emptyCodeHash(0xc5d246...) as the result. // -// return common.Hash{} and zero will be set as the result. +// 4. Caller tries to get the code hash of a precompiled account, the result should be +// zero or emptyCodeHash. // -// (3) Caller tries to get the code hash for an account without contract code, -// -// state should return emptyCodeHash(0xc5d246...) as the result. -// -// (4) Caller tries to get the code hash of a precompiled account, the result -// -// should be zero or emptyCodeHash. -// -// It is worth noting that in order to avoid unnecessary create and clean, -// all precompile accounts on mainnet have been transferred 1 wei, so the return -// here should be emptyCodeHash. 
-// If the precompile account is not transferred any amount on a private or +// It is worth noting that in order to avoid unnecessary create and clean, all precompile +// accounts on mainnet have been transferred 1 wei, so the return here should be +// emptyCodeHash. If the precompile account is not transferred any amount on a private or // customized chain, the return value will be zero. // -// (5) Caller tries to get the code hash for an account which is marked as suicided -// -// in the current transaction, the code hash of this account should be returned. -// -// (6) Caller tries to get the code hash for an account which is marked as deleted, +// 5. Caller tries to get the code hash for an account which is marked as suicided +// in the current transaction, the code hash of this account should be returned. // -// this account should be regarded as a non-existent account and zero should be returned. +// 6. Caller tries to get the code hash for an account which is marked as deleted, this +// account should be regarded as a non-existent account and zero should be returned. 
func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) diff --git a/eth/api.go b/eth/api.go index 5152ef2068..17ba652504 100644 --- a/eth/api.go +++ b/eth/api.go @@ -294,10 +294,12 @@ func (api *DebugAPI) StorageRangeAt(blockHash common.Hash, txIndex int, contract if block == nil { return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) } - _, _, statedb, err := api.eth.stateAtTransaction(block, txIndex, 0) + _, _, statedb, release, err := api.eth.stateAtTransaction(block, txIndex, 0) if err != nil { return StorageRangeResult{}, err } + defer release() + st := statedb.StorageTrie(contractAddress) if st == nil { return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) diff --git a/eth/api_backend.go b/eth/api_backend.go index a41930d168..b387407cc3 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -42,6 +42,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/gasprice" + "github.com/ava-labs/coreth/eth/tracers" "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" @@ -445,11 +446,11 @@ func (b *EthAPIBackend) GetMaxBlocksPerRequest() int64 { return b.eth.settings.MaxBlocksPerRequest } -func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { - return b.eth.StateAtBlock(block, reexec, base, checkLive, preferDisk) +func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { + return b.eth.StateAtBlock(block, reexec, base, readOnly, preferDisk) } -func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, 
txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { +func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { return b.eth.stateAtTransaction(block, txIndex, reexec) } diff --git a/eth/backend.go b/eth/backend.go index e940616383..db74165f48 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -30,7 +30,6 @@ package eth import ( "errors" "fmt" - "strings" "sync" "time" @@ -139,18 +138,6 @@ func New( "snapshot clean", common.StorageSize(config.SnapshotCache)*1024*1024, ) - chainConfig, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis, lastAcceptedHash, config.SkipUpgradeCheck) - if genesisErr != nil { - return nil, genesisErr - } - log.Info("") - log.Info(strings.Repeat("-", 153)) - for _, line := range strings.Split(chainConfig.String(), "\n") { - log.Info(line) - } - log.Info(strings.Repeat("-", 153)) - log.Info("") - // Note: RecoverPruning must be called to handle the case that we are midway through offline pruning. // If the data directory is changed in between runs preventing RecoverPruning from performing its job correctly, // it may cause DB corruption. 
@@ -160,6 +147,7 @@ func New( if err := pruner.RecoverPruning(config.OfflinePruningDataDirectory, chainDb); err != nil { log.Error("Failed to recover state", "error", err) } + eth := &Ethereum{ config: config, chainDb: chainDb, @@ -223,21 +211,21 @@ func New( } var err error - eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, lastAcceptedHash) + eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, eth.engine, vmConfig, lastAcceptedHash, config.SkipUpgradeCheck) if err != nil { return nil, err } - if err := eth.handleOfflinePruning(cacheConfig, chainConfig, vmConfig, lastAcceptedHash); err != nil { + if err := eth.handleOfflinePruning(cacheConfig, config.Genesis, vmConfig, lastAcceptedHash); err != nil { return nil, err } eth.bloomIndexer.Start(eth.blockchain) config.TxPool.Journal = "" - eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain) + eth.txPool = core.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain) - eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, clock) + eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, clock) allowUnprotectedTxHashes := make(map[common.Hash]struct{}) for _, txHash := range config.AllowUnprotectedTxHashes { @@ -419,7 +407,7 @@ func (s *Ethereum) precheckPopulateMissingTries() error { return nil } -func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, chainConfig *params.ChainConfig, vmConfig vm.Config, lastAcceptedHash common.Hash) error { +func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, gspec *core.Genesis, vmConfig vm.Config, lastAcceptedHash common.Hash) error { if s.config.OfflinePruning && !s.config.Pruning { return core.ErrRefuseToCorruptArchiver } @@ -460,7 +448,7 @@ func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, chainConf } // Note: Time Marker is written inside of [Prune] before 
compaction begins // (considered an optional optimization) - s.blockchain, err = core.NewBlockChain(s.chainDb, cacheConfig, chainConfig, s.engine, vmConfig, lastAcceptedHash) + s.blockchain, err = core.NewBlockChain(s.chainDb, cacheConfig, gspec, s.engine, vmConfig, lastAcceptedHash, s.config.SkipUpgradeCheck) if err != nil { return fmt.Errorf("failed to re-initialize blockchain after offline pruning: %w", err) } diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 08029eb659..5b708e7750 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -199,13 +199,12 @@ func TestBlockSubscription(t *testing.T) { db = rawdb.NewMemoryDatabase() backend, sys = newTestFilterSystem(t, db, Config{}) api = NewFilterAPI(sys, false) - gspec = &core.Genesis{ + genesis = &core.Genesis{ Config: params.TestChainConfig, BaseFee: big.NewInt(params.ApricotPhase4MinBaseFee), } - genesis = gspec.MustCommit(db) - chain, _, _ = core.GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 10, 10, func(i int, b *core.BlockGen) {}) - chainEvents = []core.ChainEvent{} + _, chain, _, _ = core.GenerateChainWithGenesis(genesis, dummy.NewFaker(), 10, 10, func(i int, b *core.BlockGen) {}) + chainEvents = []core.ChainEvent{} ) for _, blk := range chain { @@ -768,14 +767,13 @@ func flattenLogs(pl [][]*types.Log) []*types.Log { func TestGetLogsRegression(t *testing.T) { var ( - db = rawdb.NewMemoryDatabase() - _, sys = newSectionedTestFilterSystem(t, db, Config{}, 4096) - api = NewFilterAPI(sys, false) - gspec = core.Genesis{ + db = rawdb.NewMemoryDatabase() + _, sys = newSectionedTestFilterSystem(t, db, Config{}, 4096) + api = NewFilterAPI(sys, false) + genesis = &core.Genesis{ Config: params.TestChainConfig, } - genesis = gspec.MustCommit(db) - _, _, _ = core.GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 10, 10, func(i int, b *core.BlockGen) {}) + _, _, _, _ = core.GenerateChainWithGenesis(genesis, dummy.NewFaker(), 
10, 10, func(i int, gen *core.BlockGen) {}) ) test := FilterCriteria{BlockHash: &common.Hash{}, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())} diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 0f24c841cd..a49ecd4578 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -51,10 +51,8 @@ func makeReceipt(addr common.Address) *types.Receipt { } func BenchmarkFilters(b *testing.B) { - dir := b.TempDir() - var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) + db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false) _, sys = newTestFilterSystem(b, db, Config{}) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -62,18 +60,15 @@ func BenchmarkFilters(b *testing.B) { addr3 = common.BytesToAddress([]byte("ethereum")) addr4 = common.BytesToAddress([]byte("random addresses please")) - gspec = core.Genesis{ + gspec = &core.Genesis{ + Config: params.TestChainConfig, Alloc: core.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: params.TestChainConfig, } - genesis = gspec.ToBlock(db) ) defer db.Close() - gspec.MustCommit(db) - - chain, receipts, err := core.GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 100010, 10, func(i int, gen *core.BlockGen) { + _, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 100010, 10, func(i int, gen *core.BlockGen) { switch i { case 2403: receipt := makeReceipt(addr1) @@ -94,6 +89,10 @@ func BenchmarkFilters(b *testing.B) { } }) require.NoError(b, err) + // The test txs are not properly signed, can't simply create a chain + // and then import blocks. TODO(rjl493456442) try to get rid of the + // manual database writes. 
+ gspec.MustCommit(db) for i, block := range chain { rawdb.WriteBlock(db, block) rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) @@ -114,10 +113,8 @@ func BenchmarkFilters(b *testing.B) { } func TestFilters(t *testing.T) { - dir := t.TempDir() - var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) + db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false) _, sys = newTestFilterSystem(t, db, Config{}) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key1.PublicKey) @@ -127,18 +124,15 @@ func TestFilters(t *testing.T) { hash3 = common.BytesToHash([]byte("topic3")) hash4 = common.BytesToHash([]byte("topic4")) - gspec = core.Genesis{ + gspec = &core.Genesis{ + Config: params.TestChainConfig, Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(1000000)}}, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: params.TestChainConfig, } - genesis = gspec.ToBlock(db) ) defer db.Close() - gspec.MustCommit(db) - - chain, receipts, err := core.GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 1000, 10, func(i int, gen *core.BlockGen) { + _, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 1000, 10, func(i int, gen *core.BlockGen) { switch i { case 1: receipt := types.NewReceipt(nil, false, 0) @@ -184,6 +178,10 @@ func TestFilters(t *testing.T) { } }) require.NoError(t, err) + // The test txs are not properly signed, can't simply create a chain + // and then import blocks. TODO(rjl493456442) try to get rid of the + // manual database writes. 
+ gspec.MustCommit(db) for i, block := range chain { rawdb.WriteBlock(db, block) rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 18d3f3659f..376b6933ea 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -95,18 +95,15 @@ func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBloc } engine := dummy.NewETHFaker() - db := rawdb.NewMemoryDatabase() - genesis := gspec.MustCommit(db) // Generate testing blocks - blocks, _, err := core.GenerateChain(gspec.Config, genesis, engine, db, numBlocks, 0, genBlocks) + _, blocks, _, err := core.GenerateChainWithGenesis(gspec, engine, numBlocks, 0, genBlocks) if err != nil { t.Fatal(err) } // Construct testing chain diskdb := rawdb.NewMemoryDatabase() - gspec.Commit(diskdb) - chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec.Config, engine, vm.Config{}, common.Hash{}) + chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("Failed to create local chain, %v", err) } @@ -130,19 +127,14 @@ func newTestBackend(t *testing.T, config *params.ChainConfig, numBlocks int, ext return common.Big0, extDataGasUsage, nil }, }) - db := rawdb.NewMemoryDatabase() - genesis := gspec.MustCommit(db) // Generate testing blocks - - blocks, _, err := core.GenerateChain(gspec.Config, genesis, engine, db, numBlocks, 1, genBlocks) + _, blocks, _, err := core.GenerateChainWithGenesis(gspec, engine, numBlocks, 1, genBlocks) if err != nil { t.Fatal(err) } // Construct testing chain - diskdb := rawdb.NewMemoryDatabase() - gspec.Commit(diskdb) - chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec.Config, engine, vm.Config{}, common.Hash{}) + chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), core.DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { 
t.Fatalf("Failed to create local chain, %v", err) } diff --git a/eth/state_accessor.go b/eth/state_accessor.go index 3e5f4c99ec..7197e8e68c 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -36,40 +36,60 @@ import ( "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/eth/tracers" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) +// noopReleaser is returned in case there is no operation expected +// for releasing state. +var noopReleaser = tracers.StateReleaseFunc(func() {}) + // StateAtBlock retrieves the state database associated with a certain block. // If no state is locally available for the given block, a number of blocks // are attempted to be reexecuted to generate the desired state. The optional -// base layer statedb can be passed then it's regarded as the statedb of the +// base layer statedb can be provided which is regarded as the statedb of the // parent block. +// +// An additional release function will be returned if the requested state is +// available. Release is expected to be invoked when the returned state is no longer needed. +// Its purpose is to prevent resource leaking. Though it can be noop in some cases. +// // Parameters: -// - block: The block for which we want the state (== state at the stateRoot of the parent) -// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state -// - base: If the caller is tracing multiple blocks, the caller can provide the parent state -// continuously from the callsite. -// - checklive: if true, then the live 'blockchain' state database is used. 
If the caller want to -// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid -// storing trash persistently -// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided, -// it would be preferable to start from a fresh state, if we have it on disk. -func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { +// - block: The block for which we want the state(state = block.Root) +// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state +// - base: If the caller is tracing multiple blocks, the caller can provide the parent +// state continuously from the callsite. +// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should +// be made from caller, e.g. perform Commit or other 'save-to-disk' changes. +// Otherwise, the trash generated by caller may be persisted permanently. +// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is +// provided, it would be preferable to start from a fresh state, if we have it +// on disk. +func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { var ( current *types.Block database state.Database report = true origin = block.NumberU64() ) - // Check the live database first if we have the state fully available, use that. - if checkLive { - statedb, err = eth.blockchain.StateAt(block.Root()) - if err == nil { - return statedb, nil + // The state is only for reading purposes, check the state presence in + // live database. + if readOnly { + // The state is available in live database, create a reference + // on top to prevent garbage collection and return a release + // function to deref it. 
+ if statedb, err = eth.blockchain.StateAt(block.Root()); err == nil { + statedb.Database().TrieDB().Reference(block.Root(), common.Hash{}) + return statedb, func() { + statedb.Database().TrieDB().Dereference(block.Root()) + }, nil } } + // The state is both for reading and writing, or it's unavailable in disk, + // try to construct/recover the state over an ephemeral trie.Database for + // isolating the live one. if base != nil { if preferDisk { // Create an ephemeral trie.Database for isolating the live one. Otherwise @@ -77,37 +97,37 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) if statedb, err = state.New(block.Root(), database, nil); err == nil { log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number()) - return statedb, nil + return statedb, noopReleaser, nil } } // The optional base statedb is given, mark the start point as parent block statedb, database, report = base, base.Database(), false current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) } else { - // Otherwise try to reexec blocks until we find a state or reach our limit + // Otherwise, try to reexec blocks until we find a state or reach our limit current = block // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) - // If we didn't check the dirty database, do check the clean one, otherwise - // we would rewind past a persisted block (specific corner case is chain - // tracing from the genesis). - if !checkLive { + // If we didn't check the live database, do check state over ephemeral database, + // otherwise we would rewind past a persisted block (specific corner case is + // chain tracing from the genesis). 
+ if !readOnly { statedb, err = state.New(current.Root(), database, nil) if err == nil { - return statedb, nil + return statedb, noopReleaser, nil } } // Database does not have the state for the given block, try to regenerate for i := uint64(0); i < reexec; i++ { if current.NumberU64() == 0 { - return nil, errors.New("genesis state is missing") + return nil, nil, errors.New("genesis state is missing") } parent := eth.blockchain.GetBlock(current.ParentHash(), current.NumberU64()-1) if parent == nil { - return nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) + return nil, nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) } current = parent @@ -119,13 +139,14 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state if err != nil { switch err.(type) { case *trie.MissingNodeError: - return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) + return nil, nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) default: - return nil, err + return nil, nil, err } } } - // State was available at historical point, regenerate + // State is available at historical point, re-execute the blocks on top for + // the desired state. 
var ( start = time.Now() logged time.Time @@ -141,22 +162,24 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state parentHeader := current.Header() next := current.NumberU64() + 1 if current = eth.blockchain.GetBlockByNumber(next); current == nil { - return nil, fmt.Errorf("block #%d not found", next) + return nil, nil, fmt.Errorf("block #%d not found", next) } _, _, _, err := eth.blockchain.Processor().Process(current, parentHeader, statedb, vm.Config{}) if err != nil { - return nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) + return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) } // Finalize the state so any modifications are written to the trie root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number()), true) if err != nil { - return nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", + return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", current.NumberU64(), current.Root().Hex(), err) } statedb, err = state.New(root, database, nil) if err != nil { - return nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) + return nil, nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) } + // Note: In coreth, the state reference is held by passing true to [statedb.Commit]. + // Drop the parent state to prevent accumulating too many nodes in memory. 
if parent != (common.Hash{}) { database.TrieDB().Dereference(parent) } @@ -166,28 +189,28 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state nodes, imgs := database.TrieDB().Size() log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) } - return statedb, nil + return statedb, func() { database.TrieDB().Dereference(block.Root()) }, nil } // stateAtTransaction returns the execution environment of a certain transaction. -func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { +func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { // Short circuit if it's genesis block. if block.NumberU64() == 0 { - return nil, vm.BlockContext{}, nil, errors.New("no transaction in genesis") + return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis") } // Create the parent state database parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, err := eth.StateAtBlock(parent, reexec, nil, true, false) + statedb, release, err := eth.StateAtBlock(parent, reexec, nil, true, false) if err != nil { - return nil, vm.BlockContext{}, nil, err + return nil, vm.BlockContext{}, nil, nil, err } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, nil + return nil, vm.BlockContext{}, statedb, release, nil } // Recompute transactions up to the target index. 
signer := types.MakeSigner(eth.blockchain.Config(), block.Number(), new(big.Int).SetUint64(block.Time())) @@ -197,17 +220,17 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) if idx == txIndex { - return msg, context, statedb, nil + return msg, context, statedb, release, nil } // Not yet the searched for transaction, execute on top of the current state vmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{}) statedb.Prepare(tx.Hash(), idx) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } // Ensure any modifications are committed to the state // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index d912c5307a..0e0f7b2f8b 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -73,6 +73,10 @@ const ( defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024) ) +// StateReleaseFunc is used to deallocate resources held by constructing a +// historical state for tracing purposes. +type StateReleaseFunc func() + // Backend interface provides the common API services (that are provided by // both full and light clients) with access to necessary functions. 
type Backend interface { @@ -86,8 +90,8 @@ type Backend interface { ChainConfig() *params.ChainConfig Engine() consensus.Engine ChainDb() ethdb.Database - StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) - StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) + StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) + StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) } // baseAPI holds the collection of common methods for API and FileTracerAPI. @@ -230,7 +234,7 @@ func (t *txTraceResult) String() string { type blockTraceTask struct { statedb *state.StateDB // Intermediate state prepped for tracing block *types.Block // Block to trace the transactions from - rootref common.Hash // Trie root reference held for this task + release StateReleaseFunc // The function to release the held resource for this task results []*txTraceResult // Trace results procudes by the task } @@ -263,13 +267,6 @@ func (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, conf if from.Number().Cmp(to.Number()) >= 0 { return nil, fmt.Errorf("end block (#%d) needs to come after start block (#%d)", end, start) } - return api.traceChain(ctx, from, to, config) -} - -// traceChain configures a new tracer according to the provided configuration, and -// executes all the transactions contained within. The return value will be one item -// per transaction, dependent on the requested tracer. 
-func (api *API) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) { // Tracing a chain is a **long** operation, only do with subscriptions notifier, supported := rpc.NotifierFromContext(ctx) if !supported { @@ -277,8 +274,45 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config } sub := notifier.CreateSubscription() - // Prepare all the states for tracing. Note this procedure can take very - // long time. Timeout mechanism is necessary. + resCh := api.traceChain(from, to, config, notifier.Closed()) + go func() { + for result := range resCh { + notifier.Notify(sub.ID, result) + } + }() + return sub, nil +} + +// releaser is a helper tool responsible for caching the release +// callbacks of tracing state. +type releaser struct { + releases []StateReleaseFunc + lock sync.Mutex +} + +func (r *releaser) add(release StateReleaseFunc) { + r.lock.Lock() + defer r.lock.Unlock() + + r.releases = append(r.releases, release) +} + +func (r *releaser) call() { + r.lock.Lock() + defer r.lock.Unlock() + + for _, release := range r.releases { + release() + } + r.releases = r.releases[:0] +} + +// traceChain configures a new tracer according to the provided configuration, and +// executes all the transactions contained within. The tracing chain range includes +// the end block but excludes the start one. The return value will be one item per +// transaction, dependent on the requested tracer. +// The tracing procedure should be aborted in case the closed signal is received. 
+func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed <-chan interface{}) chan *blockTraceResult { reexec := defaultTraceReexec if config != nil && config.Reexec != nil { reexec = *config.Reexec @@ -289,20 +323,23 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config threads = blocks } var ( - pend = new(sync.WaitGroup) - tasks = make(chan *blockTraceTask, threads) - results = make(chan *blockTraceTask, threads) - localctx = context.Background() + pend = new(sync.WaitGroup) + ctx = context.Background() + taskCh = make(chan *blockTraceTask, threads) + resCh = make(chan *blockTraceTask, threads) + reler = new(releaser) ) for th := 0; th < threads; th++ { pend.Add(1) go func() { defer pend.Done() - // Fetch and execute the next block trace tasks - for task := range tasks { - signer := types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), new(big.Int).SetUint64(task.block.Time())) - blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(localctx), nil) + // Fetch and execute the block trace taskCh + for task := range taskCh { + var ( + signer = types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), task.block.Timestamp()) + blockCtx = core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil) + ) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { msg, _ := tx.AsMessage(signer, task.block.BaseFee()) @@ -311,7 +348,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config TxIndex: i, TxHash: tx.Hash(), } - res, err := api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, config) + res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config) if err != nil { task.results[i] = &txTraceResult{Error: err.Error()} log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err) @@ -321,36 +358,38 @@ func (api *API) traceChain(ctx context.Context, start, end 
*types.Block, config task.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number())) task.results[i] = &txTraceResult{Result: res} } - // Stream the result back to the user or abort on teardown + // Tracing state is used up, queue it for de-referencing + reler.add(task.release) + + // Stream the result back to the result catcher or abort on teardown select { - case results <- task: - case <-notifier.Closed(): + case resCh <- task: + case <-closed: return } } }() } // Start a goroutine to feed all the blocks into the tracers - var ( - begin = time.Now() - derefTodo []common.Hash // list of hashes to dereference from the db - derefsMu sync.Mutex // mutex for the derefs - ) - go func() { var ( logged time.Time + begin = time.Now() number uint64 traced uint64 failed error - parent common.Hash statedb *state.StateDB + release StateReleaseFunc ) // Ensure everything is properly cleaned up on any exit path defer func() { - close(tasks) + close(taskCh) pend.Wait() + // Clean out any pending derefs. 
+ reler.call() + + // Log the chain result switch { case failed != nil: log.Warn("Chain tracing failed", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin), "err", failed) @@ -359,105 +398,97 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config default: log.Info("Chain tracing finished", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin)) } - close(results) + close(resCh) }() - var preferDisk bool // Feed all the blocks both into the tracer, as well as fast process concurrently for number = start.NumberU64(); number < end.NumberU64(); number++ { // Stop tracing if interruption was requested select { - case <-notifier.Closed(): + case <-closed: return default: } - // clean out any derefs - derefsMu.Lock() - for _, h := range derefTodo { - statedb.Database().TrieDB().Dereference(h) - } - derefTodo = derefTodo[:0] - derefsMu.Unlock() - // Print progress logs if long enough time elapsed if time.Since(logged) > 8*time.Second { logged = time.Now() log.Info("Tracing chain segment", "start", start.NumberU64(), "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin)) } - // Retrieve the parent state to trace on top - block, err := api.blockByNumber(localctx, rpc.BlockNumber(number)) + // Retrieve the parent block and target block for tracing. + block, err := api.blockByNumber(ctx, rpc.BlockNumber(number)) if err != nil { failed = err break } - // Prepare the statedb for tracing. Don't use the live database for - // tracing to avoid persisting state junks into the database. 
- statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false, preferDisk) + next, err := api.blockByNumber(ctx, rpc.BlockNumber(number+1)) if err != nil { failed = err break } - if trieDb := statedb.Database().TrieDB(); trieDb != nil { - // Hold the reference for tracer, will be released at the final stage - trieDb.Reference(block.Root(), common.Hash{}) - - // Release the parent state because it's already held by the tracer - if parent != (common.Hash{}) { - trieDb.Dereference(parent) - } - // Prefer disk if the trie db memory grows too much - s1, s2 := trieDb.Size() - if !preferDisk && (s1+s2) > defaultTracechainMemLimit { - log.Info("Switching to prefer-disk mode for tracing", "size", s1+s2) - preferDisk = true - } + // Prepare the statedb for tracing. Don't use the live database for + // tracing to avoid persisting state junks into the database. Switch + // over to `preferDisk` mode only if the memory usage exceeds the + // limit, the trie database will be reconstructed from scratch only + // if the relevant state is available in disk. + var preferDisk bool + if statedb != nil { + s1, s2 := statedb.Database().TrieDB().Size() + preferDisk = s1+s2 > defaultTracechainMemLimit } - parent = block.Root() - - next, err := api.blockByNumber(localctx, rpc.BlockNumber(number+1)) + statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, statedb, false, preferDisk) if err != nil { failed = err break } + // Clean out any pending derefs. Note this step must be done after + // constructing tracing state, because the tracing state of block + // next depends on the parent state and construction may fail if + // we release too early. 
+ reler.call() + // Send the block over to the concurrent tracers (if not in the fast-forward phase) txs := next.Transactions() select { - case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: next, rootref: block.Root(), results: make([]*txTraceResult, len(txs))}: - case <-notifier.Closed(): + case taskCh <- &blockTraceTask{statedb: statedb.Copy(), block: next, release: release, results: make([]*txTraceResult, len(txs))}: + case <-closed: + reler.add(release) return } traced += uint64(len(txs)) } }() - // Keep reading the trace results and stream the to the user + // Keep reading the trace results and stream them to result channel. + retCh := make(chan *blockTraceResult) go func() { + defer close(retCh) var ( - done = make(map[uint64]*blockTraceResult) next = start.NumberU64() + 1 + done = make(map[uint64]*blockTraceResult) ) - for res := range results { + for res := range resCh { // Queue up next received result result := &blockTraceResult{ Block: hexutil.Uint64(res.block.NumberU64()), Hash: res.block.Hash(), Traces: res.results, } - // Schedule any parent tries held in memory by this task for dereferencing done[uint64(result.Block)] = result - derefsMu.Lock() - derefTodo = append(derefTodo, res.rootref) - derefsMu.Unlock() - // Stream completed traces to the user, aborting on the first error + + // Stream completed traces to the result channel for result, ok := done[next]; ok; result, ok = done[next] { if len(result.Traces) > 0 || next == end.NumberU64() { - notifier.Notify(sub.ID, result) + // It will be blocked in case the channel consumer doesn't take the + // tracing result in time(e.g. the websocket connect is not stable) + // which will eventually block the entire chain tracer. It's the + // expected behavior to not waste node resources for a non-active user. 
+ retCh <- result } delete(done, next) next++ } } }() - return sub, nil + return retCh } // TraceBlockByNumber returns the structured logs created during the execution of @@ -550,10 +581,12 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } + defer release() + var ( roots []common.Hash signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), new(big.Int).SetUint64(block.Time())) @@ -621,10 +654,12 @@ func (api *baseAPI) traceBlock(ctx context.Context, block *types.Block, config * if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } + defer release() + // Execute all the transaction contained within the block concurrently var ( signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), new(big.Int).SetUint64(block.Time())) @@ -712,10 +747,11 @@ func (api *FileTracerAPI) standardTraceBlockToFile(ctx context.Context, block *t if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } + defer release() // Retrieve the tracing configurations, or use default values var ( @@ -831,10 +867,12 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config * if err != nil { return nil, err } - msg, vmctx, statedb, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) + msg, vmctx, 
statedb, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) if err != nil { return nil, err } + defer release() + txctx := &Context{ BlockHash: blockHash, TxIndex: int(index), @@ -875,10 +913,12 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) if err != nil { return nil, err } + defer release() + vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) // Apply the customization rules if required. if config != nil { diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 85085eefc4..be367340f1 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -36,6 +36,7 @@ import ( "math/big" "reflect" "sort" + "sync/atomic" "testing" "github.com/ava-labs/coreth/consensus" @@ -66,6 +67,9 @@ type testBackend struct { engine consensus.Engine chaindb ethdb.Database chain *core.BlockChain + + refHook func() // Hook is invoked when the requested state is referenced + relHook func() // Hook is invoked when the requested state is released } func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { @@ -75,24 +79,19 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i chaindb: rawdb.NewMemoryDatabase(), } // Generate blocks for testing - var ( - gendb = rawdb.NewMemoryDatabase() - genesis = gspec.MustCommit(gendb) - ) - blocks, _, err := core.GenerateChain(backend.chainConfig, genesis, backend.engine, gendb, n, 10, generator) + _, blocks, _, err := core.GenerateChainWithGenesis(gspec, backend.engine, n, 10, generator) if err != nil { t.Fatal(err) } // Import the canonical chain - gspec.MustCommit(backend.chaindb) cacheConfig := &core.CacheConfig{ 
TrieCleanLimit: 256, TrieDirtyLimit: 256, SnapshotLimit: 128, Pruning: false, // Archive mode } - chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, backend.chainConfig, backend.engine, vm.Config{}, common.Hash{}) + chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, backend.engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -157,25 +156,33 @@ func (b *testBackend) ChainDb() ethdb.Database { return b.chaindb } -func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { +func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) { statedb, err := b.chain.StateAt(block.Root()) if err != nil { - return nil, errStateNotFound + return nil, nil, errStateNotFound + } + if b.refHook != nil { + b.refHook() + } + release := func() { + if b.relHook != nil { + b.relHook() + } } - return statedb, nil + return statedb, release, nil } -func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { +func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) { parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, nil, errBlockNotFound + return nil, vm.BlockContext{}, nil, nil, errBlockNotFound } - statedb, err := b.chain.StateAt(parent.Root()) + statedb, release, err := b.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { - return nil, vm.BlockContext{}, nil, errStateNotFound + return nil, vm.BlockContext{}, nil, nil, errStateNotFound } if txIndex == 
0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, nil + return nil, vm.BlockContext{}, statedb, release, nil } // Recompute transactions up to the target index. signer := types.MakeSigner(b.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())) @@ -184,15 +191,15 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), b.chain, nil) if idx == txIndex { - return msg, context, statedb, nil + return msg, context, statedb, release, nil } vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } func TestTraceCall(t *testing.T) { @@ -337,7 +344,8 @@ func TestTraceTransaction(t *testing.T) { Alloc: core.GenesisAlloc{ accounts[0].addr: {Balance: big.NewInt(params.Ether)}, accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - }} + }, + } target := common.Hash{} signer := types.HomesteadSigner{} api := NewAPI(newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { @@ -378,7 +386,8 @@ func TestTraceBlock(t *testing.T) { accounts[0].addr: {Balance: big.NewInt(params.Ether)}, accounts[1].addr: {Balance: big.NewInt(params.Ether)}, accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }} + }, + } genBlocks := 10 signer := types.HomesteadSigner{} api := NewAPI(newTestBackend(t, genBlocks, genesis, 
func(i int, b *core.BlockGen) { @@ -455,7 +464,8 @@ func TestTracingWithOverrides(t *testing.T) { accounts[0].addr: {Balance: big.NewInt(params.Ether)}, accounts[1].addr: {Balance: big.NewInt(params.Ether)}, accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }} + }, + } genBlocks := 10 signer := types.HomesteadSigner{} api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { @@ -643,3 +653,78 @@ func newStates(keys []common.Hash, vals []common.Hash) *map[common.Hash]common.H } return &m } + +func TestTraceChain(t *testing.T) { + // Initialize test accounts + // Note: the balances in this test have been increased compared to go-ethereum. + accounts := newAccounts(3) + genesis := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(5 * params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(5 * params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(5 * params.Ether)}, + }, + } + genBlocks := 50 + signer := types.HomesteadSigner{} + + var ( + ref uint32 // total refs has made + rel uint32 // total rels has made + nonce uint64 + ) + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + for j := 0; j < i+1; j++ { + tx, _ := types.SignTx(types.NewTransaction(nonce, accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) + b.AddTx(tx) + nonce += 1 + } + }) + backend.refHook = func() { atomic.AddUint32(&ref, 1) } + backend.relHook = func() { atomic.AddUint32(&rel, 1) } + api := NewAPI(backend) + + single := `{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}` + var cases = []struct { + start uint64 + end uint64 + config *TraceConfig + }{ + {0, 50, nil}, // the entire chain range, blocks [1, 50] + {10, 20, nil}, // the middle chain range, blocks [11, 20] + } + for _, c := range cases { + ref, rel = 0, 0 // 
clean up the counters + + from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start)) + to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end)) + resCh := api.traceChain(from, to, c.config, nil) + + next := c.start + 1 + for result := range resCh { + if next != uint64(result.Block) { + t.Error("Unexpected tracing block") + } + if len(result.Traces) != int(next) { + t.Error("Unexpected tracing result") + } + for _, trace := range result.Traces { + blob, _ := json.Marshal(trace) + if string(blob) != single { + t.Error("Unexpected tracing result") + } + } + next += 1 + } + if next != c.end+1 { + t.Error("Missing tracing block") + } + if ref != rel { + t.Errorf("Ref and deref actions are not equal, ref %d rel %d", ref, rel) + } + } +} diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go index e28a17df5d..4a55f06d43 100644 --- a/eth/tracers/js/goja.go +++ b/eth/tracers/js/goja.go @@ -266,10 +266,11 @@ func (t *jsTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope log.memory.memory = scope.Memory log.stack.stack = scope.Stack log.contract.contract = scope.Contract - log.pc = uint(pc) - log.gas = uint(gas) - log.cost = uint(cost) - log.depth = uint(depth) + log.pc = pc + log.gas = gas + log.cost = cost + log.refund = t.env.StateDB.GetRefund() + log.depth = depth log.err = err if _, err := t.step(t.obj, t.logValue, t.dbValue); err != nil { t.onError("step", err) @@ -917,33 +918,19 @@ type steplog struct { stack *stackObj contract *contractObj - pc uint - gas uint - cost uint - depth uint - refund uint + pc uint64 + gas uint64 + cost uint64 + depth int + refund uint64 err error } -func (l *steplog) GetPC() uint { - return l.pc -} - -func (l *steplog) GetGas() uint { - return l.gas -} - -func (l *steplog) GetCost() uint { - return l.cost -} - -func (l *steplog) GetDepth() uint { - return l.depth -} - -func (l *steplog) GetRefund() uint { - return l.refund -} +func (l *steplog) GetPC() uint64 { return l.pc } +func (l 
*steplog) GetGas() uint64 { return l.gas } +func (l *steplog) GetCost() uint64 { return l.cost } +func (l *steplog) GetDepth() int { return l.depth } +func (l *steplog) GetRefund() uint64 { return l.refund } func (l *steplog) GetError() goja.Value { if l.err != nil { diff --git a/eth/tracers/native/tracer.go b/eth/tracers/native/tracer.go index dca9cd01e8..ed6f62df1c 100644 --- a/eth/tracers/native/tracer.go +++ b/eth/tracers/native/tracer.go @@ -24,26 +24,20 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -/* -Package native is a collection of tracers written in go. - -In order to add a native tracer and have it compiled into the binary, a new -file needs to be added to this folder, containing an implementation of the -`eth.tracers.Tracer` interface. - -Aside from implementing the tracer, it also needs to register itself, using the -`register` method -- and this needs to be done in the package initialization. - -Example: - -```golang - - func init() { - register("noopTracerNative", newNoopTracer) - } - -``` -*/ +// Package native is a collection of tracers written in go. +// +// In order to add a native tracer and have it compiled into the binary, a new +// file needs to be added to this folder, containing an implementation of the +// `eth.tracers.Tracer` interface. +// +// Aside from implementing the tracer, it also needs to register itself, using the +// `register` method -- and this needs to be done in the package initialization. +// +// Example: +// +// func init() { +// register("noopTracerNative", newNoopTracer) +// } package native import ( diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index f74948ba08..ee7a7b581f 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -632,12 +632,12 @@ func ToBlockNumArg(number *big.Int) string { // negative numbers to special strings (latest, pending) then is // used on its server side. 
See rpc/types.go for the comparison. // In Coreth, latest, pending, and accepted are all treated the same - // therefore, if [number] is nil or a negative number in [-3, -1] + // therefore, if [number] is nil or a negative number in [-4, -1] // we want the latest accepted block if number == nil { return "latest" } - low := big.NewInt(-3) + low := big.NewInt(-4) high := big.NewInt(-1) if number.Cmp(low) >= 0 && number.Cmp(high) <= 0 { return "latest" diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index bac227f157..79d8ddf536 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2004,25 +2004,45 @@ func NewDebugAPI(b Backend) *DebugAPI { return &DebugAPI{b: b} } -// GetHeaderRlp retrieves the RLP encoded for of a single header. -func (api *DebugAPI) GetHeaderRlp(ctx context.Context, number uint64) (hexutil.Bytes, error) { - header, _ := api.b.HeaderByNumber(ctx, rpc.BlockNumber(number)) +// GetRawHeader retrieves the RLP encoding for a single header. +func (api *DebugAPI) GetRawHeader(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { + var hash common.Hash + if h, ok := blockNrOrHash.Hash(); ok { + hash = h + } else { + block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash) + if err != nil { + return nil, err + } + hash = block.Hash() + } + header, _ := api.b.HeaderByHash(ctx, hash) if header == nil { - return nil, fmt.Errorf("header #%d not found", number) + return nil, fmt.Errorf("header #%d not found", hash) } return rlp.EncodeToBytes(header) } -// GetBlockRlp retrieves the RLP encoded for of a single block. -func (api *DebugAPI) GetBlockRlp(ctx context.Context, number uint64) (hexutil.Bytes, error) { - block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number)) +// GetRawBlock retrieves the RLP encoded for a single block. 
+func (api *DebugAPI) GetRawBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { + var hash common.Hash + if h, ok := blockNrOrHash.Hash(); ok { + hash = h + } else { + block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash) + if err != nil { + return nil, err + } + hash = block.Hash() + } + block, _ := api.b.BlockByHash(ctx, hash) if block == nil { - return nil, fmt.Errorf("block #%d not found", number) + return nil, fmt.Errorf("block #%d not found", hash) } return rlp.EncodeToBytes(block) } -// GetRawReceipts retrieves the binary-encoded raw receipts of a single block. +// GetRawReceipts retrieves the binary-encoded receipts of a single block. func (api *DebugAPI) GetRawReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]hexutil.Bytes, error) { var hash common.Hash if h, ok := blockNrOrHash.Hash(); ok { @@ -2049,6 +2069,22 @@ func (api *DebugAPI) GetRawReceipts(ctx context.Context, blockNrOrHash rpc.Block return result, nil } +// GetRawTransaction returns the bytes of the transaction for the given hash. +func (s *DebugAPI) GetRawTransaction(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { + // Retrieve a finalized transaction, or a pooled otherwise + tx, _, _, _, err := s.b.GetTransaction(ctx, hash) + if err != nil { + return nil, err + } + if tx == nil { + if tx = s.b.GetPoolTransaction(hash); tx == nil { + // Transaction not found anywhere, abort + return nil, nil + } + } + return tx.MarshalBinary() +} + // PrintBlock retrieves a block and returns its pretty printed form. 
func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, error) { block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number)) diff --git a/params/version.go b/params/version.go index 73a5bdbfd9..dbc62d1cb1 100644 --- a/params/version.go +++ b/params/version.go @@ -31,10 +31,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 10 // Minor version component of the current release - VersionPatch = 26 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 11 // Minor version component of the current release + VersionPatch = 0 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 4bab7cce43..d9c57ff739 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -953,6 +953,7 @@ func (vm *VM) setAppRequestHandlers() { ) syncRequestHandler := handlers.NewSyncHandler( vm.blockChain, + vm.chaindb, evmTrieDB, vm.atomicTrie.TrieDB(), vm.networkCodec, diff --git a/rpc/client.go b/rpc/client.go index 74a86d928f..68a683903c 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -110,7 +110,7 @@ type Client struct { reqTimeout chan *requestOp // removes response IDs when call timeout expires } -type reconnectFunc func(ctx context.Context) (ServerCodec, error) +type reconnectFunc func(context.Context) (ServerCodec, error) type clientContextKey struct{} @@ -169,14 +169,16 @@ func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, erro // // The currently supported URL schemes are "http", "https", "ws" and "wss". 
If rawurl is a // file name with no URL scheme, a local socket connection is established using UNIX -// domain sockets on supported platforms and named pipes on Windows. If you want to -// configure transport options, use DialHTTP, DialWebsocket or DialIPC instead. +// domain sockets on supported platforms and named pipes on Windows. +// +// If you want to further configure the transport, use DialOptions instead of this +// function. // // For websocket connections, the origin is set to the local host name. // -// The client reconnects automatically if the connection is lost. +// The client reconnects automatically when the connection is lost. func Dial(rawurl string) (*Client, error) { - return DialContext(context.Background(), rawurl) + return DialOptions(context.Background(), rawurl) } // DialContext creates a new RPC client, just like Dial. @@ -184,22 +186,46 @@ func Dial(rawurl string) (*Client, error) { // The context is used to cancel or time out the initial connection establishment. It does // not affect subsequent interactions with the client. func DialContext(ctx context.Context, rawurl string) (*Client, error) { + return DialOptions(ctx, rawurl) +} + +// DialOptions creates a new RPC client for the given URL. You can supply any of the +// pre-defined client options to configure the underlying transport. +// +// The context is used to cancel or time out the initial connection establishment. It does +// not affect subsequent interactions with the client. +// +// The client reconnects automatically when the connection is lost. 
+func DialOptions(ctx context.Context, rawurl string, options ...ClientOption) (*Client, error) { u, err := url.Parse(rawurl) if err != nil { return nil, err } + + cfg := new(clientConfig) + for _, opt := range options { + opt.applyOption(cfg) + } + + var reconnect reconnectFunc switch u.Scheme { case "http", "https": - return DialHTTP(rawurl) + reconnect = newClientTransportHTTP(rawurl, cfg) case "ws", "wss": - return DialWebsocket(ctx, rawurl, "") + rc, err := newClientTransportWS(rawurl, cfg) + if err != nil { + return nil, err + } + reconnect = rc //case "stdio": - // return DialStdIO(ctx) + //reconnect = newClientTransportIO(os.Stdin, os.Stdout) //case "": - // return DialIPC(ctx, rawurl) + //reconnect = newClientTransportIPC(rawurl) default: return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme) } + + return newClient(ctx, reconnect) } // ClientFromContext retrieves the client from the context, if any. This can be used to perform diff --git a/rpc/client_opt.go b/rpc/client_opt.go new file mode 100644 index 0000000000..c1b9931253 --- /dev/null +++ b/rpc/client_opt.go @@ -0,0 +1,116 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rpc + +import ( + "net/http" + + "github.com/gorilla/websocket" +) + +// ClientOption is a configuration option for the RPC client. +type ClientOption interface { + applyOption(*clientConfig) +} + +type clientConfig struct { + httpClient *http.Client + httpHeaders http.Header + httpAuth HTTPAuth + + wsDialer *websocket.Dialer +} + +func (cfg *clientConfig) initHeaders() { + if cfg.httpHeaders == nil { + cfg.httpHeaders = make(http.Header) + } +} + +func (cfg *clientConfig) setHeader(key, value string) { + cfg.initHeaders() + cfg.httpHeaders.Set(key, value) +} + +type optionFunc func(*clientConfig) + +func (fn optionFunc) applyOption(opt *clientConfig) { + fn(opt) +} + +// WithWebsocketDialer configures the websocket.Dialer used by the RPC client. +func WithWebsocketDialer(dialer websocket.Dialer) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.wsDialer = &dialer + }) +} + +// WithHeader configures HTTP headers set by the RPC client. Headers set using this option +// will be used for both HTTP and WebSocket connections. +func WithHeader(key, value string) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.initHeaders() + cfg.httpHeaders.Set(key, value) + }) +} + +// WithHeaders configures HTTP headers set by the RPC client. Headers set using this +// option will be used for both HTTP and WebSocket connections. 
+func WithHeaders(headers http.Header) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.initHeaders() + for k, vs := range headers { + cfg.httpHeaders[k] = vs + } + }) +} + +// WithHTTPClient configures the http.Client used by the RPC client. +func WithHTTPClient(c *http.Client) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.httpClient = c + }) +} + +// WithHTTPAuth configures HTTP request authentication. The given provider will be called +// whenever a request is made. Note that only one authentication provider can be active at +// any time. +func WithHTTPAuth(a HTTPAuth) ClientOption { + if a == nil { + panic("nil auth") + } + return optionFunc(func(cfg *clientConfig) { + cfg.httpAuth = a + }) +} + +// A HTTPAuth function is called by the client whenever a HTTP request is sent. +// The function must be safe for concurrent use. +// +// Usually, HTTPAuth functions will call h.Set("authorization", "...") to add +// auth information to the request. +type HTTPAuth func(h http.Header) error diff --git a/rpc/client_opt_test.go b/rpc/client_opt_test.go new file mode 100644 index 0000000000..272d73cbe2 --- /dev/null +++ b/rpc/client_opt_test.go @@ -0,0 +1,34 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +package rpc_test + +import ( + "context" + "net/http" + "time" + + "github.com/ava-labs/coreth/rpc" +) + +// This example configures a HTTP-based RPC client with two options - one setting the +// overall request timeout, the other adding a custom HTTP header to all requests. 
+func ExampleDialOptions() { + tokenHeader := rpc.WithHeader("x-token", "foo") + httpClient := rpc.WithHTTPClient(&http.Client{ + Timeout: 10 * time.Second, + }) + + ctx := context.Background() + c, err := rpc.DialOptions(ctx, "http://rpc.example.com", httpClient, tokenHeader) + if err != nil { + panic(err) + } + c.Close() +} diff --git a/rpc/client_test.go b/rpc/client_test.go index 35a204cc99..5bac476264 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -29,7 +29,6 @@ package rpc import ( "context" "encoding/json" - "errors" "fmt" "math/rand" "net" @@ -91,11 +90,15 @@ func TestClientErrorData(t *testing.T) { } // Check code. + // The method handler returns an error value which implements the rpc.Error + // interface, i.e. it has a custom error code. The server returns this error code. + expectedCode := testError{}.ErrorCode() if e, ok := err.(Error); !ok { t.Fatalf("client did not return rpc.Error, got %#v", e) - } else if e.ErrorCode() != (testError{}.ErrorCode()) { - t.Fatalf("wrong error code %d, want %d", e.ErrorCode(), testError{}.ErrorCode()) + } else if e.ErrorCode() != expectedCode { + t.Fatalf("wrong error code %d, want %d", e.ErrorCode(), expectedCode) } + // Check data. 
if e, ok := err.(DataError); !ok { t.Fatalf("client did not return rpc.DataError, got %#v", e) @@ -153,53 +156,6 @@ func TestClientBatchRequest(t *testing.T) { } } -func TestClientBatchRequest_len(t *testing.T) { - b, err := json.Marshal([]jsonrpcMessage{ - {Version: "2.0", ID: json.RawMessage("1"), Method: "foo", Result: json.RawMessage(`"0x1"`)}, - {Version: "2.0", ID: json.RawMessage("2"), Method: "bar", Result: json.RawMessage(`"0x2"`)}, - }) - if err != nil { - t.Fatal("failed to encode jsonrpc message:", err) - } - s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - _, err := rw.Write(b) - if err != nil { - t.Error("failed to write response:", err) - } - })) - t.Cleanup(s.Close) - - client, err := Dial(s.URL) - if err != nil { - t.Fatal("failed to dial test server:", err) - } - defer client.Close() - - t.Run("too-few", func(t *testing.T) { - batch := []BatchElem{ - {Method: "foo"}, - {Method: "bar"}, - {Method: "baz"}, - } - ctx, cancelFn := context.WithTimeout(context.Background(), time.Second) - defer cancelFn() - if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) { - t.Errorf("expected %q but got: %v", ErrBadResult, err) - } - }) - - t.Run("too-many", func(t *testing.T) { - batch := []BatchElem{ - {Method: "foo"}, - } - ctx, cancelFn := context.WithTimeout(context.Background(), time.Second) - defer cancelFn() - if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) { - t.Errorf("expected %q but got: %v", ErrBadResult, err) - } - }) -} - func TestClientNotify(t *testing.T) { server := newTestServer() defer server.Stop() diff --git a/rpc/errors.go b/rpc/errors.go index a43fa9bd8b..7db25ffa2c 100644 --- a/rpc/errors.go +++ b/rpc/errors.go @@ -64,9 +64,15 @@ var ( _ Error = new(invalidRequestError) _ Error = new(invalidMessageError) _ Error = new(invalidParamsError) + _ Error = new(internalServerError) ) -const defaultErrorCode = -32000 +const ( + errcodeDefault = -32000 + 
errcodeNotificationsUnsupported = -32001 + errcodePanic = -32603 + errcodeMarshalError = -32603 +) type methodNotFoundError struct{ method string } @@ -111,3 +117,13 @@ type invalidParamsError struct{ message string } func (e *invalidParamsError) ErrorCode() int { return -32602 } func (e *invalidParamsError) Error() string { return e.message } + +// internalServerError is used for server errors during request processing. +type internalServerError struct { + code int + message string +} + +func (e *internalServerError) ErrorCode() int { return e.code } + +func (e *internalServerError) Error() string { return e.message } diff --git a/rpc/handler.go b/rpc/handler.go index 16f5f4ca45..57473fda05 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -58,7 +58,7 @@ import ( // Now send the request, then wait for the reply to be delivered through handleMsg: // // if err := op.wait(...); err != nil { -// h.removeRequestOp(op) // timeout, etc. +// h.removeRequestOp(op) // timeout, etc. // } type handler struct { reg *serviceRegistry @@ -453,7 +453,10 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage // handleSubscribe processes *_subscribe method calls. func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage { if !h.allowSubscribe { - return msg.errorResponse(ErrNotificationsUnsupported) + return msg.errorResponse(&internalServerError{ + code: errcodeNotificationsUnsupported, + message: ErrNotificationsUnsupported.Error(), + }) } // Subscription method name is first argument. 
diff --git a/rpc/http.go b/rpc/http.go index 89e8500c89..6d0a01c602 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -55,6 +55,7 @@ type httpConn struct { closeCh chan interface{} mu sync.Mutex // protects headers headers http.Header + auth HTTPAuth } // httpConn implements ServerCodec, but it is treated specially by Client @@ -131,8 +132,15 @@ var DefaultHTTPTimeouts = HTTPTimeouts{ IdleTimeout: 120 * time.Second, } +// DialHTTP creates a new RPC client that connects to an RPC server over HTTP. +func DialHTTP(endpoint string) (*Client, error) { + return DialHTTPWithClient(endpoint, new(http.Client)) +} + // DialHTTPWithClient creates a new RPC client that connects to an RPC server over HTTP // using the provided HTTP Client. +// +// Deprecated: use DialOptions and the WithHTTPClient option. func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) { // Sanity check URL so we don't end up with a client that will fail every request. _, err := url.Parse(endpoint) @@ -140,24 +148,35 @@ func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) { return nil, err } - initctx := context.Background() - headers := make(http.Header, 2) + var cfg clientConfig + fn := newClientTransportHTTP(endpoint, &cfg) + return newClient(context.Background(), fn) +} + +func newClientTransportHTTP(endpoint string, cfg *clientConfig) reconnectFunc { + headers := make(http.Header, 2+len(cfg.httpHeaders)) headers.Set("accept", contentType) headers.Set("content-type", contentType) - return newClient(initctx, func(context.Context) (ServerCodec, error) { - hc := &httpConn{ - client: client, - headers: headers, - url: endpoint, - closeCh: make(chan interface{}), - } - return hc, nil - }) -} + for key, values := range cfg.httpHeaders { + headers[key] = values + } -// DialHTTP creates a new RPC client that connects to an RPC server over HTTP. 
-func DialHTTP(endpoint string) (*Client, error) { - return DialHTTPWithClient(endpoint, new(http.Client)) + client := cfg.httpClient + if client == nil { + client = new(http.Client) + } + + hc := &httpConn{ + client: client, + headers: headers, + url: endpoint, + auth: cfg.httpAuth, + closeCh: make(chan interface{}), + } + + return func(ctx context.Context) (ServerCodec, error) { + return hc, nil + } } func (c *Client) sendHTTP(ctx context.Context, op *requestOp, msg interface{}) error { @@ -212,6 +231,11 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos hc.mu.Lock() req.Header = hc.headers.Clone() hc.mu.Unlock() + if hc.auth != nil { + if err := hc.auth(req.Header); err != nil { + return nil, err + } + } // do request resp, err := hc.client.Do(req) diff --git a/rpc/json.go b/rpc/json.go index 7e1c6a7f38..84b9fede06 100644 --- a/rpc/json.go +++ b/rpc/json.go @@ -68,21 +68,25 @@ type jsonrpcMessage struct { } func (msg *jsonrpcMessage) isNotification() bool { - return msg.ID == nil && msg.Method != "" + return msg.hasValidVersion() && msg.ID == nil && msg.Method != "" } func (msg *jsonrpcMessage) isCall() bool { - return msg.hasValidID() && msg.Method != "" + return msg.hasValidVersion() && msg.hasValidID() && msg.Method != "" } func (msg *jsonrpcMessage) isResponse() bool { - return msg.hasValidID() && msg.Method == "" && msg.Params == nil && (msg.Result != nil || msg.Error != nil) + return msg.hasValidVersion() && msg.hasValidID() && msg.Method == "" && msg.Params == nil && (msg.Result != nil || msg.Error != nil) } func (msg *jsonrpcMessage) hasValidID() bool { return len(msg.ID) > 0 && msg.ID[0] != '{' && msg.ID[0] != '[' } +func (msg *jsonrpcMessage) hasValidVersion() bool { + return msg.Version == vsn +} + func (msg *jsonrpcMessage) isSubscribe() bool { return strings.HasSuffix(msg.Method, subscribeMethodSuffix) } @@ -110,15 +114,14 @@ func (msg *jsonrpcMessage) errorResponse(err error) *jsonrpcMessage { func (msg 
*jsonrpcMessage) response(result interface{}) *jsonrpcMessage { enc, err := json.Marshal(result) if err != nil { - // TODO: wrap with 'internal server error' - return msg.errorResponse(err) + return msg.errorResponse(&internalServerError{errcodeMarshalError, err.Error()}) } return &jsonrpcMessage{Version: vsn, ID: msg.ID, Result: enc} } func errorMessage(err error) *jsonrpcMessage { msg := &jsonrpcMessage{Version: vsn, ID: null, Error: &jsonError{ - Code: defaultErrorCode, + Code: errcodeDefault, Message: err.Error(), }} ec, ok := err.(Error) diff --git a/rpc/server_test.go b/rpc/server_test.go index a7da64e509..ac70eb1c9f 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -54,7 +54,7 @@ func TestServerRegisterName(t *testing.T) { t.Fatalf("Expected service calc to be registered") } - wantCallbacks := 10 + wantCallbacks := 12 if len(svc.callbacks) != wantCallbacks { t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks)) } diff --git a/rpc/service.go b/rpc/service.go index 86ec93fd56..cbdb8e3047 100644 --- a/rpc/service.go +++ b/rpc/service.go @@ -28,7 +28,6 @@ package rpc import ( "context" - "errors" "fmt" "reflect" "runtime" @@ -209,7 +208,7 @@ func (c *callback) call(ctx context.Context, method string, args []reflect.Value buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] log.Error("RPC method " + method + " crashed: " + fmt.Sprintf("%v\n%s", err, buf)) - errRes = errors.New("method handler crashed") + errRes = &internalServerError{errcodePanic, "method handler crashed"} } }() // Run the callback. 
diff --git a/rpc/subscription_test.go b/rpc/subscription_test.go index 72caeb85d3..d09815a685 100644 --- a/rpc/subscription_test.go +++ b/rpc/subscription_test.go @@ -89,7 +89,7 @@ func TestSubscriptions(t *testing.T) { request := map[string]interface{}{ "id": i, "method": fmt.Sprintf("%s_subscribe", namespace), - "version": "2.0", + "jsonrpc": "2.0", "params": []interface{}{"someSubscription", notificationCount, i}, } if err := out.Encode(&request); err != nil { diff --git a/rpc/testdata/internal-error.js b/rpc/testdata/internal-error.js new file mode 100644 index 0000000000..2ba387401f --- /dev/null +++ b/rpc/testdata/internal-error.js @@ -0,0 +1,7 @@ +// These tests trigger various 'internal error' conditions. + +--> {"jsonrpc":"2.0","id":1,"method":"test_marshalError","params": []} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32603,"message":"json: error calling MarshalText for type *rpc.MarshalErrObj: marshal error"}} + +--> {"jsonrpc":"2.0","id":2,"method":"test_panic","params": []} +<-- {"jsonrpc":"2.0","id":2,"error":{"code":-32603,"message":"method handler crashed"}} diff --git a/rpc/testdata/invalid-badversion.js b/rpc/testdata/invalid-badversion.js new file mode 100644 index 0000000000..75b5291dc3 --- /dev/null +++ b/rpc/testdata/invalid-badversion.js @@ -0,0 +1,19 @@ +// This test checks processing of messages with invalid Version. 
+ +--> {"jsonrpc":"2.0","id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"result":{"String":"x","Int":3,"Args":null}} + +--> {"jsonrpc":"2.1","id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} + +--> {"jsonrpc":"go-ethereum","id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} + +--> {"jsonrpc":1,"id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} + +--> {"jsonrpc":2.0,"id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} + +--> {"id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index e3c8d74c05..c8d0d36e21 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -80,6 +80,12 @@ func (testError) Error() string { return "testError" } func (testError) ErrorCode() int { return 444 } func (testError) ErrorData() interface{} { return "testError data" } +type MarshalErrObj struct{} + +func (o *MarshalErrObj) MarshalText() ([]byte, error) { + return nil, errors.New("marshal error") +} + func (s *testService) NoArgsRets() {} func (s *testService) Echo(str string, i int, args *echoArgs) echoResult { @@ -124,6 +130,14 @@ func (s *testService) ReturnError() error { return testError{} } +func (s *testService) MarshalError() *MarshalErrObj { + return &MarshalErrObj{} +} + +func (s *testService) Panic() string { + panic("service panic") +} + func (s *testService) CallMeBack(ctx context.Context, method string, args []interface{}) (interface{}, error) { c, ok := ClientFromContext(ctx) if !ok { diff --git a/rpc/websocket.go b/rpc/websocket.go index 9e75a0b41c..a20313155c 100644 --- a/rpc/websocket.go 
+++ b/rpc/websocket.go @@ -195,24 +195,23 @@ func parseOriginURL(origin string) (string, string, string, error) { return scheme, hostname, port, nil } -// DialWebsocketWithDialer creates a new RPC client that communicates with a JSON-RPC server -// that is listening on the given endpoint using the provided dialer. +// DialWebsocketWithDialer creates a new RPC client using WebSocket. +// +// The context is used for the initial connection establishment. It does not +// affect subsequent interactions with the client. +// +// Deprecated: use DialOptions and the WithWebsocketDialer option. func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, dialer websocket.Dialer) (*Client, error) { - endpoint, header, err := wsClientHeaders(endpoint, origin) + cfg := new(clientConfig) + cfg.wsDialer = &dialer + if origin != "" { + cfg.setHeader("origin", origin) + } + connect, err := newClientTransportWS(endpoint, cfg) if err != nil { return nil, err } - return newClient(ctx, func(ctx context.Context) (ServerCodec, error) { - conn, resp, err := dialer.DialContext(ctx, endpoint, header) - if err != nil { - hErr := wsHandshakeError{err: err} - if resp != nil { - hErr.status = resp.Status - } - return nil, hErr - } - return newWebsocketCodec(conn, endpoint, header), nil - }) + return newClient(ctx, connect) } // DialWebsocket creates a new RPC client that communicates with a JSON-RPC server @@ -221,12 +220,53 @@ func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, diale // The context is used for the initial connection establishment. It does not // affect subsequent interactions with the client. 
func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) { - dialer := websocket.Dialer{ - ReadBufferSize: wsReadBuffer, - WriteBufferSize: wsWriteBuffer, - WriteBufferPool: wsBufferPool, + cfg := new(clientConfig) + if origin != "" { + cfg.setHeader("origin", origin) + } + connect, err := newClientTransportWS(endpoint, cfg) + if err != nil { + return nil, err + } + return newClient(ctx, connect) +} + +func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, error) { + dialer := cfg.wsDialer + if dialer == nil { + dialer = &websocket.Dialer{ + ReadBufferSize: wsReadBuffer, + WriteBufferSize: wsWriteBuffer, + WriteBufferPool: wsBufferPool, + } + } + + dialURL, header, err := wsClientHeaders(endpoint, "") + if err != nil { + return nil, err + } + for key, values := range cfg.httpHeaders { + header[key] = values + } + + connect := func(ctx context.Context) (ServerCodec, error) { + header := header.Clone() + if cfg.httpAuth != nil { + if err := cfg.httpAuth(header); err != nil { + return nil, err + } + } + conn, resp, err := dialer.DialContext(ctx, dialURL, header) + if err != nil { + hErr := wsHandshakeError{err: err} + if resp != nil { + hErr.status = resp.Status + } + return nil, hErr + } + return newWebsocketCodec(conn, dialURL, header), nil } - return DialWebsocketWithDialer(ctx, endpoint, origin, dialer) + return connect, nil } func wsClientHeaders(endpoint, origin string) (string, http.Header, error) { diff --git a/sync/handlers/handler.go b/sync/handlers/handler.go index bc872e1c49..6254801cad 100644 --- a/sync/handlers/handler.go +++ b/sync/handlers/handler.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/trie" @@ -41,6 +42,7 @@ type syncHandler 
struct { // NewSyncHandler constructs the handler for serving state sync. func NewSyncHandler( provider SyncDataProvider, + diskDB ethdb.KeyValueReader, evmTrieDB *trie.Database, atomicTrieDB *trie.Database, networkCodec codec.Manager, @@ -50,7 +52,7 @@ func NewSyncHandler( stateTrieLeafsRequestHandler: NewLeafsRequestHandler(evmTrieDB, provider, networkCodec, stats), atomicTrieLeafsRequestHandler: NewLeafsRequestHandler(atomicTrieDB, nil, networkCodec, stats), blockRequestHandler: NewBlockRequestHandler(provider, networkCodec, stats), - codeRequestHandler: NewCodeRequestHandler(evmTrieDB.DiskDB(), networkCodec, stats), + codeRequestHandler: NewCodeRequestHandler(diskDB, networkCodec, stats), } } diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index 13d58c0caf..5f6942ec67 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -35,7 +35,7 @@ var errInterrupted = errors.New("interrupted sync") type syncTest struct { ctx context.Context - prepareForTest func(t *testing.T) (clientDB ethdb.Database, serverTrieDB *trie.Database, syncRoot common.Hash) + prepareForTest func(t *testing.T) (clientDB ethdb.Database, serverDB ethdb.Database, serverTrieDB *trie.Database, syncRoot common.Hash) expectedError error GetLeafsIntercept func(message.LeafsRequest, message.LeafsResponse) (message.LeafsResponse, error) GetCodeIntercept func([]common.Hash, [][]byte) ([][]byte, error) @@ -47,9 +47,9 @@ func testSync(t *testing.T, test syncTest) { if test.ctx != nil { ctx = test.ctx } - clientDB, serverTrieDB, root := test.prepareForTest(t) + clientDB, serverDB, serverTrieDB, root := test.prepareForTest(t) leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) - codeRequestHandler := handlers.NewCodeRequestHandler(serverTrieDB.DiskDB(), message.Codec, handlerstats.NewNoopHandlerStats()) + codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, 
handlerstats.NewNoopHandlerStats()) mockClient := statesyncclient.NewMockClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil) // Set intercept functions for the mock client mockClient.GetLeafsIntercept = test.GetLeafsIntercept @@ -73,7 +73,7 @@ func testSync(t *testing.T, test syncTest) { return } - assertDBConsistency(t, root, serverTrieDB, trie.NewDatabase(clientDB)) + assertDBConsistency(t, root, clientDB, serverTrieDB, trie.NewDatabase(clientDB)) } // testSyncResumes tests a series of syncTests work as expected, invoking a callback function after each @@ -117,15 +117,17 @@ func TestSimpleSyncCases(t *testing.T) { ) tests := map[string]syncTest{ "accounts": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, nil) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, }, "accounts with code": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { if index%3 == 0 { codeBytes := make([]byte, 256) @@ -135,24 +137,26 @@ func TestSimpleSyncCases(t *testing.T) { } codeHash := crypto.Keccak256Hash(codeBytes) - rawdb.WriteCode(serverTrieDB.DiskDB(), codeHash, codeBytes) + rawdb.WriteCode(serverDB, codeHash, codeBytes) account.CodeHash = codeHash[:] } return account }) - return 
memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, }, "accounts with code and storage": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) - root := fillAccountsWithStorage(t, serverTrieDB, common.Hash{}, numAccounts) - return memorydb.New(), serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) + root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, numAccounts) + return memorydb.New(), serverDB, serverTrieDB, root }, }, "accounts with storage": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { if i%5 == 0 { account.Root, _, _ = trie.GenerateTrie(t, serverTrieDB, 16, common.HashLength) @@ -160,21 +164,23 @@ func TestSimpleSyncCases(t *testing.T) { return account }) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, }, "accounts with overlapping storage": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, numAccounts, 3) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), 
serverDB, serverTrieDB, root }, }, "failed to fetch leafs": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccountsSmall, nil) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, GetLeafsIntercept: func(_ message.LeafsRequest, _ message.LeafsResponse) (message.LeafsResponse, error) { return message.LeafsResponse{}, clientErr @@ -182,10 +188,11 @@ func TestSimpleSyncCases(t *testing.T) { expectedError: clientErr, }, "failed to fetch code": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) - root := fillAccountsWithStorage(t, serverTrieDB, common.Hash{}, numAccountsSmall) - return memorydb.New(), serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) + root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, numAccountsSmall) + return memorydb.New(), serverDB, serverTrieDB, root }, GetCodeIntercept: func(_ []common.Hash, _ [][]byte) ([][]byte, error) { return nil, clientErr @@ -202,15 +209,16 @@ func TestSimpleSyncCases(t *testing.T) { } func TestCancelSync(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) // Create trie with 2000 accounts (more than one leaf request) - root := fillAccountsWithStorage(t, serverTrieDB, common.Hash{}, 2000) + root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, 2000) ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() testSync(t, syncTest{ ctx: ctx, - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return memorydb.New(), serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return memorydb.New(), serverDB, serverTrieDB, root }, expectedError: context.Canceled, GetLeafsIntercept: func(_ message.LeafsRequest, lr message.LeafsResponse) (message.LeafsResponse, error) { @@ -242,7 +250,8 @@ func (i *interruptLeafsIntercept) getLeafsIntercept(request message.LeafsRequest } func TestResumeSyncAccountsTrieInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, 2000, 3) clientDB := memorydb.New() intercept := &interruptLeafsIntercept{ @@ -250,8 +259,8 @@ func TestResumeSyncAccountsTrieInterrupted(t *testing.T) { interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, @@ -260,14 +269,15 @@ func TestResumeSyncAccountsTrieInterrupted(t *testing.T) { assert.EqualValues(t, 2, intercept.numRequests) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, }) } func TestResumeSyncLargeStorageTrieInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := 
memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { @@ -283,22 +293,23 @@ func TestResumeSyncLargeStorageTrieInterrupted(t *testing.T) { interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, }) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, }) } func TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) largeStorageRoot1, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) largeStorageRoot2, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) @@ -321,8 +332,8 @@ func TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root1 + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root1 }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, @@ -331,14 +342,15 @@ func 
TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { <-snapshot.WipeSnapshot(clientDB, false) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root2 + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root2 }, }) } func TestResumeSyncLargeStorageTrieWithConsecutiveDuplicatesInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { @@ -354,22 +366,23 @@ func TestResumeSyncLargeStorageTrieWithConsecutiveDuplicatesInterrupted(t *testi interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, }) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, }) } func TestResumeSyncLargeStorageTrieWithSpreadOutDuplicatesInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) root, _ := trie.FillAccounts(t, 
serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { @@ -384,35 +397,34 @@ func TestResumeSyncLargeStorageTrieWithSpreadOutDuplicatesInterrupted(t *testing interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, }) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, }) } func TestResyncNewRootAfterDeletes(t *testing.T) { for name, test := range map[string]struct { - deleteBetweenSyncs func(*testing.T, common.Hash, *trie.Database) + deleteBetweenSyncs func(*testing.T, common.Hash, ethdb.Database) }{ "delete code": { - deleteBetweenSyncs: func(t *testing.T, _ common.Hash, clientTrieDB *trie.Database) { - db := clientTrieDB.DiskDB() + deleteBetweenSyncs: func(t *testing.T, _ common.Hash, clientDB ethdb.Database) { // delete code - it := db.NewIterator(rawdb.CodePrefix, nil) + it := clientDB.NewIterator(rawdb.CodePrefix, nil) defer it.Release() for it.Next() { if len(it.Key()) != len(rawdb.CodePrefix)+common.HashLength { continue } - if err := db.Delete(it.Key()); err != nil { + if err := clientDB.Delete(it.Key()); err != nil { t.Fatal(err) } } @@ -422,7 +434,8 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { }, }, "delete intermediate storage nodes": { - deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientTrieDB *trie.Database) { + deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientDB ethdb.Database) { + 
clientTrieDB := trie.NewDatabase(clientDB) tr, err := trie.New(common.Hash{}, root, clientTrieDB) if err != nil { t.Fatal(err) @@ -459,7 +472,8 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { }, }, "delete intermediate account trie nodes": { - deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientTrieDB *trie.Database) { + deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientDB ethdb.Database) { + clientTrieDB := trie.NewDatabase(clientDB) trie.CorruptTrie(t, clientTrieDB, root, 5) }, }, @@ -470,10 +484,11 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { } } -func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, common.Hash, *trie.Database)) { +func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, common.Hash, ethdb.Database)) { rand.Seed(1) clientDB := memorydb.New() - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root1, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, 1000, 3) root2, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, root1, 1000, 3) @@ -482,13 +497,13 @@ func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, testSyncResumes(t, []syncTest{ { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root1 + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root1 }, }, { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root2 + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root2 }, }, }, func() { @@ -500,6 +515,6 @@ func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, // delete snapshot first since this 
is not the responsibility of the EVM State Syncer <-snapshot.WipeSnapshot(clientDB, false) - deleteBetweenSyncs(t, root1, trie.NewDatabase(clientDB)) + deleteBetweenSyncs(t, root1, clientDB) }) } diff --git a/sync/statesync/test_sync.go b/sync/statesync/test_sync.go index 425507a8cc..74a3686a02 100644 --- a/sync/statesync/test_sync.go +++ b/sync/statesync/test_sync.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -22,8 +23,7 @@ import ( // assertDBConsistency checks [serverTrieDB] and [clientTrieDB] have the same EVM state trie at [root], // and that [clientTrieDB.DiskDB] has corresponding account & snapshot values. // Also verifies any code referenced by the EVM state is present in [clientTrieDB] and the hash is correct. -func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTrieDB *trie.Database) { - clientDB := clientTrieDB.DiskDB() +func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database, serverTrieDB, clientTrieDB *trie.Database) { numSnapshotAccounts := 0 accountIt := rawdb.IterateAccountSnapshots(clientDB) defer accountIt.Release() @@ -46,14 +46,14 @@ func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTri return err } // check snapshot consistency - snapshotVal := rawdb.ReadAccountSnapshot(clientTrieDB.DiskDB(), accHash) + snapshotVal := rawdb.ReadAccountSnapshot(clientDB, accHash) expectedSnapshotVal := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin) assert.Equal(t, expectedSnapshotVal, snapshotVal) // check code consistency if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { codeHash := common.BytesToHash(acc.CodeHash) - code := rawdb.ReadCode(clientTrieDB.DiskDB(), codeHash) + 
code := rawdb.ReadCode(clientDB, codeHash) actualHash := crypto.Keccak256Hash(code) assert.NotZero(t, len(code)) assert.Equal(t, codeHash, actualHash) @@ -75,7 +75,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTri // check storage trie and storage snapshot consistency trie.AssertTrieConsistency(t, acc.Root, serverTrieDB, clientTrieDB, func(key, val []byte) error { storageTrieLeavesCount++ - snapshotVal := rawdb.ReadStorageSnapshot(clientTrieDB.DiskDB(), accHash, common.BytesToHash(key)) + snapshotVal := rawdb.ReadStorageSnapshot(clientDB, accHash, common.BytesToHash(key)) assert.Equal(t, val, snapshotVal) return nil }) @@ -88,7 +88,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTri assert.Equal(t, trieAccountLeaves, numSnapshotAccounts) } -func fillAccountsWithStorage(t *testing.T, serverTrieDB *trie.Database, root common.Hash, numAccounts int) common.Hash { +func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB *trie.Database, root common.Hash, numAccounts int) common.Hash { newRoot, _ := trie.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { codeBytes := make([]byte, 256) _, err := rand.Read(codeBytes) @@ -97,7 +97,7 @@ func fillAccountsWithStorage(t *testing.T, serverTrieDB *trie.Database, root com } codeHash := crypto.Keccak256Hash(codeBytes) - rawdb.WriteCode(serverTrieDB.DiskDB(), codeHash, codeBytes) + rawdb.WriteCode(serverDB, codeHash, codeBytes) account.CodeHash = codeHash[:] // now create state trie diff --git a/trie/committer.go b/trie/committer.go index 535f2cf0d4..a430834dca 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -54,7 +54,8 @@ func newCommitter(owner common.Hash, collectLeaf bool) *committer { } } -// Commit collapses a node down into a hash node and inserts it into the database +// Commit collapses a node down into a hash node and returns it along with +// 
the modified nodeset. func (c *committer) Commit(n node) (hashNode, *NodeSet, error) { h, err := c.commit(nil, n) if err != nil { @@ -63,7 +64,7 @@ func (c *committer) Commit(n node) (hashNode, *NodeSet, error) { return h.(hashNode), c.nodes, nil } -// commit collapses a node down into a hash node and inserts it into the database +// commit collapses a node down into a hash node and returns it. func (c *committer) commit(path []byte, n node) (node, error) { // if this path is clean, use available cached data hash, dirty := n.cache() @@ -85,7 +86,8 @@ func (c *committer) commit(path []byte, n node) (node, error) { } collapsed.Val = childV } - // The key needs to be copied, since we're delivering it to database + // The key needs to be copied, since we're adding it to the + // modified nodeset. collapsed.Key = hexToCompact(cn.Key) hashedNode := c.store(path, collapsed) if hn, ok := hashedNode.(hashNode); ok { @@ -144,16 +146,16 @@ func (c *committer) commitChildren(path []byte, n *fullNode) ([17]node, error) { return children, nil } -// store hashes the node n and if we have a storage layer specified, it writes -// the key/value pair to it and tracks any node->child references as well as any -// node->external trie references. +// store hashes the node n and adds it to the modified nodeset. If leaf collection +// is enabled, leaf nodes will be tracked in the modified nodeset as well. func (c *committer) store(path []byte, n node) node { // Larger nodes are replaced by their hash and stored in the database. var hash, _ = n.cache() + // This was not generated - must be a small node stored in the parent. // In theory, we should check if the node is leaf here (embedded node - // usually is leaf node). But small value(less than 32bytes) is not - // our target(leaves in account trie only). + // usually is leaf node). But small value (less than 32bytes) is not + // our target (leaves in account trie only). 
if hash == nil { return n } diff --git a/trie/database.go b/trie/database.go index 343021ccb5..8d2e4cbf57 100644 --- a/trie/database.go +++ b/trie/database.go @@ -319,11 +319,6 @@ func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database return db } -// DiskDB retrieves the persistent storage backing the trie database. -func (db *Database) DiskDB() ethdb.KeyValueStore { - return db.diskdb -} - // insert inserts a simplified trie node into the memory database. // All nodes inserted by this function will be reference tracked // and in theory should only used for **trie nodes** insertion. diff --git a/trie/proof.go b/trie/proof.go index ddd0721f2b..a864b05be1 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -73,7 +73,7 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e var err error tn, err = t.resolveHash(n, prefix) if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in Trie.Prove", "err", err) return err } default: diff --git a/trie/secure_trie.go b/trie/secure_trie.go index bda17b977a..9927ada35c 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -27,8 +27,6 @@ package trie import ( - "fmt" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -45,14 +43,14 @@ func NewSecure(owner common.Hash, root common.Hash, db *Database) (*SecureTrie, return NewStateTrie(owner, root, db) } -// StateTrie wraps a trie with key hashing. In a secure trie, all +// StateTrie wraps a trie with key hashing. In a stateTrie trie, all // access operations hash the key using keccak256. This prevents // calling code from creating long chains of nodes that // increase the access time. // // Contrary to a regular trie, a StateTrie can only be created with // New and must have an attached database. The database also stores -// the preimage of each key. 
+// the preimage of each key if preimage recording is enabled. // // StateTrie is not safe for concurrent use. type StateTrie struct { @@ -63,20 +61,14 @@ type StateTrie struct { secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch } -// NewStateTrie creates a trie with an existing root node from a backing database -// and optional intermediate in-memory node pool. +// NewStateTrie creates a trie with an existing root node from a backing database. // // If root is the zero hash or the sha3 hash of an empty string, the // trie is initially empty. Otherwise, New will panic if db is nil // and returns MissingNodeError if the root node cannot be found. -// -// Accessing the trie loads nodes from the database or node pool on demand. -// Loaded nodes are kept around until their 'cache generation' expires. -// A new cache generation is created by each call to Commit. -// cachelimit sets the number of past cache generations to keep. func NewStateTrie(owner common.Hash, root common.Hash, db *Database) (*StateTrie, error) { if db == nil { - panic("trie.NewSecure called without a database") + panic("trie.NewStateTrie called without a database") } trie, err := New(owner, root, db) if err != nil { @@ -90,70 +82,53 @@ func NewStateTrie(owner common.Hash, root common.Hash, db *Database) (*StateTrie func (t *StateTrie) Get(key []byte) []byte { res, err := t.TryGet(key) if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in StateTrie.Get", "err", err) } return res } // TryGet returns the value for key stored in the trie. // The value bytes must not be modified by the caller. -// If a node was not found in the database, a MissingNodeError is returned. +// If the specified node is not in the trie, nil will be returned. +// If a trie node is not found in the database, a MissingNodeError is returned. 
func (t *StateTrie) TryGet(key []byte) ([]byte, error) { return t.trie.TryGet(t.hashKey(key)) } +// TryGetAccount attempts to retrieve an account with provided trie path. +// If the specified account is not in the trie, nil will be returned. +// If a trie node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) TryGetAccount(key []byte) (*types.StateAccount, error) { - var ret types.StateAccount res, err := t.trie.TryGet(t.hashKey(key)) - if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - return &ret, err - } - if res == nil { - return nil, nil + if res == nil || err != nil { + return nil, err } - err = rlp.DecodeBytes(res, &ret) - return &ret, err + ret := new(types.StateAccount) + err = rlp.DecodeBytes(res, ret) + return ret, err } // TryGetAccountWithPreHashedKey does the same thing as TryGetAccount, however // it expects a key that is already hashed. This constitutes an abstraction leak, // since the client code needs to know the key format. func (t *StateTrie) TryGetAccountWithPreHashedKey(key []byte) (*types.StateAccount, error) { - var ret types.StateAccount res, err := t.trie.TryGet(key) - if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - return &ret, err - } - if res == nil { - return nil, nil + if res == nil || err != nil { + return nil, err } - err = rlp.DecodeBytes(res, &ret) - return &ret, err + ret := new(types.StateAccount) + err = rlp.DecodeBytes(res, ret) + return ret, err } // TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not // possible to use keybyte-encoding as the path might contain odd nibbles. +// If the specified trie node is not in the trie, nil will be returned. +// If a trie node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) TryGetNode(path []byte) ([]byte, int, error) { return t.trie.TryGetNode(path) } -// TryUpdateAccount account will abstract the write of an account to the -// secure trie. 
-func (t *StateTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error { - hk := t.hashKey(key) - data, err := rlp.EncodeToBytes(acc) - if err != nil { - return err - } - if err := t.trie.TryUpdate(hk, data); err != nil { - return err - } - t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) - return nil -} - // Update associates key with value in the trie. Subsequent calls to // Get will return value. If value has length zero, any existing value // is deleted from the trie and calls to Get will return nil. @@ -162,7 +137,7 @@ func (t *StateTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error // stored in the trie. func (t *StateTrie) Update(key, value []byte) { if err := t.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in StateTrie.Update", "err", err) } } @@ -173,7 +148,7 @@ func (t *StateTrie) Update(key, value []byte) { // The value bytes must not be modified by the caller while they are // stored in the trie. // -// If a node was not found in the database, a MissingNodeError is returned. +// If a node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) TryUpdate(key, value []byte) error { hk := t.hashKey(key) err := t.trie.TryUpdate(hk, value) @@ -184,15 +159,31 @@ func (t *StateTrie) TryUpdate(key, value []byte) error { return nil } +// TryUpdateAccount account will abstract the write of an account to the +// secure trie. +func (t *StateTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error { + hk := t.hashKey(key) + data, err := rlp.EncodeToBytes(acc) + if err != nil { + return err + } + if err := t.trie.TryUpdate(hk, data); err != nil { + return err + } + t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) + return nil +} + // Delete removes any existing value for key from the trie. 
func (t *StateTrie) Delete(key []byte) { if err := t.TryDelete(key); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in StateTrie.Delete", "err", err) } } // TryDelete removes any existing value for key from the trie. -// If a node was not found in the database, a MissingNodeError is returned. +// If the specified trie node is not in the trie, nothing will be changed. +// If a node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) TryDelete(key []byte) error { hk := t.hashKey(key) delete(t.getSecKeyCache(), string(hk)) @@ -218,10 +209,10 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte { return t.preimages.preimage(common.BytesToHash(shaKey)) } -// Commit collects all dirty nodes in the trie and replace them with the -// corresponding node hash. All collected nodes(including dirty leaves if +// Commit collects all dirty nodes in the trie and replaces them with the +// corresponding node hash. All collected nodes (including dirty leaves if // collectLeaf is true) will be encapsulated into a nodeset for return. -// The returned nodeset can be nil if the trie is clean(nothing to commit). +// The returned nodeset can be nil if the trie is clean (nothing to commit). // All cached preimages will be also flushed if preimages recording is enabled. // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage @@ -237,7 +228,7 @@ func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { } t.secKeyCache = make(map[string][]byte) } - // Commit the trie to its intermediate node database + // Commit the trie and return its modified nodeset. 
return t.trie.Commit(collectLeaf) } diff --git a/trie/stacktrie.go b/trie/stacktrie.go index 18dd5bd3b3..99773e11b7 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -31,7 +31,6 @@ import ( "bytes" "encoding/gob" "errors" - "fmt" "io" "sync" @@ -217,7 +216,7 @@ func (st *StackTrie) TryUpdate(key, value []byte) error { func (st *StackTrie) Update(key, value []byte) { if err := st.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in StackTrie.Update", "err", err) } } diff --git a/trie/test_trie.go b/trie/test_trie.go index 4013d92a23..580ba15409 100644 --- a/trie/test_trie.go +++ b/trie/test_trie.go @@ -105,7 +105,7 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *Database, onLea // CorruptTrie deletes every [n]th trie node from the trie given by [root] from the trieDB. // Assumes that the trie given by root can be iterated without issue. func CorruptTrie(t *testing.T, trieDB *Database, root common.Hash, n int) { - batch := trieDB.DiskDB().NewBatch() + batch := trieDB.diskdb.NewBatch() // next delete some trie nodes tr, err := New(common.Hash{}, root, trieDB) if err != nil { diff --git a/trie/trie.go b/trie/trie.go index c6433bd692..5539f755e4 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -138,7 +138,7 @@ func (t *Trie) NodeIterator(start []byte) NodeIterator { func (t *Trie) Get(key []byte) []byte { res, err := t.TryGet(key) if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in Trie.Get", "err", err) } return res } @@ -275,7 +275,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new // stored in the trie. 
func (t *Trie) Update(key, value []byte) { if err := t.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in Trie.Update", "err", err) } } @@ -394,7 +394,7 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error // Delete removes any existing value for key from the trie. func (t *Trie) Delete(key []byte) { if err := t.TryDelete(key); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in Trie.Delete", "err", err) } } @@ -588,10 +588,10 @@ func (t *Trie) Hash() common.Hash { return common.BytesToHash(hash.(hashNode)) } -// Commit collects all dirty nodes in the trie and replace them with the -// corresponding node hash. All collected nodes(including dirty leaves if +// Commit collects all dirty nodes in the trie and replaces them with the +// corresponding node hash. All collected nodes (including dirty leaves if // collectLeaf is true) will be encapsulated into a nodeset for return. -// The returned nodeset can be nil if the trie is clean(nothing to commit). +// The returned nodeset can be nil if the trie is clean (nothing to commit). // Once the trie is committed, it's not usable anymore. 
A new trie must // be created with new root and updated trie database for following usage func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { diff --git a/trie/util_test.go b/trie/util_test.go index ad8a096626..95103747d5 100644 --- a/trie/util_test.go +++ b/trie/util_test.go @@ -79,7 +79,9 @@ func TestTrieTracer(t *testing.T) { // Commit the changes and re-create with new root root, nodes, _ := trie.Commit(false) - db.Update(NewWithNodeSet(nodes)) + if err := db.Update(NewWithNodeSet(nodes)); err != nil { + t.Fatal(err) + } trie, _ = New(common.Hash{}, root, db) trie.tracer = newTracer() From f27fa4762ff6d31c64f58a1d8bbbdf54e71470b2 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Wed, 29 Mar 2023 13:36:28 -0400 Subject: [PATCH 5/8] Remove DAO fork handling (not applicable) (#1234) --- consensus/misc/dao.go | 96 ------------------- core/chain_makers.go | 17 ---- core/dao_test.go | 201 ---------------------------------------- core/state_processor.go | 5 - miner/worker.go | 4 - params/dao.go | 168 --------------------------------- 6 files changed, 491 deletions(-) delete mode 100644 consensus/misc/dao.go delete mode 100644 core/dao_test.go delete mode 100644 params/dao.go diff --git a/consensus/misc/dao.go b/consensus/misc/dao.go deleted file mode 100644 index 95a3095e07..0000000000 --- a/consensus/misc/dao.go +++ /dev/null @@ -1,96 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package misc - -import ( - "bytes" - "errors" - "math/big" - - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" -) - -var ( - // ErrBadProDAOExtra is returned if a header doesn't support the DAO fork on a - // pro-fork client. - ErrBadProDAOExtra = errors.New("bad DAO pro-fork extra-data") - - // ErrBadNoDAOExtra is returned if a header does support the DAO fork on a no- - // fork client. - ErrBadNoDAOExtra = errors.New("bad DAO no-fork extra-data") -) - -// VerifyDAOHeaderExtraData validates the extra-data field of a block header to -// ensure it conforms to DAO hard-fork rules. -// -// DAO hard-fork extension to the header validity: -// -// - if the node is no-fork, do not accept blocks in the [fork, fork+10) range -// with the fork specific extra-data set. -// - if the node is pro-fork, require blocks in the specific range to have the -// unique extra-data set. 
-func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error { - // Short circuit validation if the node doesn't care about the DAO fork - if config.DAOForkBlock == nil { - return nil - } - // Make sure the block is within the fork's modified extra-data range - limit := new(big.Int).Add(config.DAOForkBlock, params.DAOForkExtraRange) - if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 { - return nil - } - // Depending on whether we support or oppose the fork, validate the extra-data contents - if config.DAOForkSupport { - if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) { - return ErrBadProDAOExtra - } - } else { - if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { - return ErrBadNoDAOExtra - } - } - // All ok, header has the same extra-data we expect - return nil -} - -// ApplyDAOHardFork modifies the state database according to the DAO hard-fork -// rules, transferring all balances of a set of DAO accounts to a single refund -// contract. 
-func ApplyDAOHardFork(statedb *state.StateDB) { - // Retrieve the contract to refund balances into - if !statedb.Exist(params.DAORefundContract) { - statedb.CreateAccount(params.DAORefundContract) - } - - // Move every DAO account and extra-balance account funds into the refund contract - for _, addr := range params.DAODrainList() { - statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr)) - statedb.SetBalance(addr, new(big.Int)) - } -} diff --git a/core/chain_makers.go b/core/chain_makers.go index a5753c33be..8f058f2d8d 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -32,7 +32,6 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/consensus/misc" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" @@ -223,22 +222,6 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b.header = makeHeader(chainreader, config, parent, gap, statedb, b.engine) - // Mutate the state and block according to any hard-fork specs - timestamp := new(big.Int).SetUint64(b.header.Time) - if !config.IsApricotPhase3(timestamp) { - // avoid dynamic fee extra data override - if daoBlock := config.DAOForkBlock; daoBlock != nil { - limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) - if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 { - if config.DAOForkSupport { - b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra) - } - } - } - } - if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 { - misc.ApplyDAOHardFork(statedb) - } // Execute any user modifications to the block if gen != nil { gen(i, b) diff --git a/core/dao_test.go b/core/dao_test.go deleted file mode 100644 index c349f29cad..0000000000 --- 
a/core/dao_test.go +++ /dev/null @@ -1,201 +0,0 @@ -// (c) 2021-2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "math/big" - "testing" - - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ethereum/go-ethereum/common" -) - -// setDAOForkBlock makes a copy of [cfg] and assigns the DAO fork block to [forkBlock]. -// This is necessary for testing since coreth restricts the DAO fork to be enabled at -// genesis only. -func setDAOForkBlock(cfg *params.ChainConfig, forkBlock *big.Int) *params.ChainConfig { - config := *cfg - config.DAOForkBlock = forkBlock - return &config -} - -// Tests that DAO-fork enabled clients can properly filter out fork-commencing -// blocks based on their extradata fields. 
-func TestDAOForkRangeExtradata(t *testing.T) { - forkBlock := big.NewInt(32) - chainConfig := *params.TestApricotPhase2Config - chainConfig.DAOForkBlock = nil - - // Generate a common prefix for both pro-forkers and non-forkers - gspec := &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: &chainConfig, - } - genDb, prefix, _, _ := GenerateChainWithGenesis(gspec, dummy.NewFaker(), int(forkBlock.Int64()-1), 10, func(i int, gen *BlockGen) {}) - - // Create the concurrent, conflicting two nodes - proDb := rawdb.NewMemoryDatabase() - proConf := *params.TestApricotPhase2Config - proConf.DAOForkSupport = true - - progspec := &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: &proConf, - } - proBc, _ := NewBlockChain(proDb, DefaultCacheConfig, progspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) - proBc.chainConfig = setDAOForkBlock(proBc.chainConfig, forkBlock) - defer proBc.Stop() - - conDb := rawdb.NewMemoryDatabase() - conConf := *params.TestApricotPhase2Config - conConf.DAOForkSupport = false - congspec := &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: &conConf, - } - conBc, _ := NewBlockChain(conDb, DefaultCacheConfig, congspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) - conBc.chainConfig = setDAOForkBlock(conBc.chainConfig, forkBlock) - defer conBc.Stop() - - if _, err := proBc.InsertChain(prefix); err != nil { - t.Fatalf("pro-fork: failed to import chain prefix: %v", err) - } - if _, err := conBc.InsertChain(prefix); err != nil { - t.Fatalf("con-fork: failed to import chain prefix: %v", err) - } - // Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks - for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ { - // Create a pro-fork block, and try to feed into the no-fork chain - bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, congspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) - 
bc.chainConfig = setDAOForkBlock(bc.chainConfig, forkBlock) - defer bc.Stop() - - blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import contra-fork chain for expansion: %v", err) - } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { - t.Fatalf("failed to commit contra-fork head for expansion: %v", err) - } - blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err != nil { - t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0]) - } - // Create a proper no-fork block for the contra-forker - blocks, _, _ = GenerateChain(&conConf, conBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err != nil { - t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err) - } - // Create a no-fork block, and try to feed into the pro-fork chain - bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, progspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) - defer bc.Stop() - - blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import pro-fork chain for expansion: %v", err) - } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { - t.Fatalf("failed to commit pro-fork head for expansion: %v", err) - } - blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i 
int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err != nil { - t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0]) - } - // Create a proper pro-fork block for the pro-forker - blocks, _, _ = GenerateChain(&proConf, proBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err != nil { - t.Fatalf("pro-fork chain didn't accepted pro-fork block: %v", err) - } - } - // Verify that contra-forkers accept pro-fork extra-datas after forking finishes - bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, congspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) - defer bc.Stop() - - blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import contra-fork chain for expansion: %v", err) - } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { - t.Fatalf("failed to commit contra-fork head for expansion: %v", err) - } - blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err != nil { - t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err) - } - // Verify that pro-forkers accept contra-fork extra-datas after forking finishes - bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, progspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) - defer bc.Stop() - - blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - 
t.Fatalf("failed to import pro-fork chain for expansion: %v", err) - } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { - t.Fatalf("failed to commit pro-fork head for expansion: %v", err) - } - blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), genDb, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err != nil { - t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err) - } -} - -func TestDAOForkSupportPostApricotPhase3(t *testing.T) { - forkBlock := big.NewInt(0) - - conf := *params.TestChainConfig - conf.DAOForkSupport = true - conf.DAOForkBlock = forkBlock - - db := rawdb.NewMemoryDatabase() - gspec := &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: &conf, - } - bc, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) - defer bc.Stop() - - _, blocks, _, _ := GenerateChainWithGenesis(gspec, dummy.NewFaker(), 32, 10, func(i int, gen *BlockGen) {}) - - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import blocks: %v", err) - } -} diff --git a/core/state_processor.go b/core/state_processor.go index c6874e31aa..68d118fd6b 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -31,7 +31,6 @@ import ( "math/big" "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/misc" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" @@ -81,10 +80,6 @@ func (p *StateProcessor) Process(block *types.Block, parent *types.Header, state // Configure any stateful precompiles that should go into effect during this block. 
p.config.CheckConfigurePrecompiles(new(big.Int).SetUint64(parent.Time), block, statedb) - // Mutate the block and state according to any hard-fork specs - if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { - misc.ApplyDAOHardFork(statedb) - } blockContext := NewEVMBlockContext(header, p.bc, nil) vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) // Iterate over and process the individual transactions diff --git a/miner/worker.go b/miner/worker.go index 27f9bb9bf4..c05e66998b 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -40,7 +40,6 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/consensus/misc" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" @@ -168,9 +167,6 @@ func (w *worker) commitNewWork() (*types.Block, error) { if err != nil { return nil, fmt.Errorf("failed to create new current environment: %w", err) } - if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { - misc.ApplyDAOHardFork(env.state) - } // Configure any stateful precompiles that should go into effect during this block. w.chainConfig.CheckConfigurePrecompiles(new(big.Int).SetUint64(parent.Time()), types.NewBlockWithHeader(header), env.state) diff --git a/params/dao.go b/params/dao.go deleted file mode 100644 index 3d76d34e4a..0000000000 --- a/params/dao.go +++ /dev/null @@ -1,168 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. 
-// ********** -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" -) - -// DAOForkBlockExtra is the block header extra-data field to set for the DAO fork -// point and a number of consecutive blocks to allow fast/light syncers to correctly -// pick the side they want ("dao-hard-fork"). -var DAOForkBlockExtra = common.FromHex("0x64616f2d686172642d666f726b") - -// DAOForkExtraRange is the number of consecutive blocks from the DAO fork point -// to override the extra-data in to prevent no-fork attacks. -var DAOForkExtraRange = big.NewInt(10) - -// DAORefundContract is the address of the refund contract to send DAO balances to. -var DAORefundContract = common.HexToAddress("0xbf4ed7b27f1d666546e30d74d50d173d20bca754") - -// DAODrainList is the list of accounts whose full balances will be moved into a -// refund contract at the beginning of the dao-fork block. 
-func DAODrainList() []common.Address { - return []common.Address{ - common.HexToAddress("0xd4fe7bc31cedb7bfb8a345f31e668033056b2728"), - common.HexToAddress("0xb3fb0e5aba0e20e5c49d252dfd30e102b171a425"), - common.HexToAddress("0x2c19c7f9ae8b751e37aeb2d93a699722395ae18f"), - common.HexToAddress("0xecd135fa4f61a655311e86238c92adcd779555d2"), - common.HexToAddress("0x1975bd06d486162d5dc297798dfc41edd5d160a7"), - common.HexToAddress("0xa3acf3a1e16b1d7c315e23510fdd7847b48234f6"), - common.HexToAddress("0x319f70bab6845585f412ec7724b744fec6095c85"), - common.HexToAddress("0x06706dd3f2c9abf0a21ddcc6941d9b86f0596936"), - common.HexToAddress("0x5c8536898fbb74fc7445814902fd08422eac56d0"), - common.HexToAddress("0x6966ab0d485353095148a2155858910e0965b6f9"), - common.HexToAddress("0x779543a0491a837ca36ce8c635d6154e3c4911a6"), - common.HexToAddress("0x2a5ed960395e2a49b1c758cef4aa15213cfd874c"), - common.HexToAddress("0x5c6e67ccd5849c0d29219c4f95f1a7a93b3f5dc5"), - common.HexToAddress("0x9c50426be05db97f5d64fc54bf89eff947f0a321"), - common.HexToAddress("0x200450f06520bdd6c527622a273333384d870efb"), - common.HexToAddress("0xbe8539bfe837b67d1282b2b1d61c3f723966f049"), - common.HexToAddress("0x6b0c4d41ba9ab8d8cfb5d379c69a612f2ced8ecb"), - common.HexToAddress("0xf1385fb24aad0cd7432824085e42aff90886fef5"), - common.HexToAddress("0xd1ac8b1ef1b69ff51d1d401a476e7e612414f091"), - common.HexToAddress("0x8163e7fb499e90f8544ea62bbf80d21cd26d9efd"), - common.HexToAddress("0x51e0ddd9998364a2eb38588679f0d2c42653e4a6"), - common.HexToAddress("0x627a0a960c079c21c34f7612d5d230e01b4ad4c7"), - common.HexToAddress("0xf0b1aa0eb660754448a7937c022e30aa692fe0c5"), - common.HexToAddress("0x24c4d950dfd4dd1902bbed3508144a54542bba94"), - common.HexToAddress("0x9f27daea7aca0aa0446220b98d028715e3bc803d"), - common.HexToAddress("0xa5dc5acd6a7968a4554d89d65e59b7fd3bff0f90"), - common.HexToAddress("0xd9aef3a1e38a39c16b31d1ace71bca8ef58d315b"), - common.HexToAddress("0x63ed5a272de2f6d968408b4acb9024f4cc208ebf"), 
- common.HexToAddress("0x6f6704e5a10332af6672e50b3d9754dc460dfa4d"), - common.HexToAddress("0x77ca7b50b6cd7e2f3fa008e24ab793fd56cb15f6"), - common.HexToAddress("0x492ea3bb0f3315521c31f273e565b868fc090f17"), - common.HexToAddress("0x0ff30d6de14a8224aa97b78aea5388d1c51c1f00"), - common.HexToAddress("0x9ea779f907f0b315b364b0cfc39a0fde5b02a416"), - common.HexToAddress("0xceaeb481747ca6c540a000c1f3641f8cef161fa7"), - common.HexToAddress("0xcc34673c6c40e791051898567a1222daf90be287"), - common.HexToAddress("0x579a80d909f346fbfb1189493f521d7f48d52238"), - common.HexToAddress("0xe308bd1ac5fda103967359b2712dd89deffb7973"), - common.HexToAddress("0x4cb31628079fb14e4bc3cd5e30c2f7489b00960c"), - common.HexToAddress("0xac1ecab32727358dba8962a0f3b261731aad9723"), - common.HexToAddress("0x4fd6ace747f06ece9c49699c7cabc62d02211f75"), - common.HexToAddress("0x440c59b325d2997a134c2c7c60a8c61611212bad"), - common.HexToAddress("0x4486a3d68fac6967006d7a517b889fd3f98c102b"), - common.HexToAddress("0x9c15b54878ba618f494b38f0ae7443db6af648ba"), - common.HexToAddress("0x27b137a85656544b1ccb5a0f2e561a5703c6a68f"), - common.HexToAddress("0x21c7fdb9ed8d291d79ffd82eb2c4356ec0d81241"), - common.HexToAddress("0x23b75c2f6791eef49c69684db4c6c1f93bf49a50"), - common.HexToAddress("0x1ca6abd14d30affe533b24d7a21bff4c2d5e1f3b"), - common.HexToAddress("0xb9637156d330c0d605a791f1c31ba5890582fe1c"), - common.HexToAddress("0x6131c42fa982e56929107413a9d526fd99405560"), - common.HexToAddress("0x1591fc0f688c81fbeb17f5426a162a7024d430c2"), - common.HexToAddress("0x542a9515200d14b68e934e9830d91645a980dd7a"), - common.HexToAddress("0xc4bbd073882dd2add2424cf47d35213405b01324"), - common.HexToAddress("0x782495b7b3355efb2833d56ecb34dc22ad7dfcc4"), - common.HexToAddress("0x58b95c9a9d5d26825e70a82b6adb139d3fd829eb"), - common.HexToAddress("0x3ba4d81db016dc2890c81f3acec2454bff5aada5"), - common.HexToAddress("0xb52042c8ca3f8aa246fa79c3feaa3d959347c0ab"), - 
common.HexToAddress("0xe4ae1efdfc53b73893af49113d8694a057b9c0d1"), - common.HexToAddress("0x3c02a7bc0391e86d91b7d144e61c2c01a25a79c5"), - common.HexToAddress("0x0737a6b837f97f46ebade41b9bc3e1c509c85c53"), - common.HexToAddress("0x97f43a37f595ab5dd318fb46e7a155eae057317a"), - common.HexToAddress("0x52c5317c848ba20c7504cb2c8052abd1fde29d03"), - common.HexToAddress("0x4863226780fe7c0356454236d3b1c8792785748d"), - common.HexToAddress("0x5d2b2e6fcbe3b11d26b525e085ff818dae332479"), - common.HexToAddress("0x5f9f3392e9f62f63b8eac0beb55541fc8627f42c"), - common.HexToAddress("0x057b56736d32b86616a10f619859c6cd6f59092a"), - common.HexToAddress("0x9aa008f65de0b923a2a4f02012ad034a5e2e2192"), - common.HexToAddress("0x304a554a310c7e546dfe434669c62820b7d83490"), - common.HexToAddress("0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79"), - common.HexToAddress("0x4deb0033bb26bc534b197e61d19e0733e5679784"), - common.HexToAddress("0x07f5c1e1bc2c93e0402f23341973a0e043f7bf8a"), - common.HexToAddress("0x35a051a0010aba705c9008d7a7eff6fb88f6ea7b"), - common.HexToAddress("0x4fa802324e929786dbda3b8820dc7834e9134a2a"), - common.HexToAddress("0x9da397b9e80755301a3b32173283a91c0ef6c87e"), - common.HexToAddress("0x8d9edb3054ce5c5774a420ac37ebae0ac02343c6"), - common.HexToAddress("0x0101f3be8ebb4bbd39a2e3b9a3639d4259832fd9"), - common.HexToAddress("0x5dc28b15dffed94048d73806ce4b7a4612a1d48f"), - common.HexToAddress("0xbcf899e6c7d9d5a215ab1e3444c86806fa854c76"), - common.HexToAddress("0x12e626b0eebfe86a56d633b9864e389b45dcb260"), - common.HexToAddress("0xa2f1ccba9395d7fcb155bba8bc92db9bafaeade7"), - common.HexToAddress("0xec8e57756626fdc07c63ad2eafbd28d08e7b0ca5"), - common.HexToAddress("0xd164b088bd9108b60d0ca3751da4bceb207b0782"), - common.HexToAddress("0x6231b6d0d5e77fe001c2a460bd9584fee60d409b"), - common.HexToAddress("0x1cba23d343a983e9b5cfd19496b9a9701ada385f"), - common.HexToAddress("0xa82f360a8d3455c5c41366975bde739c37bfeb8a"), - common.HexToAddress("0x9fcd2deaff372a39cc679d5c5e4de7bafb0b1339"), 
- common.HexToAddress("0x005f5cee7a43331d5a3d3eec71305925a62f34b6"), - common.HexToAddress("0x0e0da70933f4c7849fc0d203f5d1d43b9ae4532d"), - common.HexToAddress("0xd131637d5275fd1a68a3200f4ad25c71a2a9522e"), - common.HexToAddress("0xbc07118b9ac290e4622f5e77a0853539789effbe"), - common.HexToAddress("0x47e7aa56d6bdf3f36be34619660de61275420af8"), - common.HexToAddress("0xacd87e28b0c9d1254e868b81cba4cc20d9a32225"), - common.HexToAddress("0xadf80daec7ba8dcf15392f1ac611fff65d94f880"), - common.HexToAddress("0x5524c55fb03cf21f549444ccbecb664d0acad706"), - common.HexToAddress("0x40b803a9abce16f50f36a77ba41180eb90023925"), - common.HexToAddress("0xfe24cdd8648121a43a7c86d289be4dd2951ed49f"), - common.HexToAddress("0x17802f43a0137c506ba92291391a8a8f207f487d"), - common.HexToAddress("0x253488078a4edf4d6f42f113d1e62836a942cf1a"), - common.HexToAddress("0x86af3e9626fce1957c82e88cbf04ddf3a2ed7915"), - common.HexToAddress("0xb136707642a4ea12fb4bae820f03d2562ebff487"), - common.HexToAddress("0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940"), - common.HexToAddress("0xf14c14075d6c4ed84b86798af0956deef67365b5"), - common.HexToAddress("0xca544e5c4687d109611d0f8f928b53a25af72448"), - common.HexToAddress("0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c"), - common.HexToAddress("0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7"), - common.HexToAddress("0x6d87578288b6cb5549d5076a207456a1f6a63dc0"), - common.HexToAddress("0xb2c6f0dfbb716ac562e2d85d6cb2f8d5ee87603e"), - common.HexToAddress("0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6"), - common.HexToAddress("0x2b3455ec7fedf16e646268bf88846bd7a2319bb2"), - common.HexToAddress("0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a"), - common.HexToAddress("0xd343b217de44030afaa275f54d31a9317c7f441e"), - common.HexToAddress("0x84ef4b2357079cd7a7c69fd7a37cd0609a679106"), - common.HexToAddress("0xda2fef9e4a3230988ff17df2165440f37e8b1708"), - common.HexToAddress("0xf4c64518ea10f995918a454158c6b61407ea345c"), - 
common.HexToAddress("0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97"), - common.HexToAddress("0xbb9bc244d798123fde783fcc1c72d3bb8c189413"), - common.HexToAddress("0x807640a13483f8ac783c557fcdf27be11ea4ac7a"), - } -} From dce18eb9ca551d4a650c9f203c58679e1ddad3e3 Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Tue, 11 Apr 2023 13:57:47 -0400 Subject: [PATCH 6/8] Cortina activation mainnet (#1246) * Add Cortina activation timestamp for April 6 (#241) * Bump version to v0.12.0 and bump ago dep to v1.9.16 (#242) * Bump version to v0.12.0 and bump ago dep to v1.9.16 * update gh actions * Add mainnet activation timestamp --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index c543fd11c4..da8db97ca4 100644 --- a/params/config.go +++ b/params/config.go @@ -74,7 +74,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC).Unix()), ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 7, 3, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC).Unix()), - // TODO Add Cortina timestamp + CortinaBlockTimestamp: big.NewInt(time.Date(2023, time.April, 25, 15, 0, 0, 0, time.UTC).Unix()), // TODO Add DUpgrade timestamp } From b057c884b8d3fb2ba0751146bbd249c4094a05ae Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Wed, 12 Apr 2023 14:13:50 -0400 Subject: [PATCH 7/8] Add v0.12.0 release notes (#243) --- RELEASES.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/RELEASES.md b/RELEASES.md index cf484b388b..a750fcccad 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,10 @@ # Release Notes +## [v0.12.0](https://github.com/ava-labs/coreth/releases/tag/v0.12.0) + +- Increase C-Chain block gas limit to 15M in Cortina +- Add Mainnet and Fuji Cortina Activation timestamps + ## [v0.11.9](https://github.com/ava-labs/coreth/releases/tag/v0.11.9) - Downgrade SetPreference log 
from warn to debug

From d71f828d8ef564f03ce1f190d42cb911d34afbc5 Mon Sep 17 00:00:00 2001
From: evlekht
Date: Fri, 9 Feb 2024 21:03:32 +0400
Subject: [PATCH 8/8] fixes

---
 accounts/abi/bind/backends/simulated.go | 68 ++++++++++++++----
 contracts/camino_smart_contracts_test.go | 88 ++----------------------
 core/blockchain.go | 12 ++--
 core/blockchain_reader.go | 2 +-
 core/chain_makers.go | 1 -
 eth/backend.go | 5 +-
 params/config.go | 3 +-
 scripts/versions.sh | 4 +-
 8 files changed, 74 insertions(+), 109 deletions(-)

diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 5681a8be92..16d191b3bb 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -41,12 +41,12 @@ import (
 	"github.com/ava-labs/coreth/accounts/abi/bind"
 	"github.com/ava-labs/coreth/consensus/dummy"
 	"github.com/ava-labs/coreth/core"
-	"github.com/ava-labs/coreth/core/admin"
 	"github.com/ava-labs/coreth/core/bloombits"
 	"github.com/ava-labs/coreth/core/rawdb"
 	"github.com/ava-labs/coreth/core/state"
 	"github.com/ava-labs/coreth/core/types"
 	"github.com/ava-labs/coreth/core/vm"
+	"github.com/ava-labs/coreth/eth/ethadmin"
 	"github.com/ava-labs/coreth/eth/filters"
 	"github.com/ava-labs/coreth/ethdb"
 	"github.com/ava-labs/coreth/interfaces"
@@ -104,10 +104,49 @@ type SimulatedBackend struct {
 	config *params.ChainConfig
 }
 
+// NewSimulatedBackendWithDatabaseAndChainConfig creates a new binding backend based on the given database
+// and uses a simulated blockchain for testing purposes.
+// A simulated backend always uses chainID 1337.
+func NewSimulatedBackendWithDatabaseAndChainConfig(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64, addr common.Address, cpcfg *params.ChainConfig) *SimulatedBackend {
+	cpcfg.ChainID = big.NewInt(1337)
+	genesis := core.Genesis{
+		Config:       cpcfg,
+		GasLimit:     gasLimit,
+		Alloc:        alloc,
+		InitialAdmin: addr,
+	}
+	if addr.String() != "0x0000000000000000000000000000000000000000" {
+		genesis.PreDeploy()
+	}
+	cacheConfig := &core.CacheConfig{}
+	blockchain, _ := core.NewBlockChain(database, cacheConfig, &genesis, dummy.NewFaker(), vm.Config{}, common.Hash{}, false)
+	backend := &SimulatedBackend{
+		database:   database,
+		blockchain: blockchain,
+		config:     genesis.Config,
+	}
+	adminCtrl := ethadmin.NewController(backend, blockchain.Config())
+	blockchain.SetAdminController(adminCtrl)
+
+	filterBackend := &filterBackend{database, blockchain, backend}
+	backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
+	backend.events = filters.NewEventSystem(backend.filterSystem, false)
+
+	backend.rollback(blockchain.CurrentBlock())
+	return backend
+}
+
+// NewSimulatedBackendWithChainConfig creates a new binding backend with the given chain config
+// and uses a simulated blockchain for testing purposes.
+// A simulated backend always uses chainID 1337.
+func NewSimulatedBackendWithChainConfig(alloc core.GenesisAlloc, gasLimit uint64, addr common.Address, cpcfg *params.ChainConfig) *SimulatedBackend {
+	return NewSimulatedBackendWithDatabaseAndChainConfig(rawdb.NewMemoryDatabase(), alloc, gasLimit, addr, cpcfg)
+}
+
 // NewSimulatedBackendWithDatabase creates a new binding backend based on the given database
 // and uses a simulated blockchain for testing purposes.
 // A simulated backend always uses chainID 1337.
-func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64, addr common.Address, ctrl admin.AdminController) *SimulatedBackend { +func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64, addr common.Address) *SimulatedBackend { cpcfg := params.TestChainConfig cpcfg.ChainID = big.NewInt(1337) genesis := core.Genesis{ @@ -119,15 +158,15 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis if addr.String() != "0x0000000000000000000000000000000000000000" { genesis.PreDeploy() } - genesis.MustCommit(database) cacheConfig := &core.CacheConfig{} - blockchain, _ := core.NewBlockChain(database, cacheConfig, &genesis, dummy.NewFaker(), vm.Config{AdminContoller: ctrl}, common.Hash{}, false) - + blockchain, _ := core.NewBlockChain(database, cacheConfig, &genesis, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) backend := &SimulatedBackend{ database: database, blockchain: blockchain, config: genesis.Config, } + adminCtrl := ethadmin.NewController(backend, blockchain.Config()) + blockchain.SetAdminController(adminCtrl) filterBackend := &filterBackend{database, blockchain, backend} backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{}) @@ -141,21 +180,14 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis // for testing purposes. // A simulated backend always uses chainID 1337. func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { - return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit, common.Address{}, nil) + return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit, common.Address{}) } // NewSimulatedBackendWithInitialAdmin creates a new binding backend using a simulated blockchain // for testing purposes. // A simulated backend always uses chainID 1337. 
func NewSimulatedBackendWithInitialAdmin(alloc core.GenesisAlloc, gasLimit uint64, addr common.Address) *SimulatedBackend { - return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit, addr, nil) -} - -// NewSimulatedBackendWithInitialAdmin creates a new binding backend using a simulated blockchain -// for testing purposes. -// A simulated backend always uses chainID 1337. -func NewSimulatedBackendWithInitialAdminAndAdminController(alloc core.GenesisAlloc, gasLimit uint64, addr common.Address, ctrl admin.AdminController) *SimulatedBackend { - return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit, addr, ctrl) + return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit, addr) } // Close terminates the underlying blockchain's update loop. @@ -869,6 +901,14 @@ func (b *SimulatedBackend) Blockchain() *core.BlockChain { return b.blockchain } +func (b *SimulatedBackend) StateByHeader(ctx context.Context, header *types.Header) (*state.StateDB, error) { + return b.blockchain.StateAt(header.Root) +} + +func (b *SimulatedBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { + return b.blockchain.SubscribeChainHeadEvent(ch) +} + // callMsg implements core.Message to allow passing it as a transaction simulator. 
type callMsg struct { interfaces.CallMsg diff --git a/contracts/camino_smart_contracts_test.go b/contracts/camino_smart_contracts_test.go index 9ea30cf797..24fcc3dc8f 100644 --- a/contracts/camino_smart_contracts_test.go +++ b/contracts/camino_smart_contracts_test.go @@ -5,7 +5,6 @@ package contracts import ( "context" - "crypto/rand" "math/big" "strings" "testing" @@ -13,24 +12,15 @@ import ( "github.com/ava-labs/coreth/interfaces" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/assert" - "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/coreth/accounts/abi" "github.com/ava-labs/coreth/accounts/abi/bind" "github.com/ava-labs/coreth/accounts/abi/bind/backends" - "github.com/ava-labs/coreth/accounts/keystore" - "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/eth" - "github.com/ava-labs/coreth/eth/ethadmin" - "github.com/ava-labs/coreth/eth/ethconfig" - "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/vmerrs" @@ -51,14 +41,12 @@ var ( gasFeeKey, _ = crypto.HexToECDSA("04214cc61e1feaf005aa25b7771d33ca5c4aea959d21fe9a1429f822fa024171") blacklistKey, _ = crypto.HexToECDSA("b32d5aa5b8f4028218538c8c5b14b5c14f3f2e35b236e4bbbff09b669e69e46c") dummyKey, _ = crypto.HexToECDSA("62802c57c0e3c24ae0ce354f7d19f7659ddbe506547b00e9e6a722980d2fed3d") - blackHoleKey, _ = crypto.HexToECDSA("50cdff9c21002158e2a18b73c2504f8982332e05b5c3b26d3cffdd2f1291796a") adminAddr = crypto.PubkeyToAddress(adminKey.PublicKey) kycAddr = crypto.PubkeyToAddress(kycKey.PublicKey) gasFeeAddr = crypto.PubkeyToAddress(gasFeeKey.PublicKey) blacklistAddr = crypto.PubkeyToAddress(blacklistKey.PublicKey) dummyAddr = 
crypto.PubkeyToAddress(dummyKey.PublicKey) - blackholeAddr = crypto.PubkeyToAddress(blackHoleKey.PublicKey) AdminProxyAddr = common.HexToAddress("0x010000000000000000000000000000000000000a") @@ -68,11 +56,6 @@ var ( BLACKLIST_ROLE = big.NewInt(8) ) -type ETHChain struct { - backend *eth.Ethereum - chainConfig *params.ChainConfig -} - // Src code of factory contract: // // SPDX-License-Identifier: MIT @@ -129,11 +112,8 @@ func TestDeployContract(t *testing.T) { // Generate GenesisAlloc alloc := makeGenesisAllocation() - ethChain := newETHChain(t) - ac := ethadmin.NewController(ethChain.backend.APIBackend, ethChain.chainConfig) - // Generate SimulatedBackend - sim := backends.NewSimulatedBackendWithInitialAdminAndAdminController(alloc, gasLimit, adminAddr, ac) + sim := backends.NewSimulatedBackendWithChainConfig(alloc, gasLimit, adminAddr, params.TestCaminoChainConfig) defer func() { err := sim.Close() assert.NoError(t, err) @@ -634,13 +614,8 @@ func TestEthAdmin(t *testing.T) { // Generate GenesisAlloc alloc := makeGenesisAllocation() - // Create a bew Eth chain to generate an AdminController from its backend - // Simulated backed will not do - ethChain := newETHChain(t) - ac := ethadmin.NewController(ethChain.backend.APIBackend, ethChain.chainConfig) - // Generate SimulatedBackend - sim := backends.NewSimulatedBackendWithInitialAdminAndAdminController(alloc, gasLimit, gasFeeAddr, ac) + sim := backends.NewSimulatedBackendWithInitialAdmin(alloc, gasLimit, gasFeeAddr) defer func() { err := sim.Close() assert.NoError(t, err) @@ -648,6 +623,8 @@ func TestEthAdmin(t *testing.T) { sim.Commit(true) + ac := sim.Blockchain().AdminController() + latestHeader, state := getLatestHeaderAndState(t, sim) bf := ac.GetFixedBaseFee(latestHeader, state) @@ -692,63 +669,6 @@ func getLatestHeaderAndState(t *testing.T, sim *backends.SimulatedBackend) (*typ return latestHeader, state } -// newETHChain creates an Ethereum blockchain with the given configs. 
-func newETHChain(t *testing.T) *ETHChain { - chainID := big.NewInt(1) - initialBalance := big.NewInt(1000000000000000000) - - fundedKey, err := keystore.NewKey(rand.Reader) - assert.NoError(t, err) - - // configure the chain - config := ethconfig.NewDefaultConfig() - chainConfig := ¶ms.ChainConfig{ - ChainID: chainID, - HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, - EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - SunrisePhase0BlockTimestamp: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - ApricotPhasePre6BlockTimestamp: big.NewInt(0), - ApricotPhase6BlockTimestamp: big.NewInt(0), - ApricotPhasePost6BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), - } - - config.Genesis = &core.Genesis{ - Config: chainConfig, - Nonce: 0, - Number: 0, - ExtraData: hexutil.MustDecode("0x00"), - GasLimit: gasLimit, - Difficulty: big.NewInt(0), - Alloc: core.GenesisAlloc{fundedKey.Address: {Balance: initialBalance}}, - } - - node, err := node.New(&node.Config{}) - assert.NoError(t, err) - - backend, err := eth.New(node, &config, new(dummy.ConsensusCallbacks), rawdb.NewMemoryDatabase(), eth.DefaultSettings, common.Hash{}, &mockable.Clock{}) - assert.NoError(t, err) - - chain := ÐChain{backend: backend, chainConfig: chainConfig} - backend.SetEtherbase(blackholeAddr) - - return chain -} - func makeGenesisAllocation() core.GenesisAlloc { alloc := make(core.GenesisAlloc) diff --git a/core/blockchain.go b/core/blockchain.go index 03d739d0df..466e395790 100644 
--- a/core/blockchain.go +++ b/core/blockchain.go @@ -200,9 +200,8 @@ var DefaultCacheConfig = &CacheConfig{ // included in the canonical one where as GetBlockByNumber always represents the // canonical chain. type BlockChain struct { - chainConfig *params.ChainConfig // Chain & network configuration - cacheConfig *CacheConfig // Cache configuration for pruning - adminCtrl admin.AdminController // Block based administrative control + chainConfig *params.ChainConfig // Chain & network configuration + cacheConfig *CacheConfig // Cache configuration for pruning db ethdb.Database // Low level persistent database to store final content in @@ -326,7 +325,6 @@ func NewBlockChain( bc := &BlockChain{ chainConfig: chainConfig, cacheConfig: cacheConfig, - adminCtrl: vmConfig.AdminContoller, db: db, stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ Cache: cacheConfig.TrieCleanLimit, @@ -2079,3 +2077,9 @@ func (bc *BlockChain) ResetToStateSyncedBlock(block *types.Block) error { bc.initSnapshot(head) return nil } + +// SetAdminController sets the chain admin controller. Must be called right after chain creation and only once. +// Must be also set in other vmConfig instances for consistency, cause chain copies vmConfig by value. +func (bc *BlockChain) SetAdminController(adminCtrl admin.AdminController) { + bc.vmConfig.AdminContoller = adminCtrl +} diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index f434ef4454..b3d8eca0ea 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -263,7 +263,7 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } // AdminController retrieves the chain's admin controller. 
-func (bc *BlockChain) AdminController() admin.AdminController { return bc.adminCtrl } +func (bc *BlockChain) AdminController() admin.AdminController { return bc.vmConfig.AdminContoller } // Engine retrieves the blockchain's consensus engine. func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } diff --git a/core/chain_makers.go b/core/chain_makers.go index 155624eb52..b0e32cffc5 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -42,7 +42,6 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/consensus/misc" "github.com/ava-labs/coreth/core/admin" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" diff --git a/eth/backend.go b/eth/backend.go index 67c0331294..6c9d8663e6 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -227,14 +227,15 @@ func New( eth: eth, } - vmConfig.AdminContoller = ethadmin.NewController(eth.APIBackend, chainConfig) - var err error eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, eth.engine, vmConfig, lastAcceptedHash, config.SkipUpgradeCheck) if err != nil { return nil, err } + vmConfig.AdminContoller = ethadmin.NewController(eth.APIBackend, eth.blockchain.Config()) + eth.blockchain.SetAdminController(vmConfig.AdminContoller) + if err := eth.handleOfflinePruning(cacheConfig, config.Genesis, vmConfig, lastAcceptedHash); err != nil { return nil, err } diff --git a/params/config.go b/params/config.go index 4de0d0f517..d9c44137e0 100644 --- a/params/config.go +++ b/params/config.go @@ -53,7 +53,7 @@ var ( AvalancheMainnetChainID = big.NewInt(43114) // AvalancheFujiChainID ... AvalancheFujiChainID = big.NewInt(43113) - // LocalChainID ... + // AvalancheLocalChainID ... AvalancheLocalChainID = big.NewInt(43112) // CaminoChainID ... 
CaminoChainID = big.NewInt(500) @@ -154,6 +154,7 @@ var ( } TestChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} + TestCaminoChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} TestLaunchConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} TestApricotPhase1Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} TestApricotPhase2Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} diff --git a/scripts/versions.sh b/scripts/versions.sh index 8cc245c756..a8d262f46a 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh 
@@ -1,6 +1,6 @@ #!/usr/bin/env bash # Set up the versions to be used -coreth_version=${CORETH_VERSION:-'v1.1.0'} +coreth_version=${CORETH_VERSION:-'v1.1.0'} # caminoethvm version # Don't export them as they're used in the context of other calls -avalanche_version=${AVALANCHE_VERSION:-'v1.1.0'} +avalanche_version=${AVALANCHE_VERSION:-'v1.0.0-rc1'} # caminogo version