From 1f6e63900dfbd2befcd08f647d9d49a5645bd3cc Mon Sep 17 00:00:00 2001 From: 0xbstn Date: Fri, 29 Sep 2023 09:52:22 +0200 Subject: [PATCH 01/26] core: fix typos (#28218) * fix(core/txpool): fix typos * core/asm: fix typos * core/bloombits: fix typos * core/rawdb: fix typos --- core/asm/asm.go | 2 +- core/asm/compiler.go | 2 +- core/asm/lex_test.go | 4 ++-- core/bloombits/matcher.go | 4 ++-- core/bloombits/matcher_test.go | 4 ++-- core/rawdb/chain_freezer.go | 2 +- core/rawdb/database.go | 10 +++++----- core/rawdb/databases_64bit.go | 2 +- core/rawdb/table.go | 2 +- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/core/asm/asm.go b/core/asm/asm.go index 7c1e14ec01..294eb6ffaa 100644 --- a/core/asm/asm.go +++ b/core/asm/asm.go @@ -34,7 +34,7 @@ type instructionIterator struct { started bool } -// NewInstructionIterator create a new instruction iterator. +// NewInstructionIterator creates a new instruction iterator. func NewInstructionIterator(code []byte) *instructionIterator { it := new(instructionIterator) it.code = code diff --git a/core/asm/compiler.go b/core/asm/compiler.go index 75bf726c96..02c589b2c1 100644 --- a/core/asm/compiler.go +++ b/core/asm/compiler.go @@ -49,7 +49,7 @@ func NewCompiler(debug bool) *Compiler { } } -// Feed feeds tokens in to ch and are interpreted by +// Feed feeds tokens into ch and are interpreted by // the compiler. // // feed is the first pass in the compile stage as it collects the used labels in the diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go index 173031521f..1e62d776d4 100644 --- a/core/asm/lex_test.go +++ b/core/asm/lex_test.go @@ -72,12 +72,12 @@ func TestLexer(t *testing.T) { input: "@label123", tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, }, - // comment after label + // Comment after label { input: "@label123 ;; comment", tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, }, - // comment after instruction + // Comment after instruction { input: "push 3 ;; comment\nadd", tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}}, diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go index cf799c8324..6a4cfb23db 100644 --- a/core/bloombits/matcher.go +++ b/core/bloombits/matcher.go @@ -58,7 +58,7 @@ type partialMatches struct { // bit with the given number of fetch elements, or a response for such a request. // It can also have the actual results set to be used as a delivery data struct. // -// The contest and error fields are used by the light client to terminate matching +// The context and error fields are used by the light client to terminate matching // early if an error is encountered on some path of the pipeline. type Retrieval struct { Bit uint @@ -389,7 +389,7 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) { shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests ) - // assign is a helper method fo try to assign a pending bit an actively + // assign is a helper method to try to assign a pending bit an actively // listening servicer, or schedule it up for later when one arrives. 
assign := func(bit uint) { select { diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go index 36764c3f17..7f3d5f279c 100644 --- a/core/bloombits/matcher_test.go +++ b/core/bloombits/matcher_test.go @@ -85,7 +85,7 @@ func TestMatcherRandom(t *testing.T) { } // Tests that the matcher can properly find matches if the starting block is -// shifter from a multiple of 8. This is needed to cover an optimisation with +// shifted from a multiple of 8. This is needed to cover an optimisation with // bitset matching https://github.com/ethereum/go-ethereum/issues/15309. func TestMatcherShifted(t *testing.T) { t.Parallel() @@ -106,7 +106,7 @@ func TestWildcardMatcher(t *testing.T) { testMatcherBothModes(t, nil, 0, 10000, 0) } -// makeRandomIndexes generates a random filter system, composed on multiple filter +// makeRandomIndexes generates a random filter system, composed of multiple filter // criteria, each having one bloom list component for the address and arbitrarily // many topic bloom list components. func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes { diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index 22dbda4a21..cbfaf5b9e4 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -200,7 +200,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { } batch.Reset() - // Step into the future and delete and dangling side chains + // Step into the future and delete any dangling side chains if frozen > 0 { tip := frozen for len(dangling) > 0 { diff --git a/core/rawdb/database.go b/core/rawdb/database.go index e97eeb2aa3..0c7cf9f11b 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -34,7 +34,7 @@ import ( "github.com/olekukonko/tablewriter" ) -// freezerdb is a database wrapper that enabled freezer data retrievals. +// freezerdb is a database wrapper that enables freezer data retrievals. type freezerdb struct { ancientRoot string ethdb.KeyValueStore @@ -141,7 +141,7 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) // Unlike other ancient-related methods, this method does not return // errNotSupported when invoked. // The reason for this is that the caller might want to do several things: - // 1. Check if something is in freezer, + // 1. Check if something is in the freezer, // 2. If not, check leveldb. // // This will work, since the ancient-checks inside 'fn' will return errors, @@ -209,7 +209,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st // of the freezer and database. Ensure that we don't shoot ourselves in the foot // by serving up conflicting data, leading to both datastores getting corrupted. // - // - If both the freezer and key-value store is empty (no genesis), we just + // - If both the freezer and key-value store are empty (no genesis), we just // initialized a new empty freezer, so everything's fine. // - If the key-value store is empty, but the freezer is not, we need to make // sure the user's genesis matches the freezer. That will be checked in the @@ -218,7 +218,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st // - If neither the key-value store nor the freezer is empty, cross validate // the genesis hashes to make sure they are compatible. If they are, also // ensure that there's no gap between the freezer and subsequently leveldb. 
- // - If the key-value store is not empty, but the freezer is we might just be + // - If the key-value store is not empty, but the freezer is, we might just be // upgrading to the freezer release, or we might have had a small chain and // not frozen anything yet. Ensure that no blocks are missing yet from the // key-value store, since that would mean we already had an old freezer. @@ -634,7 +634,7 @@ func printChainMetadata(db ethdb.KeyValueStore) { fmt.Fprintf(os.Stderr, "\n\n") } -// ReadChainMetadata returns a set of key/value pairs that contains informatin +// ReadChainMetadata returns a set of key/value pairs that contains information // about the database chain status. This can be used for diagnostic purposes // when investigating the state of the node. func ReadChainMetadata(db ethdb.KeyValueStore) [][]string { diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go index 1593e89bfe..e9f9332ad0 100644 --- a/core/rawdb/databases_64bit.go +++ b/core/rawdb/databases_64bit.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb/pebble" ) -// Pebble is unsuported on 32bit architecture +// Pebble is unsupported on 32bit architecture const PebbleEnabled = true // NewPebbleDBDatabase creates a persistent key-value database without a freezer diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 1895f61da2..19e4ed5b5c 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -219,7 +219,7 @@ func (b *tableBatch) Put(key, value []byte) error { return b.batch.Put(append([]byte(b.prefix), key...), value) } -// Delete inserts the a key removal into the batch for later committing. +// Delete inserts a key removal into the batch for later committing. func (b *tableBatch) Delete(key []byte) error { return b.batch.Delete(append([]byte(b.prefix), key...)) } From 0ded110b805bcdd0ca45f1c2308b5fb9194f418c Mon Sep 17 00:00:00 2001 From: Sina Mahmoodi <1591639+s1na@users.noreply.github.com> Date: Fri, 29 Sep 2023 10:44:28 +0200 Subject: [PATCH 02/26] core: infer blobGasUsed in chain maker (#28212) Same way that the gasUsed in header is updated when a tx is added we should update blob gas used instead of requiring caller to set it manually. --- core/chain_makers.go | 8 +++----- internal/ethapi/api_test.go | 3 --- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/core/chain_makers.go b/core/chain_makers.go index c9c880dd69..3608329a13 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -88,11 +88,6 @@ func (b *BlockGen) SetPoS() { b.header.Difficulty = new(big.Int) } -// SetBlobGas sets the data gas used by the blob in the generated block. -func (b *BlockGen) SetBlobGas(blobGasUsed uint64) { - b.header.BlobGasUsed = &blobGasUsed -} - // addTx adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // @@ -111,6 +106,9 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti } b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) + if b.header.BlobGasUsed != nil { + *b.header.BlobGasUsed += receipt.BlobGasUsed + } } // AddTx adds a transaction to the generated block. 
If no coinbase has diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 846a4347a7..59882cd6bb 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -1448,9 +1448,6 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha b.AddTx(tx) txHashes[i] = tx.Hash() } - if i == 5 { - b.SetBlobGas(params.BlobTxBlobGasPerBlob) - } b.SetPoS() }) return backend, txHashes From c5ff839fb2e92657c61f4c3bb8777837bd93fa25 Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Fri, 29 Sep 2023 10:46:23 -0400 Subject: [PATCH 03/26] core/state: small trie prefetcher nits (#28183) Small trie prefetcher nits --- core/state/trie_prefetcher.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 4e8fd1e10f..772c698dd0 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -37,7 +37,7 @@ var ( type triePrefetcher struct { db Database // Database to fetch trie nodes through root common.Hash // Root hash of the account trie for metrics - fetches map[string]Trie // Partially or fully fetcher tries + fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies. fetchers map[string]*subfetcher // Subfetchers for each trie deliveryMissMeter metrics.Meter @@ -197,7 +197,10 @@ func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte // trieID returns an unique trie identifier consists the trie owner and root hash. func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { - return string(append(owner.Bytes(), root.Bytes()...)) + trieID := make([]byte, common.HashLength*2) + copy(trieID, owner.Bytes()) + copy(trieID[common.HashLength:], root.Bytes()) + return string(trieID) } // subfetcher is a trie fetcher goroutine responsible for pulling entries for a From 1f9d672df185d31e87609e912c84932123823c05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 Sep 2023 18:14:26 +0300 Subject: [PATCH 04/26] common: remove address.Hash footgun (#28228) --- common/types.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/common/types.go b/common/types.go index bf74e43716..7184b2b112 100644 --- a/common/types.go +++ b/common/types.go @@ -239,9 +239,6 @@ func (a Address) Cmp(other Address) int { // Bytes gets the string representation of the underlying address. func (a Address) Bytes() []byte { return a[:] } -// Hash converts an address to a hash by left-padding it with zeros. -func (a Address) Hash() Hash { return BytesToHash(a[:]) } - // Big converts an address to a big integer. 
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) } From 22dcb7a77bc809506b84f4a45a448e9bedd822ff Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Sat, 30 Sep 2023 01:45:38 +0800 Subject: [PATCH 05/26] ethdb/pebble: upgrade pebble to master (aa077af62593) (#28070) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ethdb/pebble: upgrade pebble * ethdb/pebble, go.mod: update pebble to master (aa077af62593) --------- Co-authored-by: Péter Szilágyi --- ethdb/pebble/pebble.go | 25 +++++++++++++++------ go.mod | 27 ++++++++++++----------- go.sum | 49 +++++++++++++++++++++++------------------- 3 files changed, 59 insertions(+), 42 deletions(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index c35a154cac..e6a8f8134c 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -27,6 +27,7 @@ import ( "sync/atomic" "time" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/bloom" "github.com/ethereum/go-ethereum/common" @@ -118,6 +119,18 @@ func (d *Database) onWriteStallEnd() { d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime))) } +// panicLogger is just a noop logger to disable Pebble's internal logger. +// +// TODO(karalabe): Remove when Pebble sets this as teh default. +type panicLogger struct{} + +func (l panicLogger) Infof(format string, args ...interface{}) { +} + +func (l panicLogger) Fatalf(format string, args ...interface{}) { + panic(errors.Errorf("fatal: "+format, args...)) +} + // New returns a wrapped pebble DB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) { @@ -158,7 +171,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e // The size of memory table(as well as the write buffer). // Note, there may have more than two memory tables in the system. - MemTableSize: memTableSize, + MemTableSize: uint64(memTableSize), // MemTableStopWritesThreshold places a hard limit on the size // of the existent MemTables(including the frozen one). @@ -189,6 +202,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e WriteStallBegin: db.onWriteStallBegin, WriteStallEnd: db.onWriteStallEnd, }, + Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble } // Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130 // for more details. @@ -305,12 +319,9 @@ func (d *Database) NewBatch() ethdb.Batch { } // NewBatchWithSize creates a write-only database batch with pre-allocated buffer. -// It's not supported by pebble, but pebble has better memory allocation strategy -// which turns out a lot faster than leveldb. It's performant enough to construct -// batch object without any pre-allocated space. -func (d *Database) NewBatchWithSize(_ int) ethdb.Batch { +func (d *Database) NewBatchWithSize(size int) ethdb.Batch { return &batch{ - b: d.db.NewBatch(), + b: d.db.NewBatchWithSize(size), db: d, } } @@ -582,7 +593,7 @@ type pebbleIterator struct { // of database content with a particular key prefix, starting at a particular // initial key (or after, if it does not exist). 
func (d *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - iter := d.db.NewIter(&pebble.IterOptions{ + iter, _ := d.db.NewIter(&pebble.IterOptions{ LowerBound: append(prefix, start...), UpperBound: upperBound(prefix), }) diff --git a/go.mod b/go.mod index a43b1d3f8b..15832e80c9 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,8 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.2.0 github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.14.0 - github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 + github.com/cockroachdb/errors v1.8.1 + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/consensys/gnark-crypto v0.10.0 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 @@ -30,7 +31,7 @@ require ( github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa github.com/google/uuid v1.3.0 @@ -49,7 +50,7 @@ require ( github.com/karalabe/usb v0.0.2 github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.13 - github.com/mattn/go-isatty v0.0.16 + github.com/mattn/go-isatty v0.0.17 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 @@ -57,19 +58,19 @@ require ( github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tyler-smith/go-bip39 v1.1.0 github.com/urfave/cli/v2 v2.25.7 go.uber.org/automaxprocs v1.5.2 - golang.org/x/crypto v0.12.0 - golang.org/x/exp v0.0.0-20230810033253-352e893a4cad + golang.org/x/crypto v0.13.0 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.11.0 - golang.org/x/text v0.12.0 + golang.org/x/sys v0.12.0 + golang.org/x/text v0.13.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.9.1 + golang.org/x/tools v0.13.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -87,10 +88,10 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.5.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.8.1 // indirect github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect github.com/cockroachdb/redact v1.0.8 // indirect github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect @@ -126,10 +127,10 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/net v0.10.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.15.0 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gotest.tools/v3 v3.5.0 // indirect + 
gotest.tools/v3 v3.5.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index ca5617c2cc..dfbe5420bc 100644 --- a/go.sum +++ b/go.sum @@ -109,18 +109,20 @@ github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= -github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw= github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= @@ -262,8 +264,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy 
v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= @@ -409,8 +412,9 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -536,8 +540,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -592,8 +596,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -604,8 +608,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU= -golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -627,8 +631,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -672,8 +676,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -756,8 +760,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 
h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -771,8 +776,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -827,8 +832,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -941,8 +946,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 966e50bddb502383b795320b0a241baf36910ee4 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 Sep 2023 20:52:55 +0300 Subject: [PATCH 06/26] ethdb/pebble: luv you linter --- ethdb/pebble/pebble.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index e6a8f8134c..5aa00aad4e 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -121,7 +121,7 @@ func (d *Database) onWriteStallEnd() { // panicLogger is just a noop logger to disable Pebble's internal logger. // -// TODO(karalabe): Remove when Pebble sets this as teh default. +// TODO(karalabe): Remove when Pebble sets this as the default. type panicLogger struct{} func (l panicLogger) Infof(format string, args ...interface{}) { From a408e37fa1165e4e9b65031dc3fa318724812454 Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Fri, 29 Sep 2023 12:27:30 -0600 Subject: [PATCH 07/26] eth/catalyst: add validation error in new paylaod hash mismatch (#28226) * eth/catalyst: add validation error in new paylaod hash mismatch * eth/catalyst/api: refactor api.invalid(..) to return nil latest valid hash if none provided --- eth/catalyst/api.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 1e5fb3ccb3..d1e1991414 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -513,7 +513,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot) if err != nil { log.Warn("Invalid NewPayload params", "params", params, "error", err) - return engine.PayloadStatusV1{Status: engine.INVALID}, nil + return api.invalid(err, nil), nil } // Stash away the last update to warn the user if the beacon client goes offline api.lastNewPayloadLock.Lock() @@ -694,20 +694,21 @@ func (api *ConsensusAPI) checkInvalidAncestor(check common.Hash, head common.Has } } -// invalid returns a response "INVALID" with the latest valid hash supplied by latest or to the current head -// if no latestValid block was provided. +// invalid returns a response "INVALID" with the latest valid hash supplied by latest. 
func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.PayloadStatusV1 { - currentHash := api.eth.BlockChain().CurrentBlock().Hash() + var currentHash *common.Hash if latestValid != nil { - // Set latest valid hash to 0x0 if parent is PoW block - currentHash = common.Hash{} - if latestValid.Difficulty.BitLen() == 0 { + if latestValid.Difficulty.BitLen() != 0 { + // Set latest valid hash to 0x0 if parent is PoW block + currentHash = &common.Hash{} + } else { // Otherwise set latest valid hash to parent hash - currentHash = latestValid.Hash() + h := latestValid.Hash() + currentHash = &h } } errorMsg := err.Error() - return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: ¤tHash, ValidationError: &errorMsg} + return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: currentHash, ValidationError: &errorMsg} } // heartbeat loops indefinitely, and checks if there have been beacon client updates From 7b6ff527d543a4478b4079d2b69b0a24fac75fa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 Sep 2023 22:11:15 +0300 Subject: [PATCH 08/26] cmd, eth: switch the dev synctarget to hash from block (#28209) * cmd, eth: switch the dev synctarget to hash from block * cmd/utils, eth/catalyst: terminate node wyen synctarget reached --- cmd/geth/config.go | 13 ++++-- cmd/utils/flags.go | 30 ++++-------- eth/catalyst/tester.go | 54 +++++++++++----------- eth/downloader/beacondevsync.go | 81 +++++++++++++++++++++++++++++++++ eth/downloader/downloader.go | 10 ---- eth/downloader/peer.go | 27 +---------- 6 files changed, 126 insertions(+), 89 deletions(-) create mode 100644 eth/downloader/beacondevsync.go diff --git a/cmd/geth/config.go b/cmd/geth/config.go index a5d628d8af..027dac7bd6 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -32,6 +32,8 @@ import ( "github.com/ethereum/go-ethereum/accounts/scwallet" "github.com/ethereum/go-ethereum/accounts/usbwallet" "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" @@ -199,17 +201,18 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { if ctx.IsSet(utils.GraphQLEnabledFlag.Name) { utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node) } - // Add the Ethereum Stats daemon if requested. if cfg.Ethstats.URL != "" { utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) } - // Configure full-sync tester service if requested - if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync { - utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name)) + if ctx.IsSet(utils.SyncTargetFlag.Name) { + hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name)) + if len(hex) != common.HashLength { + utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength) + } + utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex)) } - // Start the dev mode if requested, or launch the engine API for // interacting with external consensus client. 
if ctx.IsSet(utils.DeveloperFlag.Name) { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c172d269c5..9743a7b9ca 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -18,7 +18,6 @@ package utils import ( - "bytes" "context" "crypto/ecdsa" "encoding/hex" @@ -39,11 +38,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/fdlimit" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool/legacypool" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -72,7 +69,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" @@ -595,9 +591,9 @@ var ( } // MISC settings - SyncTargetFlag = &cli.PathFlag{ + SyncTargetFlag = &cli.StringFlag{ Name: "synctarget", - Usage: `File for containing the hex-encoded block-rlp as sync target(dev feature)`, + Usage: `Hash of the block to full sync to (dev testing feature)`, TakesFile: true, Category: flags.MiscCategory, } @@ -1691,7 +1687,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) godebug.SetGCPercent(int(gogc)) - if ctx.IsSet(SyncModeFlag.Name) { + if ctx.IsSet(SyncTargetFlag.Name) { + cfg.SyncMode = downloader.FullSync // dev sync target forces full sync + } else if ctx.IsSet(SyncModeFlag.Name) { cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) } if ctx.IsSet(NetworkIdFlag.Name) { @@ -1976,21 +1974,9 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf } // RegisterFullSyncTester adds the full-sync tester service into node. 
-func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) { - blob, err := os.ReadFile(path) - if err != nil { - Fatalf("Failed to read block file: %v", err) - } - rlpBlob, err := hexutil.Decode(string(bytes.TrimRight(blob, "\r\n"))) - if err != nil { - Fatalf("Failed to decode block blob: %v", err) - } - var block types.Block - if err := rlp.DecodeBytes(rlpBlob, &block); err != nil { - Fatalf("Failed to decode block: %v", err) - } - catalyst.RegisterFullSyncTester(stack, eth, &block) - log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash()) +func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash) { + catalyst.RegisterFullSyncTester(stack, eth, target) + log.Info("Registered full-sync tester", "hash", target) } func SetupMetrics(ctx *cli.Context) { diff --git a/eth/catalyst/tester.go b/eth/catalyst/tester.go index 3e9159a175..0922ac0ba6 100644 --- a/eth/catalyst/tester.go +++ b/eth/catalyst/tester.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/log" @@ -28,23 +28,27 @@ import ( ) // FullSyncTester is an auxiliary service that allows Geth to perform full sync -// alone without consensus-layer attached. Users must specify a valid block as -// the sync target. This tester can be applied to different networks, no matter -// it's pre-merge or post-merge, but only for full-sync. +// alone without consensus-layer attached. Users must specify a valid block hash +// as the sync target. +// +// This tester can be applied to different networks, no matter it's pre-merge or +// post-merge, but only for full-sync. type FullSyncTester struct { - api *ConsensusAPI - block *types.Block - closed chan struct{} - wg sync.WaitGroup + stack *node.Node + backend *eth.Ethereum + target common.Hash + closed chan struct{} + wg sync.WaitGroup } // RegisterFullSyncTester registers the full-sync tester service into the node // stack for launching and stopping the service controlled by node. -func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, block *types.Block) (*FullSyncTester, error) { +func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash) (*FullSyncTester, error) { cl := &FullSyncTester{ - api: newConsensusAPIWithoutHeartbeat(backend), - block: block, - closed: make(chan struct{}), + stack: stack, + backend: backend, + target: target, + closed: make(chan struct{}), } stack.RegisterLifecycle(cl) return cl, nil @@ -56,29 +60,25 @@ func (tester *FullSyncTester) Start() error { go func() { defer tester.wg.Done() + // Trigger beacon sync with the provided block hash as trusted + // chain head. + err := tester.backend.Downloader().BeaconDevSync(downloader.FullSync, tester.target, tester.closed) + if err != nil { + log.Info("Failed to trigger beacon sync", "err", err) + } + ticker := time.NewTicker(time.Second * 5) defer ticker.Stop() for { select { case <-ticker.C: - // Don't bother downloader in case it's already syncing. - if tester.api.eth.Downloader().Synchronising() { - continue - } - // Short circuit in case the target block is already stored - // locally. TODO(somehow terminate the node stack if target - // is reached). 
- if tester.api.eth.BlockChain().HasBlock(tester.block.Hash(), tester.block.NumberU64()) { - log.Info("Full-sync target reached", "number", tester.block.NumberU64(), "hash", tester.block.Hash()) + // Stop in case the target block is already stored locally. + if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil { + log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash()) + go tester.stack.Close() // async since we need to close ourselves return } - // Trigger beacon sync with the provided block header as - // trusted chain head. - err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), tester.block.Header()) - if err != nil { - log.Info("Failed to beacon sync", "err", err) - } case <-tester.closed: return diff --git a/eth/downloader/beacondevsync.go b/eth/downloader/beacondevsync.go new file mode 100644 index 0000000000..9a38fedd46 --- /dev/null +++ b/eth/downloader/beacondevsync.go @@ -0,0 +1,81 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package downloader + +import ( + "errors" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +// BeaconDevSync is a development helper to test synchronization by providing +// a block hash instead of header to run the beacon sync against. +// +// The method will reach out to the network to retrieve the header of the sync +// target instead of receiving it from the consensus node. +// +// Note, this must not be used in live code. If the forkchcoice endpoint where +// to use this instead of giving us the payload first, then essentially nobody +// in the network would have the block yet that we'd attempt to retrieve. 
+func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error { + // Be very loud that this code should not be used in a live node + log.Warn("----------------------------------") + log.Warn("Beacon syncing with hash as target", "hash", hash) + log.Warn("This is unhealthy for a live node!") + log.Warn("----------------------------------") + + log.Info("Waiting for peers to retrieve sync target") + for { + // If the node is going down, unblock + select { + case <-stop: + return errors.New("stop requested") + default: + } + // Pick a random peer to sync from and keep retrying if none are yet + // available due to fresh startup + d.peers.lock.RLock() + var peer *peerConnection + for _, peer = range d.peers.peers { + break + } + d.peers.lock.RUnlock() + + if peer == nil { + time.Sleep(time.Second) + continue + } + // Found a peer, attempt to retrieve the header whilst blocking and + // retry if it fails for whatever reason + log.Info("Attempting to retrieve sync target", "peer", peer.id) + headers, metas, err := d.fetchHeadersByHash(peer, hash, 1, 0, false) + if err != nil || len(headers) != 1 { + log.Warn("Failed to fetch sync target", "headers", len(headers), "err", err) + time.Sleep(time.Second) + continue + } + // Head header retrieved, if the hash matches, start the actual sync + if metas[0] != hash { + log.Error("Received invalid sync target", "want", hash, "have", metas[0]) + time.Sleep(time.Second) + continue + } + return d.BeaconSync(mode, headers[0], headers[0]) + } +} diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 7fed48bdb2..2ca7e328c6 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -286,11 +286,6 @@ func (d *Downloader) Progress() ethereum.SyncProgress { } } -// Synchronising returns whether the downloader is currently retrieving blocks. -func (d *Downloader) Synchronising() bool { - return d.synchronising.Load() -} - // RegisterPeer injects a new download peer into the set of block source to be // used for fetching hashes and blocks from. func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { @@ -309,11 +304,6 @@ func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { return nil } -// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. -func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error { - return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) -} - // UnregisterPeer remove a peer from the known list, preventing any action from // the specified peer. An effort is also made to return any pending fetches into // the queue. diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 6b82694959..4c43af5270 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -55,39 +55,16 @@ type peerConnection struct { lock sync.RWMutex } -// LightPeer encapsulates the methods required to synchronise with a remote light peer. -type LightPeer interface { +// Peer encapsulates the methods required to synchronise with a remote full peer. +type Peer interface { Head() (common.Hash, *big.Int) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) -} -// Peer encapsulates the methods required to synchronise with a remote full peer. 
-type Peer interface { - LightPeer RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) } -// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. -type lightPeerWrapper struct { - peer LightPeer -} - -func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } -func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { - return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink) -} -func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { - return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink) -} -func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) { - panic("RequestBodies not supported in light client mode sync") -} -func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) { - panic("RequestReceipts not supported in light client mode sync") -} - // newPeerConnection creates a new downloader peer. func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { return &peerConnection{ From c39cbc1a78aa275523c1b0ff9d21b16ba7bfa486 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Mon, 2 Oct 2023 11:49:29 +0200 Subject: [PATCH 09/26] core: implement BLOBBASEFEE opcode (0x4a) (#28098) Implements "EIP-7516: BLOBBASEFEE opcode" for cancun, as per spec: https://eips.ethereum.org/EIPS/eip-7516 --- cmd/evm/internal/t8ntool/execution.go | 16 +++++++++------- cmd/evm/runner.go | 4 +++- core/evm.go | 27 ++++++++++++++++----------- core/state_processor.go | 3 +-- core/state_transition.go | 5 ++--- core/vm/eips.go | 20 ++++++++++++++++++-- core/vm/evm.go | 16 ++++++++-------- core/vm/jump_table.go | 3 ++- core/vm/opcodes.go | 3 +++ core/vm/runtime/env.go | 1 + core/vm/runtime/runtime.go | 4 ++++ internal/ethapi/api.go | 18 +++++++++++------- 12 files changed, 78 insertions(+), 42 deletions(-) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index bb14ac63ca..c522379387 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -163,17 +163,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, rnd := common.BigToHash(pre.Env.Random) vmContext.Random = &rnd } - // If excessBlobGas is defined, add it to the vmContext. + // Calculate the BlobBaseFee + var excessBlobGas uint64 if pre.Env.ExcessBlobGas != nil { - vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas + excessBlobGas := *pre.Env.ExcessBlobGas + vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) } else { // If it is not explicitly defined, but we have the parent values, we try // to calculate it ourselves. parentExcessBlobGas := pre.Env.ParentExcessBlobGas parentBlobGasUsed := pre.Env.ParentBlobGasUsed if parentExcessBlobGas != nil && parentBlobGasUsed != nil { - excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) - vmContext.ExcessBlobGas = &excessBlobGas + excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) + vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) } } // If DAO is supported/enabled, we need to handle it here. 
In geth 'proper', it's @@ -189,7 +191,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } var blobGasUsed uint64 for i, tx := range txs { - if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil { + if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil { errMsg := "blob tx used but field env.ExcessBlobGas missing" log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg) rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) @@ -322,8 +324,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil)) execRs.WithdrawalsRoot = &h } - if vmContext.ExcessBlobGas != nil { - execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas) + if vmContext.BlobBaseFee != nil { + execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas) execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed) } // Re-create statedb instance with new root upon the updated database diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 017388efb5..45fc985351 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -123,7 +123,8 @@ func runCmd(ctx *cli.Context) error { sender = common.BytesToAddress([]byte("sender")) receiver = common.BytesToAddress([]byte("receiver")) preimages = ctx.Bool(DumpFlag.Name) - blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests + blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests + blobBaseFee = new(big.Int) // TODO (MariusVanDerWijden) implement blob fee in state tests ) if ctx.Bool(MachineFlag.Name) { tracer = logger.NewJSONLogger(logconfig, os.Stdout) @@ -221,6 +222,7 @@ func runCmd(ctx *cli.Context) error { Coinbase: genesisConfig.Coinbase, BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), BlobHashes: blobHashes, + BlobBaseFee: blobBaseFee, EVMConfig: vm.Config{ Tracer: tracer, }, diff --git a/core/evm.go b/core/evm.go index 104f2c09dc..46dcb31462 100644 --- a/core/evm.go +++ b/core/evm.go @@ -21,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" ) @@ -40,6 +41,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common var ( beneficiary common.Address baseFee *big.Int + blobBaseFee *big.Int random *common.Hash ) @@ -52,21 +54,24 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common if header.BaseFee != nil { baseFee = new(big.Int).Set(header.BaseFee) } + if header.ExcessBlobGas != nil { + blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas) + } if header.Difficulty.Cmp(common.Big0) == 0 { random = &header.MixDigest } return vm.BlockContext{ - CanTransfer: CanTransfer, - Transfer: Transfer, - GetHash: GetHashFn(header, chain), - Coinbase: beneficiary, - BlockNumber: new(big.Int).Set(header.Number), - Time: header.Time, - Difficulty: new(big.Int).Set(header.Difficulty), - BaseFee: baseFee, - GasLimit: header.GasLimit, - Random: random, - ExcessBlobGas: header.ExcessBlobGas, + CanTransfer: CanTransfer, + Transfer: Transfer, + GetHash: GetHashFn(header, chain), + Coinbase: beneficiary, + BlockNumber: new(big.Int).Set(header.Number), + Time: header.Time, + Difficulty: new(big.Int).Set(header.Difficulty), + BaseFee: baseFee, + 
BlobBaseFee: blobBaseFee, + GasLimit: header.GasLimit, + Random: random, } } diff --git a/core/state_processor.go b/core/state_processor.go index 6a208a1811..7dd81487d5 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -138,7 +137,7 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta if tx.Type() == types.BlobTxType { receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) - receipt.BlobGasPrice = eip4844.CalcBlobFee(*evm.Context.ExcessBlobGas) + receipt.BlobGasPrice = evm.Context.BlobBaseFee } // If the transaction created a contract, store the creation address in the receipt. diff --git a/core/state_transition.go b/core/state_transition.go index f84757be78..fb03c48aab 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" @@ -248,7 +247,7 @@ func (st *StateTransition) buyGas() error { balanceCheck.Add(balanceCheck, blobBalanceCheck) // Pay for blobGasUsed * actual blob fee blobFee := new(big.Int).SetUint64(blobGas) - blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)) + blobFee.Mul(blobFee, st.evm.Context.BlobBaseFee) mgval.Add(mgval, blobFee) } } @@ -329,7 +328,7 @@ func (st *StateTransition) preCheck() error { if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { if st.blobGasUsed() > 0 { // Check that the user is paying at least the current blob fee - blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas) + blobFee := st.evm.Context.BlobBaseFee if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 { return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee) } diff --git a/core/vm/eips.go b/core/vm/eips.go index 704c1ce127..35f0a3f7c2 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -282,9 +282,15 @@ func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ return nil, nil } -// enable4844 applies EIP-4844 (DATAHASH opcode) +// opBlobBaseFee implements BLOBBASEFEE opcode +func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee) + scope.Stack.push(blobBaseFee) + return nil, nil +} + +// enable4844 applies EIP-4844 (BLOBHASH opcode) func enable4844(jt *JumpTable) { - // New opcode jt[BLOBHASH] = &operation{ execute: opBlobHash, constantGas: GasFastestStep, @@ -293,6 +299,16 @@ func enable4844(jt *JumpTable) { } } +// enable7516 applies EIP-7516 (BLOBBASEFEE opcode) +func enable7516(jt *JumpTable) { + jt[BLOBBASEFEE] = &operation{ + execute: opBlobBaseFee, + constantGas: GasQuickStep, + minStack: minStack(0, 1), + maxStack: maxStack(0, 1), + } +} + // enable6780 applies EIP-6780 (deactivate SELFDESTRUCT) func enable6780(jt *JumpTable) { jt[SELFDESTRUCT] = &operation{ diff --git a/core/vm/evm.go 
b/core/vm/evm.go index 40e2f3554f..2c6cc7d484 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -67,14 +67,14 @@ type BlockContext struct { GetHash GetHashFunc // Block information - Coinbase common.Address // Provides information for COINBASE - GasLimit uint64 // Provides information for GASLIMIT - BlockNumber *big.Int // Provides information for NUMBER - Time uint64 // Provides information for TIME - Difficulty *big.Int // Provides information for DIFFICULTY - BaseFee *big.Int // Provides information for BASEFEE - Random *common.Hash // Provides information for PREVRANDAO - ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the data + Coinbase common.Address // Provides information for COINBASE + GasLimit uint64 // Provides information for GASLIMIT + BlockNumber *big.Int // Provides information for NUMBER + Time uint64 // Provides information for TIME + Difficulty *big.Int // Provides information for DIFFICULTY + BaseFee *big.Int // Provides information for BASEFEE + BlobBaseFee *big.Int // Provides information for BLOBBASEFEE + Random *common.Hash // Provides information for PREVRANDAO } // TxContext provides the EVM with information about a transaction. diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 702b186615..fb87258326 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -82,7 +82,8 @@ func validate(jt JumpTable) JumpTable { func newCancunInstructionSet() JumpTable { instructionSet := newShanghaiInstructionSet() - enable4844(&instructionSet) // EIP-4844 (DATAHASH opcode) + enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode) + enable7516(&instructionSet) // EIP-7516 (BLOBBASEFEE opcode) enable1153(&instructionSet) // EIP-1153 "Transient Storage" enable5656(&instructionSet) // EIP-5656 (MCOPY opcode) enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index 2929b8ce92..a11cf05a15 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -101,6 +101,7 @@ const ( SELFBALANCE OpCode = 0x47 BASEFEE OpCode = 0x48 BLOBHASH OpCode = 0x49 + BLOBBASEFEE OpCode = 0x4a ) // 0x50 range - 'storage' and execution. @@ -287,6 +288,7 @@ var opCodeToString = map[OpCode]string{ SELFBALANCE: "SELFBALANCE", BASEFEE: "BASEFEE", BLOBHASH: "BLOBHASH", + BLOBBASEFEE: "BLOBBASEFEE", // 0x50 range - 'storage' and execution. 
POP: "POP", @@ -444,6 +446,7 @@ var stringToOp = map[string]OpCode{ "CHAINID": CHAINID, "BASEFEE": BASEFEE, "BLOBHASH": BLOBHASH, + "BLOBBASEFEE": BLOBBASEFEE, "DELEGATECALL": DELEGATECALL, "STATICCALL": STATICCALL, "CODESIZE": CODESIZE, diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go index 7e330e0732..64aa550a25 100644 --- a/core/vm/runtime/env.go +++ b/core/vm/runtime/env.go @@ -37,6 +37,7 @@ func NewEnv(cfg *Config) *vm.EVM { Difficulty: cfg.Difficulty, GasLimit: cfg.GasLimit, BaseFee: cfg.BaseFee, + BlobBaseFee: cfg.BlobBaseFee, Random: cfg.Random, } diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 480e5cec67..cfd7e4dbc4 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -44,6 +44,7 @@ type Config struct { Debug bool EVMConfig vm.Config BaseFee *big.Int + BlobBaseFee *big.Int BlobHashes []common.Hash Random *common.Hash @@ -95,6 +96,9 @@ func setDefaults(cfg *Config) { if cfg.BaseFee == nil { cfg.BaseFee = big.NewInt(params.InitialBaseFee) } + if cfg.BlobBaseFee == nil { + cfg.BlobBaseFee = new(big.Int) + } } // Execute executes the code using the input as call data during the execution. diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index d22424502c..cf1960fcf6 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -991,13 +991,14 @@ func (diff *StateOverride) Apply(state *state.StateDB) error { // BlockOverrides is a set of header fields to override. type BlockOverrides struct { - Number *hexutil.Big - Difficulty *hexutil.Big - Time *hexutil.Uint64 - GasLimit *hexutil.Uint64 - Coinbase *common.Address - Random *common.Hash - BaseFee *hexutil.Big + Number *hexutil.Big + Difficulty *hexutil.Big + Time *hexutil.Uint64 + GasLimit *hexutil.Uint64 + Coinbase *common.Address + Random *common.Hash + BaseFee *hexutil.Big + BlobBaseFee *hexutil.Big } // Apply overrides the given header fields into the given block context. @@ -1026,6 +1027,9 @@ func (diff *BlockOverrides) Apply(blockCtx *vm.BlockContext) { if diff.BaseFee != nil { blockCtx.BaseFee = diff.BaseFee.ToInt() } + if diff.BlobBaseFee != nil { + blockCtx.BlobBaseFee = diff.BlobBaseFee.ToInt() + } } // ChainContextBackend provides methods required to implement ChainContext. From 705a51e566bc9215975d08f27d23ddab7baa9dd7 Mon Sep 17 00:00:00 2001 From: tylerni7 Date: Tue, 3 Oct 2023 00:23:19 -0700 Subject: [PATCH 10/26] eth, rpc: add configurable option for wsMessageSizeLimit (#27801) This change adds a configurable limit to websocket message. --------- Co-authored-by: Martin Holst Swende --- rpc/client_opt.go | 11 +++++++- rpc/server_test.go | 2 +- rpc/testservice_test.go | 4 +++ rpc/websocket.go | 14 ++++++---- rpc/websocket_test.go | 62 ++++++++++++++++++++++++++++++++++++++++- 5 files changed, 85 insertions(+), 8 deletions(-) diff --git a/rpc/client_opt.go b/rpc/client_opt.go index 5bef08cca8..3fa045a9b9 100644 --- a/rpc/client_opt.go +++ b/rpc/client_opt.go @@ -34,7 +34,8 @@ type clientConfig struct { httpAuth HTTPAuth // WebSocket options - wsDialer *websocket.Dialer + wsDialer *websocket.Dialer + wsMessageSizeLimit *int64 // wsMessageSizeLimit nil = default, 0 = no limit // RPC handler options idgen func() ID @@ -66,6 +67,14 @@ func WithWebsocketDialer(dialer websocket.Dialer) ClientOption { }) } +// WithWebsocketMessageSizeLimit configures the websocket message size limit used by the RPC +// client. Passing a limit of 0 means no limit. 
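A minimal usage sketch (illustrative only, not part of this patch; dialWithLimit and the endpoint URL are hypothetical) showing how the new option would be passed to the rpc package's existing DialOptions entry point:

    // Hedged example: open a websocket client with a 64 MiB read limit.
    // DialOptions and ClientOption already exist in package rpc; only
    // WithWebsocketMessageSizeLimit is introduced by this patch.
    func dialWithLimit(url string) (*rpc.Client, error) {
        return rpc.DialOptions(context.Background(), url,
            rpc.WithWebsocketMessageSizeLimit(64*1024*1024))
    }

Passing a limit of 0 disables the read limit entirely, while leaving the option unset (or passing a negative value) keeps the 32 MiB wsDefaultReadLimit.
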
+func WithWebsocketMessageSizeLimit(messageSizeLimit int64) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.wsMessageSizeLimit = &messageSizeLimit + }) +} + // WithHeader configures HTTP headers set by the RPC client. Headers set using this option // will be used for both HTTP and WebSocket connections. func WithHeader(key, value string) ClientOption { diff --git a/rpc/server_test.go b/rpc/server_test.go index 5d3929dfdc..47a15b610a 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -45,7 +45,7 @@ func TestServerRegisterName(t *testing.T) { t.Fatalf("Expected service calc to be registered") } - wantCallbacks := 13 + wantCallbacks := 14 if len(svc.callbacks) != wantCallbacks { t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks)) } diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index eab67f1dd5..7d873af667 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -90,6 +90,10 @@ func (s *testService) EchoWithCtx(ctx context.Context, str string, i int, args * return echoResult{str, i, args} } +func (s *testService) Repeat(msg string, i int) string { + return strings.Repeat(msg, i) +} + func (s *testService) PeerInfo(ctx context.Context) PeerInfo { return PeerInfoFromContext(ctx) } diff --git a/rpc/websocket.go b/rpc/websocket.go index 86cf50594c..538e53a31b 100644 --- a/rpc/websocket.go +++ b/rpc/websocket.go @@ -38,7 +38,7 @@ const ( wsPingInterval = 30 * time.Second wsPingWriteTimeout = 5 * time.Second wsPongTimeout = 30 * time.Second - wsMessageSizeLimit = 32 * 1024 * 1024 + wsDefaultReadLimit = 32 * 1024 * 1024 ) var wsBufferPool = new(sync.Pool) @@ -60,7 +60,7 @@ func (s *Server) WebsocketHandler(allowedOrigins []string) http.Handler { log.Debug("WebSocket upgrade failed", "err", err) return } - codec := newWebsocketCodec(conn, r.Host, r.Header) + codec := newWebsocketCodec(conn, r.Host, r.Header, wsDefaultReadLimit) s.ServeCodec(codec, 0) }) } @@ -251,7 +251,11 @@ func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, er } return nil, hErr } - return newWebsocketCodec(conn, dialURL, header), nil + messageSizeLimit := int64(wsDefaultReadLimit) + if cfg.wsMessageSizeLimit != nil && *cfg.wsMessageSizeLimit >= 0 { + messageSizeLimit = *cfg.wsMessageSizeLimit + } + return newWebsocketCodec(conn, dialURL, header, messageSizeLimit), nil } return connect, nil } @@ -283,8 +287,8 @@ type websocketCodec struct { pongReceived chan struct{} } -func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec { - conn.SetReadLimit(wsMessageSizeLimit) +func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header, readLimit int64) ServerCodec { + conn.SetReadLimit(readLimit) encode := func(v interface{}, isErrorResponse bool) error { return conn.WriteJSON(v) } diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index fb9357605b..e4ac5c3fad 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -113,6 +113,66 @@ func TestWebsocketLargeCall(t *testing.T) { } } +// This test checks whether the wsMessageSizeLimit option is obeyed. 
+func TestWebsocketLargeRead(t *testing.T) { + t.Parallel() + + var ( + srv = newTestServer() + httpsrv = httptest.NewServer(srv.WebsocketHandler([]string{"*"})) + wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + ) + defer srv.Stop() + defer httpsrv.Close() + + testLimit := func(limit *int64) { + opts := []ClientOption{} + expLimit := int64(wsDefaultReadLimit) + if limit != nil && *limit >= 0 { + opts = append(opts, WithWebsocketMessageSizeLimit(*limit)) + if *limit > 0 { + expLimit = *limit // 0 means infinite + } + } + client, err := DialOptions(context.Background(), wsURL, opts...) + if err != nil { + t.Fatalf("can't dial: %v", err) + } + defer client.Close() + // Remove some bytes for json encoding overhead. + underLimit := int(expLimit - 128) + overLimit := expLimit + 1 + if expLimit == wsDefaultReadLimit { + // No point trying the full 32MB in tests. Just sanity-check that + // it's not obviously limited. + underLimit = 1024 + overLimit = -1 + } + var res string + // Check under limit + if err = client.Call(&res, "test_repeat", "A", underLimit); err != nil { + t.Fatalf("unexpected error with limit %d: %v", expLimit, err) + } + if len(res) != underLimit || strings.Count(res, "A") != underLimit { + t.Fatal("incorrect data") + } + // Check over limit + if overLimit > 0 { + err = client.Call(&res, "test_repeat", "A", expLimit+1) + if err == nil || err != websocket.ErrReadLimit { + t.Fatalf("wrong error with limit %d: %v expecting %v", expLimit, err, websocket.ErrReadLimit) + } + } + } + ptr := func(v int64) *int64 { return &v } + + testLimit(ptr(-1)) // Should be ignored (use default) + testLimit(ptr(0)) // Should be ignored (use default) + testLimit(nil) // Should be ignored (use default) + testLimit(ptr(200)) + testLimit(ptr(wsDefaultReadLimit * 2)) +} + func TestWebsocketPeerInfo(t *testing.T) { var ( s = newTestServer() @@ -206,7 +266,7 @@ func TestClientWebsocketLargeMessage(t *testing.T) { defer srv.Stop() defer httpsrv.Close() - respLength := wsMessageSizeLimit - 50 + respLength := wsDefaultReadLimit - 50 srv.RegisterName("test", largeRespService{respLength}) c, err := DialWebsocket(context.Background(), wsURL, "") From 07dec7a11c9e4e0edd052fffa3b47791e9fe889a Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Tue, 3 Oct 2023 01:26:33 -0600 Subject: [PATCH 11/26] cmd/evm: cancun-updates for b11r and t8n -tools (#28195) This change updates `evm b11r` (blockbuilder) and `evm t8n` (transition) tools to contain cancun updates (e.g. 
new header fields) --------- Co-authored-by: Mario Vega --- cmd/evm/internal/t8ntool/block.go | 88 ++++++++++++++------------ cmd/evm/internal/t8ntool/execution.go | 10 +-- cmd/evm/internal/t8ntool/gen_header.go | 86 +++++++++++++++---------- cmd/evm/internal/t8ntool/gen_stenv.go | 4 +- cmd/evm/internal/t8ntool/transition.go | 2 +- cmd/evm/testdata/28/env.json | 5 +- cmd/evm/testdata/28/exp.json | 4 +- cmd/evm/testdata/29/exp.json | 4 +- 8 files changed, 114 insertions(+), 89 deletions(-) diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 09dca8984e..872e2f6b2a 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -37,33 +37,38 @@ import ( //go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go type header struct { - ParentHash common.Hash `json:"parentHash"` - OmmerHash *common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom types.Bloom `json:"logsBloom"` - Difficulty *big.Int `json:"difficulty"` - Number *big.Int `json:"number" gencodec:"required"` - GasLimit uint64 `json:"gasLimit" gencodec:"required"` - GasUsed uint64 `json:"gasUsed"` - Time uint64 `json:"timestamp" gencodec:"required"` - Extra []byte `json:"extraData"` - MixDigest common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *big.Int `json:"difficulty"` + Number *big.Int `json:"number" gencodec:"required"` + GasLimit uint64 `json:"gasLimit" gencodec:"required"` + GasUsed uint64 `json:"gasUsed"` + Time uint64 `json:"timestamp" gencodec:"required"` + Extra []byte `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } type headerMarshaling struct { - Difficulty *math.HexOrDecimal256 - Number *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - GasUsed math.HexOrDecimal64 - Time math.HexOrDecimal64 - Extra hexutil.Bytes - BaseFee *math.HexOrDecimal256 + Difficulty *math.HexOrDecimal256 + Number *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + GasUsed math.HexOrDecimal64 + Time math.HexOrDecimal64 + Extra hexutil.Bytes + BaseFee *math.HexOrDecimal256 + BlobGasUsed *math.HexOrDecimal64 + ExcessBlobGas *math.HexOrDecimal64 } type bbInput struct { @@ -113,22 +118,25 @@ func (c *cliqueInput) UnmarshalJSON(input []byte) error { // ToBlock converts i into a *types.Block func (i *bbInput) ToBlock() *types.Block { header := &types.Header{ - ParentHash: i.Header.ParentHash, - UncleHash: types.EmptyUncleHash, - Coinbase: common.Address{}, - Root: i.Header.Root, - TxHash: 
types.EmptyTxsHash, - ReceiptHash: types.EmptyReceiptsHash, - Bloom: i.Header.Bloom, - Difficulty: common.Big0, - Number: i.Header.Number, - GasLimit: i.Header.GasLimit, - GasUsed: i.Header.GasUsed, - Time: i.Header.Time, - Extra: i.Header.Extra, - MixDigest: i.Header.MixDigest, - BaseFee: i.Header.BaseFee, - WithdrawalsHash: i.Header.WithdrawalsHash, + ParentHash: i.Header.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: common.Address{}, + Root: i.Header.Root, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, + Bloom: i.Header.Bloom, + Difficulty: common.Big0, + Number: i.Header.Number, + GasLimit: i.Header.GasLimit, + GasUsed: i.Header.GasUsed, + Time: i.Header.Time, + Extra: i.Header.Extra, + MixDigest: i.Header.MixDigest, + BaseFee: i.Header.BaseFee, + WithdrawalsHash: i.Header.WithdrawalsHash, + BlobGasUsed: i.Header.BlobGasUsed, + ExcessBlobGas: i.Header.ExcessBlobGas, + ParentBeaconRoot: i.Header.ParentBeaconBlockRoot, } // Fill optional values. diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index c522379387..312f427d4c 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -59,7 +59,7 @@ type ExecutionResult struct { BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"` CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` - CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"` + CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"` } type ommer struct { @@ -85,7 +85,7 @@ type stEnv struct { Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` BaseFee *big.Int `json:"currentBaseFee,omitempty"` ParentUncleHash common.Hash `json:"parentUncleHash"` - ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *uint64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` @@ -197,6 +197,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) continue } + if tx.Type() == types.BlobTxType { + blobGasUsed += uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes())) + } msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee) if err != nil { log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) @@ -226,9 +229,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, gaspool.SetGas(prevGas) continue } - if tx.Type() == types.BlobTxType { - blobGasUsed += params.BlobTxBlobGasPerBlob - } includedTxs = append(includedTxs, tx) if hashError != nil { return nil, nil, NewError(ErrorMissingBlockhash, hashError) diff --git a/cmd/evm/internal/t8ntool/gen_header.go b/cmd/evm/internal/t8ntool/gen_header.go index 76228394dc..a8c8668978 100644 --- a/cmd/evm/internal/t8ntool/gen_header.go +++ b/cmd/evm/internal/t8ntool/gen_header.go @@ -18,23 +18,26 @@ var _ = (*headerMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (h header) MarshalJSON() ([]byte, error) { type header struct { - ParentHash common.Hash `json:"parentHash"` - OmmerHash *common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom types.Bloom `json:"logsBloom"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` - GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData"` - MixDigest common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } var enc header enc.ParentHash = h.ParentHash @@ -54,29 +57,35 @@ func (h header) MarshalJSON() ([]byte, error) { enc.Nonce = h.Nonce enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee) enc.WithdrawalsHash = h.WithdrawalsHash + enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed) + enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas) + enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
func (h *header) UnmarshalJSON(input []byte) error { type header struct { - ParentHash *common.Hash `json:"parentHash"` - OmmerHash *common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root *common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom *types.Bloom `json:"logsBloom"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` - GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - GasUsed *math.HexOrDecimal64 `json:"gasUsed"` - Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` - Extra *hexutil.Bytes `json:"extraData"` - MixDigest *common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash *common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root *common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom *types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed *math.HexOrDecimal64 `json:"gasUsed"` + Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra *hexutil.Bytes `json:"extraData"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } var dec header if err := json.Unmarshal(input, &dec); err != nil { @@ -137,5 +146,14 @@ func (h *header) UnmarshalJSON(input []byte) error { if dec.WithdrawalsHash != nil { h.WithdrawalsHash = dec.WithdrawalsHash } + if dec.BlobGasUsed != nil { + h.BlobGasUsed = (*uint64)(dec.BlobGasUsed) + } + if dec.ExcessBlobGas != nil { + h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } + if dec.ParentBeaconBlockRoot != nil { + h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot + } return nil } diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index bb195ef64b..d47db4a876 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -33,7 +33,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) { Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` ParentUncleHash common.Hash `json:"parentUncleHash"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` @@ -81,7 +81,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { Withdrawals []*types.Withdrawal 
`json:"withdrawals,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` ParentUncleHash *common.Hash `json:"parentUncleHash"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 396b341d2e..600bc460f7 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -334,7 +334,7 @@ func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *pa txsWithKeys = inputData.Txs } // We may have to sign the transactions. - signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp) + signer := types.LatestSignerForChainID(chainConfig.ChainID) return signUnsignedTransactions(txsWithKeys, signer) } diff --git a/cmd/evm/testdata/28/env.json b/cmd/evm/testdata/28/env.json index 5056fe29a4..82f22ac62f 100644 --- a/cmd/evm/testdata/28/env.json +++ b/cmd/evm/testdata/28/env.json @@ -9,8 +9,7 @@ "parentDifficulty" : "0x00", "parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "withdrawals" : [ - ], + "withdrawals" : [], "parentBaseFee" : "0x0a", "parentGasUsed" : "0x00", "parentGasLimit" : "0x7fffffffffffffff", @@ -20,4 +19,4 @@ "0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6" }, "parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" -} \ No newline at end of file +} diff --git a/cmd/evm/testdata/28/exp.json b/cmd/evm/testdata/28/exp.json index a55ce0aec4..75c715e972 100644 --- a/cmd/evm/testdata/28/exp.json +++ b/cmd/evm/testdata/28/exp.json @@ -42,6 +42,6 @@ "currentBaseFee": "0x9", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "currentExcessBlobGas": "0x0", - "currentBlobGasUsed": "0x20000" + "blobGasUsed": "0x20000" } -} \ No newline at end of file +} diff --git a/cmd/evm/testdata/29/exp.json b/cmd/evm/testdata/29/exp.json index 83e1db26f9..c4c001ec14 100644 --- a/cmd/evm/testdata/29/exp.json +++ b/cmd/evm/testdata/29/exp.json @@ -40,6 +40,6 @@ "currentBaseFee": "0x9", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "currentExcessBlobGas": "0x0", - "currentBlobGasUsed": "0x0" + "blobGasUsed": "0x0" } -} \ No newline at end of file +} From 339a4cf056bb202851e4bb221928e4309d74e175 Mon Sep 17 00:00:00 2001 From: 0xbstn Date: Tue, 3 Oct 2023 13:44:01 +0200 Subject: [PATCH 12/26] core: fix typos (#28238) --- core/types/hashing.go | 2 +- core/types/state_account.go | 2 +- core/types/transaction.go | 6 +++--- core/types/transaction_signing.go | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/core/types/hashing.go b/core/types/hashing.go index 9a6a80ac52..224d7a87ea 100644 --- a/core/types/hashing.go +++ b/core/types/hashing.go @@ -95,7 +95,7 @@ type DerivableList interface { func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { buf.Reset() list.EncodeIndex(i, buf) - // It's really unfortunate that we need to do perform this copy. 
+ // It's really unfortunate that we need to perform this copy. // StackTrie holds onto the values until Hash is called, so the values // written to it must not alias. return common.CopyBytes(buf.Bytes()) diff --git a/core/types/state_account.go b/core/types/state_account.go index 314f4943ec..ad07ca3f3a 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -87,7 +87,7 @@ func SlimAccountRLP(account StateAccount) []byte { return data } -// FullAccount decodes the data on the 'slim RLP' format and return +// FullAccount decodes the data on the 'slim RLP' format and returns // the consensus format account. func FullAccount(data []byte) (*StateAccount, error) { var slim SlimAccount diff --git a/core/types/transaction.go b/core/types/transaction.go index 78a1b9ba64..6f83c21d8f 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -168,7 +168,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { } // UnmarshalBinary decodes the canonical encoding of transactions. -// It supports legacy RLP transactions and EIP2718 typed transactions. +// It supports legacy RLP transactions and EIP-2718 typed transactions. func (tx *Transaction) UnmarshalBinary(b []byte) error { if len(b) > 0 && b[0] > 0x7f { // It's a legacy transaction. @@ -180,7 +180,7 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error { tx.setDecoded(&data, uint64(len(b))) return nil } - // It's an EIP2718 typed transaction envelope. + // It's an EIP-2718 typed transaction envelope. inner, err := tx.decodeTyped(b) if err != nil { return err @@ -395,7 +395,7 @@ func (tx *Transaction) BlobGasFeeCap() *big.Int { return nil } -// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise. +// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise. func (tx *Transaction) BlobHashes() []common.Hash { if blobtx, ok := tx.inner.(*BlobTx); ok { return blobtx.BlobHashes diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index cd57effcb1..9e26642f75 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -57,7 +57,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint } // LatestSigner returns the 'most permissive' Signer available for the given chain -// configuration. Specifically, this enables support of all types of transacrions +// configuration. Specifically, this enables support of all types of transactions // when their respective forks are scheduled to occur at any block number (or time) // in the chain config. // From 2091ebdf5e77c641f225114add1dcc3c2d50b270 Mon Sep 17 00:00:00 2001 From: Chirag Garg <38765776+DeVil2O@users.noreply.github.com> Date: Tue, 3 Oct 2023 17:16:22 +0530 Subject: [PATCH 13/26] trie: fix benchmark by ensuring key immutability (#28221) This change fixes the bug in a benchmark, where the input to the trie is reused in a way which is not correct. 
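Concretely, the pre-fix pattern looked roughly like the sketch below (illustrative, not the patched code): the trie keeps a reference to the value slice it is handed rather than copying it, so passing the shared key buffer as the value means every later PutUint64 on that buffer rewrites values that were already inserted.

    // Hedged sketch of the aliasing bug: k is used as both key and value, and
    // the trie stores the value slice by reference, so mutating k on the next
    // iteration corrupts the value stored for the previous key.
    k := make([]byte, 32)
    for i := 0; i < benchElemCount; i++ {
        binary.LittleEndian.PutUint64(k, uint64(i))
        trie.MustUpdate(k, k) // value aliases the reused key buffer
    }

The fix below allocates a fresh value slice per iteration, so stored values can no longer be mutated after insertion.
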
--------- Co-authored-by: Martin Holst Swende --- trie/trie_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/trie/trie_test.go b/trie/trie_test.go index 35ccc77201..8078770e7a 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -614,7 +614,9 @@ func benchGet(b *testing.B) { k := make([]byte, 32) for i := 0; i < benchElemCount; i++ { binary.LittleEndian.PutUint64(k, uint64(i)) - trie.MustUpdate(k, k) + v := make([]byte, 32) + binary.LittleEndian.PutUint64(v, uint64(i)) + trie.MustUpdate(k, v) } binary.LittleEndian.PutUint64(k, benchElemCount/2) @@ -630,8 +632,10 @@ func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie { k := make([]byte, 32) b.ReportAllocs() for i := 0; i < b.N; i++ { + v := make([]byte, 32) e.PutUint64(k, uint64(i)) - trie.MustUpdate(k, k) + e.PutUint64(v, uint64(i)) + trie.MustUpdate(k, v) } return trie } From 7963c4e808811048a20ebbe37b10b1d3aff14d7a Mon Sep 17 00:00:00 2001 From: hyunchel <3271191+hyunchel@users.noreply.github.com> Date: Tue, 3 Oct 2023 07:48:36 -0400 Subject: [PATCH 14/26] rpc: fix erroneous error-message in test (#28227) --- rpc/server_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/rpc/server_test.go b/rpc/server_test.go index 47a15b610a..9d1c7fb5f0 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -32,7 +32,8 @@ func TestServerRegisterName(t *testing.T) { server := NewServer() service := new(testService) - if err := server.RegisterName("test", service); err != nil { + svcName := "test" + if err := server.RegisterName(svcName, service); err != nil { t.Fatalf("%v", err) } @@ -40,9 +41,9 @@ func TestServerRegisterName(t *testing.T) { t.Fatalf("Expected 2 service entries, got %d", len(server.services.services)) } - svc, ok := server.services.services["test"] + svc, ok := server.services.services[svcName] if !ok { - t.Fatalf("Expected service calc to be registered") + t.Fatalf("Expected service %s to be registered", svcName) } wantCallbacks := 14 From bc6d184872889224480cf9df58b0539b210ffa9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 3 Oct 2023 15:03:19 +0300 Subject: [PATCH 15/26] cmd/devp2p, eth: drop eth/66 (#28239) * cmd/devp2p, eth: drop eth/66 * eth/protocols/eth: yes sir, linter --- cmd/devp2p/internal/ethtest/chain_test.go | 6 +- cmd/devp2p/internal/ethtest/helpers.go | 13 +- cmd/devp2p/internal/ethtest/suite.go | 38 ++-- cmd/devp2p/internal/ethtest/types.go | 26 +-- eth/downloader/downloader_test.go | 136 ++++++------ eth/downloader/fetchers.go | 8 +- eth/downloader/fetchers_concurrent_bodies.go | 2 +- eth/downloader/fetchers_concurrent_headers.go | 2 +- .../fetchers_concurrent_receipts.go | 2 +- eth/downloader/skeleton.go | 2 +- eth/downloader/skeleton_test.go | 6 +- eth/fetcher/block_fetcher.go | 4 +- eth/fetcher/block_fetcher_test.go | 4 +- eth/handler.go | 2 +- eth/handler_eth.go | 4 +- eth/handler_eth_test.go | 15 +- eth/protocols/eth/handler.go | 66 ++---- eth/protocols/eth/handler_test.go | 208 ++++-------------- eth/protocols/eth/handlers.go | 138 ++++-------- eth/protocols/eth/handshake_test.go | 3 +- eth/protocols/eth/peer.go | 98 +++------ eth/protocols/eth/protocol.go | 197 ++++++++--------- eth/protocols/eth/protocol_test.go | 102 ++++----- eth/sync_test.go | 2 +- 24 files changed, 401 insertions(+), 683 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go index 67221923a6..de6acfdcda 100644 --- a/cmd/devp2p/internal/ethtest/chain_test.go +++ 
b/cmd/devp2p/internal/ethtest/chain_test.go @@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) { }{ { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: uint64(2)}, Amount: uint64(5), Skip: 1, @@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)}, Amount: uint64(3), Skip: 0, @@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: chain.Head().Hash()}, Amount: uint64(1), Skip: 0, diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index bc901bdeb0..a0339b88cb 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -62,7 +62,6 @@ func (s *Suite) dial() (*Conn, error) { } // set default p2p capabilities conn.caps = []p2p.Cap{ - {Name: "eth", Version: 66}, {Name: "eth", Version: 67}, {Name: "eth", Version: 68}, } @@ -237,8 +236,8 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { return errorf("could not get headers for inbound header request: %v", err) } resp := &BlockHeaders{ - RequestId: msg.ReqID(), - BlockHeadersPacket: eth.BlockHeadersPacket(headers), + RequestId: msg.ReqID(), + BlockHeadersRequest: eth.BlockHeadersRequest(headers), } if err := c.Write(resp); err != nil { return errorf("could not write to connection: %v", err) @@ -267,7 +266,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint if !ok { return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg)) } - headers := []*types.Header(resp.BlockHeadersPacket) + headers := []*types.Header(resp.BlockHeadersRequest) return headers, nil } @@ -379,7 +378,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error { conn.SetReadDeadline(time.Now().Add(20 * time.Second)) // create request req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: block.Hash()}, Amount: 1, }, @@ -604,8 +603,8 @@ func (s *Suite) hashAnnounce() error { pretty.Sdump(blockHeaderReq)) } err = sendConn.Write(&BlockHeaders{ - RequestId: blockHeaderReq.ReqID(), - BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()}, + RequestId: blockHeaderReq.ReqID(), + BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()}, }) if err != nil { return fmt.Errorf("failed to write to connection: %v", err) diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 815353be72..0b56c8cf4b 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { } // write request req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()}, Amount: 2, Skip: 1, @@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { // create two requests req1 := &GetBlockHeaders{ RequestId: uint64(111), - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + 
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { } req2 := &GetBlockHeaders{ RequestId: uint64(222), - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected headers for request 2: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersPacket) { + if !headersMatch(expected1, headers1.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersPacket) { + if !headersMatch(expected2, headers2.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { reqID := uint64(1234) request1 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: 1, }, @@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { } request2 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: 33, }, @@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected block headers: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersPacket) { + if !headersMatch(expected1, headers1.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersPacket) { + if !headersMatch(expected2, headers2.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) { t.Fatalf("peering failed: %v", err) } req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: 0}, Amount: 2, }, @@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { // create block bodies request req := &GetBlockBodies{ RequestId: uint64(55), - GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ + GetBlockBodiesRequest: eth.GetBlockBodiesRequest{ s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash(), }, @@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { if !ok { t.Fatalf("unexpected: %s", pretty.Sdump(msg)) } - bodies := resp.BlockBodiesPacket + bodies := resp.BlockBodiesResponse t.Logf("received %d block bodies", len(bodies)) - if len(bodies) != len(req.GetBlockBodiesPacket) { + if len(bodies) != len(req.GetBlockBodiesRequest) { t.Fatalf("wrong bodies in response: expected %d bodies, "+ - "got %d", len(req.GetBlockBodiesPacket), len(bodies)) + "got %d", len(req.GetBlockBodiesRequest), len(bodies)) } } @@ -481,8 +481,8 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { hashes = append(hashes, hash) } getTxReq := &GetPooledTransactions{ - RequestId: 1234, - GetPooledTransactionsPacket: hashes, + RequestId: 1234, + GetPooledTransactionsRequest: hashes, } if err = conn.Write(getTxReq); err != nil { t.Fatalf("could not write to conn: %v", err) @@ -490,7 +490,7 
@@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { // check that all received transactions match those that were sent to node switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { case *PooledTransactions: - for _, gotTx := range msg.PooledTransactionsPacket { + for _, gotTx := range msg.PooledTransactionsResponse { if _, exists := hashMap[gotTx.Hash()]; !exists { t.Fatalf("unexpected tx received: %v", gotTx.Hash()) } @@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) { msg := conn.readAndServe(s.chain, timeout) switch msg := msg.(type) { case *GetPooledTransactions: - if len(msg.GetPooledTransactionsPacket) != len(hashes) { - t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) + if len(msg.GetPooledTransactionsRequest) != len(hashes) { + t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest)) } return diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index afa9a9c8c6..805d7a81b9 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 } func (msg Transactions) ReqID() uint64 { return 18 } // GetBlockHeaders represents a block header query. -type GetBlockHeaders eth.GetBlockHeadersPacket66 +type GetBlockHeaders eth.GetBlockHeadersPacket func (msg GetBlockHeaders) Code() int { return 19 } func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId } -type BlockHeaders eth.BlockHeadersPacket66 +type BlockHeaders eth.BlockHeadersPacket func (msg BlockHeaders) Code() int { return 20 } func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId } // GetBlockBodies represents a GetBlockBodies request -type GetBlockBodies eth.GetBlockBodiesPacket66 +type GetBlockBodies eth.GetBlockBodiesPacket func (msg GetBlockBodies) Code() int { return 21 } func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId } // BlockBodies is the network packet for block content distribution. -type BlockBodies eth.BlockBodiesPacket66 +type BlockBodies eth.BlockBodiesPacket func (msg BlockBodies) Code() int { return 22 } func (msg BlockBodies) ReqID() uint64 { return msg.RequestId } @@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 } func (msg NewBlock) ReqID() uint64 { return 0 } // NewPooledTransactionHashes66 is the network packet for the tx hash propagation message. 
-type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66 +type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67 func (msg NewPooledTransactionHashes66) Code() int { return 24 } func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 } @@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68 func (msg NewPooledTransactionHashes) Code() int { return 24 } func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 } -type GetPooledTransactions eth.GetPooledTransactionsPacket66 +type GetPooledTransactions eth.GetPooledTransactionsPacket func (msg GetPooledTransactions) Code() int { return 25 } func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId } -type PooledTransactions eth.PooledTransactionsPacket66 +type PooledTransactions eth.PooledTransactionsPacket func (msg PooledTransactions) Code() int { return 26 } func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId } @@ -180,25 +180,25 @@ func (c *Conn) Read() Message { case (Status{}).Code(): msg = new(Status) case (GetBlockHeaders{}).Code(): - ethMsg := new(eth.GetBlockHeadersPacket66) + ethMsg := new(eth.GetBlockHeadersPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockHeaders)(ethMsg) case (BlockHeaders{}).Code(): - ethMsg := new(eth.BlockHeadersPacket66) + ethMsg := new(eth.BlockHeadersPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*BlockHeaders)(ethMsg) case (GetBlockBodies{}).Code(): - ethMsg := new(eth.GetBlockBodiesPacket66) + ethMsg := new(eth.GetBlockBodiesPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockBodies)(ethMsg) case (BlockBodies{}).Code(): - ethMsg := new(eth.BlockBodiesPacket66) + ethMsg := new(eth.BlockBodiesPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } @@ -217,13 +217,13 @@ func (c *Conn) Read() Message { } msg = new(NewPooledTransactionHashes66) case (GetPooledTransactions{}.Code()): - ethMsg := new(eth.GetPooledTransactionsPacket66) + ethMsg := new(eth.GetPooledTransactionsPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetPooledTransactions)(ethMsg) case (PooledTransactions{}.Code()): - ethMsg := new(eth.PooledTransactionsPacket66) + ethMsg := new(eth.PooledTransactionsPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index ffe445ea88..e4875b959a 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { // function can be used to retrieve batches of headers from the particular peer. 
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: origin, }, @@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i // function can be used to retrieve batches of headers from the particular peer. func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: origin, }, @@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesPacket)(&bodies), + Res: (*eth.BlockBodiesResponse)(&bodies), Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -317,7 +317,7 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan * } res := ð.Response{ Req: req, - Res: (*eth.ReceiptsPacket)(&receipts), + Res: (*eth.ReceiptsResponse)(&receipts), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { } } -func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } -func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) } -func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } +func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } +func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } +func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) } func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) } func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) } @@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. 
-func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } -func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } +func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) } +func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) } func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) } func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) } @@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. -func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } -func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } -func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } +func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } +func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } +func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) } func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) } func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) } @@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronising against a much shorter but much heavier fork works // currently and is not dropped. -func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } -func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } +func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } +func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } +func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) } func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) } func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) } @@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head, ensuring that malicious peers cannot waste resources by feeding // long dead chains. 
-func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } -func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } +func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } +func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } +func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) } func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) } func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) } @@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head for short but heavy forks too. These are a bit special because they // take different ancestor lookup paths. -func TestBoundedHeavyForkedSync66Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) +func TestBoundedHeavyForkedSync68Full(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedHeavyForkedSync66Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) +func TestBoundedHeavyForkedSync68Snap(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedHeavyForkedSync66Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) +func TestBoundedHeavyForkedSync68Light(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) } func TestBoundedHeavyForkedSync67Full(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH67, FullSync) @@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { } // Tests that a canceled download wipes all previously accumulated state. -func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } -func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } -func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } +func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } +func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } +func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) } func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) } func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) } @@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { } // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
-func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } -func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } -func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } +func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } +func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } +func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) } func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) } func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) } @@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. -func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } -func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } -func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } +func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } +func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } +func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) } func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) } func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) } @@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(blockCacheMaxItems - 15) // Create peers of every type - tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) + tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:]) // Synchronise with the requested peer and make sure all blocks were retrieved @@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { assertOwnChain(t, tester, len(chain.blocks)) // Check that no peers have been dropped off - for _, version := range []int{66, 67} { + for _, version := range []int{68, 67} { peer := fmt.Sprintf("peer %d", version) if _, ok := tester.peers[peer]; !ok { t.Errorf("%s dropped", peer) @@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. 
-func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } -func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } -func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } +func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } +func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } +func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) } func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) } func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) } @@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains. -func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } -func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } -func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } +func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } +func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } +func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) } func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) } func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) } @@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. -func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } -func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } -func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } +func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } +func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } +func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) } func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) } func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) } @@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that a peer advertising a high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. 
-func TestHighTDStarvationAttack66Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, FullSync) +func TestHighTDStarvationAttack68Full(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, FullSync) } -func TestHighTDStarvationAttack66Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, SnapSync) +func TestHighTDStarvationAttack68Snap(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, SnapSync) } -func TestHighTDStarvationAttack66Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, LightSync) +func TestHighTDStarvationAttack68Light(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, LightSync) } func TestHighTDStarvationAttack67Full(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH67, FullSync) @@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } +func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) } func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { @@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. -func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } -func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } -func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } +func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } +func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } +func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) } func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) } func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) } @@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync // Tests that synchronisation progress (origin block number and highest block // number) is tracked and updated correctly in case of a fork (or manual head // revertal). 
-func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } -func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } -func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } +func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } +func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } +func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) } func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) } func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) } @@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if synchronisation is aborted due to some failure, then the progress // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. -func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } -func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } -func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } +func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } +func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } +func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) } func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) } func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) } @@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if an attacker fakes a chain height, after the attack is detected, // the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } -func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } -func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } +func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } +func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } +func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) } func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) } func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) } @@ -1330,8 +1330,10 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. 
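The renamed stubs above keep one function per protocol/mode pair so every combination appears as its own top-level test. An equivalent table-driven form — shown only as a hypothetical alternative, not how downloader_test.go is organised, and assuming the existing testCanonSync helper, the SyncMode constants, and the testing/fmt/eth imports — would loop over the supported versions instead:

// Hypothetical consolidation of the per-version stubs; not part of the patch.
func TestCanonicalSynchronisationMatrix(t *testing.T) {
	for _, protocol := range []uint{eth.ETH68, eth.ETH67} {
		for _, mode := range []SyncMode{FullSync, SnapSync, LightSync} {
			protocol, mode := protocol, mode // capture range variables for the subtest closure
			t.Run(fmt.Sprintf("eth%d-%v", protocol, mode), func(t *testing.T) {
				testCanonSync(t, protocol, mode)
			})
		}
	}
}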
-func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) } -func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) } +func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } +func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } +func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) } +func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) } func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go index 021e8c4f9b..cc4279b0da 100644 --- a/eth/downloader/fetchers.go +++ b/eth/downloader/fetchers.go @@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } @@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 9440972c6d..5105fda66b 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the body data and delivering it to the downloader's queue. 
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack() + txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack() hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2]) diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go index 84c7f20986..8201f4ca74 100644 --- a/eth/downloader/fetchers_concurrent_headers.go +++ b/eth/downloader/fetchers_concurrent_headers.go @@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the header data and delivering it to the downloader's queue. func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - headers := *packet.Res.(*eth.BlockHeadersPacket) + headers := *packet.Res.(*eth.BlockHeadersRequest) hashes := packet.Meta.([]common.Hash) accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go index 1c853c2184..3169f030ba 100644 --- a/eth/downloader/fetchers_concurrent_receipts.go +++ b/eth/downloader/fetchers_concurrent_receipts.go @@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the receipt data and delivering it to the downloader's queue. 
func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - receipts := *packet.Res.(*eth.ReceiptsPacket) + receipts := *packet.Res.(*eth.ReceiptsResponse) hashes := packet.Meta.([]common.Hash) // {receipt hashes} accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index a07e1695f5..4f1f462048 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) { case res := <-resCh: // Headers successfully retrieved, update the metrics - headers := *res.Res.(*eth.BlockHeadersPacket) + headers := *res.Res.(*eth.BlockHeadersRequest) headerReqTimer.Update(time.Since(start)) s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers)) diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 6a76d78ac8..c31007765a 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski } res := &eth.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error), @@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // Create a peer set to feed headers through peerset := newPeerSet() for _, peer := range tt.peers { - peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id))) + peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id))) } // Create a peer dropper to track malicious peers dropped := make(map[string]int) @@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { skeleton.Sync(tt.newHead, nil, true) } if tt.newPeer != nil { - if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { + if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { t.Errorf("test %d: failed to register new peer: %v", i, err) } } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 35608031d9..8751c4e3ea 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() { select { case res := <-resCh: res.Done <- nil - f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) + f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time)) case <-timeout.C: // The peer didn't respond in time. The request @@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() { case res := <-resCh: res.Done <- nil // Ignoring withdrawals here, since the block fetcher is not used post-merge.
- txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack() + txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack() f.FilterBodies(peer, txs, uncles, time.Now()) case <-timeout.C: diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 7c490df3f7..6927300b1d 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t } res := &eth.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Time: drift, Done: make(chan error, 1), // Ignore the returned status } @@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ } res := &eth.Response{ Req: req, - Res: (*eth.BlockBodiesPacket)(&bodies), + Res: (*eth.BlockBodiesResponse)(&bodies), Time: drift, Done: make(chan error, 1), // Ignore the returned status } diff --git a/eth/handler.go b/eth/handler.go index 33b9683740..f731efe1b8 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -414,7 +414,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { select { case res := <-resCh: - headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest)) if len(headers) == 0 { // Required blocks are allowed to be missing if the remote // node is not yet synced diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 2aba16f928..3a0944640e 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -66,7 +66,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { case *eth.NewBlockPacket: return h.handleBlockBroadcast(peer, packet.Block, packet.TD) - case *eth.NewPooledTransactionHashesPacket66: + case *eth.NewPooledTransactionHashesPacket67: return h.txFetcher.Notify(peer.ID(), *packet) case *eth.NewPooledTransactionHashesPacket68: @@ -75,7 +75,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { case *eth.TransactionsPacket: return h.txFetcher.Enqueue(peer.ID(), *packet, false) - case *eth.PooledTransactionsPacket: + case *eth.PooledTransactionsResponse: return h.txFetcher.Enqueue(peer.ID(), *packet, true) default: diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index a16abc5ed6..4cdfdf47b8 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.blockBroadcasts.Send(packet.Block) return nil - case *eth.NewPooledTransactionHashesPacket66: + case *eth.NewPooledTransactionHashesPacket67: h.txAnnounces.Send(([]common.Hash)(*packet)) return nil @@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil - case *eth.PooledTransactionsPacket: + case *eth.PooledTransactionsResponse: h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil @@ -81,7 +81,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { // Tests that peers are correctly accepted (or rejected) based on the advertised // fork IDs in the protocol handshake.
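The dispatch changes above keep two announcement shapes side by side: the pre-eth/68 packet is a bare hash list (the handler converts it straight to []common.Hash), while the eth/68 packet is generally understood to also carry a type and size for every hash so the receiver can decide what to fetch before requesting it. A rough, self-contained illustration with local stand-in types — not the real definitions from eth/protocols/eth — is:

package main

import "fmt"

// hashAnnouncement stands in for the pre-eth/68 format: hashes only.
type hashAnnouncement []string

// typedAnnouncement stands in for the eth/68 format: each hash is paired
// with the transaction's type byte and its encoded size.
type typedAnnouncement struct {
	Types  []byte
	Sizes  []uint32
	Hashes []string
}

// wantsToFetch sketches why the richer format helps: the receiver can skip
// announcements it is not interested in (here: anything over 128 KiB).
func wantsToFetch(ann typedAnnouncement, i int) bool {
	return ann.Sizes[i] <= 128*1024
}

func main() {
	legacy := hashAnnouncement{"0xaa", "0xbb"}
	fmt.Println("legacy announcement, must fetch blindly:", len(legacy), "hashes")

	ann := typedAnnouncement{
		Types:  []byte{0x02, 0x03},     // illustrative type bytes
		Sizes:  []uint32{120, 200_000}, // illustrative encoded sizes
		Hashes: []string{"0xaa", "0xbb"},
	}
	for i, h := range ann.Hashes {
		fmt.Printf("%s: type %#x, size %d, fetch=%v\n", h, ann.Types[i], ann.Sizes[i], wantsToFetch(ann, i))
	}
}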
-func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) } func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) } @@ -237,7 +236,6 @@ func testForkIDSplit(t *testing.T, protocol uint) { } // Tests that received transactions are added to the local pool. -func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) } func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) } func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) } @@ -296,7 +294,6 @@ func testRecvTransactions(t *testing.T, protocol uint) { } // This test checks that pending transactions are sent. -func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) } func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) } func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) } @@ -356,7 +353,7 @@ func testSendTransactions(t *testing.T, protocol uint) { seen := make(map[common.Hash]struct{}) for len(seen) < len(insert) { switch protocol { - case 66, 67, 68: + case 67, 68: select { case hashes := <-anns: for _, hash := range hashes { @@ -382,7 +379,6 @@ func testSendTransactions(t *testing.T, protocol uint) { // Tests that transactions get propagated to all attached peers, either via direct // broadcasts or via announcements/retrievals. -func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) } func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) } func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) } @@ -490,8 +486,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { defer sourcePipe.Close() defer sinkPipe.Close() - sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) - sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) + sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) + sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) defer sourcePeer.Close() defer sinkPeer.Close() @@ -543,7 +539,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { // Tests that a propagated malformed block (uncles or transactions don't match // with the hashes in the header) gets discarded and not broadcast forward. -func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) } func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) } func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) } diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index b2ce883bc5..a7d6ed25a9 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -23,7 +23,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" @@ -45,10 +44,6 @@ const ( // nowadays, the practical limit will always be softResponseLimit. maxBodiesServe = 1024 - // maxNodeDataServe is the maximum number of state trie nodes to serve. 
This - // number is there to limit the number of disk lookups. - maxNodeDataServe = 1024 - // maxReceiptsServe is the maximum number of block receipts to serve. This // number is mostly there to limit the number of disk lookups. With block // containing 200+ transactions nowadays, the practical limit will always @@ -100,10 +95,6 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2 for _, version := range ProtocolVersions { version := version // Closure - // Path scheme does not support GetNodeData, don't advertise eth66 on it - if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme { - continue - } protocols = append(protocols, p2p.Protocol{ Name: ProtocolName, Version: version, @@ -171,36 +162,19 @@ type Decoder interface { Time() time.Time } -var eth66 = map[uint64]msgHandler{ - NewBlockHashesMsg: handleNewBlockhashes, - NewBlockMsg: handleNewBlock, - TransactionsMsg: handleTransactions, - NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetNodeDataMsg: handleGetNodeData66, - NodeDataMsg: handleNodeData66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, -} - var eth67 = map[uint64]msgHandler{ NewBlockHashesMsg: handleNewBlockhashes, NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, - NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67, + GetBlockHeadersMsg: handleGetBlockHeaders, + BlockHeadersMsg: handleBlockHeaders, + GetBlockBodiesMsg: handleGetBlockBodies, + BlockBodiesMsg: handleBlockBodies, + GetReceiptsMsg: handleGetReceipts, + ReceiptsMsg: handleReceipts, + GetPooledTransactionsMsg: handleGetPooledTransactions, + PooledTransactionsMsg: handlePooledTransactions, } var eth68 = map[uint64]msgHandler{ @@ -208,14 +182,14 @@ var eth68 = map[uint64]msgHandler{ NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, + GetBlockHeadersMsg: handleGetBlockHeaders, + BlockHeadersMsg: handleBlockHeaders, + GetBlockBodiesMsg: handleGetBlockBodies, + BlockBodiesMsg: handleBlockBodies, + GetReceiptsMsg: handleGetReceipts, + ReceiptsMsg: handleReceipts, + GetPooledTransactionsMsg: handleGetPooledTransactions, + PooledTransactionsMsg: handlePooledTransactions, } // handleMessage is invoked whenever an inbound message is received from a remote @@ -231,14 +205,10 @@ func handleMessage(backend Backend, peer *Peer) error { } defer msg.Discard() - var 
handlers = eth66 - if peer.Version() == ETH67 { - handlers = eth67 - } + var handlers = eth67 if peer.Version() >= ETH68 { handlers = eth68 } - // Track the amount of time it takes to serve the request and run the handler if metrics.Enabled { h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index bf2874721a..41e18bfb3e 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" @@ -151,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error { } // Tests that block headers can be retrieved from a remote chain based on user queries. -func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) } func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) } func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) } @@ -178,29 +176,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { // Create a batch of tests for various scenarios limit := uint64(maxHeadersServe) tests := []struct { - query *GetBlockHeadersPacket // The query to execute for header retrieval - expect []common.Hash // The hashes of the block whose headers are expected + query *GetBlockHeadersRequest // The query to execute for header retrieval + expect []common.Hash // The hashes of the block whose headers are expected }{ // A single random block should be retrievable by hash { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // A single random block should be retrievable by number { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // Multiple headers should be retrievable in both directions { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), @@ -209,14 +207,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Multiple headers with skip lists should be retrievable { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), 
backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), @@ -225,31 +223,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // The chain endpoints should be retrievable { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, { // If the peer requests a bit into the future, we deliver what we have - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, // Ensure protocol limits are honored { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit), }, // Check that requesting more than available is handled gracefully { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(0).Hash(), @@ -257,13 +255,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check that requesting more than available is handled gracefully, even if mid skip { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -271,7 +269,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a 
corner case where requesting more can iterate past the endpoints { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(2).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -280,24 +278,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a corner case where skipping overflow loops back into the chain start { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, []common.Hash{ backend.chain.GetBlockByNumber(3).Hash(), }, }, // Check a corner case where skipping overflow loops back to the same header { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, []common.Hash{ backend.chain.GetBlockByNumber(1).Hash(), }, }, // Check that non existing headers aren't returned { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, []common.Hash{}, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, []common.Hash{}, }, } @@ -309,13 +307,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: 123, - GetBlockHeadersPacket: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ + RequestId: 123, + GetBlockHeadersRequest: tt.query, }) - if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{ - RequestId: 123, - BlockHeadersPacket: headers, + if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{ + RequestId: 123, + BlockHeadersRequest: headers, }); err != nil { t.Errorf("test %d: headers mismatch: %v", i, err) } @@ -324,11 +322,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: 456, - GetBlockHeadersPacket: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ + RequestId: 456, + GetBlockHeadersRequest: tt.query, }) - expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers} + expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers} if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil { t.Errorf("test %d by hash: headers mismatch: %v", i, err) } @@ -338,7 +336,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { } // Tests that block contents can be retrieved from a remote chain based on their hashes. 
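The test table above drives the renamed envelope types, where every query and reply carries a RequestId so a response can be matched to the query that produced it. A minimal stand-alone sketch of that pairing — with simplified local types instead of the real GetBlockHeadersPacket/BlockHeadersPacket — could be:

package main

import (
	"errors"
	"fmt"
)

// getBlockHeadersRequest is a simplified stand-in for the query body: where to
// start, how many headers, and in which direction.
type getBlockHeadersRequest struct {
	Origin  uint64
	Amount  uint64
	Reverse bool
}

// getBlockHeadersPacket / blockHeadersPacket stand in for the wire envelopes:
// the same RequestId travels out with the query and back with the reply.
type getBlockHeadersPacket struct {
	RequestId uint64
	getBlockHeadersRequest
}

type blockHeadersPacket struct {
	RequestId uint64
	Headers   []string // hashes stand in for full headers here
}

// match pairs a reply with its originating query or rejects it.
func match(req getBlockHeadersPacket, res blockHeadersPacket) error {
	if req.RequestId != res.RequestId {
		return errors.New("reply does not belong to this request")
	}
	if uint64(len(res.Headers)) > req.Amount {
		return errors.New("peer returned more headers than asked for")
	}
	return nil
}

func main() {
	req := getBlockHeadersPacket{RequestId: 123, getBlockHeadersRequest: getBlockHeadersRequest{Origin: 10, Amount: 3}}
	res := blockHeadersPacket{RequestId: 123, Headers: []string{"h10", "h11", "h12"}}
	if err := match(req, res); err != nil {
		fmt.Println("dropping reply:", err)
		return
	}
	fmt.Printf("request %d answered with %d headers\n", req.RequestId, len(res.Headers))
}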
-func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) } func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) } func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) } @@ -420,139 +417,20 @@ func testGetBlockBodies(t *testing.T, protocol uint) { } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ - RequestId: 123, - GetBlockBodiesPacket: hashes, + p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{ + RequestId: 123, + GetBlockBodiesRequest: hashes, }) - if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{ - RequestId: 123, - BlockBodiesPacket: bodies, + if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{ + RequestId: 123, + BlockBodiesResponse: bodies, }); err != nil { t.Fatalf("test %d: bodies mismatch: %v", i, err) } } } -// Tests that the state trie nodes can be retrieved based on hashes. -func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) } -func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) } -func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) } - -func testGetNodeData(t *testing.T, protocol uint, drop bool) { - t.Parallel() - - // Define three accounts to simulate transactions with - acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) - - signer := types.HomesteadSigner{} - // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test) - generator := func(i int, block *core.BlockGen) { - switch i { - case 0: - // In block 1, the test bank sends account #1 some ether. - tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) - block.AddTx(tx) - case 1: - // In block 2, the test bank sends some more ether to account #1. - // acc1Addr passes it on to account #2. - tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) - tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key) - block.AddTx(tx1) - block.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by account #2. - block.SetCoinbase(acc2Addr) - block.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := block.PrevBlock(1).Header() - b2.Extra = []byte("foo") - block.AddUncle(b2) - b3 := block.PrevBlock(2).Header() - b3.Extra = []byte("foo") - block.AddUncle(b3) - } - } - // Assemble the test environment - backend := newTestBackendWithGenerator(4, false, generator) - defer backend.close() - - peer, _ := newTestPeer("peer", protocol, backend) - defer peer.close() - - // Collect all state tree hashes. - var hashes []common.Hash - it := backend.db.NewIterator(nil, nil) - for it.Next() { - if key := it.Key(); len(key) == common.HashLength { - hashes = append(hashes, common.BytesToHash(key)) - } - } - it.Release() - - // Request all hashes. 
- p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{ - RequestId: 123, - GetNodeDataPacket: hashes, - }) - msg, err := peer.app.ReadMsg() - if !drop { - if err != nil { - t.Fatalf("failed to read node data response: %v", err) - } - } else { - if err != nil { - return - } - t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg) - } - if msg.Code != NodeDataMsg { - t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg) - } - var res NodeDataPacket66 - if err := msg.Decode(&res); err != nil { - t.Fatalf("failed to decode response node data: %v", err) - } - - // Verify that all hashes correspond to the requested data. - data := res.NodeDataPacket - for i, want := range hashes { - if hash := crypto.Keccak256Hash(data[i]); hash != want { - t.Errorf("data hash mismatch: have %x, want %x", hash, want) - } - } - - // Reconstruct state tree from the received data. - reconstructDB := rawdb.NewMemoryDatabase() - for i := 0; i < len(data); i++ { - rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i]) - } - - // Sanity check whether all state matches. - accounts := []common.Address{testAddr, acc1Addr, acc2Addr} - for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ { - root := backend.chain.GetBlockByNumber(i).Root() - reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil) - for j, acc := range accounts { - state, _ := backend.chain.StateAt(root) - bw := state.GetBalance(acc) - bh := reconstructed.GetBalance(acc) - - if (bw == nil) != (bh == nil) { - t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - if bw != nil && bh != nil && bw.Cmp(bh) != 0 { - t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - } - } -} - // Tests that the transaction receipts can be retrieved based on hashes. 
-func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) } func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) } func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } @@ -613,13 +491,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) { receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) } // Send the hash request and verify the response - p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{ - RequestId: 123, - GetReceiptsPacket: hashes, + p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{ + RequestId: 123, + GetReceiptsRequest: hashes, }) - if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{ - RequestId: 123, - ReceiptsPacket: receipts, + if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{ + RequestId: 123, + ReceiptsResponse: receipts, }); err != nil { t.Errorf("receipts mismatch: %v", err) } diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index da741791bc..da4ffd327e 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -22,27 +22,25 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) -// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders -func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { // Decode the complex header query - var query GetBlockHeadersPacket66 + var query GetBlockHeadersPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer) + response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer) return peer.ReplyBlockHeadersRLP(query.RequestId, response) } // ServiceGetBlockHeadersQuery assembles the response to a header query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { +func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { if query.Skip == 0 { // The fast path: when the request is for a contiguous segment of headers. 
return serviceContiguousBlockHeaderQuery(chain, query) @@ -51,7 +49,7 @@ func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersP } } -func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { +func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { hashMode := query.Origin.Hash != (common.Hash{}) first := true maxNonCanonical := uint64(100) @@ -140,7 +138,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc return headers } -func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue { +func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue { count := query.Amount if count > maxHeadersServe { count = maxHeadersServe @@ -203,19 +201,19 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe } } -func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error { // Decode the block body retrieval message - var query GetBlockBodiesPacket66 + var query GetBlockBodiesPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket) + response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest) return peer.ReplyBlockBodiesRLP(query.RequestId, response) } // ServiceGetBlockBodiesQuery assembles the response to a body query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue { +func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue { // Gather blocks until the fetch or network limits is reached var ( bytes int @@ -234,60 +232,19 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPack return bodies } -func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { - // Decode the trie node data retrieval message - var query GetNodeDataPacket66 - if err := msg.Decode(&query); err != nil { - return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) - } - response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket) - return peer.ReplyNodeData(query.RequestId, response) -} - -// ServiceGetNodeDataQuery assembles the response to a node data query. It is -// exposed to allow external packages to test protocol behavior. -func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { - // Request nodes by hash is not supported in path-based scheme. - if chain.TrieDB().Scheme() == rawdb.PathScheme { - return nil - } - // Gather state data until the fetch or network limits is reached - var ( - bytes int - nodes [][]byte - ) - for lookups, hash := range query { - if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe || - lookups >= 2*maxNodeDataServe { - break - } - // Retrieve the requested state entry - entry, err := chain.TrieDB().Node(hash) - if len(entry) == 0 || err != nil { - // Read the contract code with prefix only to save unnecessary lookups. 
- entry, err = chain.ContractCodeWithPrefix(hash) - } - if err == nil && len(entry) > 0 { - nodes = append(nodes, entry) - bytes += len(entry) - } - } - return nodes -} - -func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error { // Decode the block receipts retrieval message - var query GetReceiptsPacket66 + var query GetReceiptsPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket) + response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest) return peer.ReplyReceiptsRLP(query.RequestId, response) } // ServiceGetReceiptsQuery assembles the response to a receipt query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue { +func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -356,15 +313,15 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, ann) } -func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { // A batch of headers arrived to one of our previous requests - res := new(BlockHeadersPacket66) + res := new(BlockHeadersPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { - hashes := make([]common.Hash, len(res.BlockHeadersPacket)) - for i, header := range res.BlockHeadersPacket { + hashes := make([]common.Hash, len(res.BlockHeadersRequest)) + for i, header := range res.BlockHeadersRequest { hashes[i] = header.Hash() } return hashes @@ -372,24 +329,24 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockHeadersMsg, - Res: &res.BlockHeadersPacket, + Res: &res.BlockHeadersRequest, }, metadata) } -func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { // A batch of block bodies arrived to one of our previous requests - res := new(BlockBodiesPacket66) + res := new(BlockBodiesPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { var ( - txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) - uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket)) - withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + txsHashes = make([]common.Hash, len(res.BlockBodiesResponse)) + uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse)) + withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse)) ) hasher := trie.NewStackTrie(nil) - for i, body := range res.BlockBodiesPacket { + for i, body := range res.BlockBodiesResponse { txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) uncleHashes[i] = types.CalcUncleHash(body.Uncles) if body.Withdrawals != nil { @@ -401,33 +358,20 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockBodiesMsg, - Res: 
&res.BlockBodiesPacket, + Res: &res.BlockBodiesResponse, }, metadata) } -func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { - // A batch of node state data arrived to one of our previous requests - res := new(NodeDataPacket66) - if err := msg.Decode(res); err != nil { - return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) - } - return peer.dispatchResponse(&Response{ - id: res.RequestId, - code: NodeDataMsg, - Res: &res.NodeDataPacket, - }, nil) // No post-processing, we're not using this packet anymore -} - -func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { +func handleReceipts(backend Backend, msg Decoder, peer *Peer) error { // A batch of receipts arrived to one of our previous requests - res := new(ReceiptsPacket66) + res := new(ReceiptsPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { hasher := trie.NewStackTrie(nil) - hashes := make([]common.Hash, len(res.ReceiptsPacket)) - for i, receipt := range res.ReceiptsPacket { + hashes := make([]common.Hash, len(res.ReceiptsResponse)) + for i, receipt := range res.ReceiptsResponse { hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) } return hashes @@ -435,17 +379,17 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: ReceiptsMsg, - Res: &res.ReceiptsPacket, + Res: &res.ReceiptsResponse, }, metadata) } -func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error { +func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error { // New transaction announcement arrived, make sure we have // a valid and fresh chain to handle them if !backend.AcceptTxs() { return nil } - ann := new(NewPooledTransactionHashesPacket66) + ann := new(NewPooledTransactionHashesPacket67) if err := msg.Decode(ann); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } @@ -476,17 +420,17 @@ func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer return backend.Handle(peer, ann) } -func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error { // Decode the pooled transactions retrieval message - var query GetPooledTransactionsPacket66 + var query GetPooledTransactionsPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer) + hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest, peer) return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) } -func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) { +func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest, peer *Peer) ([]common.Hash, []rlp.RawValue) { // Gather transactions until the fetch or network limits is reached var ( bytes int @@ -534,17 +478,17 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, &txs) } -func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { +func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { // Transactions arrived, make sure we have a valid and fresh 
chain to handle them if !backend.AcceptTxs() { return nil } // Transactions can be processed, parse all of them and deliver to the pool - var txs PooledTransactionsPacket66 + var txs PooledTransactionsPacket if err := msg.Decode(&txs); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - for i, tx := range txs.PooledTransactionsPacket { + for i, tx := range txs.PooledTransactionsResponse { // Validate and mark the remote transaction if tx == nil { return fmt.Errorf("%w: transaction %d is nil", errDecode, i) @@ -553,5 +497,5 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error } requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) - return backend.Handle(peer, &txs.PooledTransactionsPacket) + return backend.Handle(peer, &txs.PooledTransactionsResponse) } diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go index dca66e0c57..d96cfc8165 100644 --- a/eth/protocols/eth/handshake_test.go +++ b/eth/protocols/eth/handshake_test.go @@ -27,7 +27,8 @@ import ( ) // Tests that handshake failures are detected and reported correctly. -func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) } +func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) } +func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) } func testHandshake(t *testing.T, protocol uint) { t.Parallel() diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index 219f486c8e..938af0cab0 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -219,7 +219,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) { func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) - return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes)) + return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes)) } // sendPooledTransactionHashes68 sends transaction hashes (tagged with their type @@ -248,15 +248,15 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) { } } -// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP. +// ReplyPooledTransactionsRLP is the response to RequestTxs. func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) - // Not packed into PooledTransactionsPacket to avoid RLP decoding - return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{ - RequestId: id, - PooledTransactionsRLPPacket: txs, + // Not packed into PooledTransactionsResponse to avoid RLP decoding + return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{ + RequestId: id, + PooledTransactionsRLPResponse: txs, }) } @@ -309,36 +309,28 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) { } } -// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders. +// ReplyBlockHeadersRLP is the response to GetBlockHeaders. 
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error { - return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{ - RequestId: id, - BlockHeadersRLPPacket: headers, + return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{ + RequestId: id, + BlockHeadersRLPResponse: headers, }) } -// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies. +// ReplyBlockBodiesRLP is the response to GetBlockBodies. func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error { - // Not packed into BlockBodiesPacket to avoid RLP decoding - return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{ - RequestId: id, - BlockBodiesRLPPacket: bodies, + // Not packed into BlockBodiesResponse to avoid RLP decoding + return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{ + RequestId: id, + BlockBodiesRLPResponse: bodies, }) } -// ReplyNodeData is the eth/66 response to GetNodeData. -func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error { - return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{ - RequestId: id, - NodeDataPacket: data, - }) -} - -// ReplyReceiptsRLP is the eth/66 response to GetReceipts. +// ReplyReceiptsRLP is the response to GetReceipts. func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { - return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{ - RequestId: id, - ReceiptsRLPPacket: receipts, + return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{ + RequestId: id, + ReceiptsRLPResponse: receipts, }) } @@ -353,9 +345,9 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), @@ -380,9 +372,9 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -407,9 +399,9 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -434,31 +426,9 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques sink: sink, code: GetBlockBodiesMsg, want: BlockBodiesMsg, - data: &GetBlockBodiesPacket66{ - RequestId: id, - GetBlockBodiesPacket: hashes, - }, - } - if err := p.dispatchRequest(req); err != nil { - return nil, err - } - return req, nil -} - -// RequestNodeData fetches a batch of arbitrary data from a node's known state -// data, corresponding to the specified hashes. 
-func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) { - p.Log().Debug("Fetching batch of state data", "count", len(hashes)) - id := rand.Uint64() - - req := &Request{ - id: id, - sink: sink, - code: GetNodeDataMsg, - want: NodeDataMsg, - data: &GetNodeDataPacket66{ - RequestId: id, - GetNodeDataPacket: hashes, + data: &GetBlockBodiesPacket{ + RequestId: id, + GetBlockBodiesRequest: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -477,9 +447,9 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ sink: sink, code: GetReceiptsMsg, want: ReceiptsMsg, - data: &GetReceiptsPacket66{ - RequestId: id, - GetReceiptsPacket: hashes, + data: &GetReceiptsPacket{ + RequestId: id, + GetReceiptsRequest: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -494,9 +464,9 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error { id := rand.Uint64() requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id) - return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{ - RequestId: id, - GetPooledTransactionsPacket: hashes, + return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{ + RequestId: id, + GetPooledTransactionsRequest: hashes, }) } diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 4b9f5ad6ba..0f44f83de1 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -30,7 +30,6 @@ import ( // Constants to match up protocol versions and messages const ( - ETH66 = 66 ETH67 = 67 ETH68 = 68 ) @@ -41,11 +40,11 @@ const ProtocolName = "eth" // ProtocolVersions are the supported versions of the `eth` protocol (first // is primary). -var ProtocolVersions = []uint{ETH68, ETH67, ETH66} +var ProtocolVersions = []uint{ETH68, ETH67} // protocolLengths are the number of implemented message corresponding to // different protocol versions. -var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17} +var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17} // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 @@ -62,8 +61,6 @@ const ( NewPooledTransactionHashesMsg = 0x08 GetPooledTransactionsMsg = 0x09 PooledTransactionsMsg = 0x0a - GetNodeDataMsg = 0x0d - NodeDataMsg = 0x0e GetReceiptsMsg = 0x0f ReceiptsMsg = 0x10 ) @@ -85,7 +82,7 @@ type Packet interface { Kind() byte // Kind returns the message type. } -// StatusPacket is the network packet for the status message for eth/64 and later. +// StatusPacket is the network packet for the status message. type StatusPacket struct { ProtocolVersion uint32 NetworkID uint64 @@ -118,18 +115,18 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) { // TransactionsPacket is the network packet for broadcasting new transactions. type TransactionsPacket []*types.Transaction -// GetBlockHeadersPacket represents a block header query. -type GetBlockHeadersPacket struct { +// GetBlockHeadersRequest represents a block header query. 
+type GetBlockHeadersRequest struct { Origin HashOrNumber // Block from which to retrieve headers Amount uint64 // Maximum number of headers to retrieve Skip uint64 // Blocks to skip between consecutive headers Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) } -// GetBlockHeadersPacket66 represents a block header query over eth/66 -type GetBlockHeadersPacket66 struct { +// GetBlockHeadersPacket represents a block header query with request ID wrapping. +type GetBlockHeadersPacket struct { RequestId uint64 - *GetBlockHeadersPacket + *GetBlockHeadersRequest } // HashOrNumber is a combined field for specifying an origin block. @@ -168,23 +165,23 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error { } } -// BlockHeadersPacket represents a block header response. -type BlockHeadersPacket []*types.Header +// BlockHeadersRequest represents a block header response. +type BlockHeadersRequest []*types.Header -// BlockHeadersPacket66 represents a block header response over eth/66. -type BlockHeadersPacket66 struct { +// BlockHeadersPacket represents a block header response with request ID wrapping. +type BlockHeadersPacket struct { RequestId uint64 - BlockHeadersPacket + BlockHeadersRequest } -// BlockHeadersRLPPacket represents a block header response, to use when we already +// BlockHeadersRLPResponse represents a block header response, to use when we already // have the headers rlp encoded. -type BlockHeadersRLPPacket []rlp.RawValue +type BlockHeadersRLPResponse []rlp.RawValue -// BlockHeadersRLPPacket66 represents a block header response over eth/66. -type BlockHeadersRLPPacket66 struct { +// BlockHeadersRLPPacket represents a block header response with request ID wrapping. +type BlockHeadersRLPPacket struct { RequestId uint64 - BlockHeadersRLPPacket + BlockHeadersRLPResponse } // NewBlockPacket is the network packet for the block propagation message. @@ -206,33 +203,34 @@ func (request *NewBlockPacket) sanityCheck() error { return nil } -// GetBlockBodiesPacket represents a block body query. -type GetBlockBodiesPacket []common.Hash +// GetBlockBodiesRequest represents a block body query. +type GetBlockBodiesRequest []common.Hash -// GetBlockBodiesPacket66 represents a block body query over eth/66. -type GetBlockBodiesPacket66 struct { +// GetBlockBodiesPacket represents a block body query with request ID wrapping. +type GetBlockBodiesPacket struct { RequestId uint64 - GetBlockBodiesPacket + GetBlockBodiesRequest } -// BlockBodiesPacket is the network packet for block content distribution. -type BlockBodiesPacket []*BlockBody +// BlockBodiesResponse is the network packet for block content distribution. -type BlockBodiesPacket66 struct { +// BlockBodiesPacket is the network packet for block content distribution with +// request ID wrapping. +type BlockBodiesPacket struct { RequestId uint64 - BlockBodiesPacket + BlockBodiesResponse } -// BlockBodiesRLPPacket is used for replying to block body requests, in cases +// BlockBodiesRLPResponse is used for replying to block body requests, in cases // where we already have them RLP-encoded, and thus can avoid the decode-encode // roundtrip.
-type BlockBodiesRLPPacket []rlp.RawValue +type BlockBodiesRLPResponse []rlp.RawValue -// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66 -type BlockBodiesRLPPacket66 struct { +// BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping. +type BlockBodiesRLPPacket struct { RequestId uint64 - BlockBodiesRLPPacket + BlockBodiesRLPResponse } // BlockBody represents the data content of a single block. @@ -244,7 +242,7 @@ type BlockBody struct { // Unpack retrieves the transactions and uncles from the range packet and returns // them in a split flat format that's more consistent with the internal data structures. -func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { +func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { // TODO(matt): add support for withdrawals to fetchers var ( txset = make([][]*types.Transaction, len(*p)) @@ -257,53 +255,36 @@ func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, return txset, uncleset, withdrawalset } -// GetNodeDataPacket represents a trie node data query. -type GetNodeDataPacket []common.Hash - -// GetNodeDataPacket66 represents a trie node data query over eth/66. -type GetNodeDataPacket66 struct { - RequestId uint64 - GetNodeDataPacket -} - -// NodeDataPacket is the network packet for trie node data distribution. -type NodeDataPacket [][]byte +// GetReceiptsRequest represents a block receipts query. +type GetReceiptsRequest []common.Hash -// NodeDataPacket66 is the network packet for trie node data distribution over eth/66. -type NodeDataPacket66 struct { +// GetReceiptsPacket represents a block receipts query with request ID wrapping. +type GetReceiptsPacket struct { RequestId uint64 - NodeDataPacket + GetReceiptsRequest } -// GetReceiptsPacket represents a block receipts query. -type GetReceiptsPacket []common.Hash +// ReceiptsResponse is the network packet for block receipts distribution. +type ReceiptsResponse [][]*types.Receipt -// GetReceiptsPacket66 represents a block receipts query over eth/66. -type GetReceiptsPacket66 struct { +// ReceiptsPacket is the network packet for block receipts distribution with +// request ID wrapping. +type ReceiptsPacket struct { RequestId uint64 - GetReceiptsPacket + ReceiptsResponse } -// ReceiptsPacket is the network packet for block receipts distribution. -type ReceiptsPacket [][]*types.Receipt +// ReceiptsRLPResponse is used for receipts, when we already have it encoded +type ReceiptsRLPResponse []rlp.RawValue -// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66. -type ReceiptsPacket66 struct { +// ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping. +type ReceiptsRLPPacket struct { RequestId uint64 - ReceiptsPacket + ReceiptsRLPResponse } -// ReceiptsRLPPacket is used for receipts, when we already have it encoded -type ReceiptsRLPPacket []rlp.RawValue - -// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket -type ReceiptsRLPPacket66 struct { - RequestId uint64 - ReceiptsRLPPacket -} - -// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67. -type NewPooledTransactionHashesPacket66 []common.Hash +// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67. 
+type NewPooledTransactionHashesPacket67 []common.Hash // NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer. type NewPooledTransactionHashesPacket68 struct { @@ -312,31 +293,33 @@ type NewPooledTransactionHashesPacket68 struct { Hashes []common.Hash } -// GetPooledTransactionsPacket represents a transaction query. -type GetPooledTransactionsPacket []common.Hash +// GetPooledTransactionsRequest represents a transaction query. +type GetPooledTransactionsRequest []common.Hash -type GetPooledTransactionsPacket66 struct { +// GetPooledTransactionsPacket represents a transaction query with request ID wrapping. +type GetPooledTransactionsPacket struct { RequestId uint64 - GetPooledTransactionsPacket + GetPooledTransactionsRequest } -// PooledTransactionsPacket is the network packet for transaction distribution. -type PooledTransactionsPacket []*types.Transaction +// PooledTransactionsResponse is the network packet for transaction distribution. +type PooledTransactionsResponse []*types.Transaction -// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66. -type PooledTransactionsPacket66 struct { +// PooledTransactionsPacket is the network packet for transaction distribution +// with request ID wrapping. +type PooledTransactionsPacket struct { RequestId uint64 - PooledTransactionsPacket + PooledTransactionsResponse } -// PooledTransactionsRLPPacket is the network packet for transaction distribution, used +// PooledTransactionsRLPResponse is the network packet for transaction distribution, used // in the cases we already have them in rlp-encoded form -type PooledTransactionsRLPPacket []rlp.RawValue +type PooledTransactionsRLPResponse []rlp.RawValue -// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket -type PooledTransactionsRLPPacket66 struct { +// PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping. 
+type PooledTransactionsRLPPacket struct { RequestId uint64 - PooledTransactionsRLPPacket + PooledTransactionsRLPResponse } func (*StatusPacket) Name() string { return "Status" } @@ -348,40 +331,34 @@ func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg } func (*TransactionsPacket) Name() string { return "Transactions" } func (*TransactionsPacket) Kind() byte { return TransactionsMsg } -func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" } -func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg } +func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" } +func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg } -func (*BlockHeadersPacket) Name() string { return "BlockHeaders" } -func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg } +func (*BlockHeadersRequest) Name() string { return "BlockHeaders" } +func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg } -func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" } -func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg } +func (*GetBlockBodiesRequest) Name() string { return "GetBlockBodies" } +func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg } -func (*BlockBodiesPacket) Name() string { return "BlockBodies" } -func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } +func (*BlockBodiesResponse) Name() string { return "BlockBodies" } +func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg } func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Kind() byte { return NewBlockMsg } -func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" } -func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg } +func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" } +func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg } func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" } func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg } -func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } -func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } - -func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } -func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } - -func (*GetNodeDataPacket) Name() string { return "GetNodeData" } -func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } +func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" } +func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg } -func (*NodeDataPacket) Name() string { return "NodeData" } -func (*NodeDataPacket) Kind() byte { return NodeDataMsg } +func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" } +func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg } -func (*GetReceiptsPacket) Name() string { return "GetReceipts" } -func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } +func (*GetReceiptsRequest) Name() string { return "GetReceipts" } +func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg } -func (*ReceiptsPacket) Name() string { return "Receipts" } -func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg } +func (*ReceiptsResponse) 
Name() string { return "Receipts" } +func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index a86fbb0a69..bc2545dea2 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -35,19 +35,19 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } // Assemble some table driven tests tests := []struct { - packet *GetBlockHeadersPacket + packet *GetBlockHeadersRequest fail bool }{ // Providing the origin as either a hash or a number should both work - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}}, - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}}, // Providing arbitrary query field should also work - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, // Providing both the origin hash and origin number must fail - {fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}}, + {fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}}, } // Iterate over each of the tests and try to encode and then decode for i, tt := range tests { @@ -58,7 +58,7 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { t.Fatalf("test %d: encode should have failed", i) } if !tt.fail { - packet := new(GetBlockHeadersPacket) + packet := new(GetBlockHeadersRequest) if err := rlp.DecodeBytes(bytes, packet); err != nil { t.Fatalf("test %d: failed to decode packet: %v", i, err) } @@ -70,46 +70,40 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } } -// TestEth66EmptyMessages tests encoding of empty eth66 messages -func TestEth66EmptyMessages(t *testing.T) { +// TestEmptyMessages tests encoding of empty messages. 
+func TestEmptyMessages(t *testing.T) { // All empty messages encodes to the same format want := common.FromHex("c4820457c0") for i, msg := range []interface{}{ // Headers - GetBlockHeadersPacket66{1111, nil}, - BlockHeadersPacket66{1111, nil}, + GetBlockHeadersPacket{1111, nil}, + BlockHeadersPacket{1111, nil}, // Bodies - GetBlockBodiesPacket66{1111, nil}, - BlockBodiesPacket66{1111, nil}, - BlockBodiesRLPPacket66{1111, nil}, - // Node data - GetNodeDataPacket66{1111, nil}, - NodeDataPacket66{1111, nil}, + GetBlockBodiesPacket{1111, nil}, + BlockBodiesPacket{1111, nil}, + BlockBodiesRLPPacket{1111, nil}, // Receipts - GetReceiptsPacket66{1111, nil}, - ReceiptsPacket66{1111, nil}, + GetReceiptsPacket{1111, nil}, + ReceiptsPacket{1111, nil}, // Transactions - GetPooledTransactionsPacket66{1111, nil}, - PooledTransactionsPacket66{1111, nil}, - PooledTransactionsRLPPacket66{1111, nil}, + GetPooledTransactionsPacket{1111, nil}, + PooledTransactionsPacket{1111, nil}, + PooledTransactionsRLPPacket{1111, nil}, // Headers - BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})}, + BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})}, // Bodies - GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, - BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, - BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, - // Node data - GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})}, - NodeDataPacket66{1111, NodeDataPacket([][]byte{})}, + GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})}, + BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})}, + BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})}, // Receipts - GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, - ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, + GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})}, + ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})}, // Transactions - GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})}, - PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})}, - PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})}, + GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})}, + PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})}, + PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})}, } { if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want) @@ -117,8 +111,8 @@ func TestEth66EmptyMessages(t *testing.T) { } } -// TestEth66Messages tests the encoding of all redefined eth66 messages -func TestEth66Messages(t *testing.T) { +// TestMessages tests the encoding of all messages. 
+func TestMessages(t *testing.T) { // Some basic structs used during testing var ( header *types.Header @@ -169,10 +163,6 @@ func TestEth66Messages(t *testing.T) { common.HexToHash("deadc0de"), common.HexToHash("feedbeef"), } - byteSlices := [][]byte{ - common.FromHex("deadc0de"), - common.FromHex("feedbeef"), - } // init the receipts { receipts = []*types.Receipt{ @@ -203,59 +193,51 @@ func TestEth66Messages(t *testing.T) { want []byte }{ { - GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}}, + GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}}, common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"), }, { - GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, + GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, common.FromHex("ca820457c682270f050580"), }, { - BlockHeadersPacket66{1111, BlockHeadersPacket{header}}, + BlockHeadersPacket{1111, BlockHeadersRequest{header}}, common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)}, + GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})}, + BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { // Identical to non-rlp-shortcut version - BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, + BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})}, common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, - common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), - }, - { - NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, - common.FromHex("ce820457ca84deadc0de84feedbeef"), - }, - { - 
GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)}, + GetReceiptsPacket{1111, GetReceiptsRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})}, + ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})}, + ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)}, + GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)}, + PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)}, common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, { - PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)}, + PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)}, 
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, } { diff --git a/eth/sync_test.go b/eth/sync_test.go index b5e00298b9..d26cbb66ea 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -28,8 +28,8 @@ import ( ) // Tests that snap sync is disabled after a successful sync cycle. -func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) } func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) } +func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) } // Tests that snap sync gets disabled as soon as a real block is successfully // imported into the blockchain. From a8a9c8e4b00c5b9f84242181839234b8e9fd54e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 4 Oct 2023 12:36:36 +0300 Subject: [PATCH 16/26] core, eth, miner: start propagating and consuming blob txs (#28243) * core, eth, miner: start propagating and consuming blob txs * eth/protocols/eth: disable eth/67 if Cancun is enabled * core/txpool, eth, miner: pass gas limit infos in lazy tx for mienr filtering * core/txpool, miner: add lazy resolver for pending txs too * core, eth: fix review noticed bugs * eth, miner: minor polishes in the mining and announcing logs * core/expool: unsubscribe the event scope --- core/txpool/blobpool/blobpool.go | 55 ++++++++++++++++++++-------- core/txpool/legacypool/legacypool.go | 18 +++++---- core/txpool/subpool.go | 19 ++++++++-- core/txpool/txpool.go | 12 +++--- eth/api_backend.go | 2 +- eth/catalyst/simulated_beacon.go | 2 +- eth/handler.go | 41 ++++++++++++--------- eth/handler_eth.go | 14 ++++--- eth/handler_eth_test.go | 4 +- eth/handler_test.go | 6 ++- eth/protocols/eth/handler.go | 4 ++ eth/protocols/eth/handlers.go | 4 +- miner/ordering_test.go | 4 ++ miner/worker.go | 33 +++++++++++------ 14 files changed, 145 insertions(+), 73 deletions(-) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 36916c3f0b..32c6c0e8fe 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -97,6 +97,8 @@ type blobTxMeta struct { execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump execFeeCap *uint256.Int // Needed to validate replacement price bump blobFeeCap *uint256.Int // Needed to validate replacement price bump + execGas uint64 // Needed to check inclusion validity before reading the blob + blobGas uint64 // Needed to check inclusion validity before reading the blob basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap @@ -118,6 +120,8 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta { execTipCap: uint256.MustFromBig(tx.GasTipCap()), execFeeCap: uint256.MustFromBig(tx.GasFeeCap()), blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()), + execGas: tx.Gas(), + blobGas: tx.BlobGas(), } meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap) meta.blobfeeJumps = 
dynamicFeeJumps(meta.blobFeeCap) @@ -307,8 +311,8 @@ type BlobPool struct { spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts evict *evictHeap // Heap of cheapest accounts for eviction when full - eventFeed event.Feed // Event feed to send out new tx events on pool inclusion - eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination + discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded) + insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included) lock sync.RWMutex // Mutex protecting the pool during reorg handling } @@ -436,8 +440,6 @@ func (p *BlobPool) Close() error { if err := p.store.Close(); err != nil { errs = append(errs, err) } - p.eventScope.Close() - switch { case errs == nil: return nil @@ -758,15 +760,21 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { // Run the reorg between the old and new head and figure out which accounts // need to be rechecked and which transactions need to be readded if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil { + var adds []*types.Transaction for addr, txs := range reinject { // Blindly push all the lost transactions back into the pool for _, tx := range txs { - p.reinject(addr, tx.Hash()) + if err := p.reinject(addr, tx.Hash()); err == nil { + adds = append(adds, tx.WithoutBlobTxSidecar()) + } } // Recheck the account's pooled transactions to drop included and // invalidated one p.recheck(addr, inclusions) } + if len(adds) > 0 { + p.insertFeed.Send(core.NewTxsEvent{Txs: adds}) + } } // Flush out any blobs from limbo that are older than the latest finality if p.chain.Config().IsCancun(p.head.Number, p.head.Time) { @@ -921,13 +929,13 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]* // Note, the method will not initialize the eviction cache values as those will // be done once for all transactions belonging to an account after all individual // transactions are injected back into the pool. -func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { +func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error { // Retrieve the associated blob from the limbo. Without the blobs, we cannot // add the transaction back into the pool as it is not mineable. tx, err := p.limbo.pull(txhash) if err != nil { log.Error("Blobs unavailable, dropping reorged tx", "err", err) - return + return err } // TODO: seems like an easy optimization here would be getting the serialized tx // from limbo instead of re-serializing it here. 
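The discoverFeed/insertFeed split above is the behavioural core of this change: the new reorgs flag on SubscribeTransactions lets a subscriber pick between only newly discovered transactions and also ones reinjected during a chain reorg. A minimal sketch of the two consumer patterns, using only the SubscribeTransactions signature introduced by this patch (the function names and buffer sizes are illustrative, not from the codebase):

package example

import (
	"log"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/txpool"
)

// announceLoop mirrors the network handler: it only cares about transactions
// seen for the first time, so reorg reinjections are filtered out (reorgs=false).
func announceLoop(pool *txpool.TxPool) {
	ch := make(chan core.NewTxsEvent, 1024)
	sub := pool.SubscribeTransactions(ch, false)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			log.Printf("announcing %d newly discovered txs", len(ev.Txs))
		case <-sub.Err():
			return
		}
	}
}

// miningLoop mirrors the miner: it also wants transactions resurrected by a
// reorg, since those must compete for inclusion again (reorgs=true).
func miningLoop(pool *txpool.TxPool) {
	ch := make(chan core.NewTxsEvent, 1024)
	sub := pool.SubscribeTransactions(ch, true)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			log.Printf("scheduling %d pending txs for inclusion", len(ev.Txs))
		case <-sub.Err():
			return
		}
	}
}

This mirrors the call sites later in the patch: eth/handler.go subscribes with reorgs=false so peers are not re-announced transactions they have almost certainly seen before, while miner/worker.go subscribes with reorgs=true.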
@@ -936,12 +944,12 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { blob, err := rlp.EncodeToBytes(tx) if err != nil { log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err) - return + return err } id, err := p.store.Put(blob) if err != nil { log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err) - return + return err } // Update the indixes and metrics @@ -949,7 +957,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { if _, ok := p.index[addr]; !ok { if err := p.reserve(addr, true); err != nil { log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err) - return + return err } p.index[addr] = []*blobTxMeta{meta} p.spent[addr] = meta.costCap @@ -960,6 +968,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { } p.lookup[meta.hash] = meta.id p.stored += uint64(meta.size) + return nil } // SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements @@ -1154,9 +1163,19 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction { // Add inserts a set of blob transactions into the pool if they pass validation (both // consensus validity and pool restictions). func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error { - errs := make([]error, len(txs)) + var ( + adds = make([]*types.Transaction, 0, len(txs)) + errs = make([]error, len(txs)) + ) for i, tx := range txs { errs[i] = p.add(tx) + if errs[i] == nil { + adds = append(adds, tx.WithoutBlobTxSidecar()) + } + } + if len(adds) > 0 { + p.discoverFeed.Send(core.NewTxsEvent{Txs: adds}) + p.insertFeed.Send(core.NewTxsEvent{Txs: adds}) } return errs } @@ -1384,6 +1403,8 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr Time: time.Now(), // TODO(karalabe): Maybe save these and use that? GasFeeCap: tx.execFeeCap.ToBig(), GasTipCap: tx.execTipCap.ToBig(), + Gas: tx.execGas, + BlobGas: tx.blobGas, }) } if len(lazies) > 0 { @@ -1468,10 +1489,14 @@ func (p *BlobPool) updateLimboMetrics() { limboSlotusedGauge.Update(int64(slotused)) } -// SubscribeTransactions registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { - return p.eventScope.Track(p.eventFeed.Subscribe(ch)) +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { + if reorgs { + return p.insertFeed.Subscribe(ch) + } else { + return p.discoverFeed.Subscribe(ch) + } } // Nonce returns the next nonce of an account, with all transactions executable diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 2430028f9d..e71204185f 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -208,7 +208,6 @@ type LegacyPool struct { chain BlockChain gasTip atomic.Pointer[big.Int] txFeed event.Feed - scope event.SubscriptionScope signer types.Signer mu sync.RWMutex @@ -404,9 +403,6 @@ func (pool *LegacyPool) loop() { // Close terminates the transaction pool. 
func (pool *LegacyPool) Close() error { - // Unsubscribe all subscriptions registered from txpool - pool.scope.Close() - // Terminate the pool reorger and return close(pool.reorgShutdownCh) pool.wg.Wait() @@ -425,10 +421,14 @@ func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) { <-wait } -// SubscribeTransactions registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { - return pool.scope.Track(pool.txFeed.Subscribe(ch)) +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { + // The legacy pool has a very messed up internal shuffling, so it's kind of + // hard to separate newly discovered transaction from resurrected ones. This + // is because the new txs are added to the queue, resurrected ones too and + // reorgs run lazily, so separating the two would need a marker. + return pool.txFeed.Subscribe(ch) } // SetGasTip updates the minimum gas tip required by the transaction pool for a @@ -552,6 +552,8 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L Time: txs[i].Time(), GasFeeCap: txs[i].GasFeeCap(), GasTipCap: txs[i].GasTipCap(), + Gas: txs[i].Gas(), + BlobGas: txs[i].BlobGas(), } } pending[addr] = lazies diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go index 85312c4318..de05b38d43 100644 --- a/core/txpool/subpool.go +++ b/core/txpool/subpool.go @@ -30,13 +30,16 @@ import ( // enough for the miner and other APIs to handle large batches of transactions; // and supports pulling up the entire transaction when really needed. type LazyTransaction struct { - Pool SubPool // Transaction subpool to pull the real transaction up + Pool LazyResolver // Transaction resolver to pull the real transaction up Hash common.Hash // Transaction hash to pull up if needed Tx *types.Transaction // Transaction if already resolved Time time.Time // Time when the transaction was first seen GasFeeCap *big.Int // Maximum fee per gas the transaction may consume GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay + + Gas uint64 // Amount of gas required by the transaction + BlobGas uint64 // Amount of blob gas required by the transaction } // Resolve retrieves the full transaction belonging to a lazy handle if it is still @@ -48,6 +51,14 @@ func (ltx *LazyTransaction) Resolve() *types.Transaction { return ltx.Tx } +// LazyResolver is a minimal interface needed for a transaction pool to satisfy +// resolving lazy transactions. It's mostly a helper to avoid the entire sub- +// pool being injected into the lazy transaction. +type LazyResolver interface { + // Get returns a transaction if it is contained in the pool, or nil otherwise. + Get(hash common.Hash) *types.Transaction +} + // AddressReserver is passed by the main transaction pool to subpools, so they // may request (and relinquish) exclusive access to certain addresses. type AddressReserver func(addr common.Address, reserve bool) error @@ -99,8 +110,10 @@ type SubPool interface { // account and sorted by nonce. Pending(enforceTips bool) map[common.Address][]*LazyTransaction - // SubscribeTransactions subscribes to new transaction events. 
- SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription + // SubscribeTransactions subscribes to new transaction events. The subscriber + // can decide whether to receive notifications only for newly seen transactions + // or also for reorged out ones. + SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top. diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index cacae7bc00..0d4e05da4c 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -155,13 +155,15 @@ func (p *TxPool) Close() error { if err := <-errc; err != nil { errs = append(errs, err) } - // Terminate each subpool for _, subpool := range p.subpools { if err := subpool.Close(); err != nil { errs = append(errs, err) } } + // Unsubscribe anyone still listening for tx events + p.subs.Close() + if len(errs) > 0 { return fmt.Errorf("subpool close errors: %v", errs) } @@ -316,12 +318,12 @@ func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction return txs } -// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending -// events to the given channel. -func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { subs := make([]event.Subscription, len(p.subpools)) for i, subpool := range p.subpools { - subs[i] = subpool.SubscribeTransactions(ch) + subs[i] = subpool.SubscribeTransactions(ch, reorgs) } return p.subs.Track(event.JoinSubscriptions(subs...)) } diff --git a/eth/api_backend.go b/eth/api_backend.go index a0c14f1338..601e555158 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -334,7 +334,7 @@ func (b *EthAPIBackend) TxPool() *txpool.TxPool { } func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return b.eth.txPool.SubscribeNewTxsEvent(ch) + return b.eth.txPool.SubscribeTransactions(ch, true) } func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress { diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index 1f7a3266cd..a9a2bb4a9a 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -199,7 +199,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal) error { func (c *SimulatedBeacon) loopOnDemand() { var ( newTxs = make(chan core.NewTxsEvent) - sub = c.eth.TxPool().SubscribeNewTxsEvent(newTxs) + sub = c.eth.TxPool().SubscribeTransactions(newTxs, true) ) defer sub.Unsubscribe() diff --git a/eth/handler.go b/eth/handler.go index f731efe1b8..665df7d8cf 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -75,9 +75,10 @@ type txPool interface { // The slice should be modifiable by the caller. Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction - // SubscribeNewTxsEvent should return an event subscription of - // NewTxsEvent and send events to the given channel. - SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription + // SubscribeTransactions subscribes to new transaction events. The subscriber + // can decide whether to receive notifications only for newly seen transactions + // or also for reorged out ones. 
+ SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription } // handlerConfig is the collection of initialization parameters to create a full @@ -509,10 +510,10 @@ func (h *handler) unregisterPeer(id string) { func (h *handler) Start(maxPeers int) { h.maxPeers = maxPeers - // broadcast transactions + // broadcast and announce transactions (only new ones, not resurrected ones) h.wg.Add(1) h.txsCh = make(chan core.NewTxsEvent, txChanSize) - h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh) + h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false) go h.txBroadcastLoop() // broadcast mined blocks @@ -592,26 +593,33 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { } // BroadcastTransactions will propagate a batch of transactions -// - To a square root of all peers +// - To a square root of all peers for non-blob transactions // - And, separately, as announcements to all peers which are not known to // already have the given transaction. func (h *handler) BroadcastTransactions(txs types.Transactions) { var ( - annoCount int // Count of announcements made - annoPeers int - directCount int // Count of the txs sent directly to peers - directPeers int // Count of the peers that were sent transactions directly + blobTxs int // Number of blob transactions to announce only + largeTxs int // Number of large transactions to announce only + + directCount int // Number of transactions sent directly to peers (duplicates included) + directPeers int // Number of peers that were sent transactions directly + annCount int // Number of transactions announced across all peers (duplicates included) + annPeers int // Number of peers announced about transactions txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce - ) // Broadcast transactions to a batch of peers not knowing about it for _, tx := range txs { peers := h.peers.peersWithoutTransaction(tx.Hash()) var numDirect int - if tx.Size() <= txMaxBroadcastSize { + switch { + case tx.Type() == types.BlobTxType: + blobTxs++ + case tx.Size() > txMaxBroadcastSize: + largeTxs++ + default: numDirect = int(math.Sqrt(float64(len(peers)))) } // Send the tx unconditionally to a subset of our peers @@ -629,13 +637,12 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { peer.AsyncSendTransactions(hashes) } for peer, hashes := range annos { - annoPeers++ - annoCount += len(hashes) + annPeers++ + annCount += len(hashes) peer.AsyncSendPooledTransactionHashes(hashes) } - log.Debug("Transaction broadcast", "txs", len(txs), - "announce packs", annoPeers, "announced hashes", annoCount, - "tx packs", directPeers, "broadcast txs", directCount) + log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs, + "bcastpeers", directPeers, "bcastcount", directCount, "annpeers", annPeers, "anncount", annCount) } // minedBroadcastLoop sends mined blocks to connected peers. 
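The rewritten BroadcastTransactions reduces to a per-transaction decision: blob transactions and transactions larger than txMaxBroadcastSize are announced by hash only, while the rest are additionally pushed in full to roughly the square root of the peers that do not yet know them. A minimal sketch of that decision, with an illustrative helper name and 4096 standing in for the txMaxBroadcastSize constant defined elsewhere in eth/handler.go:

package example

import (
	"math"

	"github.com/ethereum/go-ethereum/core/types"
)

// txMaxBroadcastSize is an illustrative stand-in for the limit referenced by
// the handler; the real constant lives in eth/handler.go.
const txMaxBroadcastSize = 4096

// directSendCount decides how many of the peers lacking this transaction get
// the full body pushed; every remaining peer receives only a hash announcement.
func directSendCount(tx *types.Transaction, peersWithoutTx int) int {
	switch {
	case tx.Type() == types.BlobTxType:
		return 0 // blob txs are announce-only, peers fetch the body on demand
	case tx.Size() > txMaxBroadcastSize:
		return 0 // oversized txs are announce-only to save bandwidth
	default:
		return int(math.Sqrt(float64(peersWithoutTx)))
	}
}

Keeping blob transactions announce-only lines up with the eth/handler_eth.go change below, which now rejects blob transactions arriving via direct Transactions broadcasts.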
diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 3a0944640e..e844b36cca 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -17,6 +17,7 @@ package eth import ( + "errors" "fmt" "math/big" "time" @@ -73,6 +74,11 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { return h.txFetcher.Notify(peer.ID(), packet.Hashes) case *eth.TransactionsPacket: + for _, tx := range *packet { + if tx.Type() == types.BlobTxType { + return errors.New("disallowed broadcast blob transaction") + } + } return h.txFetcher.Enqueue(peer.ID(), *packet, false) case *eth.PooledTransactionsResponse: @@ -90,9 +96,7 @@ func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, // the chain already entered the pos stage and disconnect the // remote peer. if h.merger.PoSFinalized() { - // TODO (MariusVanDerWijden) drop non-updated peers after the merge - return nil - // return errors.New("unexpected block announces") + return errors.New("disallowed block announcement") } // Schedule all the unknown hashes for retrieval var ( @@ -118,9 +122,7 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td // the chain already entered the pos stage and disconnect the // remote peer. if h.merger.PoSFinalized() { - // TODO (MariusVanDerWijden) drop non-updated peers after the merge - return nil - // return errors.New("unexpected block announces") + return errors.New("disallowed block broadcast") } // Schedule the block for import h.blockFetcher.Enqueue(peer.ID(), block) diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 4cdfdf47b8..bb342acc18 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -249,7 +249,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { handler.handler.synced.Store(true) // mark synced to accept transactions txs := make(chan core.NewTxsEvent) - sub := handler.txpool.SubscribeNewTxsEvent(txs) + sub := handler.txpool.SubscribeTransactions(txs, false) defer sub.Unsubscribe() // Create a source peer to send messages through and a sink handler to receive them @@ -424,7 +424,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { for i := 0; i < len(sinks); i++ { txChs[i] = make(chan core.NewTxsEvent, 1024) - sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i]) + sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false) defer sub.Unsubscribe() } // Fill the source pool with transactions and wait for them at the sinks diff --git a/eth/handler_test.go b/eth/handler_test.go index 2e0a988452..6d6132ee4c 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -113,15 +113,17 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.Lazy Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } } return pending } -// SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and +// SubscribeTransactions should return an event subscription of NewTxsEvent and // send events to the given channel. 
-func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { +func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { return p.txFeed.Subscribe(ch) } diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index a7d6ed25a9..42d0412a12 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -93,6 +93,10 @@ type TxPool interface { func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol { protocols := make([]p2p.Protocol, 0, len(ProtocolVersions)) for _, version := range ProtocolVersions { + // Blob transactions require eth/68 announcements, disable everything else + if version <= ETH67 && backend.Chain().Config().CancunTime != nil { + continue + } version := version // Closure protocols = append(protocols, p2p.Protocol{ diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index da4ffd327e..069e92dadf 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -426,11 +426,11 @@ func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest, peer) + hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest) return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) } -func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest, peer *Peer) ([]common.Hash, []rlp.RawValue) { +func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) { // Gather transactions until the fetch or network limits is reached var ( bytes int diff --git a/miner/ordering_test.go b/miner/ordering_test.go index bdbdc32148..59d478274d 100644 --- a/miner/ordering_test.go +++ b/miner/ordering_test.go @@ -92,6 +92,8 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } expectedCount += count @@ -157,6 +159,8 @@ func TestTransactionTimeSort(t *testing.T) { Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } // Sort the transactions and cross check the nonce ordering diff --git a/miner/worker.go b/miner/worker.go index 711149232b..f680702814 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -263,8 +263,8 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus resubmitIntervalCh: make(chan time.Duration), resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), } - // Subscribe NewTxsEvent for tx pool - worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) + // Subscribe for transaction insertion events (whether from network or resurrects) + worker.txsSub = eth.TxPool().SubscribeTransactions(worker.txsCh, true) // Subscribe events for blockchain worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) @@ -542,11 +542,14 @@ func (w *worker) mainLoop() { for _, tx := range ev.Txs { acc, _ := types.Sender(w.current.signer, tx) txs[acc] = append(txs[acc], &txpool.LazyTransaction{ + Pool: w.eth.TxPool(), // We don't know where this came from, yolo resolve from everywhere Hash: tx.Hash(), - Tx: tx.WithoutBlobTxSidecar(), + Tx: nil, 
// Do *not* set this! We need to resolve it later to pull blobs in Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } txset := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee) @@ -742,7 +745,6 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]* if tx.Type() == types.BlobTxType { return w.commitBlobTransaction(env, tx) } - receipt, err := w.applyTransaction(env, tx) if err != nil { return nil, err @@ -764,7 +766,6 @@ func (w *worker) commitBlobTransaction(env *environment, tx *types.Transaction) if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { return nil, errors.New("max data blobs reached") } - receipt, err := w.applyTransaction(env, tx) if err != nil { return nil, err @@ -815,13 +816,24 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn if ltx == nil { break } + // If we don't have enough space for the next transaction, skip the account. + if env.gasPool.Gas() < ltx.Gas { + log.Trace("Not enough gas left for transaction", "hash", ltx.Hash, "left", env.gasPool.Gas(), "needed", ltx.Gas) + txs.Pop() + continue + } + if left := uint64(params.MaxBlobGasPerBlock - env.blobs*params.BlobTxBlobGasPerBlob); left < ltx.BlobGas { + log.Trace("Not enough blob gas left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas) + txs.Pop() + continue + } + // Transaction seems to fit, pull it up from the pool tx := ltx.Resolve() if tx == nil { - log.Warn("Ignoring evicted transaction") + log.Trace("Ignoring evicted transaction", "hash", ltx.Hash) txs.Pop() continue } - // Error may be ignored here. The error has already been checked // during transaction acceptance is the transaction pool. from, _ := types.Sender(env.signer, tx) @@ -829,11 +841,10 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { - log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) + log.Trace("Ignoring replay protected transaction", "hash", ltx.Hash, "eip155", w.chainConfig.EIP155Block) txs.Pop() continue } - // Start executing the transaction env.state.SetTxContext(tx.Hash(), env.tcount) @@ -841,7 +852,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn switch { case errors.Is(err, core.ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) + log.Trace("Skipping transaction with low nonce", "hash", ltx.Hash, "sender", from, "nonce", tx.Nonce()) txs.Shift() case errors.Is(err, nil): @@ -853,7 +864,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn default: // Transaction is regarded as invalid, drop all consecutive transactions from // the same sender because of `nonce-too-high` clause. 
- log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + log.Debug("Transaction failed, account skipped", "hash", ltx.Hash, "err", err) txs.Pop() } } From 95b0555c84ec29c0d8b6947403804d2bccc4189e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 4 Oct 2023 12:37:04 +0300 Subject: [PATCH 17/26] eth: when snap is complaining for missing eth, be verbose about the details (#28249) * eth: when snap is complaining for missing eth, be verbost about the details * eth: lower snapshot registration error verbosity --- eth/handler.go | 2 +- eth/peerset.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/eth/handler.go b/eth/handler.go index 665df7d8cf..0c0c17fee1 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -462,7 +462,7 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error snap.EgressRegistrationErrorMeter.Mark(1) } } - peer.Log().Warn("Snapshot extension registration failed", "err", err) + peer.Log().Debug("Snapshot extension registration failed", "err", err) return err } return handler(peer) diff --git a/eth/peerset.go b/eth/peerset.go index b9cc1e03ac..b27d3964a1 100644 --- a/eth/peerset.go +++ b/eth/peerset.go @@ -18,6 +18,7 @@ package eth import ( "errors" + "fmt" "math/big" "sync" @@ -74,7 +75,7 @@ func (ps *peerSet) registerSnapExtension(peer *snap.Peer) error { // Reject the peer if it advertises `snap` without `eth` as `snap` is only a // satellite protocol meaningful with the chain selection of `eth` if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) { - return errSnapWithoutEth + return fmt.Errorf("%w: have %v", errSnapWithoutEth, peer.Caps()) } // Ensure nobody can double connect ps.lock.Lock() From 052355f5e2b1726552fdb38a94cf6ea1506caf95 Mon Sep 17 00:00:00 2001 From: tactical_retreat Date: Wed, 4 Oct 2023 05:38:25 -0400 Subject: [PATCH 18/26] cmd/evm/internal/t8ntoo: tiny bugfix for difficulty field (#28245) --- cmd/evm/internal/t8ntool/block.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 872e2f6b2a..5c0e28e284 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -158,7 +158,7 @@ func (i *bbInput) ToBlock() *types.Block { if i.Header.Nonce != nil { header.Nonce = *i.Header.Nonce } - if header.Difficulty != nil { + if i.Header.Difficulty != nil { header.Difficulty = i.Header.Difficulty } return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals) From ec001e93cc7a40019260c9409f03bf78c0085945 Mon Sep 17 00:00:00 2001 From: lightclient Date: Fri, 29 Sep 2023 15:43:36 -0600 Subject: [PATCH 19/26] eth/catalyst: add timestamp checks to fcu and new payload and improve param checks --- eth/catalyst/api.go | 99 ++++++++++++++++++++++------------------ eth/catalyst/api_test.go | 23 ++++++++-- 2 files changed, 74 insertions(+), 48 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index d1e1991414..5312d1e736 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -184,47 +184,37 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV1(update engine.ForkchoiceStateV1, pa } // ForkchoiceUpdatedV2 is equivalent to V1 with the addition of withdrawals in the payload attributes. 
-func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { - if payloadAttributes != nil { - if err := api.verifyPayloadAttributes(payloadAttributes); err != nil { - return engine.STATUS_INVALID, engine.InvalidParams.With(err) +func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { + if params != nil { + if params.Withdrawals == nil { + return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing withdrawals")) + } + if params.BeaconRoot != nil { + return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing beacon root")) + } + c := api.eth.BlockChain().Config() + if !active(c.IsShanghai, c.IsCancun, c.LondonBlock, params.Timestamp) { + return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called for shanghai payloads")) } } - return api.forkchoiceUpdated(update, payloadAttributes) + return api.forkchoiceUpdated(update, params) } // ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root in the payload attributes. -func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { - if payloadAttributes != nil { - if err := api.verifyPayloadAttributes(payloadAttributes); err != nil { - return engine.STATUS_INVALID, engine.InvalidParams.With(err) +func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { + if params != nil { + if params.Withdrawals == nil { + return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing withdrawals")) + } + if params.BeaconRoot == nil { + return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing beacon root")) + } + c := api.eth.BlockChain().Config() + if !active(c.IsCancun, c.IsPrague, c.LondonBlock, params.Timestamp) { + return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV3 must only be called for shanghai payloads")) } } - return api.forkchoiceUpdated(update, payloadAttributes) -} - -func (api *ConsensusAPI) verifyPayloadAttributes(attr *engine.PayloadAttributes) error { - c := api.eth.BlockChain().Config() - - // Verify withdrawals attribute for Shanghai. - if err := checkAttribute(c.IsShanghai, attr.Withdrawals != nil, c.LondonBlock, attr.Timestamp); err != nil { - return fmt.Errorf("invalid withdrawals: %w", err) - } - // Verify beacon root attribute for Cancun. 
- if err := checkAttribute(c.IsCancun, attr.BeaconRoot != nil, c.LondonBlock, attr.Timestamp); err != nil { - return fmt.Errorf("invalid parent beacon block root: %w", err) - } - return nil -} - -func checkAttribute(active func(*big.Int, uint64) bool, exists bool, block *big.Int, time uint64) error { - if active(block, time) && !exists { - return errors.New("fork active, missing expected attribute") - } - if !active(block, time) && exists { - return errors.New("fork inactive, unexpected attribute set") - } - return nil + return api.forkchoiceUpdated(update, params) } func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { @@ -457,27 +447,36 @@ func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.Payl // NewPayloadV2 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.PayloadStatusV1, error) { - if api.eth.BlockChain().Config().IsShanghai(new(big.Int).SetUint64(params.Number), params.Timestamp) { - if params.Withdrawals == nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) - } - } else if params.Withdrawals != nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai")) + if params.Withdrawals == nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) + } + if params.ExcessBlobGas != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) } - if api.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(params.Number), params.Timestamp) { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("newPayloadV2 called post-cancun")) + if params.BlobGasUsed != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil params.BlobGasUsed pre-cancun")) + } + + c := api.eth.BlockChain().Config() + if !active(c.IsShanghai, c.IsCancun, c.LondonBlock, params.Timestamp) { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV2 must only be called for shanghai payloads")) } + return api.newPayload(params, nil, nil) } // NewPayloadV3 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. 
func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) { + if params.Withdrawals == nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) + } if params.ExcessBlobGas == nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun")) } if params.BlobGasUsed == nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil params.BlobGasUsed post-cancun")) } + if versionedHashes == nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun")) } @@ -485,13 +484,25 @@ func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHas return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil parentBeaconBlockRoot post-cancun")) } - if !api.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(params.Number), params.Timestamp) { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV3 called pre-cancun")) + c := api.eth.BlockChain().Config() + if !active(c.IsCancun, c.IsPrague, c.LondonBlock, params.Timestamp) { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV3 must only be called for cancun payloads")) } return api.newPayload(params, versionedHashes, beaconRoot) } +// active returns true only if current returns true and next returns false. It returns false otherwise. +func active(current, next func(*big.Int, uint64) bool, london *big.Int, time uint64) bool { + if !current(london, time) { + return false + } + if next(london, time) { + return false + } + return true +} + func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) { // The locking here is, strictly, not required. 
Without these locks, this can happen: // diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 59f44fafea..81fbec7335 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -1237,7 +1237,15 @@ func TestNilWithdrawals(t *testing.T) { } for _, test := range tests { - _, err := api.ForkchoiceUpdatedV2(fcState, &test.blockParams) + var ( + err error + shanghai = genesis.Config.IsShanghai(genesis.Config.LondonBlock, test.blockParams.Timestamp) + ) + if !shanghai { + _, err = api.ForkchoiceUpdatedV1(fcState, &test.blockParams) + } else { + _, err = api.ForkchoiceUpdatedV2(fcState, &test.blockParams) + } if test.wantErr { if err == nil { t.Fatal("wanted error on fcuv2 with invalid withdrawals") @@ -1254,14 +1262,21 @@ func TestNilWithdrawals(t *testing.T) { Timestamp: test.blockParams.Timestamp, FeeRecipient: test.blockParams.SuggestedFeeRecipient, Random: test.blockParams.Random, - BeaconRoot: test.blockParams.BeaconRoot, }).Id() execData, err := api.GetPayloadV2(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) } - if status, err := api.NewPayloadV2(*execData.ExecutionPayload); err != nil { - t.Fatalf("error validating payload: %v", err) + var ( + status engine.PayloadStatusV1 + ) + if !shanghai { + status, err = api.NewPayloadV1(*execData.ExecutionPayload) + } else { + status, err = api.NewPayloadV2(*execData.ExecutionPayload) + } + if err != nil { + t.Fatalf("error validating payload: %v", err.(*engine.EngineAPIError).ErrorData()) } else if status.Status != engine.VALID { t.Fatalf("invalid payload") } From b0715df54a3847f59c9beb84038b1cd23aff7394 Mon Sep 17 00:00:00 2001 From: lightclient Date: Fri, 29 Sep 2023 16:21:21 -0600 Subject: [PATCH 20/26] eth/catalyst: simplify var in test --- eth/catalyst/api_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 81fbec7335..b99e3962a0 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -1267,9 +1267,7 @@ func TestNilWithdrawals(t *testing.T) { if err != nil { t.Fatalf("error getting payload, err=%v", err) } - var ( - status engine.PayloadStatusV1 - ) + var status engine.PayloadStatusV1 if !shanghai { status, err = api.NewPayloadV1(*execData.ExecutionPayload) } else { From fd8a58dbd7d01393a8602cf4858c6ecd02c3239c Mon Sep 17 00:00:00 2001 From: lightclient Date: Tue, 3 Oct 2023 06:35:15 -0600 Subject: [PATCH 21/26] eth/catalyst: fix parent beacon block test --- eth/catalyst/api.go | 2 +- eth/catalyst/api_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 5312d1e736..83cc5e71b2 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -190,7 +190,7 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, pa return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing withdrawals")) } if params.BeaconRoot != nil { - return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing beacon root")) + return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("unexpected beacon root")) } c := api.eth.BlockChain().Config() if !active(c.IsShanghai, c.IsCancun, c.LondonBlock, params.Timestamp) { diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index b99e3962a0..aa55b8cc94 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -1600,7 +1600,7 @@ func TestParentBeaconBlockRoot(t *testing.T) { fcState := engine.ForkchoiceStateV1{ 
HeadBlockHash: parent.Hash(), } - resp, err := api.ForkchoiceUpdatedV2(fcState, &blockParams) + resp, err := api.ForkchoiceUpdatedV3(fcState, &blockParams) if err != nil { t.Fatalf("error preparing payload, err=%v", err.(*engine.EngineAPIError).ErrorData()) } From 409399140129622eb5fa628a0c8f032bdac04bd3 Mon Sep 17 00:00:00 2001 From: lightclient Date: Tue, 3 Oct 2023 06:41:10 -0600 Subject: [PATCH 22/26] eth/catalyst: support new payload v1 in v2 again --- eth/catalyst/api.go | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 83cc5e71b2..9e201449cf 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -447,21 +447,28 @@ func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.Payl // NewPayloadV2 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.PayloadStatusV1, error) { - if params.Withdrawals == nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) - } - if params.ExcessBlobGas != nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) - } - if params.BlobGasUsed != nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil params.BlobGasUsed pre-cancun")) - } - c := api.eth.BlockChain().Config() - if !active(c.IsShanghai, c.IsCancun, c.LondonBlock, params.Timestamp) { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV2 must only be called for shanghai payloads")) + if active(c.IsShanghai, c.IsCancun, c.LondonBlock, params.Timestamp) { + if params.Withdrawals == nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) + } + if params.ExcessBlobGas != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) + } + if params.BlobGasUsed != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil params.BlobGasUsed pre-cancun")) + } + } else { + if params.Withdrawals != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai")) + } + if params.ExcessBlobGas != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) + } + if params.BlobGasUsed != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil params.BlobGasUsed pre-cancun")) + } } - return api.newPayload(params, nil, nil) } From fa574ad328863197b03b3fe0af0246bdce233ff4 Mon Sep 17 00:00:00 2001 From: lightclient Date: Wed, 4 Oct 2023 07:34:26 -0600 Subject: [PATCH 23/26] eth/catalyst: simplify new payload param checks --- eth/catalyst/api.go | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 9e201449cf..e58d7d4ca0 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -452,22 +452,16 @@ func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.Payl if params.Withdrawals == nil { return engine.PayloadStatusV1{Status: 
engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) } - if params.ExcessBlobGas != nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) - } - if params.BlobGasUsed != nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil params.BlobGasUsed pre-cancun")) - } } else { if params.Withdrawals != nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai")) } - if params.ExcessBlobGas != nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) - } - if params.BlobGasUsed != nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil params.BlobGasUsed pre-cancun")) - } + } + if params.ExcessBlobGas != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) + } + if params.BlobGasUsed != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil params.BlobGasUsed pre-cancun")) } return api.newPayload(params, nil, nil) } From 53766a668125445b0dc2674495f221c7e4fdda71 Mon Sep 17 00:00:00 2001 From: lightclient Date: Tue, 3 Oct 2023 09:18:53 -0600 Subject: [PATCH 24/26] beacon/engine,eth/catalyst,miner: prefix payload ID so that get payload can easily fail for payloads not constructed via analogous fcu --- beacon/engine/types.go | 8 ++++++++ eth/catalyst/api.go | 9 +++++---- eth/catalyst/api_test.go | 12 ++++++++++-- miner/payload_building.go | 14 ++++++++------ 4 files changed, 31 insertions(+), 12 deletions(-) diff --git a/beacon/engine/types.go b/beacon/engine/types.go index 67f30d4455..ac017b6e1a 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -26,6 +26,14 @@ import ( "github.com/ethereum/go-ethereum/trie" ) +type PayloadVersion byte + +var ( + PayloadV1 PayloadVersion = 0x1 + PayloadV2 PayloadVersion = 0x2 + PayloadV3 PayloadVersion = 0x3 +) + //go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go // PayloadAttributes describes the environment context in which a block should diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index e58d7d4ca0..0f82af8514 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -180,7 +180,7 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV1(update engine.ForkchoiceStateV1, pa return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("forkChoiceUpdateV1 called post-shanghai")) } } - return api.forkchoiceUpdated(update, payloadAttributes) + return api.forkchoiceUpdated(update, payloadAttributes, engine.PayloadV1) } // ForkchoiceUpdatedV2 is equivalent to V1 with the addition of withdrawals in the payload attributes. @@ -197,7 +197,7 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, pa return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called for shanghai payloads")) } } - return api.forkchoiceUpdated(update, params) + return api.forkchoiceUpdated(update, params, engine.PayloadV2) } // ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root in the payload attributes. 
@@ -214,10 +214,10 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV3 must only be called for shanghai payloads")) } } - return api.forkchoiceUpdated(update, params) + return api.forkchoiceUpdated(update, params, engine.PayloadV3) } -func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { +func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes, payloadVersion engine.PayloadVersion) (engine.ForkChoiceResponse, error) { api.forkchoiceLock.Lock() defer api.forkchoiceLock.Unlock() @@ -361,6 +361,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl Random: payloadAttributes.Random, Withdrawals: payloadAttributes.Withdrawals, BeaconRoot: payloadAttributes.BeaconRoot, + Version: payloadVersion, } id := args.Id() // If we already are busy generating this work, then we do not need diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index aa55b8cc94..b608dd3834 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -210,6 +210,7 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { FeeRecipient: blockParams.SuggestedFeeRecipient, Random: blockParams.Random, BeaconRoot: blockParams.BeaconRoot, + Version: engine.PayloadV1, }).Id() execData, err := api.GetPayloadV1(payloadID) if err != nil { @@ -1076,6 +1077,7 @@ func TestWithdrawals(t *testing.T) { Random: blockParams.Random, Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, + Version: engine.PayloadV2, }).Id() execData, err := api.GetPayloadV2(payloadID) if err != nil { @@ -1124,6 +1126,7 @@ func TestWithdrawals(t *testing.T) { Random: blockParams.Random, Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, + Version: engine.PayloadV2, }).Id() execData, err = api.GetPayloadV2(payloadID) if err != nil { @@ -1238,12 +1241,15 @@ func TestNilWithdrawals(t *testing.T) { for _, test := range tests { var ( - err error - shanghai = genesis.Config.IsShanghai(genesis.Config.LondonBlock, test.blockParams.Timestamp) + err error + payloadVersion engine.PayloadVersion + shanghai = genesis.Config.IsShanghai(genesis.Config.LondonBlock, test.blockParams.Timestamp) ) if !shanghai { + payloadVersion = engine.PayloadV1 _, err = api.ForkchoiceUpdatedV1(fcState, &test.blockParams) } else { + payloadVersion = engine.PayloadV2 _, err = api.ForkchoiceUpdatedV2(fcState, &test.blockParams) } if test.wantErr { @@ -1262,6 +1268,7 @@ func TestNilWithdrawals(t *testing.T) { Timestamp: test.blockParams.Timestamp, FeeRecipient: test.blockParams.SuggestedFeeRecipient, Random: test.blockParams.Random, + Version: payloadVersion, }).Id() execData, err := api.GetPayloadV2(payloadID) if err != nil { @@ -1616,6 +1623,7 @@ func TestParentBeaconBlockRoot(t *testing.T) { Random: blockParams.Random, Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, + Version: engine.PayloadV3, }).Id() execData, err := api.GetPayloadV3(payloadID) if err != nil { diff --git a/miner/payload_building.go b/miner/payload_building.go index 7d8c4368bf..70ebf43260 100644 --- a/miner/payload_building.go +++ b/miner/payload_building.go @@ -35,12 +35,13 @@ import ( // Check engine-api specification for more details. 
// https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#payloadattributesv1 type BuildPayloadArgs struct { - Parent common.Hash // The parent block to build payload on top - Timestamp uint64 // The provided timestamp of generated payload - FeeRecipient common.Address // The provided recipient address for collecting transaction fee - Random common.Hash // The provided randomness value - Withdrawals types.Withdrawals // The provided withdrawals - BeaconRoot *common.Hash // The provided beaconRoot (Cancun) + Parent common.Hash // The parent block to build payload on top + Timestamp uint64 // The provided timestamp of generated payload + FeeRecipient common.Address // The provided recipient address for collecting transaction fee + Random common.Hash // The provided randomness value + Withdrawals types.Withdrawals // The provided withdrawals + BeaconRoot *common.Hash // The provided beaconRoot (Cancun) + Version engine.PayloadVersion // Versioning byte for payload id calculation. } // Id computes an 8-byte identifier by hashing the components of the payload arguments. @@ -57,6 +58,7 @@ func (args *BuildPayloadArgs) Id() engine.PayloadID { } var out engine.PayloadID copy(out[:], hasher.Sum(nil)[:8]) + out[0] = byte(args.Version) return out } From 280891b9a75a574435e4b58d3e9cf2ac399aa9e5 Mon Sep 17 00:00:00 2001 From: lightclient Date: Tue, 3 Oct 2023 09:22:19 -0600 Subject: [PATCH 25/26] beacon/engine,eth/catalyst: gate on get payload on id version --- beacon/engine/types.go | 4 ++++ eth/catalyst/api.go | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/beacon/engine/types.go b/beacon/engine/types.go index ac017b6e1a..0b9e4f9803 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -123,6 +123,10 @@ type TransitionConfigurationV1 struct { // PayloadID is an identifier of the payload build process type PayloadID [8]byte +func (b PayloadID) Version() PayloadVersion { + return PayloadVersion(b[0]) +} + func (b PayloadID) String() string { return hexutil.Encode(b[:]) } diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 0f82af8514..7cbeabc2ad 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -412,6 +412,9 @@ func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config engine.Transit // GetPayloadV1 returns a cached payload by id. func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.ExecutableData, error) { + if payloadID.Version() != engine.PayloadV1 { + return nil, engine.UnsupportedFork + } data, err := api.getPayload(payloadID, false) if err != nil { return nil, err @@ -421,11 +424,18 @@ func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.Execu // GetPayloadV2 returns a cached payload by id. func (api *ConsensusAPI) GetPayloadV2(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { + version := payloadID.Version() + if version != engine.PayloadV1 && version != engine.PayloadV2 { + return nil, engine.UnsupportedFork + } return api.getPayload(payloadID, false) } // GetPayloadV3 returns a cached payload by id. 
func (api *ConsensusAPI) GetPayloadV3(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { + if payloadID.Version() != engine.PayloadV3 { + return nil, engine.UnsupportedFork + } return api.getPayload(payloadID, false) } From 6e6c723b4d74fa2ec4491f11354facadb32e8cfb Mon Sep 17 00:00:00 2001 From: lightclient Date: Tue, 3 Oct 2023 13:58:32 -0600 Subject: [PATCH 26/26] beacon/engine: add docs for payload version type --- beacon/engine/types.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon/engine/types.go b/beacon/engine/types.go index 0b9e4f9803..baaa3b925f 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -26,6 +26,8 @@ import ( "github.com/ethereum/go-ethereum/trie" ) +// PayloadVersion denotes the version of PayloadAttributes used to request the +// building of the payload to commence. type PayloadVersion byte var (