diff --git a/beacon/engine/types.go b/beacon/engine/types.go index 42713ed4d..36cd24067 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -32,6 +32,16 @@ import ( "github.com/ethereum/go-ethereum/trie" ) +// PayloadVersion denotes the version of PayloadAttributes used to request the +// building of the payload to commence. +type PayloadVersion byte + +var ( + PayloadV1 PayloadVersion = 0x1 + PayloadV2 PayloadVersion = 0x2 + PayloadV3 PayloadVersion = 0x3 +) + //go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go // PayloadAttributes describes the environment context in which a block should @@ -123,6 +133,10 @@ type TransitionConfigurationV1 struct { // PayloadID is an identifier of the payload build process type PayloadID [8]byte +func (b PayloadID) Version() PayloadVersion { + return PayloadVersion(b[0]) +} + func (b PayloadID) String() string { return hexutil.Encode(b[:]) } diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go index 67221923a..de6acfdcd 100644 --- a/cmd/devp2p/internal/ethtest/chain_test.go +++ b/cmd/devp2p/internal/ethtest/chain_test.go @@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) { }{ { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: uint64(2)}, Amount: uint64(5), Skip: 1, @@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)}, Amount: uint64(3), Skip: 0, @@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: chain.Head().Hash()}, Amount: uint64(1), Skip: 0, diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index bc901bdeb..a0339b88c 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -62,7 +62,6 @@ func (s *Suite) dial() (*Conn, error) { } // set default p2p capabilities conn.caps = []p2p.Cap{ - {Name: "eth", Version: 66}, {Name: "eth", Version: 67}, {Name: "eth", Version: 68}, } @@ -237,8 +236,8 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { return errorf("could not get headers for inbound header request: %v", err) } resp := &BlockHeaders{ - RequestId: msg.ReqID(), - BlockHeadersPacket: eth.BlockHeadersPacket(headers), + RequestId: msg.ReqID(), + BlockHeadersRequest: eth.BlockHeadersRequest(headers), } if err := c.Write(resp); err != nil { return errorf("could not write to connection: %v", err) @@ -267,7 +266,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint if !ok { return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg)) } - headers := []*types.Header(resp.BlockHeadersPacket) + headers := []*types.Header(resp.BlockHeadersRequest) return headers, nil } @@ -379,7 +378,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error { conn.SetReadDeadline(time.Now().Add(20 * time.Second)) // create request req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: 
block.Hash()}, Amount: 1, }, @@ -604,8 +603,8 @@ func (s *Suite) hashAnnounce() error { pretty.Sdump(blockHeaderReq)) } err = sendConn.Write(&BlockHeaders{ - RequestId: blockHeaderReq.ReqID(), - BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()}, + RequestId: blockHeaderReq.ReqID(), + BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()}, }) if err != nil { return fmt.Errorf("failed to write to connection: %v", err) diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 815353be7..0b56c8cf4 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { } // write request req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()}, Amount: 2, Skip: 1, @@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { // create two requests req1 := &GetBlockHeaders{ RequestId: uint64(111), - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { } req2 := &GetBlockHeaders{ RequestId: uint64(222), - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected headers for request 2: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersPacket) { + if !headersMatch(expected1, headers1.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersPacket) { + if !headersMatch(expected2, headers2.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { reqID := uint64(1234) request1 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: 1, }, @@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { } request2 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: 33, }, @@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected block headers: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersPacket) { + if !headersMatch(expected1, headers1.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersPacket) { + if !headersMatch(expected2, headers2.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) { t.Fatalf("peering failed: %v", err) } req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: 0}, Amount: 2, }, @@ -322,7 +322,7 @@ func (s *Suite) 
TestGetBlockBodies(t *utesting.T) { // create block bodies request req := &GetBlockBodies{ RequestId: uint64(55), - GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ + GetBlockBodiesRequest: eth.GetBlockBodiesRequest{ s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash(), }, @@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { if !ok { t.Fatalf("unexpected: %s", pretty.Sdump(msg)) } - bodies := resp.BlockBodiesPacket + bodies := resp.BlockBodiesResponse t.Logf("received %d block bodies", len(bodies)) - if len(bodies) != len(req.GetBlockBodiesPacket) { + if len(bodies) != len(req.GetBlockBodiesRequest) { t.Fatalf("wrong bodies in response: expected %d bodies, "+ - "got %d", len(req.GetBlockBodiesPacket), len(bodies)) + "got %d", len(req.GetBlockBodiesRequest), len(bodies)) } } @@ -481,8 +481,8 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { hashes = append(hashes, hash) } getTxReq := &GetPooledTransactions{ - RequestId: 1234, - GetPooledTransactionsPacket: hashes, + RequestId: 1234, + GetPooledTransactionsRequest: hashes, } if err = conn.Write(getTxReq); err != nil { t.Fatalf("could not write to conn: %v", err) @@ -490,7 +490,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { // check that all received transactions match those that were sent to node switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { case *PooledTransactions: - for _, gotTx := range msg.PooledTransactionsPacket { + for _, gotTx := range msg.PooledTransactionsResponse { if _, exists := hashMap[gotTx.Hash()]; !exists { t.Fatalf("unexpected tx received: %v", gotTx.Hash()) } @@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) { msg := conn.readAndServe(s.chain, timeout) switch msg := msg.(type) { case *GetPooledTransactions: - if len(msg.GetPooledTransactionsPacket) != len(hashes) { - t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) + if len(msg.GetPooledTransactionsRequest) != len(hashes) { + t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest)) } return diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index afa9a9c8c..805d7a81b 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 } func (msg Transactions) ReqID() uint64 { return 18 } // GetBlockHeaders represents a block header query. -type GetBlockHeaders eth.GetBlockHeadersPacket66 +type GetBlockHeaders eth.GetBlockHeadersPacket func (msg GetBlockHeaders) Code() int { return 19 } func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId } -type BlockHeaders eth.BlockHeadersPacket66 +type BlockHeaders eth.BlockHeadersPacket func (msg BlockHeaders) Code() int { return 20 } func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId } // GetBlockBodies represents a GetBlockBodies request -type GetBlockBodies eth.GetBlockBodiesPacket66 +type GetBlockBodies eth.GetBlockBodiesPacket func (msg GetBlockBodies) Code() int { return 21 } func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId } // BlockBodies is the network packet for block content distribution. 
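Across the devp2p test changes above and the type aliases just below, the eth/66-specific *Packet66 wrappers are dropped and the request/response split is spelled out in the type names (GetBlockHeadersRequest, BlockBodiesResponse, PooledTransactionsResponse, and so on). A minimal sketch of building one of the renamed packets, assuming the exported types in eth/protocols/eth match the aliases used by this test suite:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// GetBlockHeadersPacket is the request-ID-carrying wrapper (formerly
	// GetBlockHeadersPacket66); GetBlockHeadersRequest is the embedded query
	// (formerly GetBlockHeadersPacket).
	req := &eth.GetBlockHeadersPacket{
		RequestId: 111,
		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
			Origin: eth.HashOrNumber{Number: 2},
			Amount: 5,
			Skip:   1,
		},
	}
	fmt.Printf("request %d asks for %d headers\n", req.RequestId, req.GetBlockHeadersRequest.Amount)
}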
-type BlockBodies eth.BlockBodiesPacket66 +type BlockBodies eth.BlockBodiesPacket func (msg BlockBodies) Code() int { return 22 } func (msg BlockBodies) ReqID() uint64 { return msg.RequestId } @@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 } func (msg NewBlock) ReqID() uint64 { return 0 } // NewPooledTransactionHashes66 is the network packet for the tx hash propagation message. -type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66 +type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67 func (msg NewPooledTransactionHashes66) Code() int { return 24 } func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 } @@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68 func (msg NewPooledTransactionHashes) Code() int { return 24 } func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 } -type GetPooledTransactions eth.GetPooledTransactionsPacket66 +type GetPooledTransactions eth.GetPooledTransactionsPacket func (msg GetPooledTransactions) Code() int { return 25 } func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId } -type PooledTransactions eth.PooledTransactionsPacket66 +type PooledTransactions eth.PooledTransactionsPacket func (msg PooledTransactions) Code() int { return 26 } func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId } @@ -180,25 +180,25 @@ func (c *Conn) Read() Message { case (Status{}).Code(): msg = new(Status) case (GetBlockHeaders{}).Code(): - ethMsg := new(eth.GetBlockHeadersPacket66) + ethMsg := new(eth.GetBlockHeadersPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockHeaders)(ethMsg) case (BlockHeaders{}).Code(): - ethMsg := new(eth.BlockHeadersPacket66) + ethMsg := new(eth.BlockHeadersPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*BlockHeaders)(ethMsg) case (GetBlockBodies{}).Code(): - ethMsg := new(eth.GetBlockBodiesPacket66) + ethMsg := new(eth.GetBlockBodiesPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockBodies)(ethMsg) case (BlockBodies{}).Code(): - ethMsg := new(eth.BlockBodiesPacket66) + ethMsg := new(eth.BlockBodiesPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } @@ -217,13 +217,13 @@ func (c *Conn) Read() Message { } msg = new(NewPooledTransactionHashes66) case (GetPooledTransactions{}.Code()): - ethMsg := new(eth.GetPooledTransactionsPacket66) + ethMsg := new(eth.GetPooledTransactionsPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetPooledTransactions)(ethMsg) case (PooledTransactions{}.Code()): - ethMsg := new(eth.PooledTransactionsPacket66) + ethMsg := new(eth.PooledTransactionsPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 09dca8984..5c0e28e28 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -37,33 +37,38 @@ import ( //go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go type header struct { - ParentHash common.Hash `json:"parentHash"` - OmmerHash 
*common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom types.Bloom `json:"logsBloom"` - Difficulty *big.Int `json:"difficulty"` - Number *big.Int `json:"number" gencodec:"required"` - GasLimit uint64 `json:"gasLimit" gencodec:"required"` - GasUsed uint64 `json:"gasUsed"` - Time uint64 `json:"timestamp" gencodec:"required"` - Extra []byte `json:"extraData"` - MixDigest common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *big.Int `json:"difficulty"` + Number *big.Int `json:"number" gencodec:"required"` + GasLimit uint64 `json:"gasLimit" gencodec:"required"` + GasUsed uint64 `json:"gasUsed"` + Time uint64 `json:"timestamp" gencodec:"required"` + Extra []byte `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } type headerMarshaling struct { - Difficulty *math.HexOrDecimal256 - Number *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - GasUsed math.HexOrDecimal64 - Time math.HexOrDecimal64 - Extra hexutil.Bytes - BaseFee *math.HexOrDecimal256 + Difficulty *math.HexOrDecimal256 + Number *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + GasUsed math.HexOrDecimal64 + Time math.HexOrDecimal64 + Extra hexutil.Bytes + BaseFee *math.HexOrDecimal256 + BlobGasUsed *math.HexOrDecimal64 + ExcessBlobGas *math.HexOrDecimal64 } type bbInput struct { @@ -113,22 +118,25 @@ func (c *cliqueInput) UnmarshalJSON(input []byte) error { // ToBlock converts i into a *types.Block func (i *bbInput) ToBlock() *types.Block { header := &types.Header{ - ParentHash: i.Header.ParentHash, - UncleHash: types.EmptyUncleHash, - Coinbase: common.Address{}, - Root: i.Header.Root, - TxHash: types.EmptyTxsHash, - ReceiptHash: types.EmptyReceiptsHash, - Bloom: i.Header.Bloom, - Difficulty: common.Big0, - Number: i.Header.Number, - GasLimit: i.Header.GasLimit, - GasUsed: i.Header.GasUsed, - Time: i.Header.Time, - Extra: i.Header.Extra, - MixDigest: i.Header.MixDigest, - BaseFee: i.Header.BaseFee, - WithdrawalsHash: i.Header.WithdrawalsHash, + ParentHash: i.Header.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: common.Address{}, + Root: i.Header.Root, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, + Bloom: i.Header.Bloom, + Difficulty: common.Big0, + Number: i.Header.Number, + GasLimit: i.Header.GasLimit, + GasUsed: i.Header.GasUsed, + Time: i.Header.Time, + Extra: i.Header.Extra, + MixDigest: i.Header.MixDigest, + BaseFee: i.Header.BaseFee, + WithdrawalsHash: i.Header.WithdrawalsHash, + BlobGasUsed: i.Header.BlobGasUsed, + ExcessBlobGas: i.Header.ExcessBlobGas, + 
ParentBeaconRoot: i.Header.ParentBeaconBlockRoot, } // Fill optional values. @@ -150,7 +158,7 @@ func (i *bbInput) ToBlock() *types.Block { if i.Header.Nonce != nil { header.Nonce = *i.Header.Nonce } - if header.Difficulty != nil { + if i.Header.Difficulty != nil { header.Difficulty = i.Header.Difficulty } return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index eb26bde58..312f427d4 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -59,7 +59,7 @@ type ExecutionResult struct { BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"` CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` - CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"` + CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"` } type ommer struct { @@ -85,7 +85,7 @@ type stEnv struct { Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` BaseFee *big.Int `json:"currentBaseFee,omitempty"` ParentUncleHash common.Hash `json:"parentUncleHash"` - ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *uint64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` @@ -163,18 +163,18 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, rnd := common.BigToHash(pre.Env.Random) vmContext.Random = &rnd } - // If excessBlobGas is defined, add it to the vmContext. + // Calculate the BlobBaseFee + var excessBlobGas uint64 if pre.Env.ExcessBlobGas != nil { - vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas - vmContext.BlobBaseFee = eip4844.CalcBlobFee(*pre.Env.ExcessBlobGas) + excessBlobGas := *pre.Env.ExcessBlobGas + vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) } else { // If it is not explicitly defined, but we have the parent values, we try // to calculate it ourselves. 
parentExcessBlobGas := pre.Env.ParentExcessBlobGas parentBlobGasUsed := pre.Env.ParentBlobGasUsed if parentExcessBlobGas != nil && parentBlobGasUsed != nil { - excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) - vmContext.ExcessBlobGas = &excessBlobGas + excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) } } @@ -191,12 +191,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } var blobGasUsed uint64 for i, tx := range txs { - if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil { + if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil { errMsg := "blob tx used but field env.ExcessBlobGas missing" log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg) rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) continue } + if tx.Type() == types.BlobTxType { + blobGasUsed += uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes())) + } msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee) if err != nil { log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) @@ -226,9 +229,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, gaspool.SetGas(prevGas) continue } - if tx.Type() == types.BlobTxType { - blobGasUsed += params.BlobTxBlobGasPerBlob - } includedTxs = append(includedTxs, tx) if hashError != nil { return nil, nil, NewError(ErrorMissingBlockhash, hashError) @@ -324,8 +324,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil)) execRs.WithdrawalsRoot = &h } - if vmContext.ExcessBlobGas != nil { - execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas) + if vmContext.BlobBaseFee != nil { + execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas) execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed) } // Re-create statedb instance with new root upon the updated database diff --git a/cmd/evm/internal/t8ntool/gen_header.go b/cmd/evm/internal/t8ntool/gen_header.go index 76228394d..a8c866897 100644 --- a/cmd/evm/internal/t8ntool/gen_header.go +++ b/cmd/evm/internal/t8ntool/gen_header.go @@ -18,23 +18,26 @@ var _ = (*headerMarshaling)(nil) // MarshalJSON marshals as JSON. 
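The excess-blob-gas fallback in the Apply hunk above derives the blob base fee either from the environment's currentExcessBlobGas or from the parent header values, and only the resulting BlobBaseFee is kept on the VM context. A standalone sketch of that derivation, assuming the helpers in consensus/misc/eip4844 and the params constant used by the diff:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Derive the current excess blob gas from the parent block, then the blob
	// base fee from it -- mirroring the fallback path in Apply().
	var (
		parentExcessBlobGas uint64 = 0
		parentBlobGasUsed   uint64 = 2 * params.BlobTxBlobGasPerBlob
	)
	excessBlobGas := eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
	blobBaseFee := eip4844.CalcBlobFee(excessBlobGas)

	// Per-transaction blob gas is blobs * BlobTxBlobGasPerBlob, which is what
	// the reworked accounting in the tx loop now sums into blobGasUsed.
	blobs := 2
	fmt.Println("excess:", excessBlobGas, "blob base fee:", blobBaseFee,
		"tx blob gas:", uint64(blobs)*params.BlobTxBlobGasPerBlob)
}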
func (h header) MarshalJSON() ([]byte, error) { type header struct { - ParentHash common.Hash `json:"parentHash"` - OmmerHash *common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom types.Bloom `json:"logsBloom"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` - GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData"` - MixDigest common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } var enc header enc.ParentHash = h.ParentHash @@ -54,29 +57,35 @@ func (h header) MarshalJSON() ([]byte, error) { enc.Nonce = h.Nonce enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee) enc.WithdrawalsHash = h.WithdrawalsHash + enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed) + enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas) + enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
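The regenerated MarshalJSON above (and UnmarshalJSON below) encodes the three new optional header fields through pointer types, so absent values stay absent, and through math.HexOrDecimal64, so both hex and decimal strings are accepted on input. A small, hypothetical round trip illustrating that behaviour:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
)

// blobFields is an illustrative mini-struct mirroring how gen_header.go
// handles the new optional blob fields; it is not part of the diff.
type blobFields struct {
	BlobGasUsed   *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
	ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
}

func main() {
	var dec blobFields
	if err := json.Unmarshal([]byte(`{"blobGasUsed":"0x20000"}`), &dec); err != nil {
		panic(err)
	}
	fmt.Println(uint64(*dec.BlobGasUsed)) // 131072

	out, _ := json.Marshal(&dec)
	fmt.Println(string(out)) // blobGasUsed re-encoded as hex, excessBlobGas omitted
}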
func (h *header) UnmarshalJSON(input []byte) error { type header struct { - ParentHash *common.Hash `json:"parentHash"` - OmmerHash *common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root *common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom *types.Bloom `json:"logsBloom"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` - GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - GasUsed *math.HexOrDecimal64 `json:"gasUsed"` - Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` - Extra *hexutil.Bytes `json:"extraData"` - MixDigest *common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash *common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root *common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom *types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed *math.HexOrDecimal64 `json:"gasUsed"` + Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra *hexutil.Bytes `json:"extraData"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } var dec header if err := json.Unmarshal(input, &dec); err != nil { @@ -137,5 +146,14 @@ func (h *header) UnmarshalJSON(input []byte) error { if dec.WithdrawalsHash != nil { h.WithdrawalsHash = dec.WithdrawalsHash } + if dec.BlobGasUsed != nil { + h.BlobGasUsed = (*uint64)(dec.BlobGasUsed) + } + if dec.ExcessBlobGas != nil { + h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } + if dec.ParentBeaconBlockRoot != nil { + h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot + } return nil } diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index bb195ef64..d47db4a87 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -33,7 +33,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) { Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` ParentUncleHash common.Hash `json:"parentUncleHash"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` @@ -81,7 +81,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { Withdrawals []*types.Withdrawal 
`json:"withdrawals,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` ParentUncleHash *common.Hash `json:"parentUncleHash"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 396b341d2..600bc460f 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -334,7 +334,7 @@ func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *pa txsWithKeys = inputData.Txs } // We may have to sign the transactions. - signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp) + signer := types.LatestSignerForChainID(chainConfig.ChainID) return signUnsignedTransactions(txsWithKeys, signer) } diff --git a/cmd/evm/testdata/28/env.json b/cmd/evm/testdata/28/env.json index 5056fe29a..82f22ac62 100644 --- a/cmd/evm/testdata/28/env.json +++ b/cmd/evm/testdata/28/env.json @@ -9,8 +9,7 @@ "parentDifficulty" : "0x00", "parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "withdrawals" : [ - ], + "withdrawals" : [], "parentBaseFee" : "0x0a", "parentGasUsed" : "0x00", "parentGasLimit" : "0x7fffffffffffffff", @@ -20,4 +19,4 @@ "0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6" }, "parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" -} \ No newline at end of file +} diff --git a/cmd/evm/testdata/28/exp.json b/cmd/evm/testdata/28/exp.json index a55ce0aec..75c715e97 100644 --- a/cmd/evm/testdata/28/exp.json +++ b/cmd/evm/testdata/28/exp.json @@ -42,6 +42,6 @@ "currentBaseFee": "0x9", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "currentExcessBlobGas": "0x0", - "currentBlobGasUsed": "0x20000" + "blobGasUsed": "0x20000" } -} \ No newline at end of file +} diff --git a/cmd/evm/testdata/29/exp.json b/cmd/evm/testdata/29/exp.json index 83e1db26f..c4c001ec1 100644 --- a/cmd/evm/testdata/29/exp.json +++ b/cmd/evm/testdata/29/exp.json @@ -40,6 +40,6 @@ "currentBaseFee": "0x9", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "currentExcessBlobGas": "0x0", - "currentBlobGasUsed": "0x0" + "blobGasUsed": "0x0" } -} \ No newline at end of file +} diff --git a/cmd/geth/config.go b/cmd/geth/config.go index bd5d85acb..e6400f9fc 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -34,6 +34,8 @@ import ( "github.com/ethereum/go-ethereum/builder" "github.com/ethereum/go-ethereum/cmd/utils" blockvalidationapi "github.com/ethereum/go-ethereum/eth/block-validation" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" @@ -218,17 +220,18 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { if ctx.IsSet(utils.GraphQLEnabledFlag.Name) { utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node) } - // Add the Ethereum Stats daemon if 
requested. if cfg.Ethstats.URL != "" { utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) } - // Configure full-sync tester service if requested - if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync { - utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name)) + if ctx.IsSet(utils.SyncTargetFlag.Name) { + hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name)) + if len(hex) != common.HashLength { + utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength) + } + utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex)) } - // Start the dev mode if requested, or launch the engine API for // interacting with external consensus client. if ctx.IsSet(utils.DeveloperFlag.Name) { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 8d1aa72d2..10eed0dbd 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -18,7 +18,6 @@ package utils import ( - "bytes" "context" "crypto/ecdsa" "encoding/hex" @@ -41,11 +40,9 @@ import ( "github.com/ethereum/go-ethereum/builder" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/fdlimit" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool/legacypool" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -74,7 +71,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" @@ -631,9 +627,9 @@ var ( } // MISC settings - SyncTargetFlag = &cli.PathFlag{ + SyncTargetFlag = &cli.StringFlag{ Name: "synctarget", - Usage: `File for containing the hex-encoded block-rlp as sync target(dev feature)`, + Usage: `Hash of the block to full sync to (dev testing feature)`, TakesFile: true, Category: flags.MiscCategory, } @@ -2021,7 +2017,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) godebug.SetGCPercent(int(gogc)) - if ctx.IsSet(SyncModeFlag.Name) { + if ctx.IsSet(SyncTargetFlag.Name) { + cfg.SyncMode = downloader.FullSync // dev sync target forces full sync + } else if ctx.IsSet(SyncModeFlag.Name) { cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) } if ctx.IsSet(NetworkIdFlag.Name) { @@ -2313,21 +2311,9 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf } // RegisterFullSyncTester adds the full-sync tester service into node. 
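The --synctarget handling above (together with the flag and RegisterFullSyncTester changes in cmd/utils/flags.go below) replaces the RLP block file with a plain 32-byte block hash that is decoded and length-checked on startup. A sketch of that parse-and-validate step as a hypothetical helper, using only hexutil and common:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// parseSyncTarget mirrors the validation in makeFullNode: the flag value must
// be a 0x-prefixed, 32-byte hash. (Illustrative helper, not part of the diff.)
func parseSyncTarget(val string) (common.Hash, error) {
	raw, err := hexutil.Decode(val)
	if err != nil {
		return common.Hash{}, err
	}
	if len(raw) != common.HashLength {
		return common.Hash{}, fmt.Errorf("invalid sync target length: have %d, want %d", len(raw), common.HashLength)
	}
	return common.BytesToHash(raw), nil
}

func main() {
	target, err := parseSyncTarget("0x1111111111111111111111111111111111111111111111111111111111111111")
	fmt.Println(target, err)
}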
-func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) { - blob, err := os.ReadFile(path) - if err != nil { - Fatalf("Failed to read block file: %v", err) - } - rlpBlob, err := hexutil.Decode(string(bytes.TrimRight(blob, "\r\n"))) - if err != nil { - Fatalf("Failed to decode block blob: %v", err) - } - var block types.Block - if err := rlp.DecodeBytes(rlpBlob, &block); err != nil { - Fatalf("Failed to decode block: %v", err) - } - catalyst.RegisterFullSyncTester(stack, eth, &block) - log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash()) +func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash) { + catalyst.RegisterFullSyncTester(stack, eth, target) + log.Info("Registered full-sync tester", "hash", target) } func SetupMetrics(ctx *cli.Context) { diff --git a/common/types.go b/common/types.go index bf74e4371..7184b2b11 100644 --- a/common/types.go +++ b/common/types.go @@ -239,9 +239,6 @@ func (a Address) Cmp(other Address) int { // Bytes gets the string representation of the underlying address. func (a Address) Bytes() []byte { return a[:] } -// Hash converts an address to a hash by left-padding it with zeros. -func (a Address) Hash() Hash { return BytesToHash(a[:]) } - // Big converts an address to a big integer. func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) } diff --git a/core/asm/asm.go b/core/asm/asm.go index 7c1e14ec0..294eb6ffa 100644 --- a/core/asm/asm.go +++ b/core/asm/asm.go @@ -34,7 +34,7 @@ type instructionIterator struct { started bool } -// NewInstructionIterator create a new instruction iterator. +// NewInstructionIterator creates a new instruction iterator. func NewInstructionIterator(code []byte) *instructionIterator { it := new(instructionIterator) it.code = code diff --git a/core/asm/compiler.go b/core/asm/compiler.go index 75bf726c9..02c589b2c 100644 --- a/core/asm/compiler.go +++ b/core/asm/compiler.go @@ -49,7 +49,7 @@ func NewCompiler(debug bool) *Compiler { } } -// Feed feeds tokens in to ch and are interpreted by +// Feed feeds tokens into ch and are interpreted by // the compiler. // // feed is the first pass in the compile stage as it collects the used labels in the diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go index 173031521..1e62d776d 100644 --- a/core/asm/lex_test.go +++ b/core/asm/lex_test.go @@ -72,12 +72,12 @@ func TestLexer(t *testing.T) { input: "@label123", tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, }, - // comment after label + // Comment after label { input: "@label123 ;; comment", tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, }, - // comment after instruction + // Comment after instruction { input: "push 3 ;; comment\nadd", tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}}, diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go index cf799c832..6a4cfb23d 100644 --- a/core/bloombits/matcher.go +++ b/core/bloombits/matcher.go @@ -58,7 +58,7 @@ type partialMatches struct { // bit with the given number of fetch elements, or a response for such a request. // It can also have the actual results set to be used as a delivery data struct. 
// -// The contest and error fields are used by the light client to terminate matching +// The context and error fields are used by the light client to terminate matching // early if an error is encountered on some path of the pipeline. type Retrieval struct { Bit uint @@ -389,7 +389,7 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) { shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests ) - // assign is a helper method fo try to assign a pending bit an actively + // assign is a helper method to try to assign a pending bit an actively // listening servicer, or schedule it up for later when one arrives. assign := func(bit uint) { select { diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go index 36764c3f1..7f3d5f279 100644 --- a/core/bloombits/matcher_test.go +++ b/core/bloombits/matcher_test.go @@ -85,7 +85,7 @@ func TestMatcherRandom(t *testing.T) { } // Tests that the matcher can properly find matches if the starting block is -// shifter from a multiple of 8. This is needed to cover an optimisation with +// shifted from a multiple of 8. This is needed to cover an optimisation with // bitset matching https://github.com/ethereum/go-ethereum/issues/15309. func TestMatcherShifted(t *testing.T) { t.Parallel() @@ -106,7 +106,7 @@ func TestWildcardMatcher(t *testing.T) { testMatcherBothModes(t, nil, 0, 10000, 0) } -// makeRandomIndexes generates a random filter system, composed on multiple filter +// makeRandomIndexes generates a random filter system, composed of multiple filter // criteria, each having one bloom list component for the address and arbitrarily // many topic bloom list components. func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes { diff --git a/core/chain_makers.go b/core/chain_makers.go index 14e755996..e8d814089 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -88,11 +88,6 @@ func (b *BlockGen) SetPoS() { b.header.Difficulty = new(big.Int) } -// SetBlobGas sets the data gas used by the blob in the generated block. -func (b *BlockGen) SetBlobGas(blobGasUsed uint64) { - b.header.BlobGasUsed = &blobGasUsed -} - // addTx adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // @@ -111,6 +106,9 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti } b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) + if b.header.BlobGasUsed != nil { + *b.header.BlobGasUsed += receipt.BlobGasUsed + } } // AddTx adds a transaction to the generated block. 
If no coinbase has diff --git a/core/evm.go b/core/evm.go index c6625af70..46dcb3146 100644 --- a/core/evm.go +++ b/core/evm.go @@ -61,18 +61,17 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common random = &header.MixDigest } return vm.BlockContext{ - CanTransfer: CanTransfer, - Transfer: Transfer, - GetHash: GetHashFn(header, chain), - Coinbase: beneficiary, - BlockNumber: new(big.Int).Set(header.Number), - Time: header.Time, - Difficulty: new(big.Int).Set(header.Difficulty), - BaseFee: baseFee, - BlobBaseFee: blobBaseFee, - GasLimit: header.GasLimit, - Random: random, - ExcessBlobGas: header.ExcessBlobGas, + CanTransfer: CanTransfer, + Transfer: Transfer, + GetHash: GetHashFn(header, chain), + Coinbase: beneficiary, + BlockNumber: new(big.Int).Set(header.Number), + Time: header.Time, + Difficulty: new(big.Int).Set(header.Difficulty), + BaseFee: baseFee, + BlobBaseFee: blobBaseFee, + GasLimit: header.GasLimit, + Random: random, } } diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index 22dbda4a2..cbfaf5b9e 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -200,7 +200,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { } batch.Reset() - // Step into the future and delete and dangling side chains + // Step into the future and delete any dangling side chains if frozen > 0 { tip := frozen for len(dangling) > 0 { diff --git a/core/rawdb/database.go b/core/rawdb/database.go index e97eeb2aa..0c7cf9f11 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -34,7 +34,7 @@ import ( "github.com/olekukonko/tablewriter" ) -// freezerdb is a database wrapper that enabled freezer data retrievals. +// freezerdb is a database wrapper that enables freezer data retrievals. type freezerdb struct { ancientRoot string ethdb.KeyValueStore @@ -141,7 +141,7 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) // Unlike other ancient-related methods, this method does not return // errNotSupported when invoked. // The reason for this is that the caller might want to do several things: - // 1. Check if something is in freezer, + // 1. Check if something is in the freezer, // 2. If not, check leveldb. // // This will work, since the ancient-checks inside 'fn' will return errors, @@ -209,7 +209,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st // of the freezer and database. Ensure that we don't shoot ourselves in the foot // by serving up conflicting data, leading to both datastores getting corrupted. // - // - If both the freezer and key-value store is empty (no genesis), we just + // - If both the freezer and key-value store are empty (no genesis), we just // initialized a new empty freezer, so everything's fine. // - If the key-value store is empty, but the freezer is not, we need to make // sure the user's genesis matches the freezer. That will be checked in the @@ -218,7 +218,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st // - If neither the key-value store nor the freezer is empty, cross validate // the genesis hashes to make sure they are compatible. If they are, also // ensure that there's no gap between the freezer and subsequently leveldb. - // - If the key-value store is not empty, but the freezer is we might just be + // - If the key-value store is not empty, but the freezer is, we might just be // upgrading to the freezer release, or we might have had a small chain and // not frozen anything yet. 
Ensure that no blocks are missing yet from the // key-value store, since that would mean we already had an old freezer. @@ -634,7 +634,7 @@ func printChainMetadata(db ethdb.KeyValueStore) { fmt.Fprintf(os.Stderr, "\n\n") } -// ReadChainMetadata returns a set of key/value pairs that contains informatin +// ReadChainMetadata returns a set of key/value pairs that contains information // about the database chain status. This can be used for diagnostic purposes // when investigating the state of the node. func ReadChainMetadata(db ethdb.KeyValueStore) [][]string { diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go index 1593e89bf..e9f9332ad 100644 --- a/core/rawdb/databases_64bit.go +++ b/core/rawdb/databases_64bit.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb/pebble" ) -// Pebble is unsuported on 32bit architecture +// Pebble is unsupported on 32bit architecture const PebbleEnabled = true // NewPebbleDBDatabase creates a persistent key-value database without a freezer diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 1895f61da..19e4ed5b5 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -219,7 +219,7 @@ func (b *tableBatch) Put(key, value []byte) error { return b.batch.Put(append([]byte(b.prefix), key...), value) } -// Delete inserts the a key removal into the batch for later committing. +// Delete inserts a key removal into the batch for later committing. func (b *tableBatch) Delete(key []byte) error { return b.batch.Delete(append([]byte(b.prefix), key...)) } diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 4e8fd1e10..772c698dd 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -37,7 +37,7 @@ var ( type triePrefetcher struct { db Database // Database to fetch trie nodes through root common.Hash // Root hash of the account trie for metrics - fetches map[string]Trie // Partially or fully fetcher tries + fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies. fetchers map[string]*subfetcher // Subfetchers for each trie deliveryMissMeter metrics.Meter @@ -197,7 +197,10 @@ func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte // trieID returns an unique trie identifier consists the trie owner and root hash. 
func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { - return string(append(owner.Bytes(), root.Bytes()...)) + trieID := make([]byte, common.HashLength*2) + copy(trieID, owner.Bytes()) + copy(trieID[common.HashLength:], root.Bytes()) + return string(trieID) } // subfetcher is a trie fetcher goroutine responsible for pulling entries for a diff --git a/core/state_processor.go b/core/state_processor.go index 0bb19130f..2d4ae3b86 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -146,7 +145,7 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta if tx.Type() == types.BlobTxType { receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) - receipt.BlobGasPrice = eip4844.CalcBlobFee(*evm.Context.ExcessBlobGas) + receipt.BlobGasPrice = evm.Context.BlobBaseFee } // If the transaction created a contract, store the creation address in the receipt. diff --git a/core/state_transition.go b/core/state_transition.go index f84757be7..fb03c48aa 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" @@ -248,7 +247,7 @@ func (st *StateTransition) buyGas() error { balanceCheck.Add(balanceCheck, blobBalanceCheck) // Pay for blobGasUsed * actual blob fee blobFee := new(big.Int).SetUint64(blobGas) - blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)) + blobFee.Mul(blobFee, st.evm.Context.BlobBaseFee) mgval.Add(mgval, blobFee) } } @@ -329,7 +328,7 @@ func (st *StateTransition) preCheck() error { if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { if st.blobGasUsed() > 0 { // Check that the user is paying at least the current blob fee - blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas) + blobFee := st.evm.Context.BlobBaseFee if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 { return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee) } diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 1df830751..958cc409e 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -97,6 +97,8 @@ type blobTxMeta struct { execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump execFeeCap *uint256.Int // Needed to validate replacement price bump blobFeeCap *uint256.Int // Needed to validate replacement price bump + execGas uint64 // Needed to check inclusion validity before reading the blob + blobGas uint64 // Needed to check inclusion validity before reading the blob basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap @@ -118,6 +120,8 @@ func newBlobTxMeta(id uint64, size uint32, tx 
*types.Transaction) *blobTxMeta { execTipCap: uint256.MustFromBig(tx.GasTipCap()), execFeeCap: uint256.MustFromBig(tx.GasFeeCap()), blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()), + execGas: tx.Gas(), + blobGas: tx.BlobGas(), } meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap) meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap) @@ -307,8 +311,8 @@ type BlobPool struct { spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts evict *evictHeap // Heap of cheapest accounts for eviction when full - eventFeed event.Feed // Event feed to send out new tx events on pool inclusion - eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination + discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded) + insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included) lock sync.RWMutex // Mutex protecting the pool during reorg handling @@ -439,8 +443,6 @@ func (p *BlobPool) Close() error { if err := p.store.Close(); err != nil { errs = append(errs, err) } - p.eventScope.Close() - switch { case errs == nil: return nil @@ -763,15 +765,21 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { // Run the reorg between the old and new head and figure out which accounts // need to be rechecked and which transactions need to be readded if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil { + var adds []*types.Transaction for addr, txs := range reinject { // Blindly push all the lost transactions back into the pool for _, tx := range txs { - p.reinject(addr, tx.Hash()) + if err := p.reinject(addr, tx.Hash()); err == nil { + adds = append(adds, tx.WithoutBlobTxSidecar()) + } } // Recheck the account's pooled transactions to drop included and // invalidated one p.recheck(addr, inclusions) } + if len(adds) > 0 { + p.insertFeed.Send(core.NewTxsEvent{Txs: adds}) + } } // Flush out any blobs from limbo that are older than the latest finality if p.chain.Config().IsCancun(p.head.Number, p.head.Time) { @@ -929,13 +937,13 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]* // Note, the method will not initialize the eviction cache values as those will // be done once for all transactions belonging to an account after all individual // transactions are injected back into the pool. -func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { +func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error { // Retrieve the associated blob from the limbo. Without the blobs, we cannot // add the transaction back into the pool as it is not mineable. tx, err := p.limbo.pull(txhash) if err != nil { log.Error("Blobs unavailable, dropping reorged tx", "err", err) - return + return err } // TODO: seems like an easy optimization here would be getting the serialized tx // from limbo instead of re-serializing it here. 
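The blob pool now maintains two event feeds, introduced above: discoverFeed fires only for transactions seen for the first time (non-private additions), while insertFeed also covers transactions resurrected during a reorg (the Reset hunk above). A toy sketch, with hypothetical names, of how the reworked SubscribeTransactions shown further below selects between them:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
)

// miniPool is a toy stand-in for the blob pool's two-feed event model.
type miniPool struct {
	discoverFeed event.Feed // newly seen transactions only
	insertFeed   event.Feed // newly seen plus reorg-resurrected transactions
}

func (p *miniPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
	if reorgs {
		return p.insertFeed.Subscribe(ch)
	}
	return p.discoverFeed.Subscribe(ch)
}

func main() {
	var p miniPool
	ch := make(chan core.NewTxsEvent, 1)
	sub := p.SubscribeTransactions(ch, true) // include reorged-out transactions
	defer sub.Unsubscribe()

	p.insertFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{}})
	fmt.Println("received event with", len((<-ch).Txs), "transactions")
}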
@@ -944,12 +952,12 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { blob, err := rlp.EncodeToBytes(tx) if err != nil { log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err) - return + return err } id, err := p.store.Put(blob) if err != nil { log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err) - return + return err } // Update the indixes and metrics @@ -957,7 +965,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { if _, ok := p.index[addr]; !ok { if err := p.reserve(addr, true); err != nil { log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err) - return + return err } p.index[addr] = []*blobTxMeta{meta} p.spent[addr] = meta.costCap @@ -968,6 +976,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { } p.lookup[meta.hash] = meta.id p.stored += uint64(meta.size) + return nil } // SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements @@ -1162,9 +1171,19 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction { // Add inserts a set of blob transactions into the pool if they pass validation (both // consensus validity and pool restictions). func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool, private bool) []error { - errs := make([]error, len(txs)) + var ( + adds = make([]*types.Transaction, 0, len(txs)) + errs = make([]error, len(txs)) + ) for i, tx := range txs { errs[i] = p.add(tx, private) + if errs[i] == nil && !private { + adds = append(adds, tx.WithoutBlobTxSidecar()) + } + } + if len(adds) > 0 { + p.discoverFeed.Send(core.NewTxsEvent{Txs: adds}) + p.insertFeed.Send(core.NewTxsEvent{Txs: adds}) } return errs } @@ -1398,6 +1417,8 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr Time: time.Now(), // TODO(karalabe): Maybe save these and use that? GasFeeCap: tx.execFeeCap.ToBig(), GasTipCap: tx.execTipCap.ToBig(), + Gas: tx.execGas, + BlobGas: tx.blobGas, }) } if len(lazies) > 0 { @@ -1482,10 +1503,14 @@ func (p *BlobPool) updateLimboMetrics() { limboSlotusedGauge.Update(int64(slotused)) } -// SubscribeTransactions registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { - return p.eventScope.Track(p.eventFeed.Subscribe(ch)) +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { + if reorgs { + return p.insertFeed.Subscribe(ch) + } else { + return p.discoverFeed.Subscribe(ch) + } } // Nonce returns the next nonce of an account, with all transactions executable diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index e2aa22bbb..fea3c4c5f 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -217,7 +217,6 @@ type LegacyPool struct { chain BlockChain gasTip atomic.Pointer[big.Int] txFeed event.Feed - scope event.SubscriptionScope signer types.Signer mu sync.RWMutex @@ -432,9 +431,6 @@ func (pool *LegacyPool) loop() { // Close terminates the transaction pool. 
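The legacy pool above no longer tracks its subscriptions in a local event.SubscriptionScope; lifetime management moves up to TxPool, which closes its own scope on shutdown (see the txpool.go hunk further below). A small sketch of the scope pattern that remains in use at that level:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var (
		feed  event.Feed
		scope event.SubscriptionScope
	)
	ch := make(chan int, 1)

	// Track the subscription so a single scope.Close() tears everything down,
	// which is how TxPool now unsubscribes remaining listeners when it closes.
	sub := scope.Track(feed.Subscribe(ch))
	fmt.Println("delivered to", feed.Send(7), "subscriber(s)")

	scope.Close() // mass-unsubscribe every tracked subscription
	<-sub.Err()   // the error channel is closed once the subscription is gone
	fmt.Println("scope closed")
}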
func (pool *LegacyPool) Close() error { - // Unsubscribe all subscriptions registered from txpool - pool.scope.Close() - // Terminate the pool reorger and return close(pool.reorgShutdownCh) pool.wg.Wait() @@ -453,10 +449,14 @@ func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) { <-wait } -// SubscribeTransactions registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { - return pool.scope.Track(pool.txFeed.Subscribe(ch)) +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { + // The legacy pool has a very messed up internal shuffling, so it's kind of + // hard to separate newly discovered transaction from resurrected ones. This + // is because the new txs are added to the queue, resurrected ones too and + // reorgs run lazily, so separating the two would need a marker. + return pool.txFeed.Subscribe(ch) } // SetGasTip updates the minimum gas tip required by the transaction pool for a @@ -728,6 +728,8 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L Time: txs[i].Time(), GasFeeCap: txs[i].GasFeeCap(), GasTipCap: txs[i].GasTipCap(), + Gas: txs[i].Gas(), + BlobGas: txs[i].BlobGas(), } } pending[addr] = lazies diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go index 7a1c3ba26..b359d72d6 100644 --- a/core/txpool/subpool.go +++ b/core/txpool/subpool.go @@ -30,13 +30,16 @@ import ( // enough for the miner and other APIs to handle large batches of transactions; // and supports pulling up the entire transaction when really needed. type LazyTransaction struct { - Pool SubPool // Transaction subpool to pull the real transaction up + Pool LazyResolver // Transaction resolver to pull the real transaction up Hash common.Hash // Transaction hash to pull up if needed Tx *types.Transaction // Transaction if already resolved Time time.Time // Time when the transaction was first seen GasFeeCap *big.Int // Maximum fee per gas the transaction may consume GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay + + Gas uint64 // Amount of gas required by the transaction + BlobGas uint64 // Amount of blob gas required by the transaction } // Resolve retrieves the full transaction belonging to a lazy handle if it is still @@ -48,6 +51,14 @@ func (ltx *LazyTransaction) Resolve() *types.Transaction { return ltx.Tx } +// LazyResolver is a minimal interface needed for a transaction pool to satisfy +// resolving lazy transactions. It's mostly a helper to avoid the entire sub- +// pool being injected into the lazy transaction. +type LazyResolver interface { + // Get returns a transaction if it is contained in the pool, or nil otherwise. + Get(hash common.Hash) *types.Transaction +} + // AddressReserver is passed by the main transaction pool to subpools, so they // may request (and relinquish) exclusive access to certain addresses. type AddressReserver func(addr common.Address, reserve bool) error @@ -120,8 +131,10 @@ type SubPool interface { // account and sorted by nonce. Pending(enforceTips bool) map[common.Address][]*LazyTransaction - // SubscribeTransactions subscribes to new transaction events. 
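LazyTransaction above now carries the Gas and BlobGas of the underlying transaction and needs only a LazyResolver (anything with a Get method) instead of the whole SubPool to resolve itself. A hedged sketch of a consumer pre-filtering on those fields before pulling the full transaction; the map-backed resolver is hypothetical, and it assumes Resolve falls back to Pool.Get when the full transaction was not attached:

package main

import (
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
)

// mapResolver is a toy txpool.LazyResolver backed by a map.
type mapResolver map[common.Hash]*types.Transaction

func (m mapResolver) Get(hash common.Hash) *types.Transaction { return m[hash] }

func main() {
	tx := types.NewTx(&types.LegacyTx{Nonce: 0, Gas: 21000, GasPrice: big.NewInt(1)})
	pool := mapResolver{tx.Hash(): tx}

	lazy := &txpool.LazyTransaction{
		Pool:      pool,
		Hash:      tx.Hash(),
		Time:      time.Now(),
		GasFeeCap: tx.GasFeeCap(),
		GasTipCap: tx.GasTipCap(),
		Gas:       tx.Gas(),     // usable for block-packing checks...
		BlobGas:   tx.BlobGas(), // ...without resolving the full (blob) tx
	}
	if lazy.Gas <= 30_000_000 { // fits the hypothetical remaining block gas
		fmt.Println("resolved:", lazy.Resolve().Hash()) // assumed to consult Pool.Get
	}
}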
- SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription + // SubscribeTransactions subscribes to new transaction events. The subscriber + // can decide whether to receive notifications only for newly seen transactions + // or also for reorged out ones. + SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top. diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 1402fe9c9..33d4fefd9 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -162,13 +162,15 @@ func (p *TxPool) Close() error { if err := <-errc; err != nil { errs = append(errs, err) } - // Terminate each subpool for _, subpool := range p.subpools { if err := subpool.Close(); err != nil { errs = append(errs, err) } } + // Unsubscribe anyone still listening for tx events + p.subs.Close() + if len(errs) > 0 { return fmt.Errorf("subpool close errors: %v", errs) } @@ -410,12 +412,12 @@ func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction return txs } -// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending -// events to the given channel. -func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { subs := make([]event.Subscription, len(p.subpools)) for i, subpool := range p.subpools { - subs[i] = subpool.SubscribeTransactions(ch) + subs[i] = subpool.SubscribeTransactions(ch, reorgs) } return p.subs.Track(event.JoinSubscriptions(subs...)) } diff --git a/core/types/hashing.go b/core/types/hashing.go index 9a6a80ac5..224d7a87e 100644 --- a/core/types/hashing.go +++ b/core/types/hashing.go @@ -95,7 +95,7 @@ type DerivableList interface { func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { buf.Reset() list.EncodeIndex(i, buf) - // It's really unfortunate that we need to do perform this copy. + // It's really unfortunate that we need to perform this copy. // StackTrie holds onto the values until Hash is called, so the values // written to it must not alias. return common.CopyBytes(buf.Bytes()) diff --git a/core/types/state_account.go b/core/types/state_account.go index 314f4943e..ad07ca3f3 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -87,7 +87,7 @@ func SlimAccountRLP(account StateAccount) []byte { return data } -// FullAccount decodes the data on the 'slim RLP' format and return +// FullAccount decodes the data on the 'slim RLP' format and returns // the consensus format account. func FullAccount(data []byte) (*StateAccount, error) { var slim SlimAccount diff --git a/core/types/transaction.go b/core/types/transaction.go index ba2783415..d24b41522 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -170,7 +170,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { } // UnmarshalBinary decodes the canonical encoding of transactions. -// It supports legacy RLP transactions and EIP2718 typed transactions. +// It supports legacy RLP transactions and EIP-2718 typed transactions. func (tx *Transaction) UnmarshalBinary(b []byte) error { if len(b) > 0 && b[0] > 0x7f { // It's a legacy transaction. 
@@ -182,7 +182,7 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error { tx.setDecoded(&data, uint64(len(b))) return nil } - // It's an EIP2718 typed transaction envelope. + // It's an EIP-2718 typed transaction envelope. inner, err := tx.decodeTyped(b) if err != nil { return err @@ -397,7 +397,7 @@ func (tx *Transaction) BlobGasFeeCap() *big.Int { return nil } -// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise. +// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise. func (tx *Transaction) BlobHashes() []common.Hash { if blobtx, ok := tx.inner.(*BlobTx); ok { return blobtx.BlobHashes diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index cd57effcb..9e26642f7 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -57,7 +57,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint } // LatestSigner returns the 'most permissive' Signer available for the given chain -// configuration. Specifically, this enables support of all types of transacrions +// configuration. Specifically, this enables support of all types of transactions // when their respective forks are scheduled to occur at any block number (or time) // in the chain config. // diff --git a/core/vm/evm.go b/core/vm/evm.go index 1dbcfff19..2c6cc7d48 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -67,15 +67,14 @@ type BlockContext struct { GetHash GetHashFunc // Block information - Coinbase common.Address // Provides information for COINBASE - GasLimit uint64 // Provides information for GASLIMIT - BlockNumber *big.Int // Provides information for NUMBER - Time uint64 // Provides information for TIME - Difficulty *big.Int // Provides information for DIFFICULTY - BaseFee *big.Int // Provides information for BASEFEE - BlobBaseFee *big.Int // Provides information for BLOBBASEFEE - Random *common.Hash // Provides information for PREVRANDAO - ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the data + Coinbase common.Address // Provides information for COINBASE + GasLimit uint64 // Provides information for GASLIMIT + BlockNumber *big.Int // Provides information for NUMBER + Time uint64 // Provides information for TIME + Difficulty *big.Int // Provides information for DIFFICULTY + BaseFee *big.Int // Provides information for BASEFEE + BlobBaseFee *big.Int // Provides information for BLOBBASEFEE + Random *common.Hash // Provides information for PREVRANDAO } // TxContext provides the EVM with information about a transaction. 
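On the `BlockContext` change above: the EVM no longer carries `ExcessBlobGas`, so whoever builds the context is expected to hand it a ready-made `BlobBaseFee`. The sketch below shows deriving that value from the parent's excess blob gas with the EIP-4844 fake-exponential formula; the constants are the EIP-4844 parameters and the helper names are my own (go-ethereum keeps an equivalent `CalcBlobFee` helper in its eip4844 package).

```go
package main

import (
	"fmt"
	"math/big"
)

// EIP-4844 parameters (assumed here; in geth they live under params/).
const (
	minBlobBaseFee            = 1
	blobBaseFeeUpdateFraction = 3338477
)

// fakeExponential approximates factor * e^(numerator/denominator) using the
// integer Taylor-series accumulation specified by EIP-4844.
func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
	var (
		output = new(big.Int)
		accum  = new(big.Int).Mul(factor, denominator)
	)
	for i := 1; accum.Sign() > 0; i++ {
		output.Add(output, accum)
		accum.Mul(accum, numerator)
		accum.Div(accum, denominator)
		accum.Div(accum, big.NewInt(int64(i)))
	}
	return output.Div(output, denominator)
}

// calcBlobBaseFee derives the blob base fee from the parent's excess blob gas,
// which is what a BlockContext producer now does up front instead of handing
// the raw ExcessBlobGas to the EVM.
func calcBlobBaseFee(excessBlobGas uint64) *big.Int {
	return fakeExponential(
		big.NewInt(minBlobBaseFee),
		new(big.Int).SetUint64(excessBlobGas),
		big.NewInt(blobBaseFeeUpdateFraction),
	)
}

func main() {
	// With zero excess blob gas the fee floors at the minimum of 1 wei.
	fmt.Println(calcBlobBaseFee(0))
	// The fee grows exponentially with accumulated excess blob gas.
	fmt.Println(calcBlobBaseFee(10000000))
}
```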
diff --git a/eth/api_backend.go b/eth/api_backend.go index 3db716f5c..b53b4d0ed 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -351,7 +351,7 @@ func (b *EthAPIBackend) TxPool() *txpool.TxPool { } func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return b.eth.txPool.SubscribeNewTxsEvent(ch) + return b.eth.txPool.SubscribeTransactions(ch, true) } func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress { diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 7963d800c..1ee1b5905 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -181,47 +181,44 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV1(update engine.ForkchoiceStateV1, at return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdateV1 called after shanghai")) } } - return api.forkchoiceUpdated(update, attr) + return api.forkchoiceUpdated(update, attr, engine.PayloadV1) } // ForkchoiceUpdatedV2 is equivalent to V1 with the addition of withdrawals in the payload attributes. -func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, attr *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { - if attr != nil { - if attr.Withdrawals == nil { +func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { + if params != nil { + if params.Withdrawals == nil { return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing withdrawals")) } - if attr.BeaconRoot != nil { + if params.BeaconRoot != nil { return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("unexpected beacon root")) } c := api.eth.BlockChain().Config() - if !c.IsShanghai(c.LondonBlock, attr.Timestamp) { - return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdateV2 should only be called during shanghai")) - } - if c.IsCancun(c.LondonBlock, attr.Timestamp) { - return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdateV2 should only be called during shanghai")) + if !active(c.IsShanghai, c.IsCancun, c.LondonBlock, params.Timestamp) { + return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called for shanghai payloads")) } } - return api.forkchoiceUpdated(update, attr) + return api.forkchoiceUpdated(update, params, engine.PayloadV2) } // ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root in the payload attributes. 
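The `TxPool.SubscribeTransactions` change above subscribes the same channel to every subpool and merges the handles with `event.JoinSubscriptions`. Here is a reduced sketch of that fan-out, with `subPool`/`feedPool`/`txsEvent` as illustrative stand-ins for the real txpool types.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

// txsEvent stands in for core.NewTxsEvent in this sketch.
type txsEvent struct{ Count int }

// subPool is a cut-down stand-in for txpool.SubPool: just the subscription
// method with the new reorgs flag.
type subPool interface {
	SubscribeTransactions(ch chan<- txsEvent, reorgs bool) event.Subscription
}

// feedPool is a trivial subPool backed by a single event.Feed.
type feedPool struct{ feed event.Feed }

func (p *feedPool) SubscribeTransactions(ch chan<- txsEvent, reorgs bool) event.Subscription {
	// A real pool would pick between its discover/insert feeds based on reorgs.
	return p.feed.Subscribe(ch)
}

// subscribeAll mirrors TxPool.SubscribeTransactions: one subscription per
// subpool, joined so the caller manages a single handle.
func subscribeAll(pools []subPool, ch chan<- txsEvent, reorgs bool) event.Subscription {
	subs := make([]event.Subscription, len(pools))
	for i, pool := range pools {
		subs[i] = pool.SubscribeTransactions(ch, reorgs)
	}
	return event.JoinSubscriptions(subs...)
}

func main() {
	var (
		legacy = new(feedPool)
		blobs  = new(feedPool)
		ch     = make(chan txsEvent, 4)
	)
	sub := subscribeAll([]subPool{legacy, blobs}, ch, true)
	defer sub.Unsubscribe()

	legacy.feed.Send(txsEvent{Count: 2})
	blobs.feed.Send(txsEvent{Count: 1})

	fmt.Println("events delivered:", len(ch)) // 2, one from each subpool
}
```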
-func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, attr *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { - if attr != nil { - if attr.Withdrawals == nil { +func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { + if params != nil { + if params.Withdrawals == nil { return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing withdrawals")) } - if attr.BeaconRoot == nil { + if params.BeaconRoot == nil { return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing beacon root")) } c := api.eth.BlockChain().Config() - if !c.IsCancun(c.LondonBlock, attr.Timestamp) { - return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdateV3 should only be called during cancun")) + if !active(c.IsCancun, c.IsPrague, c.LondonBlock, params.Timestamp) { + return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV3 must only be called for shanghai payloads")) } } - return api.forkchoiceUpdated(update, attr) + return api.forkchoiceUpdated(update, params, engine.PayloadV3) } -func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { +func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes, payloadVersion engine.PayloadVersion) (engine.ForkChoiceResponse, error) { api.forkchoiceLock.Lock() defer api.forkchoiceLock.Unlock() @@ -366,6 +363,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl Random: payloadAttributes.Random, Withdrawals: payloadAttributes.Withdrawals, BeaconRoot: payloadAttributes.BeaconRoot, + Version: payloadVersion, BlockHook: nil, } id := args.Id() @@ -417,6 +415,9 @@ func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config engine.Transit // GetPayloadV1 returns a cached payload by id. func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.ExecutableData, error) { + if payloadID.Version() != engine.PayloadV1 { + return nil, engine.UnsupportedFork + } data, err := api.getPayload(payloadID, false) if err != nil { return nil, err @@ -426,34 +427,19 @@ func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.Execu // GetPayloadV2 returns a cached payload by id. func (api *ConsensusAPI) GetPayloadV2(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { - data, err := api.getPayload(payloadID, false) - if err != nil { - return nil, err - } - p := data.ExecutionPayload - if p.Withdrawals == nil { - return nil, engine.UnsupportedFork.With(errors.New("GetPayloadV2 called for pre-shanghai payload")) - } - if p.ExcessBlobGas != nil { - return nil, engine.UnsupportedFork.With(errors.New("GetPayloadV2 called for cancun payload")) + version := payloadID.Version() + if version != engine.PayloadV1 && version != engine.PayloadV2 { + return nil, engine.UnsupportedFork } - return data, nil + return api.getPayload(payloadID, false) } // GetPayloadV3 returns a cached payload by id. 
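The `GetPayloadV1`/`GetPayloadV2` changes above stop inspecting the cached payload's fields and instead gate on the version stamped into the payload ID when the build was requested. Below is a self-contained sketch of just that gating rule (V2 may also serve V1-built payloads); the type name and byte values only mirror the engine package's `PayloadVersion` for illustration.

```go
package main

import (
	"errors"
	"fmt"
)

// payloadVersion mirrors engine.PayloadVersion for this sketch; the concrete
// values are illustrative.
type payloadVersion byte

const (
	payloadV1 payloadVersion = 0x1
	payloadV2 payloadVersion = 0x2
	payloadV3 payloadVersion = 0x3
)

var errUnsupportedFork = errors.New("unsupported fork")

// checkGetPayload reproduces the gating added to GetPayloadV1/V2/V3: the
// version recorded in the payload ID at build time must match the getter
// being called, V2 being additionally allowed to serve V1 payloads.
func checkGetPayload(getter, built payloadVersion) error {
	switch getter {
	case payloadV1:
		if built != payloadV1 {
			return errUnsupportedFork
		}
	case payloadV2:
		if built != payloadV1 && built != payloadV2 {
			return errUnsupportedFork
		}
	case payloadV3:
		if built != payloadV3 {
			return errUnsupportedFork
		}
	}
	return nil
}

func main() {
	fmt.Println(checkGetPayload(payloadV2, payloadV1)) // <nil>: V2 may serve V1-built payloads
	fmt.Println(checkGetPayload(payloadV1, payloadV3)) // unsupported fork
	fmt.Println(checkGetPayload(payloadV3, payloadV3)) // <nil>
}
```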
func (api *ConsensusAPI) GetPayloadV3(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { - data, err := api.getPayload(payloadID, false) - if err != nil { - return nil, err - } - p := data.ExecutionPayload - if p.Withdrawals == nil { - return nil, engine.UnsupportedFork.With(errors.New("GetPayloadV3 called for pre-shanghai payload")) + if payloadID.Version() != engine.PayloadV3 { + return nil, engine.UnsupportedFork } - if p.ExcessBlobGas == nil { - return nil, engine.UnsupportedFork.With(errors.New("GetPayloadV3 called for pre-cancun payload")) - } - return data, nil + return api.getPayload(payloadID, false) } func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool) (*engine.ExecutionPayloadEnvelope, error) { @@ -475,27 +461,37 @@ func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.Payl // NewPayloadV2 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.PayloadStatusV1, error) { - if api.eth.BlockChain().Config().IsShanghai(new(big.Int).SetUint64(params.Number), params.Timestamp) { + c := api.eth.BlockChain().Config() + if active(c.IsShanghai, c.IsCancun, c.LondonBlock, params.Timestamp) { if params.Withdrawals == nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) } - } else if params.Withdrawals != nil { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai")) + } else { + if params.Withdrawals != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai")) + } + } + if params.ExcessBlobGas != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) } - if api.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(params.Number), params.Timestamp) { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("newPayloadV2 called post-cancun")) + if params.BlobGasUsed != nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil params.BlobGasUsed pre-cancun")) } return api.newPayload(params, nil, nil) } // NewPayloadV3 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. 
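A compact restatement of the `NewPayloadV2` presence rules rewritten above, assuming a boolean `shanghai` flag in place of the chain-config lookup; `payloadV2Fields` is an illustrative stand-in for the relevant `engine.ExecutableData` fields.

```go
package main

import (
	"errors"
	"fmt"
)

// payloadV2Fields captures just the optional fields NewPayloadV2 cares about;
// the real engine.ExecutableData carries many more.
type payloadV2Fields struct {
	Withdrawals   []string // stand-in for []*types.Withdrawal
	ExcessBlobGas *uint64
	BlobGasUsed   *uint64
}

// validateNewPayloadV2 mirrors the presence rules: withdrawals are mandatory
// once Shanghai is active and forbidden before it, while the Cancun blob
// fields must never appear on the V2 endpoint.
func validateNewPayloadV2(p payloadV2Fields, shanghai bool) error {
	if shanghai && p.Withdrawals == nil {
		return errors.New("nil withdrawals post-shanghai")
	}
	if !shanghai && p.Withdrawals != nil {
		return errors.New("non-nil withdrawals pre-shanghai")
	}
	if p.ExcessBlobGas != nil {
		return errors.New("non-nil excessBlobGas pre-cancun")
	}
	if p.BlobGasUsed != nil {
		return errors.New("non-nil blobGasUsed pre-cancun")
	}
	return nil
}

func main() {
	fmt.Println(validateNewPayloadV2(payloadV2Fields{}, false))                       // <nil>
	fmt.Println(validateNewPayloadV2(payloadV2Fields{Withdrawals: []string{}}, true)) // <nil>
	fmt.Println(validateNewPayloadV2(payloadV2Fields{}, true))                        // nil withdrawals post-shanghai
}
```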
func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) { + if params.Withdrawals == nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) + } if params.ExcessBlobGas == nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun")) } if params.BlobGasUsed == nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil params.BlobGasUsed post-cancun")) } + if versionedHashes == nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun")) } @@ -503,13 +499,25 @@ func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHas return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil parentBeaconBlockRoot post-cancun")) } - if !api.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(params.Number), params.Timestamp) { - return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV3 called pre-cancun")) + c := api.eth.BlockChain().Config() + if !active(c.IsCancun, c.IsPrague, c.LondonBlock, params.Timestamp) { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV3 must only be called for cancun payloads")) } return api.newPayload(params, versionedHashes, beaconRoot) } +// active returns true only if current returns true and next returns false. It returns false otherwise. +func active(current, next func(*big.Int, uint64) bool, london *big.Int, time uint64) bool { + if !current(london, time) { + return false + } + if next(london, time) { + return false + } + return true +} + func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) { // The locking here is, strictly, not required. Without these locks, this can happen: // @@ -531,8 +539,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot) if err != nil { log.Warn("Invalid NewPayload params", "params", params, "error", err) - errMsg := err.Error() - return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: nil, ValidationError: &errMsg}, nil + return api.invalid(err, nil), nil } // Stash away the last update to warn the user if the beacon client goes offline api.lastNewPayloadLock.Lock() @@ -713,20 +720,21 @@ func (api *ConsensusAPI) checkInvalidAncestor(check common.Hash, head common.Has } } -// invalid returns a response "INVALID" with the latest valid hash supplied by latest or to the current head -// if no latestValid block was provided. +// invalid returns a response "INVALID" with the latest valid hash supplied by latest. 
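The new `active` helper above narrows a fork check to an exclusive window: the named fork must be live and the following one must not yet be. A runnable sketch with toy fork predicates follows; the activation timestamps are arbitrary example values, not real network parameters.

```go
package main

import (
	"fmt"
	"math/big"
)

// active mirrors the helper above: a timestamp falls in a fork's window only
// if that fork is live and the next fork is not yet live.
func active(current, next func(*big.Int, uint64) bool, london *big.Int, time uint64) bool {
	return current(london, time) && !next(london, time)
}

func main() {
	var (
		london = big.NewInt(0) // not consulted by these toy predicates
		// Arbitrary example activation timestamps, not real network values.
		shanghaiTime = uint64(1_000)
		cancunTime   = uint64(2_000)

		// Toy stand-ins for ChainConfig.IsShanghai / IsCancun: activation by
		// timestamp only, which is all the post-merge forks need here.
		isShanghai = func(_ *big.Int, t uint64) bool { return t >= shanghaiTime }
		isCancun   = func(_ *big.Int, t uint64) bool { return t >= cancunTime }
	)
	fmt.Println(active(isShanghai, isCancun, london, 1_500)) // true: Shanghai live, Cancun not yet
	fmt.Println(active(isShanghai, isCancun, london, 2_500)) // false: the Cancun window has started
	fmt.Println(active(isShanghai, isCancun, london, 500))   // false: Shanghai not yet live
}
```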
func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.PayloadStatusV1 { - currentHash := api.eth.BlockChain().CurrentBlock().Hash() + var currentHash *common.Hash if latestValid != nil { - // Set latest valid hash to 0x0 if parent is PoW block - currentHash = common.Hash{} - if latestValid.Difficulty.BitLen() == 0 { + if latestValid.Difficulty.BitLen() != 0 { + // Set latest valid hash to 0x0 if parent is PoW block + currentHash = &common.Hash{} + } else { // Otherwise set latest valid hash to parent hash - currentHash = latestValid.Hash() + h := latestValid.Hash() + currentHash = &h } } errorMsg := err.Error() - return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: ¤tHash, ValidationError: &errorMsg} + return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: currentHash, ValidationError: &errorMsg} } // heartbeat loops indefinitely, and checks if there have been beacon client updates diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 3f4bca72b..e5e55ae41 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -211,6 +211,7 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { GasLimit: blockParams.GasLimit, Random: blockParams.Random, BeaconRoot: blockParams.BeaconRoot, + Version: engine.PayloadV1, }).Id() execData, err := api.GetPayloadV1(payloadID) if err != nil { @@ -1080,6 +1081,7 @@ func TestWithdrawals(t *testing.T) { Random: blockParams.Random, Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, + Version: engine.PayloadV2, }).Id() execData, err := api.GetPayloadV2(payloadID) if err != nil { @@ -1129,6 +1131,7 @@ func TestWithdrawals(t *testing.T) { Random: blockParams.Random, Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, + Version: engine.PayloadV2, }).Id() execData, err = api.GetPayloadV2(payloadID) if err != nil { @@ -1160,123 +1163,135 @@ func TestWithdrawals(t *testing.T) { } } -// func TestNilWithdrawals(t *testing.T) { -// genesis, blocks := generateMergeChain(10, true) -// // Set shanghai time to last block + 4 seconds (first post-merge block) -// time := blocks[len(blocks)-1].Time() + 4 -// genesis.Config.ShanghaiTime = &time - -// n, ethservice := startEthService(t, genesis, blocks) -// ethservice.Merger().ReachTTD() -// defer n.Close() - -// api := NewConsensusAPI(ethservice) -// parent := ethservice.BlockChain().CurrentHeader() -// aa := common.Address{0xaa} - -// type test struct { -// blockParams engine.PayloadAttributes -// wantErr bool -// } -// tests := []test{ -// // Before Shanghai -// { -// blockParams: engine.PayloadAttributes{ -// Timestamp: parent.Time + 2, -// Withdrawals: nil, -// }, -// wantErr: false, -// }, -// { -// blockParams: engine.PayloadAttributes{ -// Timestamp: parent.Time + 2, -// Withdrawals: make([]*types.Withdrawal, 0), -// }, -// wantErr: true, -// }, -// { -// blockParams: engine.PayloadAttributes{ -// Timestamp: parent.Time + 2, -// Withdrawals: []*types.Withdrawal{ -// { -// Index: 0, -// Address: aa, -// Amount: 32, -// }, -// }, -// }, -// wantErr: true, -// }, -// // After Shanghai -// { -// blockParams: engine.PayloadAttributes{ -// Timestamp: parent.Time + 5, -// Withdrawals: nil, -// }, -// wantErr: true, -// }, -// { -// blockParams: engine.PayloadAttributes{ -// Timestamp: parent.Time + 5, -// Withdrawals: make([]*types.Withdrawal, 0), -// }, -// wantErr: false, -// }, -// { -// blockParams: engine.PayloadAttributes{ -// Timestamp: parent.Time + 5, -// Withdrawals: []*types.Withdrawal{ -// { 
-// Index: 0, -// Address: aa, -// Amount: 32, -// }, -// }, -// }, -// wantErr: false, -// }, -// } - -// fcState := engine.ForkchoiceStateV1{ -// HeadBlockHash: parent.Hash(), -// } - -// for _, test := range tests { -// var err error -// if genesis.Config.IsShanghai(genesis.Config.LondonBlock, test.blockParams.Timestamp) { -// _, err = api.ForkchoiceUpdatedV2(fcState, &test.blockParams) -// } else { -// _, err = api.ForkchoiceUpdatedV1(fcState, &test.blockParams) -// } -// if test.wantErr { -// if err == nil { -// t.Fatal("wanted error on fcuv2 with invalid withdrawals") -// } -// continue -// } -// if err != nil { -// t.Fatalf("error preparing payload, err=%v", err) -// } - -// // 11: verify locally build block. -// payloadID := (&miner.BuildPayloadArgs{ -// Parent: fcState.HeadBlockHash, -// Timestamp: test.blockParams.Timestamp, -// FeeRecipient: test.blockParams.SuggestedFeeRecipient, -// Random: test.blockParams.Random, -// BeaconRoot: test.blockParams.BeaconRoot, -// }).Id() -// execData, err := api.GetPayloadV2(payloadID) -// if err != nil { -// t.Fatalf("error getting payload, err=%v", err) -// } -// if status, err := api.NewPayloadV2(*execData.ExecutionPayload); err != nil { -// t.Fatalf("error validating payload: %v", err) -// } else if status.Status != engine.VALID { -// t.Fatalf("invalid payload") -// } -// } -// } +func TestNilWithdrawals(t *testing.T) { + genesis, blocks := generateMergeChain(10, true) + // Set shanghai time to last block + 4 seconds (first post-merge block) + time := blocks[len(blocks)-1].Time() + 4 + genesis.Config.ShanghaiTime = &time + + n, ethservice := startEthService(t, genesis, blocks) + ethservice.Merger().ReachTTD() + defer n.Close() + + api := NewConsensusAPI(ethservice) + parent := ethservice.BlockChain().CurrentHeader() + aa := common.Address{0xaa} + + type test struct { + blockParams engine.PayloadAttributes + wantErr bool + } + tests := []test{ + // Before Shanghai + { + blockParams: engine.PayloadAttributes{ + Timestamp: parent.Time + 2, + Withdrawals: nil, + }, + wantErr: false, + }, + { + blockParams: engine.PayloadAttributes{ + Timestamp: parent.Time + 2, + Withdrawals: make([]*types.Withdrawal, 0), + }, + wantErr: true, + }, + { + blockParams: engine.PayloadAttributes{ + Timestamp: parent.Time + 2, + Withdrawals: []*types.Withdrawal{ + { + Index: 0, + Address: aa, + Amount: 32, + }, + }, + }, + wantErr: true, + }, + // After Shanghai + { + blockParams: engine.PayloadAttributes{ + Timestamp: parent.Time + 5, + Withdrawals: nil, + }, + wantErr: true, + }, + { + blockParams: engine.PayloadAttributes{ + Timestamp: parent.Time + 5, + Withdrawals: make([]*types.Withdrawal, 0), + }, + wantErr: false, + }, + { + blockParams: engine.PayloadAttributes{ + Timestamp: parent.Time + 5, + Withdrawals: []*types.Withdrawal{ + { + Index: 0, + Address: aa, + Amount: 32, + }, + }, + }, + wantErr: false, + }, + } + + fcState := engine.ForkchoiceStateV1{ + HeadBlockHash: parent.Hash(), + } + + for _, test := range tests { + var ( + err error + payloadVersion engine.PayloadVersion + shanghai = genesis.Config.IsShanghai(genesis.Config.LondonBlock, test.blockParams.Timestamp) + ) + if !shanghai { + payloadVersion = engine.PayloadV1 + _, err = api.ForkchoiceUpdatedV1(fcState, &test.blockParams) + } else { + payloadVersion = engine.PayloadV2 + _, err = api.ForkchoiceUpdatedV2(fcState, &test.blockParams) + } + if test.wantErr { + if err == nil { + t.Fatal("wanted error on fcuv2 with invalid withdrawals") + } + continue + } + if err != nil { + t.Fatalf("error 
preparing payload, err=%v", err) + } + + // 11: verify locally build block. + payloadID := (&miner.BuildPayloadArgs{ + Parent: fcState.HeadBlockHash, + Timestamp: test.blockParams.Timestamp, + FeeRecipient: test.blockParams.SuggestedFeeRecipient, + Random: test.blockParams.Random, + Version: payloadVersion, + }).Id() + execData, err := api.GetPayloadV2(payloadID) + if err != nil { + t.Fatalf("error getting payload, err=%v", err) + } + var status engine.PayloadStatusV1 + if !shanghai { + status, err = api.NewPayloadV1(*execData.ExecutionPayload) + } else { + status, err = api.NewPayloadV2(*execData.ExecutionPayload) + } + if err != nil { + t.Fatalf("error validating payload: %v", err.(*engine.EngineAPIError).ErrorData()) + } else if status.Status != engine.VALID { + t.Fatalf("invalid payload") + } + } +} func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) { genesis, blocks := generateMergeChain(10, true) @@ -1613,6 +1628,7 @@ func TestParentBeaconBlockRoot(t *testing.T) { Random: blockParams.Random, Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, + Version: engine.PayloadV3, }).Id() execData, err := api.GetPayloadV3(payloadID) if err != nil { diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index 1f7a3266c..a9a2bb4a9 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -199,7 +199,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal) error { func (c *SimulatedBeacon) loopOnDemand() { var ( newTxs = make(chan core.NewTxsEvent) - sub = c.eth.TxPool().SubscribeNewTxsEvent(newTxs) + sub = c.eth.TxPool().SubscribeTransactions(newTxs, true) ) defer sub.Unsubscribe() diff --git a/eth/catalyst/tester.go b/eth/catalyst/tester.go index 3e9159a17..0922ac0ba 100644 --- a/eth/catalyst/tester.go +++ b/eth/catalyst/tester.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/log" @@ -28,23 +28,27 @@ import ( ) // FullSyncTester is an auxiliary service that allows Geth to perform full sync -// alone without consensus-layer attached. Users must specify a valid block as -// the sync target. This tester can be applied to different networks, no matter -// it's pre-merge or post-merge, but only for full-sync. +// alone without consensus-layer attached. Users must specify a valid block hash +// as the sync target. +// +// This tester can be applied to different networks, no matter it's pre-merge or +// post-merge, but only for full-sync. type FullSyncTester struct { - api *ConsensusAPI - block *types.Block - closed chan struct{} - wg sync.WaitGroup + stack *node.Node + backend *eth.Ethereum + target common.Hash + closed chan struct{} + wg sync.WaitGroup } // RegisterFullSyncTester registers the full-sync tester service into the node // stack for launching and stopping the service controlled by node. 
-func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, block *types.Block) (*FullSyncTester, error) { +func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash) (*FullSyncTester, error) { cl := &FullSyncTester{ - api: newConsensusAPIWithoutHeartbeat(backend), - block: block, - closed: make(chan struct{}), + stack: stack, + backend: backend, + target: target, + closed: make(chan struct{}), } stack.RegisterLifecycle(cl) return cl, nil @@ -56,29 +60,25 @@ func (tester *FullSyncTester) Start() error { go func() { defer tester.wg.Done() + // Trigger beacon sync with the provided block hash as trusted + // chain head. + err := tester.backend.Downloader().BeaconDevSync(downloader.FullSync, tester.target, tester.closed) + if err != nil { + log.Info("Failed to trigger beacon sync", "err", err) + } + ticker := time.NewTicker(time.Second * 5) defer ticker.Stop() for { select { case <-ticker.C: - // Don't bother downloader in case it's already syncing. - if tester.api.eth.Downloader().Synchronising() { - continue - } - // Short circuit in case the target block is already stored - // locally. TODO(somehow terminate the node stack if target - // is reached). - if tester.api.eth.BlockChain().HasBlock(tester.block.Hash(), tester.block.NumberU64()) { - log.Info("Full-sync target reached", "number", tester.block.NumberU64(), "hash", tester.block.Hash()) + // Stop in case the target block is already stored locally. + if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil { + log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash()) + go tester.stack.Close() // async since we need to close ourselves return } - // Trigger beacon sync with the provided block header as - // trusted chain head. - err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), tester.block.Header()) - if err != nil { - log.Info("Failed to beacon sync", "err", err) - } case <-tester.closed: return diff --git a/eth/downloader/beacondevsync.go b/eth/downloader/beacondevsync.go new file mode 100644 index 000000000..9a38fedd4 --- /dev/null +++ b/eth/downloader/beacondevsync.go @@ -0,0 +1,81 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package downloader + +import ( + "errors" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +// BeaconDevSync is a development helper to test synchronization by providing +// a block hash instead of header to run the beacon sync against. +// +// The method will reach out to the network to retrieve the header of the sync +// target instead of receiving it from the consensus node. +// +// Note, this must not be used in live code. 
If the forkchcoice endpoint where +// to use this instead of giving us the payload first, then essentially nobody +// in the network would have the block yet that we'd attempt to retrieve. +func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error { + // Be very loud that this code should not be used in a live node + log.Warn("----------------------------------") + log.Warn("Beacon syncing with hash as target", "hash", hash) + log.Warn("This is unhealthy for a live node!") + log.Warn("----------------------------------") + + log.Info("Waiting for peers to retrieve sync target") + for { + // If the node is going down, unblock + select { + case <-stop: + return errors.New("stop requested") + default: + } + // Pick a random peer to sync from and keep retrying if none are yet + // available due to fresh startup + d.peers.lock.RLock() + var peer *peerConnection + for _, peer = range d.peers.peers { + break + } + d.peers.lock.RUnlock() + + if peer == nil { + time.Sleep(time.Second) + continue + } + // Found a peer, attempt to retrieve the header whilst blocking and + // retry if it fails for whatever reason + log.Info("Attempting to retrieve sync target", "peer", peer.id) + headers, metas, err := d.fetchHeadersByHash(peer, hash, 1, 0, false) + if err != nil || len(headers) != 1 { + log.Warn("Failed to fetch sync target", "headers", len(headers), "err", err) + time.Sleep(time.Second) + continue + } + // Head header retrieved, if the hash matches, start the actual sync + if metas[0] != hash { + log.Error("Received invalid sync target", "want", hash, "have", metas[0]) + time.Sleep(time.Second) + continue + } + return d.BeaconSync(mode, headers[0], headers[0]) + } +} diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 7fed48bdb..2ca7e328c 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -286,11 +286,6 @@ func (d *Downloader) Progress() ethereum.SyncProgress { } } -// Synchronising returns whether the downloader is currently retrieving blocks. -func (d *Downloader) Synchronising() bool { - return d.synchronising.Load() -} - // RegisterPeer injects a new download peer into the set of block source to be // used for fetching hashes and blocks from. func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { @@ -309,11 +304,6 @@ func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { return nil } -// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. -func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error { - return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) -} - // UnregisterPeer remove a peer from the known list, preventing any action from // the specified peer. An effort is also made to return any pending fetches into // the queue. diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index ffe445ea8..e4875b959 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { // function can be used to retrieve batches of headers from the particular peer. 
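`BeaconDevSync` above resolves a bare target hash into a trusted header by polling whatever peers are connected, retrying on failures or mismatched responses, and only then kicking off the normal beacon sync. Below is a self-contained sketch of that retry loop; `header`, `fetchFn` and `resolveTarget` are illustrative stand-ins, not downloader APIs.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// header is a minimal stand-in for types.Header in this sketch.
type header struct {
	Number uint64
	Hash   string
}

// fetchFn abstracts "ask some peer for the header with this hash"; it may
// fail or return a header that does not actually match the request.
type fetchFn func(hash string) (*header, error)

// resolveTarget mirrors the BeaconDevSync retry loop: keep asking peers for
// the trusted hash until one of them serves a header that really matches,
// bailing out promptly if a stop is requested.
func resolveTarget(fetch fetchFn, hash string, stop <-chan struct{}, retry time.Duration) (*header, error) {
	for {
		// If the node is going down, unblock.
		select {
		case <-stop:
			return nil, errors.New("stop requested")
		default:
		}
		h, err := fetch(hash)
		if err != nil || h == nil {
			time.Sleep(retry) // no peer or bad response, try again later
			continue
		}
		if h.Hash != hash {
			time.Sleep(retry) // peer served the wrong header, try again
			continue
		}
		return h, nil // trusted head resolved, beacon sync can start from it
	}
}

func main() {
	// A flaky "peer" that fails twice before serving the right header.
	attempts := 0
	fetch := func(hash string) (*header, error) {
		attempts++
		if attempts < 3 {
			return nil, errors.New("no suitable peer yet")
		}
		return &header{Number: 42, Hash: hash}, nil
	}
	h, err := resolveTarget(fetch, "0xhead", make(chan struct{}), 10*time.Millisecond)
	fmt.Println(h.Number, err) // 42 <nil>
}
```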
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: origin, }, @@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i // function can be used to retrieve batches of headers from the particular peer. func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: origin, }, @@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesPacket)(&bodies), + Res: (*eth.BlockBodiesResponse)(&bodies), Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -317,7 +317,7 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan * } res := ð.Response{ Req: req, - Res: (*eth.ReceiptsPacket)(&receipts), + Res: (*eth.ReceiptsResponse)(&receipts), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { } } -func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } -func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) } -func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } +func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } +func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } +func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) } func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) } func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) } @@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. 
-func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } -func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } +func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) } +func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) } func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) } func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) } @@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. -func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } -func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } -func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } +func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } +func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } +func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) } func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) } func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) } @@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronising against a much shorter but much heavier fork works // currently and is not dropped. -func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } -func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } +func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } +func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } +func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) } func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) } func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) } @@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head, ensuring that malicious peers cannot waste resources by feeding // long dead chains. 
-func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } -func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } +func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } +func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } +func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) } func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) } func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) } @@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head for short but heavy forks too. These are a bit special because they // take different ancestor lookup paths. -func TestBoundedHeavyForkedSync66Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) +func TestBoundedHeavyForkedSync68Full(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedHeavyForkedSync66Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) +func TestBoundedHeavyForkedSync68Snap(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedHeavyForkedSync66Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) +func TestBoundedHeavyForkedSync68Light(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) } func TestBoundedHeavyForkedSync67Full(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH67, FullSync) @@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { } // Tests that a canceled download wipes all previously accumulated state. -func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } -func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } -func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } +func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } +func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } +func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) } func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) } func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) } @@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { } // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
-func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } -func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } -func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } +func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } +func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } +func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) } func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) } func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) } @@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. -func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } -func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } -func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } +func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } +func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } +func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) } func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) } func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) } @@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(blockCacheMaxItems - 15) // Create peers of every type - tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) + tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:]) // Synchronise with the requested peer and make sure all blocks were retrieved @@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { assertOwnChain(t, tester, len(chain.blocks)) // Check that no peers have been dropped off - for _, version := range []int{66, 67} { + for _, version := range []int{68, 67} { peer := fmt.Sprintf("peer %d", version) if _, ok := tester.peers[peer]; !ok { t.Errorf("%s dropped", peer) @@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. 
-func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } -func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } -func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } +func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } +func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } +func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) } func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) } func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) } @@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains. -func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } -func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } -func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } +func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } +func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } +func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) } func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) } func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) } @@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. -func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } -func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } -func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } +func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } +func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } +func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) } func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) } func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) } @@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that a peer advertising a high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. 
-func TestHighTDStarvationAttack66Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, FullSync) +func TestHighTDStarvationAttack68Full(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, FullSync) } -func TestHighTDStarvationAttack66Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, SnapSync) +func TestHighTDStarvationAttack68Snap(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, SnapSync) } -func TestHighTDStarvationAttack66Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, LightSync) +func TestHighTDStarvationAttack68Light(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, LightSync) } func TestHighTDStarvationAttack67Full(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH67, FullSync) @@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } +func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) } func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { @@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. -func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } -func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } -func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } +func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } +func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } +func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) } func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) } func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) } @@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync // Tests that synchronisation progress (origin block number and highest block // number) is tracked and updated correctly in case of a fork (or manual head // revertal). 
-func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } -func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } -func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } +func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } +func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } +func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) } func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) } func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) } @@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if synchronisation is aborted due to some failure, then the progress // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. -func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } -func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } -func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } +func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } +func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } +func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) } func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) } func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) } @@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if an attacker fakes a chain height, after the attack is detected, // the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } -func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } -func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } +func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } +func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } +func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) } func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) } func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) } @@ -1330,8 +1330,10 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. 
-func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) } -func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) } +func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } +func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } +func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) } +func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) } func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go index 021e8c4f9..cc4279b0d 100644 --- a/eth/downloader/fetchers.go +++ b/eth/downloader/fetchers.go @@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } @@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 9440972c6..5105fda66 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the body data and delivering it to the downloader's queue. 
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack() + txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack() hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2]) diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go index 84c7f2098..8201f4ca7 100644 --- a/eth/downloader/fetchers_concurrent_headers.go +++ b/eth/downloader/fetchers_concurrent_headers.go @@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the header data and delivering it to the downloader's queue. func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - headers := *packet.Res.(*eth.BlockHeadersPacket) + headers := *packet.Res.(*eth.BlockHeadersRequest) hashes := packet.Meta.([]common.Hash) accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go index 1c853c218..3169f030b 100644 --- a/eth/downloader/fetchers_concurrent_receipts.go +++ b/eth/downloader/fetchers_concurrent_receipts.go @@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the receipt data and delivering it to the downloader's queue. func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - receipts := *packet.Res.(*eth.ReceiptsPacket) + receipts := *packet.Res.(*eth.ReceiptsResponse) hashes := packet.Meta.([]common.Hash) // {receipt hashes} accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 6b8269495..4c43af527 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -55,39 +55,16 @@ type peerConnection struct { lock sync.RWMutex } -// LightPeer encapsulates the methods required to synchronise with a remote light peer. -type LightPeer interface { +// Peer encapsulates the methods required to synchronise with a remote full peer. +type Peer interface { Head() (common.Hash, *big.Int) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) -} -// Peer encapsulates the methods required to synchronise with a remote full peer. -type Peer interface { - LightPeer RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) } -// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. 
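With LightPeer and its wrapper removed in the hunk above, the downloader is left with a single Peer interface covering head queries plus header, body and receipt retrieval. A minimal sketch of that shape, using simplified local types instead of the real eth.Request/eth.Response plumbing, with a compile-time check that a do-nothing test double satisfies it; all names here are illustrative.

package downloader_sketch

import "math/big"

type hash [32]byte

// peer mirrors the consolidated downloader Peer interface: no light-client
// split any more, every peer must serve headers, bodies and receipts.
type peer interface {
    Head() (hash, *big.Int)
    RequestHeadersByHash(origin hash, amount int, skip int, reverse bool) error
    RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error
    RequestBodies(hashes []hash) error
    RequestReceipts(hashes []hash) error
}

// nullPeer is a do-nothing test double; unlike the removed lightPeerWrapper,
// nothing has to panic on body or receipt requests.
type nullPeer struct {
    head hash
    td   *big.Int
}

var _ peer = (*nullPeer)(nil) // compile-time interface check

func (p *nullPeer) Head() (hash, *big.Int) { return p.head, p.td }

func (p *nullPeer) RequestHeadersByHash(hash, int, int, bool) error { return nil }

func (p *nullPeer) RequestHeadersByNumber(uint64, int, int, bool) error { return nil }

func (p *nullPeer) RequestBodies([]hash) error { return nil }

func (p *nullPeer) RequestReceipts([]hash) error { return nil }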
-type lightPeerWrapper struct { - peer LightPeer -} - -func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } -func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { - return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink) -} -func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { - return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink) -} -func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) { - panic("RequestBodies not supported in light client mode sync") -} -func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) { - panic("RequestReceipts not supported in light client mode sync") -} - // newPeerConnection creates a new downloader peer. func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { return &peerConnection{ diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index a07e1695f..4f1f46204 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) { case res := <-resCh: // Headers successfully retrieved, update the metrics - headers := *res.Res.(*eth.BlockHeadersPacket) + headers := *res.Res.(*eth.BlockHeadersRequest) headerReqTimer.Update(time.Since(start)) s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers)) diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 6a76d78ac..c31007765 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error), @@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // Create a peer set to feed headers through peerset := newPeerSet() for _, peer := range tt.peers { - peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id))) + peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id))) } // Create a peer dropper to track malicious peers dropped := make(map[string]int) @@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { skeleton.Sync(tt.newHead, nil, true) } if tt.newPeer != nil { - if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { + if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { t.Errorf("test %d: failed to register new peer: %v", i, err) } } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 35608031d..8751c4e3e 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() { select { case res := <-resCh: res.Done <- nil - f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) + f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time)) case <-timeout.C: // The peer didn't respond in time. 
The request @@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() { case res := <-resCh: res.Done <- nil // Ignoring withdrawals here, since the block fetcher is not used post-merge. - txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack() + txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack() f.FilterBodies(peer, txs, uncles, time.Now()) case <-timeout.C: diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 7c490df3f..6927300b1 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Time: drift, Done: make(chan error, 1), // Ignore the returned status } @@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesPacket)(&bodies), + Res: (*eth.BlockBodiesResponse)(&bodies), Time: drift, Done: make(chan error, 1), // Ignore the returned status } diff --git a/eth/handler.go b/eth/handler.go index 241027117..1b554d7a7 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -75,9 +75,10 @@ type txPool interface { // The slice should be modifiable by the caller. Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction - // SubscribeNewTxsEvent should return an event subscription of - // NewTxsEvent and send events to the given channel. - SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription + // SubscribeTransactions subscribes to new transaction events. The subscriber + // can decide whether to receive notifications only for newly seen transactions + // or also for reorged out ones. + SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription // IsPrivateTxHash indicates if the transaction hash should not // be broadcast on public channels @@ -418,7 +419,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { select { case res := <-resCh: - headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest)) if len(headers) == 0 { // Required blocks are allowed to be missing if the remote // node is not yet synced @@ -465,7 +466,7 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error snap.EgressRegistrationErrorMeter.Mark(1) } } - peer.Log().Warn("Snapshot extension registration failed", "err", err) + peer.Log().Debug("Snapshot extension registration failed", "err", err) return err } return handler(peer) @@ -513,10 +514,10 @@ func (h *handler) unregisterPeer(id string) { func (h *handler) Start(maxPeers int) { h.maxPeers = maxPeers - // broadcast transactions + // broadcast and announce transactions (only new ones, not resurrected ones) h.wg.Add(1) h.txsCh = make(chan core.NewTxsEvent, txChanSize) - h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh) + h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false) go h.txBroadcastLoop() // broadcast mined blocks @@ -596,26 +597,33 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { } // BroadcastTransactions will propagate a batch of transactions -// - To a square root of all peers +// - To a square root of all peers for non-blob transactions // - And, separately, as announcements to all peers which are not known to // already have the given transaction. 
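The updated doc comment above describes the policy that the function below implements: blob and oversized transactions are announce-only, everything else is sent in full to roughly the square root of the peers that do not yet know the transaction. A reduced sketch of just that selection rule; txClass is a hypothetical stand-in for types.Transaction and the size cutoff value is illustrative.

package broadcast_sketch

import "math"

// txClass captures the two properties the selection rule cares about: whether
// the transaction carries blobs, and how large its encoding is.
type txClass struct {
    isBlob bool
    size   uint64
}

// txMaxBroadcastSize is the cutoff above which a transaction is only announced;
// the value here is illustrative.
const txMaxBroadcastSize = 4096

// directFanout returns how many peers should receive the full transaction.
// Blob and oversized transactions return 0, meaning announce-only; everything
// else is broadcast to roughly sqrt(peers), matching the switch in the hunk below.
func directFanout(tx txClass, peersWithoutTx int) int {
    switch {
    case tx.isBlob:
        return 0 // announce only
    case tx.size > txMaxBroadcastSize:
        return 0 // announce only
    default:
        return int(math.Sqrt(float64(peersWithoutTx)))
    }
}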
func (h *handler) BroadcastTransactions(txs types.Transactions) { var ( - annoCount int // Count of announcements made - annoPeers int - directCount int // Count of the txs sent directly to peers - directPeers int // Count of the peers that were sent transactions directly + blobTxs int // Number of blob transactions to announce only + largeTxs int // Number of large transactions to announce only + + directCount int // Number of transactions sent directly to peers (duplicates included) + directPeers int // Number of peers that were sent transactions directly + annCount int // Number of transactions announced across all peers (duplicates included) + annPeers int // Number of peers announced about transactions txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce - ) // Broadcast transactions to a batch of peers not knowing about it for _, tx := range txs { peers := h.peers.peersWithoutTransaction(tx.Hash()) var numDirect int - if tx.Size() <= txMaxBroadcastSize { + switch { + case tx.Type() == types.BlobTxType: + blobTxs++ + case tx.Size() > txMaxBroadcastSize: + largeTxs++ + default: numDirect = int(math.Sqrt(float64(len(peers)))) } // Send the tx unconditionally to a subset of our peers @@ -633,13 +641,12 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { peer.AsyncSendTransactions(hashes) } for peer, hashes := range annos { - annoPeers++ - annoCount += len(hashes) + annPeers++ + annCount += len(hashes) peer.AsyncSendPooledTransactionHashes(hashes) } - log.Debug("Transaction broadcast", "txs", len(txs), - "announce packs", annoPeers, "announced hashes", annoCount, - "tx packs", directPeers, "broadcast txs", directCount) + log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs, + "bcastpeers", directPeers, "bcastcount", directCount, "annpeers", annPeers, "anncount", annCount) } // minedBroadcastLoop sends mined blocks to connected peers. diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 2aba16f92..e844b36cc 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -17,6 +17,7 @@ package eth import ( + "errors" "fmt" "math/big" "time" @@ -66,16 +67,21 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { case *eth.NewBlockPacket: return h.handleBlockBroadcast(peer, packet.Block, packet.TD) - case *eth.NewPooledTransactionHashesPacket66: + case *eth.NewPooledTransactionHashesPacket67: return h.txFetcher.Notify(peer.ID(), *packet) case *eth.NewPooledTransactionHashesPacket68: return h.txFetcher.Notify(peer.ID(), packet.Hashes) case *eth.TransactionsPacket: + for _, tx := range *packet { + if tx.Type() == types.BlobTxType { + return errors.New("disallowed broadcast blob transaction") + } + } return h.txFetcher.Enqueue(peer.ID(), *packet, false) - case *eth.PooledTransactionsPacket: + case *eth.PooledTransactionsResponse: return h.txFetcher.Enqueue(peer.ID(), *packet, true) default: @@ -90,9 +96,7 @@ func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, // the chain already entered the pos stage and disconnect the // remote peer. 
if h.merger.PoSFinalized() { - // TODO (MariusVanDerWijden) drop non-updated peers after the merge - return nil - // return errors.New("unexpected block announces") + return errors.New("disallowed block announcement") } // Schedule all the unknown hashes for retrieval var ( @@ -118,9 +122,7 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td // the chain already entered the pos stage and disconnect the // remote peer. if h.merger.PoSFinalized() { - // TODO (MariusVanDerWijden) drop non-updated peers after the merge - return nil - // return errors.New("unexpected block announces") + return errors.New("disallowed block broadcast") } // Schedule the block for import h.blockFetcher.Enqueue(peer.ID(), block) diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index fe9be8bf3..3e3441d57 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.blockBroadcasts.Send(packet.Block) return nil - case *eth.NewPooledTransactionHashesPacket66: + case *eth.NewPooledTransactionHashesPacket67: h.txAnnounces.Send(([]common.Hash)(*packet)) return nil @@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil - case *eth.PooledTransactionsPacket: + case *eth.PooledTransactionsResponse: h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil @@ -81,7 +81,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { // Tests that peers are correctly accepted (or rejected) based on the advertised // fork IDs in the protocol handshake. -func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) } func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) } @@ -237,7 +236,6 @@ func testForkIDSplit(t *testing.T, protocol uint) { } // Tests that received transactions are added to the local pool. -func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) } func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) } func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) } @@ -251,7 +249,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { handler.handler.synced.Store(true) // mark synced to accept transactions txs := make(chan core.NewTxsEvent) - sub := handler.txpool.SubscribeNewTxsEvent(txs) + sub := handler.txpool.SubscribeTransactions(txs, false) defer sub.Unsubscribe() // Create a source peer to send messages through and a sink handler to receive them @@ -296,7 +294,6 @@ func testRecvTransactions(t *testing.T, protocol uint) { } // This test checks that pending transactions are sent. -func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) } func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) } func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) } @@ -356,7 +353,7 @@ func testSendTransactions(t *testing.T, protocol uint) { seen := make(map[common.Hash]struct{}) for len(seen) < len(insert) { switch protocol { - case 66, 67, 68: + case 67, 68: select { case hashes := <-anns: for _, hash := range hashes { @@ -382,7 +379,6 @@ func testSendTransactions(t *testing.T, protocol uint) { // Tests that transactions get propagated to all attached peers, either via direct // broadcasts or via announcements/retrievals. 
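Several call sites in this diff switch from SubscribeNewTxsEvent to the new SubscribeTransactions(ch, reorgs) form, with the handler passing reorgs=false so only freshly seen transactions are broadcast. A hedged usage sketch with simplified stand-ins for core.NewTxsEvent and event.Subscription; the types and function names are illustrative.

package txsub_sketch

// NewTxsEvent and Subscription are simplified stand-ins for the go-ethereum
// core and event types referenced in the diff.
type NewTxsEvent struct{ Hashes []string }

type Subscription interface{ Unsubscribe() }

// txPool mirrors the updated interface: the subscriber states whether it also
// wants transactions that reappear after a reorg.
type txPool interface {
    SubscribeTransactions(ch chan<- NewTxsEvent, reorgs bool) Subscription
}

// watchNewTxs subscribes for newly seen transactions only (reorgs=false),
// which is what the protocol handler does before broadcasting.
func watchNewTxs(pool txPool, stop <-chan struct{}) {
    ch := make(chan NewTxsEvent, 128)
    sub := pool.SubscribeTransactions(ch, false)
    defer sub.Unsubscribe()

    for {
        select {
        case ev := <-ch:
            _ = ev // hand the batch to the broadcast/announce logic
        case <-stop:
            return
        }
    }
}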
-func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) } func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) } func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) } @@ -428,7 +424,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { for i := 0; i < len(sinks); i++ { txChs[i] = make(chan core.NewTxsEvent, 1024) - sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i]) + sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false) defer sub.Unsubscribe() } // Fill the source pool with transactions and wait for them at the sinks @@ -490,8 +486,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { defer sourcePipe.Close() defer sinkPipe.Close() - sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) - sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) + sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) + sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) defer sourcePeer.Close() defer sinkPeer.Close() @@ -543,7 +539,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { // Tests that a propagated malformed block (uncles or transactions don't match // with the hashes in the header) gets discarded and not broadcast forward. -func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) } func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) } func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) } diff --git a/eth/handler_test.go b/eth/handler_test.go index 373f43f28..d255b393d 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -113,15 +113,17 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.Lazy Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } } return pending } -// SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and +// SubscribeTransactions should return an event subscription of NewTxsEvent and // send events to the given channel. 
-func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { +func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { return p.txFeed.Subscribe(ch) } diff --git a/eth/peerset.go b/eth/peerset.go index b9cc1e03a..b27d3964a 100644 --- a/eth/peerset.go +++ b/eth/peerset.go @@ -18,6 +18,7 @@ package eth import ( "errors" + "fmt" "math/big" "sync" @@ -74,7 +75,7 @@ func (ps *peerSet) registerSnapExtension(peer *snap.Peer) error { // Reject the peer if it advertises `snap` without `eth` as `snap` is only a // satellite protocol meaningful with the chain selection of `eth` if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) { - return errSnapWithoutEth + return fmt.Errorf("%w: have %v", errSnapWithoutEth, peer.Caps()) } // Ensure nobody can double connect ps.lock.Lock() diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 51038d73f..bec2ec0e3 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -23,7 +23,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" @@ -45,10 +44,6 @@ const ( // nowadays, the practical limit will always be softResponseLimit. maxBodiesServe = 1024 - // maxNodeDataServe is the maximum number of state trie nodes to serve. This - // number is there to limit the number of disk lookups. - maxNodeDataServe = 1024 - // maxReceiptsServe is the maximum number of block receipts to serve. This // number is mostly there to limit the number of disk lookups. With block // containing 200+ transactions nowadays, the practical limit will always @@ -102,12 +97,12 @@ type TxPool interface { func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol { protocols := make([]p2p.Protocol, 0, len(ProtocolVersions)) for _, version := range ProtocolVersions { - version := version // Closure - - // Path scheme does not support GetNodeData, don't advertise eth66 on it - if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme { + // Blob transactions require eth/68 announcements, disable everything else + if version <= ETH67 && backend.Chain().Config().CancunTime != nil { continue } + version := version // Closure + protocols = append(protocols, p2p.Protocol{ Name: ProtocolName, Version: version, @@ -175,36 +170,19 @@ type Decoder interface { Time() time.Time } -var eth66 = map[uint64]msgHandler{ - NewBlockHashesMsg: handleNewBlockhashes, - NewBlockMsg: handleNewBlock, - TransactionsMsg: handleTransactions, - NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetNodeDataMsg: handleGetNodeData66, - NodeDataMsg: handleNodeData66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, -} - var eth67 = map[uint64]msgHandler{ NewBlockHashesMsg: handleNewBlockhashes, NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, - NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - 
GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67, + GetBlockHeadersMsg: handleGetBlockHeaders, + BlockHeadersMsg: handleBlockHeaders, + GetBlockBodiesMsg: handleGetBlockBodies, + BlockBodiesMsg: handleBlockBodies, + GetReceiptsMsg: handleGetReceipts, + ReceiptsMsg: handleReceipts, + GetPooledTransactionsMsg: handleGetPooledTransactions, + PooledTransactionsMsg: handlePooledTransactions, } var eth68 = map[uint64]msgHandler{ @@ -212,14 +190,14 @@ var eth68 = map[uint64]msgHandler{ NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, + GetBlockHeadersMsg: handleGetBlockHeaders, + BlockHeadersMsg: handleBlockHeaders, + GetBlockBodiesMsg: handleGetBlockBodies, + BlockBodiesMsg: handleBlockBodies, + GetReceiptsMsg: handleGetReceipts, + ReceiptsMsg: handleReceipts, + GetPooledTransactionsMsg: handleGetPooledTransactions, + PooledTransactionsMsg: handlePooledTransactions, } // handleMessage is invoked whenever an inbound message is received from a remote @@ -235,14 +213,10 @@ func handleMessage(backend Backend, peer *Peer) error { } defer msg.Discard() - var handlers = eth66 - if peer.Version() == ETH67 { - handlers = eth67 - } + var handlers = eth67 if peer.Version() >= ETH68 { handlers = eth68 } - // Track the amount of time it takes to serve the request and run the handler if metrics.Enabled { h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index bf2874721..41e18bfb3 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" @@ -151,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error { } // Tests that block headers can be retrieved from a remote chain based on user queries. 
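With eth/66 gone, handleMessage above only has to choose between the eth/67 and eth/68 handler tables. A reduced sketch of that dispatch shape; the message codes and handler bodies here are hypothetical placeholders, not the protocol's real values.

package dispatch_sketch

import "fmt"

const (
    ETH67 uint = 67
    ETH68 uint = 68
)

type msgHandler func(code uint64) error

// Hypothetical message codes; the real ones live in eth/protocols/eth.
const (
    transactionsMsg               uint64 = 0x02
    newPooledTransactionHashesMsg uint64 = 0x08
)

var eth67 = map[uint64]msgHandler{
    transactionsMsg:               func(uint64) error { return nil },
    newPooledTransactionHashesMsg: func(uint64) error { return nil }, // hash-only announcements
}

var eth68 = map[uint64]msgHandler{
    transactionsMsg:               func(uint64) error { return nil },
    newPooledTransactionHashesMsg: func(uint64) error { return nil }, // typed and sized announcements
}

// dispatch picks the handler table by negotiated version, defaulting to the
// lowest still-supported table, the same shape handleMessage now has.
func dispatch(version uint, code uint64) error {
    handlers := eth67
    if version >= ETH68 {
        handlers = eth68
    }
    handler, ok := handlers[code]
    if !ok {
        return fmt.Errorf("invalid message code %#x for eth/%d", code, version)
    }
    return handler(code)
}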
-func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) } func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) } func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) } @@ -178,29 +176,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { // Create a batch of tests for various scenarios limit := uint64(maxHeadersServe) tests := []struct { - query *GetBlockHeadersPacket // The query to execute for header retrieval - expect []common.Hash // The hashes of the block whose headers are expected + query *GetBlockHeadersRequest // The query to execute for header retrieval + expect []common.Hash // The hashes of the block whose headers are expected }{ // A single random block should be retrievable by hash { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // A single random block should be retrievable by number { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // Multiple headers should be retrievable in both directions { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), @@ -209,14 +207,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Multiple headers with skip lists should be retrievable { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), @@ -225,31 +223,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // The chain endpoints should be retrievable { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, { // If the peer requests a bit into the future, we deliver what we 
have - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, // Ensure protocol limits are honored { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit), }, // Check that requesting more than available is handled gracefully { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(0).Hash(), @@ -257,13 +255,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check that requesting more than available is handled gracefully, even if mid skip { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -271,7 +269,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a corner case where requesting more can iterate past the endpoints { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(2).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -280,24 +278,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a corner case where skipping overflow loops back into the chain start { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, []common.Hash{ backend.chain.GetBlockByNumber(3).Hash(), }, }, // Check a corner case where skipping overflow loops back to the same header { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: 
math.MaxUint64}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, []common.Hash{ backend.chain.GetBlockByNumber(1).Hash(), }, }, // Check that non existing headers aren't returned { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, []common.Hash{}, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, []common.Hash{}, }, } @@ -309,13 +307,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: 123, - GetBlockHeadersPacket: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ + RequestId: 123, + GetBlockHeadersRequest: tt.query, }) - if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{ - RequestId: 123, - BlockHeadersPacket: headers, + if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{ + RequestId: 123, + BlockHeadersRequest: headers, }); err != nil { t.Errorf("test %d: headers mismatch: %v", i, err) } @@ -324,11 +322,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: 456, - GetBlockHeadersPacket: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ + RequestId: 456, + GetBlockHeadersRequest: tt.query, }) - expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers} + expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers} if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil { t.Errorf("test %d by hash: headers mismatch: %v", i, err) } @@ -338,7 +336,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { } // Tests that block contents can be retrieved from a remote chain based on their hashes. -func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) } func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) } func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) } @@ -420,139 +417,20 @@ func testGetBlockBodies(t *testing.T, protocol uint) { } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ - RequestId: 123, - GetBlockBodiesPacket: hashes, + p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{ + RequestId: 123, + GetBlockBodiesRequest: hashes, }) - if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{ - RequestId: 123, - BlockBodiesPacket: bodies, + if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{ + RequestId: 123, + BlockBodiesResponse: bodies, }); err != nil { t.Fatalf("test %d: bodies mismatch: %v", i, err) } } } -// Tests that the state trie nodes can be retrieved based on hashes. 
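The header-query tests above all build the same two-layer value: a bare GetBlockHeadersRequest wrapped in a GetBlockHeadersPacket that adds the request ID. A standalone sketch of that construction, with simplified local type definitions that mirror only the fields exercised in the tests rather than importing the real protocol package.

package main

import "fmt"

// HashOrNumber, GetBlockHeadersRequest and GetBlockHeadersPacket are reduced
// stand-ins for the renamed eth protocol types used in the tests above.
type HashOrNumber struct {
    Hash   [32]byte
    Number uint64
}

type GetBlockHeadersRequest struct {
    Origin  HashOrNumber
    Amount  uint64
    Skip    uint64
    Reverse bool
}

type GetBlockHeadersPacket struct {
    RequestId uint64
    *GetBlockHeadersRequest
}

func main() {
    // Ask for three headers, every fourth block, walking backwards: the same
    // shape as the skip-list cases in the test table above.
    query := &GetBlockHeadersRequest{
        Origin:  HashOrNumber{Number: 64},
        Amount:  3,
        Skip:    3,
        Reverse: true,
    }
    packet := &GetBlockHeadersPacket{RequestId: 123, GetBlockHeadersRequest: query}
    fmt.Printf("req %d: origin=%d amount=%d skip=%d reverse=%t\n",
        packet.RequestId, packet.Origin.Number, packet.Amount, packet.Skip, packet.Reverse)
}

The test code above sends the equivalent packet with p2p.Send and matches the reply by the echoed RequestId.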
-func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) } -func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) } -func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) } - -func testGetNodeData(t *testing.T, protocol uint, drop bool) { - t.Parallel() - - // Define three accounts to simulate transactions with - acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) - - signer := types.HomesteadSigner{} - // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test) - generator := func(i int, block *core.BlockGen) { - switch i { - case 0: - // In block 1, the test bank sends account #1 some ether. - tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) - block.AddTx(tx) - case 1: - // In block 2, the test bank sends some more ether to account #1. - // acc1Addr passes it on to account #2. - tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) - tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key) - block.AddTx(tx1) - block.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by account #2. - block.SetCoinbase(acc2Addr) - block.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := block.PrevBlock(1).Header() - b2.Extra = []byte("foo") - block.AddUncle(b2) - b3 := block.PrevBlock(2).Header() - b3.Extra = []byte("foo") - block.AddUncle(b3) - } - } - // Assemble the test environment - backend := newTestBackendWithGenerator(4, false, generator) - defer backend.close() - - peer, _ := newTestPeer("peer", protocol, backend) - defer peer.close() - - // Collect all state tree hashes. - var hashes []common.Hash - it := backend.db.NewIterator(nil, nil) - for it.Next() { - if key := it.Key(); len(key) == common.HashLength { - hashes = append(hashes, common.BytesToHash(key)) - } - } - it.Release() - - // Request all hashes. - p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{ - RequestId: 123, - GetNodeDataPacket: hashes, - }) - msg, err := peer.app.ReadMsg() - if !drop { - if err != nil { - t.Fatalf("failed to read node data response: %v", err) - } - } else { - if err != nil { - return - } - t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg) - } - if msg.Code != NodeDataMsg { - t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg) - } - var res NodeDataPacket66 - if err := msg.Decode(&res); err != nil { - t.Fatalf("failed to decode response node data: %v", err) - } - - // Verify that all hashes correspond to the requested data. - data := res.NodeDataPacket - for i, want := range hashes { - if hash := crypto.Keccak256Hash(data[i]); hash != want { - t.Errorf("data hash mismatch: have %x, want %x", hash, want) - } - } - - // Reconstruct state tree from the received data. 
- reconstructDB := rawdb.NewMemoryDatabase() - for i := 0; i < len(data); i++ { - rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i]) - } - - // Sanity check whether all state matches. - accounts := []common.Address{testAddr, acc1Addr, acc2Addr} - for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ { - root := backend.chain.GetBlockByNumber(i).Root() - reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil) - for j, acc := range accounts { - state, _ := backend.chain.StateAt(root) - bw := state.GetBalance(acc) - bh := reconstructed.GetBalance(acc) - - if (bw == nil) != (bh == nil) { - t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - if bw != nil && bh != nil && bw.Cmp(bh) != 0 { - t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - } - } -} - // Tests that the transaction receipts can be retrieved based on hashes. -func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) } func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) } func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } @@ -613,13 +491,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) { receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) } // Send the hash request and verify the response - p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{ - RequestId: 123, - GetReceiptsPacket: hashes, + p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{ + RequestId: 123, + GetReceiptsRequest: hashes, }) - if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{ - RequestId: 123, - ReceiptsPacket: receipts, + if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{ + RequestId: 123, + ReceiptsResponse: receipts, }); err != nil { t.Errorf("receipts mismatch: %v", err) } diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index da741791b..069e92dad 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -22,27 +22,25 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) -// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders -func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { // Decode the complex header query - var query GetBlockHeadersPacket66 + var query GetBlockHeadersPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer) + response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer) return peer.ReplyBlockHeadersRLP(query.RequestId, response) } // ServiceGetBlockHeadersQuery assembles the response to a header query. It is // exposed to allow external packages to test protocol behavior. 
-func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { +func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { if query.Skip == 0 { // The fast path: when the request is for a contiguous segment of headers. return serviceContiguousBlockHeaderQuery(chain, query) @@ -51,7 +49,7 @@ func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersP } } -func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { +func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { hashMode := query.Origin.Hash != (common.Hash{}) first := true maxNonCanonical := uint64(100) @@ -140,7 +138,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc return headers } -func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue { +func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue { count := query.Amount if count > maxHeadersServe { count = maxHeadersServe @@ -203,19 +201,19 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe } } -func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error { // Decode the block body retrieval message - var query GetBlockBodiesPacket66 + var query GetBlockBodiesPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket) + response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest) return peer.ReplyBlockBodiesRLP(query.RequestId, response) } // ServiceGetBlockBodiesQuery assembles the response to a body query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue { +func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue { // Gather blocks until the fetch or network limits is reached var ( bytes int @@ -234,60 +232,19 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPack return bodies } -func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { - // Decode the trie node data retrieval message - var query GetNodeDataPacket66 - if err := msg.Decode(&query); err != nil { - return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) - } - response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket) - return peer.ReplyNodeData(query.RequestId, response) -} - -// ServiceGetNodeDataQuery assembles the response to a node data query. It is -// exposed to allow external packages to test protocol behavior. -func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { - // Request nodes by hash is not supported in path-based scheme. 
- if chain.TrieDB().Scheme() == rawdb.PathScheme { - return nil - } - // Gather state data until the fetch or network limits is reached - var ( - bytes int - nodes [][]byte - ) - for lookups, hash := range query { - if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe || - lookups >= 2*maxNodeDataServe { - break - } - // Retrieve the requested state entry - entry, err := chain.TrieDB().Node(hash) - if len(entry) == 0 || err != nil { - // Read the contract code with prefix only to save unnecessary lookups. - entry, err = chain.ContractCodeWithPrefix(hash) - } - if err == nil && len(entry) > 0 { - nodes = append(nodes, entry) - bytes += len(entry) - } - } - return nodes -} - -func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error { // Decode the block receipts retrieval message - var query GetReceiptsPacket66 + var query GetReceiptsPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket) + response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest) return peer.ReplyReceiptsRLP(query.RequestId, response) } // ServiceGetReceiptsQuery assembles the response to a receipt query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue { +func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -356,15 +313,15 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, ann) } -func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { // A batch of headers arrived to one of our previous requests - res := new(BlockHeadersPacket66) + res := new(BlockHeadersPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { - hashes := make([]common.Hash, len(res.BlockHeadersPacket)) - for i, header := range res.BlockHeadersPacket { + hashes := make([]common.Hash, len(res.BlockHeadersRequest)) + for i, header := range res.BlockHeadersRequest { hashes[i] = header.Hash() } return hashes @@ -372,24 +329,24 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockHeadersMsg, - Res: &res.BlockHeadersPacket, + Res: &res.BlockHeadersRequest, }, metadata) } -func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { // A batch of block bodies arrived to one of our previous requests - res := new(BlockBodiesPacket66) + res := new(BlockBodiesPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { var ( - txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) - uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket)) - withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + txsHashes = make([]common.Hash, len(res.BlockBodiesResponse)) + uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse)) + withdrawalHashes = make([]common.Hash, 
len(res.BlockBodiesResponse)) ) hasher := trie.NewStackTrie(nil) - for i, body := range res.BlockBodiesPacket { + for i, body := range res.BlockBodiesResponse { txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) uncleHashes[i] = types.CalcUncleHash(body.Uncles) if body.Withdrawals != nil { @@ -401,33 +358,20 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockBodiesMsg, - Res: &res.BlockBodiesPacket, + Res: &res.BlockBodiesResponse, }, metadata) } -func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { - // A batch of node state data arrived to one of our previous requests - res := new(NodeDataPacket66) - if err := msg.Decode(res); err != nil { - return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) - } - return peer.dispatchResponse(&Response{ - id: res.RequestId, - code: NodeDataMsg, - Res: &res.NodeDataPacket, - }, nil) // No post-processing, we're not using this packet anymore -} - -func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { +func handleReceipts(backend Backend, msg Decoder, peer *Peer) error { // A batch of receipts arrived to one of our previous requests - res := new(ReceiptsPacket66) + res := new(ReceiptsPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { hasher := trie.NewStackTrie(nil) - hashes := make([]common.Hash, len(res.ReceiptsPacket)) - for i, receipt := range res.ReceiptsPacket { + hashes := make([]common.Hash, len(res.ReceiptsResponse)) + for i, receipt := range res.ReceiptsResponse { hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) } return hashes @@ -435,17 +379,17 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: ReceiptsMsg, - Res: &res.ReceiptsPacket, + Res: &res.ReceiptsResponse, }, metadata) } -func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error { +func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error { // New transaction announcement arrived, make sure we have // a valid and fresh chain to handle them if !backend.AcceptTxs() { return nil } - ann := new(NewPooledTransactionHashesPacket66) + ann := new(NewPooledTransactionHashesPacket67) if err := msg.Decode(ann); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } @@ -476,17 +420,17 @@ func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer return backend.Handle(peer, ann) } -func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error { // Decode the pooled transactions retrieval message - var query GetPooledTransactionsPacket66 + var query GetPooledTransactionsPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer) + hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest) return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) } -func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) { +func answerGetPooledTransactions(backend Backend, query 
GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) { // Gather transactions until the fetch or network limits is reached var ( bytes int @@ -534,17 +478,17 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, &txs) } -func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { +func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { // Transactions arrived, make sure we have a valid and fresh chain to handle them if !backend.AcceptTxs() { return nil } // Transactions can be processed, parse all of them and deliver to the pool - var txs PooledTransactionsPacket66 + var txs PooledTransactionsPacket if err := msg.Decode(&txs); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - for i, tx := range txs.PooledTransactionsPacket { + for i, tx := range txs.PooledTransactionsResponse { // Validate and mark the remote transaction if tx == nil { return fmt.Errorf("%w: transaction %d is nil", errDecode, i) @@ -553,5 +497,5 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error } requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) - return backend.Handle(peer, &txs.PooledTransactionsPacket) + return backend.Handle(peer, &txs.PooledTransactionsResponse) } diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go index dca66e0c5..d96cfc816 100644 --- a/eth/protocols/eth/handshake_test.go +++ b/eth/protocols/eth/handshake_test.go @@ -27,7 +27,8 @@ import ( ) // Tests that handshake failures are detected and reported correctly. -func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) } +func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) } +func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) } func testHandshake(t *testing.T, protocol uint) { t.Parallel() diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index 219f486c8..938af0cab 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -219,7 +219,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) { func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) - return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes)) + return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes)) } // sendPooledTransactionHashes68 sends transaction hashes (tagged with their type @@ -248,15 +248,15 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) { } } -// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP. +// ReplyPooledTransactionsRLP is the response to RequestTxs. func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) 
- // Not packed into PooledTransactionsPacket to avoid RLP decoding - return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{ - RequestId: id, - PooledTransactionsRLPPacket: txs, + // Not packed into PooledTransactionsResponse to avoid RLP decoding + return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{ + RequestId: id, + PooledTransactionsRLPResponse: txs, }) } @@ -309,36 +309,28 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) { } } -// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders. +// ReplyBlockHeadersRLP is the response to GetBlockHeaders. func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error { - return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{ - RequestId: id, - BlockHeadersRLPPacket: headers, + return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{ + RequestId: id, + BlockHeadersRLPResponse: headers, }) } -// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies. +// ReplyBlockBodiesRLP is the response to GetBlockBodies. func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error { - // Not packed into BlockBodiesPacket to avoid RLP decoding - return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{ - RequestId: id, - BlockBodiesRLPPacket: bodies, + // Not packed into BlockBodiesResponse to avoid RLP decoding + return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{ + RequestId: id, + BlockBodiesRLPResponse: bodies, }) } -// ReplyNodeData is the eth/66 response to GetNodeData. -func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error { - return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{ - RequestId: id, - NodeDataPacket: data, - }) -} - -// ReplyReceiptsRLP is the eth/66 response to GetReceipts. +// ReplyReceiptsRLP is the response to GetReceipts. 
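The Reply*RLP helpers in this hunk deliberately keep the payload as pre-encoded RLP so the serving node never decodes and re-encodes data it is merely forwarding. A hedged sketch of that pattern; rawValue and the send callback stand in for rlp.RawValue and p2p.Send, and the struct layout mirrors the *RLPPacket wrappers shown above.

package reply_sketch

import "fmt"

// rawValue stands in for rlp.RawValue: bytes that are already RLP-encoded and
// are forwarded verbatim instead of being decoded into headers first.
type rawValue []byte

// blockHeadersRLPPacket mirrors the reply wrapper shape: a request ID plus the
// pre-encoded payload items.
type blockHeadersRLPPacket struct {
    RequestId               uint64
    BlockHeadersRLPResponse []rawValue
}

// replyBlockHeadersRLP echoes the request ID and ships the raw items without
// touching their encoding, the same pattern ReplyBlockHeadersRLP follows.
func replyBlockHeadersRLP(send func(msg interface{}) error, id uint64, headers []rawValue) error {
    return send(&blockHeadersRLPPacket{
        RequestId:               id,
        BlockHeadersRLPResponse: headers,
    })
}

func exampleReply() {
    _ = replyBlockHeadersRLP(func(msg interface{}) error {
        fmt.Printf("sending %T\n", msg)
        return nil
    }, 123, []rawValue{{0xc0}}) // 0xc0 is the RLP encoding of an empty list
}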
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { - return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{ - RequestId: id, - ReceiptsRLPPacket: receipts, + return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{ + RequestId: id, + ReceiptsRLPResponse: receipts, }) } @@ -353,9 +345,9 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), @@ -380,9 +372,9 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -407,9 +399,9 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -434,31 +426,9 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques sink: sink, code: GetBlockBodiesMsg, want: BlockBodiesMsg, - data: &GetBlockBodiesPacket66{ - RequestId: id, - GetBlockBodiesPacket: hashes, - }, - } - if err := p.dispatchRequest(req); err != nil { - return nil, err - } - return req, nil -} - -// RequestNodeData fetches a batch of arbitrary data from a node's known state -// data, corresponding to the specified hashes. 
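The Reply*RLP helpers exist so a serving node can forward data it already holds RLP-encoded without a decode/encode round trip, and the request-id wrapper encodes identically either way, which the "Identical to non-rlp-shortcut version" test case below leans on. A small sketch under the assumption that the renamed packet types from this patch are importable:

package main

import (
	"bytes"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	header := &types.Header{Number: big.NewInt(100), GasLimit: 30_000_000, Time: 1_700_000_000}

	// Regular reply path: the headers are Go values and get RLP-encoded as part
	// of the response packet.
	full, _ := rlp.EncodeToBytes(&eth.BlockHeadersPacket{
		RequestId:           1111,
		BlockHeadersRequest: []*types.Header{header},
	})

	// Shortcut path used by ReplyBlockHeadersRLP: the headers are already raw
	// RLP bytes and are copied through verbatim, skipping the decode/encode
	// round trip. The wire encoding comes out byte-for-byte the same.
	raw, _ := rlp.EncodeToBytes(header)
	shortcut, _ := rlp.EncodeToBytes(&eth.BlockHeadersRLPPacket{
		RequestId:               1111,
		BlockHeadersRLPResponse: []rlp.RawValue{raw},
	})
	fmt.Println("identical encodings:", bytes.Equal(full, shortcut))
}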
-func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) { - p.Log().Debug("Fetching batch of state data", "count", len(hashes)) - id := rand.Uint64() - - req := &Request{ - id: id, - sink: sink, - code: GetNodeDataMsg, - want: NodeDataMsg, - data: &GetNodeDataPacket66{ - RequestId: id, - GetNodeDataPacket: hashes, + data: &GetBlockBodiesPacket{ + RequestId: id, + GetBlockBodiesRequest: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -477,9 +447,9 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ sink: sink, code: GetReceiptsMsg, want: ReceiptsMsg, - data: &GetReceiptsPacket66{ - RequestId: id, - GetReceiptsPacket: hashes, + data: &GetReceiptsPacket{ + RequestId: id, + GetReceiptsRequest: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -494,9 +464,9 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error { id := rand.Uint64() requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id) - return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{ - RequestId: id, - GetPooledTransactionsPacket: hashes, + return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{ + RequestId: id, + GetPooledTransactionsRequest: hashes, }) } diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 4b9f5ad6b..0f44f83de 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -30,7 +30,6 @@ import ( // Constants to match up protocol versions and messages const ( - ETH66 = 66 ETH67 = 67 ETH68 = 68 ) @@ -41,11 +40,11 @@ const ProtocolName = "eth" // ProtocolVersions are the supported versions of the `eth` protocol (first // is primary). -var ProtocolVersions = []uint{ETH68, ETH67, ETH66} +var ProtocolVersions = []uint{ETH68, ETH67} // protocolLengths are the number of implemented message corresponding to // different protocol versions. -var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17} +var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17} // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 @@ -62,8 +61,6 @@ const ( NewPooledTransactionHashesMsg = 0x08 GetPooledTransactionsMsg = 0x09 PooledTransactionsMsg = 0x0a - GetNodeDataMsg = 0x0d - NodeDataMsg = 0x0e GetReceiptsMsg = 0x0f ReceiptsMsg = 0x10 ) @@ -85,7 +82,7 @@ type Packet interface { Kind() byte // Kind returns the message type. } -// StatusPacket is the network packet for the status message for eth/64 and later. +// StatusPacket is the network packet for the status message. type StatusPacket struct { ProtocolVersion uint32 NetworkID uint64 @@ -118,18 +115,18 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) { // TransactionsPacket is the network packet for broadcasting new transactions. type TransactionsPacket []*types.Transaction -// GetBlockHeadersPacket represents a block header query. -type GetBlockHeadersPacket struct { +// GetBlockHeadersRequest represents a block header query. 
+type GetBlockHeadersRequest struct { Origin HashOrNumber // Block from which to retrieve headers Amount uint64 // Maximum number of headers to retrieve Skip uint64 // Blocks to skip between consecutive headers Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) } -// GetBlockHeadersPacket66 represents a block header query over eth/66 -type GetBlockHeadersPacket66 struct { +// GetBlockHeadersPacket represents a block header query with request ID wrapping. +type GetBlockHeadersPacket struct { RequestId uint64 - *GetBlockHeadersPacket + *GetBlockHeadersRequest } // HashOrNumber is a combined field for specifying an origin block. @@ -168,23 +165,23 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error { } } -// BlockHeadersPacket represents a block header response. -type BlockHeadersPacket []*types.Header +// BlockHeadersRequest represents a block header response. +type BlockHeadersRequest []*types.Header -// BlockHeadersPacket66 represents a block header response over eth/66. -type BlockHeadersPacket66 struct { +// BlockHeadersPacket represents a block header response over with request ID wrapping. +type BlockHeadersPacket struct { RequestId uint64 - BlockHeadersPacket + BlockHeadersRequest } -// BlockHeadersRLPPacket represents a block header response, to use when we already +// BlockHeadersRLPResponse represents a block header response, to use when we already // have the headers rlp encoded. -type BlockHeadersRLPPacket []rlp.RawValue +type BlockHeadersRLPResponse []rlp.RawValue -// BlockHeadersRLPPacket66 represents a block header response over eth/66. -type BlockHeadersRLPPacket66 struct { +// BlockHeadersRLPPacket represents a block header response with request ID wrapping. +type BlockHeadersRLPPacket struct { RequestId uint64 - BlockHeadersRLPPacket + BlockHeadersRLPResponse } // NewBlockPacket is the network packet for the block propagation message. @@ -206,33 +203,34 @@ func (request *NewBlockPacket) sanityCheck() error { return nil } -// GetBlockBodiesPacket represents a block body query. -type GetBlockBodiesPacket []common.Hash +// GetBlockBodiesRequest represents a block body query. +type GetBlockBodiesRequest []common.Hash -// GetBlockBodiesPacket66 represents a block body query over eth/66. -type GetBlockBodiesPacket66 struct { +// GetBlockBodiesPacket represents a block body query with request ID wrapping. +type GetBlockBodiesPacket struct { RequestId uint64 - GetBlockBodiesPacket + GetBlockBodiesRequest } -// BlockBodiesPacket is the network packet for block content distribution. -type BlockBodiesPacket []*BlockBody +// BlockBodiesResponse is the network packet for block content distribution. +type BlockBodiesResponse []*BlockBody -// BlockBodiesPacket66 is the network packet for block content distribution over eth/66. -type BlockBodiesPacket66 struct { +// BlockBodiesPacket is the network packet for block content distribution with +// request ID wrapping. +type BlockBodiesPacket struct { RequestId uint64 - BlockBodiesPacket + BlockBodiesResponse } -// BlockBodiesRLPPacket is used for replying to block body requests, in cases +// BlockBodiesRLPResponse is used for replying to block body requests, in cases // where we already have them RLP-encoded, and thus can avoid the decode-encode // roundtrip. 
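HashOrNumber keeps its either/or semantics through the rename: an origin is a block number or a hash, never both, and the custom RLP hooks reject the ambiguous case, which the updated protocol tests below continue to assert. A hedged sketch against the post-patch package:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A query origin may be a block number or a hash...
	byNumber := &eth.GetBlockHeadersRequest{Origin: eth.HashOrNumber{Number: 314}, Amount: 5}
	byHash := &eth.GetBlockHeadersRequest{Origin: eth.HashOrNumber{Hash: common.HexToHash("0xdeadc0de")}, Amount: 5}
	for _, q := range []*eth.GetBlockHeadersRequest{byNumber, byHash} {
		if _, err := rlp.EncodeToBytes(q); err != nil {
			fmt.Println("unexpected encode failure:", err)
		}
	}

	// ...but never both at once: the custom RLP encoding rejects the ambiguous
	// origin, which is the failing case the protocol tests keep asserting.
	both := &eth.GetBlockHeadersRequest{Origin: eth.HashOrNumber{Hash: common.HexToHash("0xdeadc0de"), Number: 314}}
	if _, err := rlp.EncodeToBytes(both); err != nil {
		fmt.Println("ambiguous origin rejected:", err)
	}
}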
-type BlockBodiesRLPPacket []rlp.RawValue +type BlockBodiesRLPResponse []rlp.RawValue -// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66 -type BlockBodiesRLPPacket66 struct { +// BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping. +type BlockBodiesRLPPacket struct { RequestId uint64 - BlockBodiesRLPPacket + BlockBodiesRLPResponse } // BlockBody represents the data content of a single block. @@ -244,7 +242,7 @@ type BlockBody struct { // Unpack retrieves the transactions and uncles from the range packet and returns // them in a split flat format that's more consistent with the internal data structures. -func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { +func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { // TODO(matt): add support for withdrawals to fetchers var ( txset = make([][]*types.Transaction, len(*p)) @@ -257,53 +255,36 @@ func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, return txset, uncleset, withdrawalset } -// GetNodeDataPacket represents a trie node data query. -type GetNodeDataPacket []common.Hash - -// GetNodeDataPacket66 represents a trie node data query over eth/66. -type GetNodeDataPacket66 struct { - RequestId uint64 - GetNodeDataPacket -} - -// NodeDataPacket is the network packet for trie node data distribution. -type NodeDataPacket [][]byte +// GetReceiptsRequest represents a block receipts query. +type GetReceiptsRequest []common.Hash -// NodeDataPacket66 is the network packet for trie node data distribution over eth/66. -type NodeDataPacket66 struct { +// GetReceiptsPacket represents a block receipts query with request ID wrapping. +type GetReceiptsPacket struct { RequestId uint64 - NodeDataPacket + GetReceiptsRequest } -// GetReceiptsPacket represents a block receipts query. -type GetReceiptsPacket []common.Hash +// ReceiptsResponse is the network packet for block receipts distribution. +type ReceiptsResponse [][]*types.Receipt -// GetReceiptsPacket66 represents a block receipts query over eth/66. -type GetReceiptsPacket66 struct { +// ReceiptsPacket is the network packet for block receipts distribution with +// request ID wrapping. +type ReceiptsPacket struct { RequestId uint64 - GetReceiptsPacket + ReceiptsResponse } -// ReceiptsPacket is the network packet for block receipts distribution. -type ReceiptsPacket [][]*types.Receipt +// ReceiptsRLPResponse is used for receipts, when we already have it encoded +type ReceiptsRLPResponse []rlp.RawValue -// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66. -type ReceiptsPacket66 struct { +// ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping. +type ReceiptsRLPPacket struct { RequestId uint64 - ReceiptsPacket + ReceiptsRLPResponse } -// ReceiptsRLPPacket is used for receipts, when we already have it encoded -type ReceiptsRLPPacket []rlp.RawValue - -// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket -type ReceiptsRLPPacket66 struct { - RequestId uint64 - ReceiptsRLPPacket -} - -// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67. -type NewPooledTransactionHashesPacket66 []common.Hash +// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67. 
+type NewPooledTransactionHashesPacket67 []common.Hash // NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer. type NewPooledTransactionHashesPacket68 struct { @@ -312,31 +293,33 @@ type NewPooledTransactionHashesPacket68 struct { Hashes []common.Hash } -// GetPooledTransactionsPacket represents a transaction query. -type GetPooledTransactionsPacket []common.Hash +// GetPooledTransactionsRequest represents a transaction query. +type GetPooledTransactionsRequest []common.Hash -type GetPooledTransactionsPacket66 struct { +// GetPooledTransactionsPacket represents a transaction query with request ID wrapping. +type GetPooledTransactionsPacket struct { RequestId uint64 - GetPooledTransactionsPacket + GetPooledTransactionsRequest } -// PooledTransactionsPacket is the network packet for transaction distribution. -type PooledTransactionsPacket []*types.Transaction +// PooledTransactionsResponse is the network packet for transaction distribution. +type PooledTransactionsResponse []*types.Transaction -// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66. -type PooledTransactionsPacket66 struct { +// PooledTransactionsPacket is the network packet for transaction distribution +// with request ID wrapping. +type PooledTransactionsPacket struct { RequestId uint64 - PooledTransactionsPacket + PooledTransactionsResponse } -// PooledTransactionsRLPPacket is the network packet for transaction distribution, used +// PooledTransactionsRLPResponse is the network packet for transaction distribution, used // in the cases we already have them in rlp-encoded form -type PooledTransactionsRLPPacket []rlp.RawValue +type PooledTransactionsRLPResponse []rlp.RawValue -// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket -type PooledTransactionsRLPPacket66 struct { +// PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping. 
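The announcement formats now carry version-specific names: eth/67 stays a bare hash list, while eth/68 adds per-transaction type and size metadata. A sketch of building both, assuming the eth/68 struct keeps its Types, Sizes and Hashes fields (only Hashes is visible in this hunk); the values are placeholders.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// eth/67 announcements stay a bare hash list...
	ann67 := eth.NewPooledTransactionHashesPacket67{common.HexToHash("0xdeadc0de")}

	// ...while eth/68 pairs each hash with the transaction's type and encoded
	// size, letting peers decide what to fetch without pulling the body first.
	ann68 := eth.NewPooledTransactionHashesPacket68{
		Types:  []byte{types.DynamicFeeTxType},
		Sizes:  []uint32{128},
		Hashes: []common.Hash{common.HexToHash("0xdeadc0de")},
	}

	for _, msg := range []interface{}{ann67, ann68} {
		enc, _ := rlp.EncodeToBytes(msg)
		fmt.Printf("%T -> %x\n", msg, enc)
	}
}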
+type PooledTransactionsRLPPacket struct { RequestId uint64 - PooledTransactionsRLPPacket + PooledTransactionsRLPResponse } func (*StatusPacket) Name() string { return "Status" } @@ -348,40 +331,34 @@ func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg } func (*TransactionsPacket) Name() string { return "Transactions" } func (*TransactionsPacket) Kind() byte { return TransactionsMsg } -func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" } -func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg } +func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" } +func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg } -func (*BlockHeadersPacket) Name() string { return "BlockHeaders" } -func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg } +func (*BlockHeadersRequest) Name() string { return "BlockHeaders" } +func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg } -func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" } -func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg } +func (*GetBlockBodiesRequest) Name() string { return "GetBlockBodies" } +func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg } -func (*BlockBodiesPacket) Name() string { return "BlockBodies" } -func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } +func (*BlockBodiesResponse) Name() string { return "BlockBodies" } +func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg } func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Kind() byte { return NewBlockMsg } -func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" } -func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg } +func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" } +func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg } func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" } func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg } -func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } -func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } - -func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } -func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } - -func (*GetNodeDataPacket) Name() string { return "GetNodeData" } -func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } +func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" } +func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg } -func (*NodeDataPacket) Name() string { return "NodeData" } -func (*NodeDataPacket) Kind() byte { return NodeDataMsg } +func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" } +func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg } -func (*GetReceiptsPacket) Name() string { return "GetReceipts" } -func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } +func (*GetReceiptsRequest) Name() string { return "GetReceipts" } +func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg } -func (*ReceiptsPacket) Name() string { return "Receipts" } -func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg } +func (*ReceiptsResponse) 
Name() string { return "Receipts" } +func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index a86fbb0a6..bc2545dea 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -35,19 +35,19 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } // Assemble some table driven tests tests := []struct { - packet *GetBlockHeadersPacket + packet *GetBlockHeadersRequest fail bool }{ // Providing the origin as either a hash or a number should both work - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}}, - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}}, // Providing arbitrary query field should also work - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, // Providing both the origin hash and origin number must fail - {fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}}, + {fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}}, } // Iterate over each of the tests and try to encode and then decode for i, tt := range tests { @@ -58,7 +58,7 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { t.Fatalf("test %d: encode should have failed", i) } if !tt.fail { - packet := new(GetBlockHeadersPacket) + packet := new(GetBlockHeadersRequest) if err := rlp.DecodeBytes(bytes, packet); err != nil { t.Fatalf("test %d: failed to decode packet: %v", i, err) } @@ -70,46 +70,40 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } } -// TestEth66EmptyMessages tests encoding of empty eth66 messages -func TestEth66EmptyMessages(t *testing.T) { +// TestEmptyMessages tests encoding of empty messages. 
+func TestEmptyMessages(t *testing.T) { // All empty messages encodes to the same format want := common.FromHex("c4820457c0") for i, msg := range []interface{}{ // Headers - GetBlockHeadersPacket66{1111, nil}, - BlockHeadersPacket66{1111, nil}, + GetBlockHeadersPacket{1111, nil}, + BlockHeadersPacket{1111, nil}, // Bodies - GetBlockBodiesPacket66{1111, nil}, - BlockBodiesPacket66{1111, nil}, - BlockBodiesRLPPacket66{1111, nil}, - // Node data - GetNodeDataPacket66{1111, nil}, - NodeDataPacket66{1111, nil}, + GetBlockBodiesPacket{1111, nil}, + BlockBodiesPacket{1111, nil}, + BlockBodiesRLPPacket{1111, nil}, // Receipts - GetReceiptsPacket66{1111, nil}, - ReceiptsPacket66{1111, nil}, + GetReceiptsPacket{1111, nil}, + ReceiptsPacket{1111, nil}, // Transactions - GetPooledTransactionsPacket66{1111, nil}, - PooledTransactionsPacket66{1111, nil}, - PooledTransactionsRLPPacket66{1111, nil}, + GetPooledTransactionsPacket{1111, nil}, + PooledTransactionsPacket{1111, nil}, + PooledTransactionsRLPPacket{1111, nil}, // Headers - BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})}, + BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})}, // Bodies - GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, - BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, - BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, - // Node data - GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})}, - NodeDataPacket66{1111, NodeDataPacket([][]byte{})}, + GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})}, + BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})}, + BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})}, // Receipts - GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, - ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, + GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})}, + ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})}, // Transactions - GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})}, - PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})}, - PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})}, + GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})}, + PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})}, + PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})}, } { if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want) @@ -117,8 +111,8 @@ func TestEth66EmptyMessages(t *testing.T) { } } -// TestEth66Messages tests the encoding of all redefined eth66 messages -func TestEth66Messages(t *testing.T) { +// TestMessages tests the encoding of all messages. 
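The shared want value follows directly from the RLP rules: 1111 encodes to 0x820457, an empty payload to 0xc0, and the outer two-element list gets a 0xc4 header. A standalone check using a throwaway struct, so it does not depend on any packet type:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// [1111, []] encodes as:
	//   82 04 57  -> the uint 1111
	//   c0        -> the empty inner list
	//   c4        -> outer list header for the 4 payload bytes
	msg := struct {
		RequestId uint64
		Hashes    []common.Hash
	}{RequestId: 1111, Hashes: nil}

	enc, err := rlp.EncodeToBytes(&msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", enc) // prints c4820457c0
}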
+func TestMessages(t *testing.T) { // Some basic structs used during testing var ( header *types.Header @@ -169,10 +163,6 @@ func TestEth66Messages(t *testing.T) { common.HexToHash("deadc0de"), common.HexToHash("feedbeef"), } - byteSlices := [][]byte{ - common.FromHex("deadc0de"), - common.FromHex("feedbeef"), - } // init the receipts { receipts = []*types.Receipt{ @@ -203,59 +193,51 @@ func TestEth66Messages(t *testing.T) { want []byte }{ { - GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}}, + GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}}, common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"), }, { - GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, + GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, common.FromHex("ca820457c682270f050580"), }, { - BlockHeadersPacket66{1111, BlockHeadersPacket{header}}, + BlockHeadersPacket{1111, BlockHeadersRequest{header}}, common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)}, + GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})}, + BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { // Identical to non-rlp-shortcut version - BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, + BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})}, common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, - common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), - }, - { - NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, - common.FromHex("ce820457ca84deadc0de84feedbeef"), - }, - { - 
GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)}, + GetReceiptsPacket{1111, GetReceiptsRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})}, + ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})}, + ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)}, + GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)}, + PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)}, common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, { - PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)}, + PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)}, 
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, } { diff --git a/eth/sync_test.go b/eth/sync_test.go index b5e00298b..d26cbb66e 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -28,8 +28,8 @@ import ( ) // Tests that snap sync is disabled after a successful sync cycle. -func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) } func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) } +func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) } // Tests that snap sync gets disabled as soon as a real block is successfully // imported into the blockchain. diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index c35a154ca..5aa00aad4 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -27,6 +27,7 @@ import ( "sync/atomic" "time" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/bloom" "github.com/ethereum/go-ethereum/common" @@ -118,6 +119,18 @@ func (d *Database) onWriteStallEnd() { d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime))) } +// panicLogger is just a noop logger to disable Pebble's internal logger. +// +// TODO(karalabe): Remove when Pebble sets this as the default. +type panicLogger struct{} + +func (l panicLogger) Infof(format string, args ...interface{}) { +} + +func (l panicLogger) Fatalf(format string, args ...interface{}) { + panic(errors.Errorf("fatal: "+format, args...)) +} + // New returns a wrapped pebble DB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) { @@ -158,7 +171,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e // The size of memory table(as well as the write buffer). // Note, there may have more than two memory tables in the system. - MemTableSize: memTableSize, + MemTableSize: uint64(memTableSize), // MemTableStopWritesThreshold places a hard limit on the size // of the existent MemTables(including the frozen one). @@ -189,6 +202,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e WriteStallBegin: db.onWriteStallBegin, WriteStallEnd: db.onWriteStallEnd, }, + Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble } // Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130 // for more details. @@ -305,12 +319,9 @@ func (d *Database) NewBatch() ethdb.Batch { } // NewBatchWithSize creates a write-only database batch with pre-allocated buffer. -// It's not supported by pebble, but pebble has better memory allocation strategy -// which turns out a lot faster than leveldb. It's performant enough to construct -// batch object without any pre-allocated space. 
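The pebble-related changes replace the library's default logger, whose Fatalf can terminate the whole process, with a panicking no-op, and start forwarding the requested batch size now that the bumped pebble exposes NewBatchWithSize. A rough usage sketch against that pebble version; the database path and key/value are placeholders.

package main

import (
	"log"

	"github.com/cockroachdb/pebble"
)

// quietLogger mirrors the panicLogger idea above: drop Info output and turn
// Fatalf into a Go panic instead of letting the library kill the process.
type quietLogger struct{}

func (quietLogger) Infof(format string, args ...interface{})  {}
func (quietLogger) Fatalf(format string, args ...interface{}) { log.Panicf(format, args...) }

func main() {
	db, err := pebble.Open("demo-db", &pebble.Options{Logger: quietLogger{}})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The bumped pebble exposes pre-sized batches, which the wrapper's
	// NewBatchWithSize now forwards to instead of ignoring the size hint.
	batch := db.NewBatchWithSize(1024)
	defer batch.Close()
	if err := batch.Set([]byte("key"), []byte("value"), nil); err != nil {
		log.Fatal(err)
	}
	if err := batch.Commit(pebble.Sync); err != nil {
		log.Fatal(err)
	}
}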
-func (d *Database) NewBatchWithSize(_ int) ethdb.Batch { +func (d *Database) NewBatchWithSize(size int) ethdb.Batch { return &batch{ - b: d.db.NewBatch(), + b: d.db.NewBatchWithSize(size), db: d, } } @@ -582,7 +593,7 @@ type pebbleIterator struct { // of database content with a particular key prefix, starting at a particular // initial key (or after, if it does not exist). func (d *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - iter := d.db.NewIter(&pebble.IterOptions{ + iter, _ := d.db.NewIter(&pebble.IterOptions{ LowerBound: append(prefix, start...), UpperBound: upperBound(prefix), }) diff --git a/go.mod b/go.mod index a2de9eaed..0c1ae95c3 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,8 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/cp v1.1.1 github.com/cloudflare/cloudflare-go v0.14.0 - github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 + github.com/cockroachdb/errors v1.9.1 + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/consensys/gnark-crypto v0.11.2 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 @@ -56,8 +57,7 @@ require ( github.com/kylelemons/godebug v1.1.0 github.com/lib/pq v1.2.0 github.com/mattn/go-colorable v0.1.13 - github.com/mattn/go-isatty v0.0.16 - github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 + github.com/mattn/go-isatty v0.0.17 github.com/olekukonko/tablewriter v0.0.5 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 @@ -70,13 +70,13 @@ require ( github.com/tyler-smith/go-bip39 v1.1.0 github.com/urfave/cli/v2 v2.25.7 go.uber.org/automaxprocs v1.5.2 - golang.org/x/crypto v0.12.0 - golang.org/x/exp v0.0.0-20230810033253-352e893a4cad + golang.org/x/crypto v0.13.0 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.11.0 - golang.org/x/text v0.12.0 + golang.org/x/sys v0.12.0 + golang.org/x/text v0.13.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.9.1 + golang.org/x/tools v0.13.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -87,6 +87,7 @@ require ( github.com/getsentry/sentry-go v0.18.0 // indirect github.com/klauspost/cpuid/v2 v2.1.2 // indirect github.com/minio/sha256-simd v1.0.0 // indirect + github.com/naoina/go-stringutil v0.1.0 // indirect github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 // indirect github.com/sirupsen/logrus v1.9.0 // indirect go.uber.org/atomic v1.10.0 // indirect @@ -94,7 +95,7 @@ require ( go.uber.org/zap v1.23.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect - gotest.tools/v3 v3.5.0 // indirect + gotest.tools/v3 v3.5.1 // indirect ) require ( @@ -109,9 +110,9 @@ require ( github.com/aws/smithy-go v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect @@ -135,7 +136,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure 
v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect - github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/naoina/toml v0.1.1 github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -149,8 +150,8 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/net v0.10.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.15.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect diff --git a/go.sum b/go.sum index 6ad2b94db..138ab3eb4 100644 --- a/go.sum +++ b/go.sum @@ -70,16 +70,18 @@ github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= @@ -353,8 +355,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty 
v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -385,8 +388,8 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= -github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/naoina/toml v0.1.1 h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8= +github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= @@ -529,11 +532,11 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU= -golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -543,8 +546,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -572,8 +575,8 @@ golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -630,8 +633,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -643,8 +647,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time 
v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -664,8 +668,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -729,8 +733,8 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index cf86db82c..489266649 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -1456,9 +1456,6 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha b.AddTx(tx) txHashes[i] = tx.Hash() } - if i == 5 { - b.SetBlobGas(params.BlobTxBlobGasPerBlob) - } b.SetPoS() }) return backend, txHashes diff --git a/miner/ordering_test.go b/miner/ordering_test.go index ba3e7cc14..739717fa9 100644 --- a/miner/ordering_test.go +++ b/miner/ordering_test.go @@ -92,6 +92,8 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } expectedCount += count @@ -157,6 +159,8 @@ func TestTransactionTimeSort(t *testing.T) { Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } // Sort the transactions and cross check the nonce ordering diff --git a/miner/payload_building.go b/miner/payload_building.go index 1b0fde931..8166da834 100644 --- a/miner/payload_building.go +++ b/miner/payload_building.go @@ -35,13 +35,14 @@ import ( // Check engine-api specification for more details. 
// https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#payloadattributesv1 type BuildPayloadArgs struct { - Parent common.Hash // The parent block to build payload on top - Timestamp uint64 // The provided timestamp of generated payload - FeeRecipient common.Address // The provided recipient address for collecting transaction fee + Parent common.Hash // The parent block to build payload on top + Timestamp uint64 // The provided timestamp of generated payload + FeeRecipient common.Address // The provided recipient address for collecting transaction fee + Random common.Hash // The provided randomness value + Withdrawals types.Withdrawals // The provided withdrawals + BeaconRoot *common.Hash // The provided beaconRoot (Cancun) + Version engine.PayloadVersion // Versioning byte for payload id calculation. GasLimit uint64 - Random common.Hash // The provided randomness value - Withdrawals types.Withdrawals // The provided withdrawals - BeaconRoot *common.Hash // The provided beaconRoot (Cancun) BlockHook BlockHookFn } @@ -60,6 +61,7 @@ func (args *BuildPayloadArgs) Id() engine.PayloadID { } var out engine.PayloadID copy(out[:], hasher.Sum(nil)[:8]) + out[0] = byte(args.Version) return out } diff --git a/miner/worker.go b/miner/worker.go index a1f458f25..2033a9c32 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -320,9 +320,8 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus coinbase: builderCoinbase, flashbots: flashbots, } - - // Subscribe NewTxsEvent for tx pool - worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) + // Subscribe for transaction insertion events (whether from network or resurrects) + worker.txsSub = eth.TxPool().SubscribeTransactions(worker.txsCh, true) // Subscribe events for blockchain worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) @@ -619,11 +618,14 @@ func (w *worker) mainLoop() { for _, tx := range ev.Txs { acc, _ := types.Sender(w.current.signer, tx) txs[acc] = append(txs[acc], &txpool.LazyTransaction{ + Pool: w.eth.TxPool(), // We don't know where this came from, yolo resolve from everywhere Hash: tx.Hash(), - Tx: tx.WithoutBlobTxSidecar(), + Tx: nil, // Do *not* set this! We need to resolve it later to pull blobs in Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } txset := newTransactionsByPriceAndNonce(w.current.signer, txs, nil, nil, w.current.header.BaseFee) @@ -834,7 +836,6 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]* if tx.Type() == types.BlobTxType { return w.commitBlobTransaction(env, tx) } - receipt, err := w.applyTransaction(env, tx) if err != nil { return nil, err @@ -856,7 +857,6 @@ func (w *worker) commitBlobTransaction(env *environment, tx *types.Transaction) if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { return nil, errors.New("max data blobs reached") } - receipt, err := w.applyTransaction(env, tx) if err != nil { return nil, err @@ -1023,37 +1023,54 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn break } // Retrieve the next transaction and abort if all done. 
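The reworked selection loop below checks the cached Gas and BlobGas fields of a lazy transaction before calling Resolve, so oversized or evicted entries never trigger a blob fetch from the pool. The following self-contained illustration of that ordering uses stand-in types; lazyTx, pick and the sample values are hypothetical and not the txpool API.

package main

import "fmt"

// lazyTx is a stand-in for txpool.LazyTransaction: the cheap metadata travels
// with the handle, the full transaction is only materialised by resolve.
type lazyTx struct {
	hash    string
	gas     uint64
	blobGas uint64
	resolve func() string // empty result models an evicted transaction
}

// pick mirrors the ordering of checks in commitTransactions: reject on the
// cached gas fields first, and only resolve (which may pull blobs from the
// pool) for transactions that still fit the remaining block space.
func pick(txs []lazyTx, gasLeft, blobGasLeft uint64) []string {
	var included []string
	for _, ltx := range txs {
		if ltx.gas > gasLeft || ltx.blobGas > blobGasLeft {
			continue // skipped without ever touching the pool
		}
		tx := ltx.resolve()
		if tx == "" {
			continue // evicted between announcement and inclusion
		}
		included = append(included, tx)
		gasLeft -= ltx.gas
		blobGasLeft -= ltx.blobGas
	}
	return included
}

func main() {
	txs := []lazyTx{
		{hash: "a", gas: 21000, resolve: func() string { return "tx-a" }},
		{hash: "b", gas: 5_000_000, resolve: func() string { return "tx-b" }}, // exceeds the gas left
		{hash: "c", gas: 21000, resolve: func() string { return "" }},         // evicted from the pool
	}
	fmt.Println(pick(txs, 100_000, 0)) // [tx-a]
}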
- ltx := txs.Peek() - if ltx == nil { + txWithMinerFee := txs.Peek() + if txWithMinerFee == nil { break } - tx := ltx.Tx() - if tx == nil || tx.Resolve() == nil { - log.Warn("Ignoring evicted transaction") - txs.Pop() + + ltx := txWithMinerFee.Tx() + if ltx == nil { + log.Trace("Ignoring evicted transaction") continue } + // If we don't have enough space for the next transaction, skip the account. + if env.gasPool.Gas() < ltx.Gas { + log.Trace("Not enough gas left for transaction", "hash", ltx.Hash, "left", env.gasPool.Gas(), "needed", ltx.Gas) + txs.Pop() + continue + } + if left := uint64(params.MaxBlobGasPerBlock - env.blobs*params.BlobTxBlobGasPerBlob); left < ltx.BlobGas { + log.Trace("Not enough blob gas left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas) + txs.Pop() + continue + } + // Transaction seems to fit, pull it up from the pool + tx := ltx.Resolve() + if tx == nil { + log.Trace("Ignoring evicted transaction", "hash", ltx.Hash) + txs.Pop() + continue + } // Error may be ignored here. The error has already been checked // during transaction acceptance is the transaction pool. - from, _ := types.Sender(env.signer, tx.Tx) + from, _ := types.Sender(env.signer, tx) // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. - if tx.Tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { - log.Trace("Ignoring replay protected transaction", "hash", tx.Hash, "eip155", w.chainConfig.EIP155Block) + if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { + log.Trace("Ignoring replay protected transaction", "hash", ltx.Hash, "eip155", w.chainConfig.EIP155Block) txs.Pop() continue } - // Start executing the transaction - env.state.SetTxContext(tx.Hash, env.tcount) + env.state.SetTxContext(tx.Hash(), env.tcount) - logs, err := w.commitTransaction(env, tx.Tx) + logs, err := w.commitTransaction(env, tx) switch { case errors.Is(err, core.ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Tx.Nonce()) + log.Trace("Skipping transaction with low nonce", "hash", ltx.Hash, "sender", from, "nonce", tx.Nonce()) txs.Shift() case errors.Is(err, nil): @@ -1065,7 +1082,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn default: // Transaction is regarded as invalid, drop all consecutive transactions from // the same sender because of `nonce-too-high` clause. - log.Debug("Transaction failed, account skipped", "hash", tx.Hash, "err", err) + log.Debug("Transaction failed, account skipped", "hash", ltx.Hash, "err", err) txs.Pop() } } diff --git a/rpc/client_opt.go b/rpc/client_opt.go index 5bef08cca..3fa045a9b 100644 --- a/rpc/client_opt.go +++ b/rpc/client_opt.go @@ -34,7 +34,8 @@ type clientConfig struct { httpAuth HTTPAuth // WebSocket options - wsDialer *websocket.Dialer + wsDialer *websocket.Dialer + wsMessageSizeLimit *int64 // wsMessageSizeLimit nil = default, 0 = no limit // RPC handler options idgen func() ID @@ -66,6 +67,14 @@ func WithWebsocketDialer(dialer websocket.Dialer) ClientOption { }) } +// WithWebsocketMessageSizeLimit configures the websocket message size limit used by the RPC +// client. Passing a limit of 0 means no limit. 
@@ -66,6 +67,14 @@ func WithWebsocketDialer(dialer websocket.Dialer) ClientOption {
 	})
 }

+// WithWebsocketMessageSizeLimit configures the websocket message size limit used by the RPC
+// client. Passing a limit of 0 means no limit.
+func WithWebsocketMessageSizeLimit(messageSizeLimit int64) ClientOption {
+	return optionFunc(func(cfg *clientConfig) {
+		cfg.wsMessageSizeLimit = &messageSizeLimit
+	})
+}
+
 // WithHeader configures HTTP headers set by the RPC client. Headers set using this option
 // will be used for both HTTP and WebSocket connections.
 func WithHeader(key, value string) ClientOption {
diff --git a/rpc/server_test.go b/rpc/server_test.go
index 5d3929dfd..9d1c7fb5f 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -32,7 +32,8 @@ func TestServerRegisterName(t *testing.T) {
 	server := NewServer()
 	service := new(testService)

-	if err := server.RegisterName("test", service); err != nil {
+	svcName := "test"
+	if err := server.RegisterName(svcName, service); err != nil {
 		t.Fatalf("%v", err)
 	}

@@ -40,12 +41,12 @@ func TestServerRegisterName(t *testing.T) {
 		t.Fatalf("Expected 2 service entries, got %d", len(server.services.services))
 	}

-	svc, ok := server.services.services["test"]
+	svc, ok := server.services.services[svcName]
 	if !ok {
-		t.Fatalf("Expected service calc to be registered")
+		t.Fatalf("Expected service %s to be registered", svcName)
 	}

-	wantCallbacks := 13
+	wantCallbacks := 14
 	if len(svc.callbacks) != wantCallbacks {
 		t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks))
 	}
diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go
index eab67f1dd..7d873af66 100644
--- a/rpc/testservice_test.go
+++ b/rpc/testservice_test.go
@@ -90,6 +90,10 @@ func (s *testService) EchoWithCtx(ctx context.Context, str string, i int, args *
 	return echoResult{str, i, args}
 }

+func (s *testService) Repeat(msg string, i int) string {
+	return strings.Repeat(msg, i)
+}
+
 func (s *testService) PeerInfo(ctx context.Context) PeerInfo {
 	return PeerInfoFromContext(ctx)
 }
diff --git a/rpc/websocket.go b/rpc/websocket.go
index 86cf50594..538e53a31 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -38,7 +38,7 @@ const (
 	wsPingInterval     = 30 * time.Second
 	wsPingWriteTimeout = 5 * time.Second
 	wsPongTimeout      = 30 * time.Second
-	wsMessageSizeLimit = 32 * 1024 * 1024
+	wsDefaultReadLimit = 32 * 1024 * 1024
 )

 var wsBufferPool = new(sync.Pool)
@@ -60,7 +60,7 @@ func (s *Server) WebsocketHandler(allowedOrigins []string) http.Handler {
 			log.Debug("WebSocket upgrade failed", "err", err)
 			return
 		}
-		codec := newWebsocketCodec(conn, r.Host, r.Header)
+		codec := newWebsocketCodec(conn, r.Host, r.Header, wsDefaultReadLimit)
 		s.ServeCodec(codec, 0)
 	})
 }
@@ -251,7 +251,11 @@ func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, er
 			}
 			return nil, hErr
 		}
-		return newWebsocketCodec(conn, dialURL, header), nil
+		messageSizeLimit := int64(wsDefaultReadLimit)
+		if cfg.wsMessageSizeLimit != nil && *cfg.wsMessageSizeLimit >= 0 {
+			messageSizeLimit = *cfg.wsMessageSizeLimit
+		}
+		return newWebsocketCodec(conn, dialURL, header, messageSizeLimit), nil
 	}
 	return connect, nil
 }
@@ -283,8 +287,8 @@ type websocketCodec struct {
 	pongReceived chan struct{}
 }

-func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec {
-	conn.SetReadLimit(wsMessageSizeLimit)
+func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header, readLimit int64) ServerCodec {
+	conn.SetReadLimit(readLimit)
 	encode := func(v interface{}, isErrorResponse bool) error {
 		return conn.WriteJSON(v)
 	}
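// Example usage of the option added above, as a sketch: it assumes a go-ethereum
// build that contains this patch and a locally running node; the endpoint URL is a
// placeholder. DialOptions already exists in the rpc package; WithWebsocketMessageSizeLimit
// is the new option.
package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Raise the websocket read limit to 64 MiB instead of the 32 MiB default.
	client, err := rpc.DialOptions(context.Background(),
		"ws://127.0.0.1:8546", // placeholder endpoint
		rpc.WithWebsocketMessageSizeLimit(64*1024*1024))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var head map[string]interface{}
	if err := client.Call(&head, "eth_getBlockByNumber", "latest", false); err != nil {
		log.Fatal(err)
	}
	log.Println("latest block:", head["number"])
}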
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index fb9357605..e4ac5c3fa 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -113,6 +113,66 @@ func TestWebsocketLargeCall(t *testing.T) {
 	}
 }

+// This test checks whether the wsMessageSizeLimit option is obeyed.
+func TestWebsocketLargeRead(t *testing.T) {
+	t.Parallel()
+
+	var (
+		srv     = newTestServer()
+		httpsrv = httptest.NewServer(srv.WebsocketHandler([]string{"*"}))
+		wsURL   = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:")
+	)
+	defer srv.Stop()
+	defer httpsrv.Close()
+
+	testLimit := func(limit *int64) {
+		opts := []ClientOption{}
+		expLimit := int64(wsDefaultReadLimit)
+		if limit != nil && *limit >= 0 {
+			opts = append(opts, WithWebsocketMessageSizeLimit(*limit))
+			if *limit > 0 {
+				expLimit = *limit // 0 means infinite
+			}
+		}
+		client, err := DialOptions(context.Background(), wsURL, opts...)
+		if err != nil {
+			t.Fatalf("can't dial: %v", err)
+		}
+		defer client.Close()
+		// Remove some bytes for json encoding overhead.
+		underLimit := int(expLimit - 128)
+		overLimit := expLimit + 1
+		if expLimit == wsDefaultReadLimit {
+			// No point trying the full 32MB in tests. Just sanity-check that
+			// it's not obviously limited.
+			underLimit = 1024
+			overLimit = -1
+		}
+		var res string
+		// Check under limit
+		if err = client.Call(&res, "test_repeat", "A", underLimit); err != nil {
+			t.Fatalf("unexpected error with limit %d: %v", expLimit, err)
+		}
+		if len(res) != underLimit || strings.Count(res, "A") != underLimit {
+			t.Fatal("incorrect data")
+		}
+		// Check over limit
+		if overLimit > 0 {
+			err = client.Call(&res, "test_repeat", "A", expLimit+1)
+			if err == nil || err != websocket.ErrReadLimit {
+				t.Fatalf("wrong error with limit %d: %v expecting %v", expLimit, err, websocket.ErrReadLimit)
+			}
+		}
+	}
+	ptr := func(v int64) *int64 { return &v }
+
+	testLimit(ptr(-1)) // Should be ignored (use default)
+	testLimit(ptr(0))  // Should be ignored (use default)
+	testLimit(nil)     // Should be ignored (use default)
+	testLimit(ptr(200))
+	testLimit(ptr(wsDefaultReadLimit * 2))
+}
+
 func TestWebsocketPeerInfo(t *testing.T) {
 	var (
 		s = newTestServer()
@@ -206,7 +266,7 @@ func TestClientWebsocketLargeMessage(t *testing.T) {
 	defer srv.Stop()
 	defer httpsrv.Close()

-	respLength := wsMessageSizeLimit - 50
+	respLength := wsDefaultReadLimit - 50
 	srv.RegisterName("test", largeRespService{respLength})

 	c, err := DialWebsocket(context.Background(), wsURL, "")
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 35ccc7720..8078770e7 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -614,7 +614,9 @@ func benchGet(b *testing.B) {
 	k := make([]byte, 32)
 	for i := 0; i < benchElemCount; i++ {
 		binary.LittleEndian.PutUint64(k, uint64(i))
-		trie.MustUpdate(k, k)
+		v := make([]byte, 32)
+		binary.LittleEndian.PutUint64(v, uint64(i))
+		trie.MustUpdate(k, v)
 	}
 	binary.LittleEndian.PutUint64(k, benchElemCount/2)

@@ -630,8 +632,10 @@ func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
 	k := make([]byte, 32)
 	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
+		v := make([]byte, 32)
 		e.PutUint64(k, uint64(i))
-		trie.MustUpdate(k, k)
+		e.PutUint64(v, uint64(i))
+		trie.MustUpdate(k, v)
 	}
 	return trie
 }
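// A tiny, generic Go sketch (not trie code) of the slice-aliasing hazard that the
// benchmark change above avoids by building a fresh value buffer per iteration:
// if one buffer is reused as both key and value, any structure that keeps the value
// by reference sees later mutations of that buffer.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	store := map[string][]byte{} // stands in for any structure retaining the value slice
	k := make([]byte, 8)

	binary.LittleEndian.PutUint64(k, 1)
	store[string(k)] = k // the stored value aliases k

	binary.LittleEndian.PutUint64(k, 2) // reuse the buffer for the next key
	fmt.Println(store["\x01\x00\x00\x00\x00\x00\x00\x00"])
	// Prints [2 0 0 0 0 0 0 0]: the value recorded for key 1 has been overwritten.
}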