diff --git a/builder/local_relay_test.go b/builder/local_relay_test.go
index 268e222ea..ff68399cd 100644
--- a/builder/local_relay_test.go
+++ b/builder/local_relay_test.go
@@ -67,7 +67,6 @@ func testRequest(t *testing.T, localRelay *LocalRelay, method, path string, payl
 		req, err = http.NewRequest(method, path, nil)
 	} else {
 		payloadBytes, err2 := json.Marshal(payload)
-		fmt.Println(string(payloadBytes))
 		require.NoError(t, err2)
 		req, err = http.NewRequest(method, path, bytes.NewReader(payloadBytes))
 	}
diff --git a/builder/relay.go b/builder/relay.go
index 44caad71a..181f03b4d 100644
--- a/builder/relay.go
+++ b/builder/relay.go
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
-	"os"
 	"strings"
 	"sync"
 	"time"
@@ -153,17 +152,6 @@ func (r *RemoteRelay) SubmitBlock(msg *builderSpec.VersionedSubmitBlockRequest,
 		bodyBytes, err = msg.Capella.MarshalSSZ()
 	case spec.DataVersionDeneb:
 		bodyBytes, err = msg.Deneb.MarshalSSZ()
-		if len(msg.Deneb.BlobsBundle.Blobs) > 0 {
-			jsonBid, _ := msg.Deneb.MarshalJSON()
-			err := os.WriteFile("/home/ubuntu/submitBlockPayloadDeneb_Goerli.json", jsonBid, 0644)
-			if err != nil {
-				fmt.Println("Error writing JSON to file:", err)
-			}
-			err = os.WriteFile("/home/ubuntu/submitBlockPayloadDeneb_Goerli.ssz", bodyBytes, 0644)
-			if err != nil {
-				fmt.Println("Error writing SSZ to file:", err)
-			}
-		}
 	default:
 		return fmt.Errorf("unknown data version %d", msg.Version)
 	}
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index 272c4b362..f4162acac 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -315,8 +315,6 @@ type BlobPool struct {
 	insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
 
 	lock sync.RWMutex // Mutex protecting the pool during reorg handling
-
-	privateTxs *types.TimestampedTxHashSet
 }
 
 // New creates a new blob transaction pool to gather, sort and filter inbound
@@ -327,13 +325,12 @@ func New(config Config, chain BlockChain) *BlobPool {
 
 	// Create the transaction pool with its initial settings
 	return &BlobPool{
-		config:     config,
-		signer:     types.LatestSigner(chain.Config()),
-		chain:      chain,
-		lookup:     make(map[common.Hash]uint64),
-		index:      make(map[common.Address][]*blobTxMeta),
-		spent:      make(map[common.Address]*uint256.Int),
-		privateTxs: types.NewExpiringTxHashSet(config.PrivateTxLifetime),
+		config: config,
+		signer: types.LatestSigner(chain.Config()),
+		chain:  chain,
+		lookup: make(map[common.Hash]uint64),
+		index:  make(map[common.Address][]*blobTxMeta),
+		spent:  make(map[common.Address]*uint256.Int),
 	}
 }
 
@@ -528,7 +525,6 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 
 			// Included transactions blobs need to be moved to the limbo
 			if filled && inclusions != nil {
-				p.privateTxs.Remove(txs[i].hash)
 				p.offload(addr, txs[i].nonce, txs[i].id, inclusions)
 			}
 		}
@@ -568,7 +564,6 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 
 			// Included transactions blobs need to be moved to the limbo
 			if inclusions != nil {
-				p.privateTxs.Remove(txs[0].hash)
 				p.offload(addr, txs[0].nonce, txs[0].id, inclusions)
 			}
 			txs = txs[1:]
@@ -797,9 +792,6 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
 
 	basefeeGauge.Update(int64(basefee.Uint64()))
 	blobfeeGauge.Update(int64(blobfee.Uint64()))
-
-	p.privateTxs.Prune()
-
 	p.updateStorageMetrics()
 }
 
@@ -1170,14 +1162,14 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
 
 // Add inserts a set of blob transactions into the pool if they pass validation (both
 // consensus validity and pool restictions).
-func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool, private bool) []error {
+func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
 	var (
 		adds = make([]*types.Transaction, 0, len(txs))
 		errs = make([]error, len(txs))
 	)
 	for i, tx := range txs {
-		errs[i] = p.add(tx, private)
-		if errs[i] == nil && !private {
+		errs[i] = p.add(tx)
+		if errs[i] == nil {
 			adds = append(adds, tx.WithoutBlobTxSidecar())
 		}
 	}
@@ -1190,7 +1182,7 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool, private
 
 // Add inserts a new blob transaction into the pool if it passes validation (both
 // consensus validity and pool restictions).
-func (p *BlobPool) add(tx *types.Transaction, private bool) (err error) {
+func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	// The blob pool blocks on adding a transaction. This is because blob txs are
 	// only even pulled form the network, so this method will act as the overload
 	// protection for fetches.
@@ -1208,7 +1200,6 @@ func (p *BlobPool) add(tx *types.Transaction, private bool) (err error) {
 		log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
 		return err
 	}
-
 	// If the address is not yet known, request exclusivity to track the account
 	// only by this subpool until all transactions are evicted
 	from, _ := types.Sender(p.signer, tx) // already validated above
@@ -1241,11 +1232,6 @@ func (p *BlobPool) add(tx *types.Transaction, private bool) (err error) {
 	}
 	meta := newBlobTxMeta(id, p.store.Size(id), tx)
 
-	// Track private transactions, so they don't get leaked to the public mempool
-	if private {
-		p.privateTxs.Add(tx.Hash())
-	}
-
 	var (
 		next   = p.state.GetNonce(from)
 		offset = int(tx.Nonce() - next)
@@ -1419,7 +1405,6 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr
 				GasTipCap: tx.execTipCap.ToBig(),
 				Gas:       tx.execGas,
 				BlobGas:   tx.blobGas,
-				GasPrice:  tx.execFeeCap.ToBig(),
 			})
 		}
 		if len(lazies) > 0 {
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index d5792fc73..7dd5ad4b2 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -1227,7 +1227,7 @@ func TestAdd(t *testing.T) {
 		// Add each transaction one by one, verifying the pool internals in between
 		for j, add := range tt.adds {
 			signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
-			if err := pool.add(signed, false); !errors.Is(err, add.err) {
+			if err := pool.add(signed); !errors.Is(err, add.err) {
 				t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
 			}
 			verifyPoolInternals(t, pool)
diff --git a/core/txpool/blobpool/config.go b/core/txpool/blobpool/config.go
index 2c1d55480..99a2002a3 100644
--- a/core/txpool/blobpool/config.go
+++ b/core/txpool/blobpool/config.go
@@ -17,25 +17,21 @@
 package blobpool
 
 import (
-	"time"
-
 	"github.com/ethereum/go-ethereum/log"
 )
 
 // Config are the configuration parameters of the blob transaction pool.
 type Config struct {
-	Datadir           string        // Data directory containing the currently executable blobs
-	Datacap           uint64        // Soft-cap of database storage (hard cap is larger due to overhead)
-	PriceBump         uint64        // Minimum price bump percentage to replace an already existing nonce
-	PrivateTxLifetime time.Duration // Maximum amount of time to keep private transactions private
+	Datadir   string // Data directory containing the currently executable blobs
+	Datacap   uint64 // Soft-cap of database storage (hard cap is larger due to overhead)
+	PriceBump uint64 // Minimum price bump percentage to replace an already existing nonce
 }
 
 // DefaultConfig contains the default configurations for the transaction pool.
 var DefaultConfig = Config{
-	Datadir:           "blobpool",
-	Datacap:           10 * 1024 * 1024 * 1024,
-	PriceBump:         100, // either have patience or be aggressive, no mushy ground
-	PrivateTxLifetime: 3 * 24 * time.Hour,
+	Datadir:   "blobpool",
+	Datacap:   10 * 1024 * 1024 * 1024,
+	PriceBump: 100, // either have patience or be aggressive, no mushy ground
 }
 
 // sanitize checks the provided user configurations and changes anything that's
@@ -50,9 +46,5 @@ func (config *Config) sanitize() Config {
 		log.Warn("Sanitizing invalid blobpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
 		conf.PriceBump = DefaultConfig.PriceBump
 	}
-	if conf.PrivateTxLifetime < 1 {
-		log.Warn("Sanitizing invalid txpool private tx lifetime", "provided", conf.PrivateTxLifetime, "updated", DefaultConfig.PrivateTxLifetime)
-		conf.PrivateTxLifetime = DefaultConfig.PrivateTxLifetime
-	}
 	return conf
 }
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index 6e835845a..606ba0f01 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -918,7 +918,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ
 // This method is used to add transactions from the RPC API and performs synchronous pool
 // reorganization and event propagation.
 func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
-	return pool.Add(txs, !pool.config.NoLocals, true, false)
+	return pool.Add(txs, !pool.config.NoLocals, true)
 }
 
 // addLocal enqueues a single local transaction into the pool if it is valid. This is
@@ -933,7 +933,7 @@ func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
 // This method is used to add transactions from the p2p network and does not wait for pool
 // reorganization and internal event propagation.
 func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
-	return pool.Add(txs, false, false, false)
+	return pool.Add(txs, false, false)
 }
 
 // addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
@@ -944,12 +944,12 @@ func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
 
 // addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
 func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
-	return pool.Add(txs, false, true, false)
+	return pool.Add(txs, false, true)
 }
 
 // This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
 func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
-	return pool.Add([]*types.Transaction{tx}, false, true, false)[0]
+	return pool.Add([]*types.Transaction{tx}, false, true)[0]
 }
 
 // Add enqueues a batch of transactions into the pool if they are valid. Depending
@@ -957,7 +957,7 @@ func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
 //
 // If sync is set, the method will block until all internal maintenance related
 // to the add is finished. Only use this during tests for determinism!
-func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync, private bool) []error {
+func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error {
 	// Do not treat as local if local transactions have been disabled
 	local = local && !pool.config.NoLocals
 
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
index 265ef5644..c340d46ec 100644
--- a/core/txpool/subpool.go
+++ b/core/txpool/subpool.go
@@ -105,7 +105,7 @@ type SubPool interface {
 	// Add enqueues a batch of transactions into the pool if they are valid. Due
 	// to the large transaction churn, add may postpone fully integrating the tx
 	// to a later point to batch multiple ones together.
-	Add(txs []*types.Transaction, local bool, sync bool, private bool) []error
+	Add(txs []*types.Transaction, local bool, sync bool) []error
 
 	// Pending retrieves all currently processable transactions, grouped by origin
 	// account and sorted by nonce.
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index 5746179d1..8a0d59336 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -357,7 +357,7 @@ func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool, private bo
 	// back the errors into the original sort order.
 	errsets := make([][]error, len(p.subpools))
 	for i := 0; i < len(p.subpools); i++ {
-		errsets[i] = p.subpools[i].Add(txsets[i], local, sync, private)
+		errsets[i] = p.subpools[i].Add(txsets[i], local, sync)
 	}
 	errs := make([]error, len(txs))
 	for i, split := range splits {
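
Editor's note, not part of the patch: a minimal Go sketch of the fan-out shape that the final hunk leaves in place. The SubPool interface below mirrors the narrowed three-argument Add from core/txpool/subpool.go, and the split/add/merge flow follows the visible txpool.go hunk; Tx, TxPool and the other names here are hypothetical stand-ins rather than the real go-ethereum types.

// Illustrative sketch only: hypothetical stand-ins for the go-ethereum
// txpool types, showing the narrowed three-argument SubPool.Add.
package sketch

import "errors"

// Tx is a hypothetical stand-in for *types.Transaction.
type Tx struct {
	Nonce uint64
}

// SubPool mirrors the reduced interface: the per-subpool Add no longer
// carries a private flag.
type SubPool interface {
	// Filter reports whether this subpool handles the given transaction.
	Filter(tx *Tx) bool
	// Add enqueues a batch of transactions, returning one error per input.
	Add(txs []*Tx, local bool, sync bool) []error
}

// TxPool fans a batch out to whichever subpool claims each transaction and
// folds the per-subpool errors back into the caller's original order.
type TxPool struct {
	subpools []SubPool
}

func (p *TxPool) Add(txs []*Tx, local, sync bool) []error {
	var (
		txsets = make([][]*Tx, len(p.subpools)) // per-subpool batches
		splits = make([]int, len(txs))          // which subpool each tx went to
	)
	for i, tx := range txs {
		splits[i] = -1
		for j, sub := range p.subpools {
			if sub.Filter(tx) {
				txsets[j] = append(txsets[j], tx)
				splits[i] = j
				break
			}
		}
	}
	// Each subpool now sees only (txs, local, sync).
	errsets := make([][]error, len(p.subpools))
	for i := range p.subpools {
		errsets[i] = p.subpools[i].Add(txsets[i], local, sync)
	}
	// Stitch the per-subpool results back into the original sort order.
	errs := make([]error, len(txs))
	for i, split := range splits {
		if split < 0 {
			errs[i] = errors.New("transaction rejected by all subpools")
			continue
		}
		errs[i] = errsets[split][0]
		errsets[split] = errsets[split][1:]
	}
	return errs
}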