This repository has been archived by the owner on Oct 25, 2024. It is now read-only.

Commit

remove private txs from blobs
avalonche committed Feb 7, 2024
1 parent e50dd00 commit 001c352
Showing 8 changed files with 24 additions and 60 deletions.
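In short, this commit strips the private-transaction plumbing out of the blob pool: the privateTxs set, the PrivateTxLifetime config option, and the private parameter on the subpool Add/add methods all go away. A condensed before/after of the signatures touched by the hunks below (copied from the diff, not new API):

    // Before
    func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool, private bool) []error
    func (p *BlobPool) add(tx *types.Transaction, private bool) (err error)
    func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync, private bool) []error
    Add(txs []*types.Transaction, local bool, sync bool, private bool) []error // txpool.SubPool interface

    // After
    func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error
    func (p *BlobPool) add(tx *types.Transaction) (err error)
    func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error
    Add(txs []*types.Transaction, local bool, sync bool) []error // txpool.SubPool interface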
builder/local_relay_test.go (1 change: 0 additions & 1 deletion)
@@ -67,7 +67,6 @@ func testRequest(t *testing.T, localRelay *LocalRelay, method, path string, payl
req, err = http.NewRequest(method, path, nil)
} else {
payloadBytes, err2 := json.Marshal(payload)
- fmt.Println(string(payloadBytes))
require.NoError(t, err2)
req, err = http.NewRequest(method, path, bytes.NewReader(payloadBytes))
}
builder/relay.go (12 changes: 0 additions & 12 deletions)
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"net/http"
- "os"
"strings"
"sync"
"time"
@@ -153,17 +152,6 @@ func (r *RemoteRelay) SubmitBlock(msg *builderSpec.VersionedSubmitBlockRequest,
bodyBytes, err = msg.Capella.MarshalSSZ()
case spec.DataVersionDeneb:
bodyBytes, err = msg.Deneb.MarshalSSZ()
- if len(msg.Deneb.BlobsBundle.Blobs) > 0 {
- jsonBid, _ := msg.Deneb.MarshalJSON()
- err := os.WriteFile("/home/ubuntu/submitBlockPayloadDeneb_Goerli.json", jsonBid, 0644)
- if err != nil {
- fmt.Println("Error writing JSON to file:", err)
- }
- err = os.WriteFile("/home/ubuntu/submitBlockPayloadDeneb_Goerli.ssz", bodyBytes, 0644)
- if err != nil {
- fmt.Println("Error writing SSZ to file:", err)
- }
- }
default:
return fmt.Errorf("unknown data version %d", msg.Version)
}
core/txpool/blobpool/blobpool.go (35 changes: 10 additions & 25 deletions)
@@ -315,8 +315,6 @@ type BlobPool struct {
insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)

lock sync.RWMutex // Mutex protecting the pool during reorg handling

- privateTxs *types.TimestampedTxHashSet
}

// New creates a new blob transaction pool to gather, sort and filter inbound
@@ -327,13 +325,12 @@ func New(config Config, chain BlockChain) *BlobPool {

// Create the transaction pool with its initial settings
return &BlobPool{
- config: config,
- signer: types.LatestSigner(chain.Config()),
- chain: chain,
- lookup: make(map[common.Hash]uint64),
- index: make(map[common.Address][]*blobTxMeta),
- spent: make(map[common.Address]*uint256.Int),
- privateTxs: types.NewExpiringTxHashSet(config.PrivateTxLifetime),
+ config: config,
+ signer: types.LatestSigner(chain.Config()),
+ chain: chain,
+ lookup: make(map[common.Hash]uint64),
+ index: make(map[common.Address][]*blobTxMeta),
+ spent: make(map[common.Address]*uint256.Int),
}
}

@@ -528,7 +525,6 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6

// Included transactions blobs need to be moved to the limbo
if filled && inclusions != nil {
- p.privateTxs.Remove(txs[i].hash)
p.offload(addr, txs[i].nonce, txs[i].id, inclusions)
}
}
@@ -568,7 +564,6 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6

// Included transactions blobs need to be moved to the limbo
if inclusions != nil {
- p.privateTxs.Remove(txs[0].hash)
p.offload(addr, txs[0].nonce, txs[0].id, inclusions)
}
txs = txs[1:]
@@ -797,9 +792,6 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {

basefeeGauge.Update(int64(basefee.Uint64()))
blobfeeGauge.Update(int64(blobfee.Uint64()))

- p.privateTxs.Prune()

p.updateStorageMetrics()
}

@@ -1170,14 +1162,14 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {

// Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restictions).
- func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool, private bool) []error {
+ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
var (
adds = make([]*types.Transaction, 0, len(txs))
errs = make([]error, len(txs))
)
for i, tx := range txs {
- errs[i] = p.add(tx, private)
- if errs[i] == nil && !private {
+ errs[i] = p.add(tx)
+ if errs[i] == nil {
adds = append(adds, tx.WithoutBlobTxSidecar())
}
}
@@ -1190,7 +1182,7 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool, private

// Add inserts a new blob transaction into the pool if it passes validation (both
// consensus validity and pool restictions).
- func (p *BlobPool) add(tx *types.Transaction, private bool) (err error) {
+ func (p *BlobPool) add(tx *types.Transaction) (err error) {
// The blob pool blocks on adding a transaction. This is because blob txs are
// only even pulled form the network, so this method will act as the overload
// protection for fetches.
@@ -1208,7 +1200,6 @@ func (p *BlobPool) add(tx *types.Transaction, private bool) (err error) {
log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
return err
}

// If the address is not yet known, request exclusivity to track the account
// only by this subpool until all transactions are evicted
from, _ := types.Sender(p.signer, tx) // already validated above
@@ -1241,11 +1232,6 @@
}
meta := newBlobTxMeta(id, p.store.Size(id), tx)

- // Track private transactions, so they don't get leaked to the public mempool
- if private {
- p.privateTxs.Add(tx.Hash())
- }
-
var (
next = p.state.GetNonce(from)
offset = int(tx.Nonce() - next)
@@ -1419,7 +1405,6 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr
GasTipCap: tx.execTipCap.ToBig(),
Gas: tx.execGas,
BlobGas: tx.blobGas,
- GasPrice: tx.execFeeCap.ToBig(),
})
}
if len(lazies) > 0 {
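For callers of the blob pool, the only visible change is the dropped private argument. A minimal hedged sketch of adding a blob transaction after this commit; pool and signedTx are assumed to exist and are placeholders, not code from this diff:

    // Sketch only: assumes an existing *blobpool.BlobPool (pool) and a signed
    // blob transaction (signedTx); both names are hypothetical placeholders.
    errs := pool.Add([]*types.Transaction{signedTx}, false /* local */, true /* sync */)
    if errs[0] != nil {
        log.Warn("blob transaction rejected", "hash", signedTx.Hash(), "err", errs[0])
    }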
core/txpool/blobpool/blobpool_test.go (2 changes: 1 addition & 1 deletion)
@@ -1227,7 +1227,7 @@ func TestAdd(t *testing.T) {
// Add each transaction one by one, verifying the pool internals in between
for j, add := range tt.adds {
signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
- if err := pool.add(signed, false); !errors.Is(err, add.err) {
+ if err := pool.add(signed); !errors.Is(err, add.err) {
t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
}
verifyPoolInternals(t, pool)
core/txpool/blobpool/config.go (20 changes: 6 additions & 14 deletions)
@@ -17,25 +17,21 @@
package blobpool

import (
- "time"
-
"github.com/ethereum/go-ethereum/log"
)

// Config are the configuration parameters of the blob transaction pool.
type Config struct {
- Datadir string // Data directory containing the currently executable blobs
- Datacap uint64 // Soft-cap of database storage (hard cap is larger due to overhead)
- PriceBump uint64 // Minimum price bump percentage to replace an already existing nonce
- PrivateTxLifetime time.Duration // Maximum amount of time to keep private transactions private
+ Datadir string // Data directory containing the currently executable blobs
+ Datacap uint64 // Soft-cap of database storage (hard cap is larger due to overhead)
+ PriceBump uint64 // Minimum price bump percentage to replace an already existing nonce
}

// DefaultConfig contains the default configurations for the transaction pool.
var DefaultConfig = Config{
- Datadir: "blobpool",
- Datacap: 10 * 1024 * 1024 * 1024,
- PriceBump: 100, // either have patience or be aggressive, no mushy ground
- PrivateTxLifetime: 3 * 24 * time.Hour,
+ Datadir: "blobpool",
+ Datacap: 10 * 1024 * 1024 * 1024,
+ PriceBump: 100, // either have patience or be aggressive, no mushy ground
}

// sanitize checks the provided user configurations and changes anything that's
@@ -50,9 +46,5 @@ func (config *Config) sanitize() Config {
log.Warn("Sanitizing invalid blobpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
conf.PriceBump = DefaultConfig.PriceBump
}
- if conf.PrivateTxLifetime < 1 {
- log.Warn("Sanitizing invalid txpool private tx lifetime", "provided", conf.PrivateTxLifetime, "updated", DefaultConfig.PrivateTxLifetime)
- conf.PrivateTxLifetime = DefaultConfig.PrivateTxLifetime
- }
return conf
}
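With PrivateTxLifetime removed, a blob pool configuration carries only the data directory, the data cap and the price bump. A hedged sketch mirroring DefaultConfig above; the chain value is assumed to satisfy blobpool.BlockChain and is not shown here:

    // Sketch only: field values mirror DefaultConfig; chain is a placeholder.
    cfg := blobpool.Config{
        Datadir:   "blobpool",
        Datacap:   10 * 1024 * 1024 * 1024, // ~10 GiB soft cap
        PriceBump: 100,                     // minimum replacement bump, in percent
    }
    pool := blobpool.New(cfg, chain) // PrivateTxLifetime is no longer a field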
core/txpool/legacypool/legacypool.go (10 changes: 5 additions & 5 deletions)
@@ -918,7 +918,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
- return pool.Add(txs, !pool.config.NoLocals, true, false)
+ return pool.Add(txs, !pool.config.NoLocals, true)
}

// addLocal enqueues a single local transaction into the pool if it is valid. This is
@@ -933,7 +933,7 @@ func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
- return pool.Add(txs, false, false, false)
+ return pool.Add(txs, false, false)
}

// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
@@ -944,20 +944,20 @@ func (pool *LegacyPool) addRemote(tx *types.Transaction) error {

// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
- return pool.Add(txs, false, true, false)
+ return pool.Add(txs, false, true)
}

// This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
- return pool.Add([]*types.Transaction{tx}, false, true, false)[0]
+ return pool.Add([]*types.Transaction{tx}, false, true)[0]
}

// Add enqueues a batch of transactions into the pool if they are valid. Depending
// on the local flag, full pricing constraints will or will not be applied.
//
// If sync is set, the method will block until all internal maintenance related
// to the add is finished. Only use this during tests for determinism!
- func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync, private bool) []error {
+ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error {
// Do not treat as local if local transactions have been disabled
local = local && !pool.config.NoLocals

core/txpool/subpool.go (2 changes: 1 addition & 1 deletion)
@@ -105,7 +105,7 @@ type SubPool interface {
// Add enqueues a batch of transactions into the pool if they are valid. Due
// to the large transaction churn, add may postpone fully integrating the tx
// to a later point to batch multiple ones together.
- Add(txs []*types.Transaction, local bool, sync bool, private bool) []error
+ Add(txs []*types.Transaction, local bool, sync bool) []error

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
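Both concrete pools are updated in lockstep with this interface change. One way to see that at a glance is a compile-time assertion; this is illustrative and not part of the commit:

    // Illustrative only: both pools must still satisfy txpool.SubPool with the
    // narrower Add signature introduced by this commit.
    var (
        _ txpool.SubPool = (*blobpool.BlobPool)(nil)
        _ txpool.SubPool = (*legacypool.LegacyPool)(nil)
    )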
core/txpool/txpool.go (2 changes: 1 addition & 1 deletion)
@@ -357,7 +357,7 @@ func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool, private bo
// back the errors into the original sort order.
errsets := make([][]error, len(p.subpools))
for i := 0; i < len(p.subpools); i++ {
- errsets[i] = p.subpools[i].Add(txsets[i], local, sync, private)
+ errsets[i] = p.subpools[i].Add(txsets[i], local, sync)
}
errs := make([]error, len(txs))
for i, split := range splits {
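Note that the outer TxPool.Add keeps its private parameter (it is unchanged in the hunk context above); only the fan-out into the subpools drops the flag, so any remaining private handling presumably lives at the TxPool level, outside the hunks shown here. The resulting asymmetry, shown with stand-in function types that are illustrative and not real declarations from the codebase:

    // Stand-in shapes only, to highlight the asymmetry after this commit.
    type subPoolAdd func(txs []*types.Transaction, local, sync bool) []error         // subpool level: no private flag
    type txPoolAdd func(txs []*types.Transaction, local, sync, private bool) []error // pool level: unchanged in this diff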
