From cd74c757586050d3d42122c640d500e07c146653 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 27 Sep 2023 18:06:24 -0500 Subject: [PATCH] Builder Efficient reverts (#90) * Initial commit for efficient revert experimentation * Add additional unit tests and fix unit test errors, reduce duplicate code, add support for sbundles, expose CLI flag and environment variable to enable multi-tx-snapshots * Update godoc, remove unused getter for access list, add CLI flag to builder flag list, update log level for multi-tx-snap error * Add retry logic for multi-tx-snapshot block build algorithm * Update unit tests to test EnableMultiTxSnap * Change account touch tracer to access list tracer for env changes * Update greedy builder to use passed in algorithm configuration rather than default * Add new multi-transaction snapshot stack to support more than one active snapshot, useful for cases like nested bundle applying and rollback, optional bundle discard, and bundle merging * Clean up code, add comprehensive stack tests with fuzzing, fix edge cases where merge operation for stack commit was not properly updated * Add refund support to efficient revert so state returns to correct refund value on discard Address PR feedback, separate block build function initializing to de-dupe logic, split types into separate definitions Add panic if copy is performed with non-empty stack of snapshots Update env changes method name Fix unit test and update log to trace Remove refund in case it causes bad state * Add unit tests for state comparison, potential fix for gas and root mismatch through snapshot revert and removal of tx rollback * Debug commit * DRY profit logic * Revert commit This rolls back to commit before fc32a84e178b32581d118f7cf26055de5606d5dd. * Add fuzz state using state smart contract, add Copy method for multi-tx, pass gas limit for test setup, add abigen bindings and abi for compiled state fuzz test smart contract * fix a bug with state reverts of accounts that are not touched according to the journal (#102) * Simplify test * Use different builders instead of configuration switch since major refactor required to handle more dynamic configurations. 
* Update env changes to reduce redundancy and make control flow easier to follow * Remove debug validation * Update comments, add touch change to state fuzz test smart contract * Add fuzz tests for transient storage and account touch operations * Remove unused code --------- Co-authored-by: Vitaly Drogan --- core/state/multi_tx_snapshot.go | 692 ++++++++++++++ core/state/multi_tx_snapshot_test.go | 929 +++++++++++++++++++ core/state/state_object.go | 3 + core/state/statedb.go | 40 + miner/algo_common.go | 550 +++-------- miner/algo_common_test.go | 38 +- miner/algo_greedy.go | 3 +- miner/algo_greedy_buckets.go | 30 +- miner/algo_greedy_buckets_multisnap.go | 241 +++++ miner/algo_greedy_multisnap.go | 134 +++ miner/algo_greedy_test.go | 18 +- miner/algo_state_test.go | 1056 ++++++++++++++++++++++ miner/algo_test.go | 30 +- miner/contract_simulator_test.go | 248 ++--- miner/env_changes.go | 427 +++++++++ miner/env_changes_test.go | 390 ++++++++ miner/environment_diff.go | 417 +++++++++ miner/miner.go | 12 +- miner/multi_worker.go | 2 +- miner/sbundle_test.go | 2 +- miner/state_fuzz_test_abigen_bindings.go | 453 ++++++++++ miner/testdata/state_fuzz_test.abi | 1 + miner/verify_bundles_test.go | 6 +- miner/worker.go | 42 +- 24 files changed, 5137 insertions(+), 627 deletions(-) create mode 100644 core/state/multi_tx_snapshot.go create mode 100644 core/state/multi_tx_snapshot_test.go create mode 100644 miner/algo_greedy_buckets_multisnap.go create mode 100644 miner/algo_greedy_multisnap.go create mode 100644 miner/algo_state_test.go create mode 100644 miner/env_changes.go create mode 100644 miner/env_changes_test.go create mode 100644 miner/environment_diff.go create mode 100644 miner/state_fuzz_test_abigen_bindings.go create mode 100644 miner/testdata/state_fuzz_test.abi diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go new file mode 100644 index 0000000000..f70842a98f --- /dev/null +++ b/core/state/multi_tx_snapshot.go @@ -0,0 +1,692 @@ +package state + +import ( + "errors" + "fmt" + "math/big" + "reflect" + + "github.com/ethereum/go-ethereum/common" +) + +// MultiTxSnapshot retains StateDB changes for multiple transactions. 
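The type declared just below records, for every account the journal touches, the value each field held before its first modification inside the snapshot's lifetime; reverting simply writes those oldest values back. A minimal, self-contained sketch of that record-first-previous-value idea, shown here for a single balance map (all names are illustrative, not from this patch):

```go
package main

import (
	"fmt"
	"math/big"
)

// snapshot keeps only the first previous value seen per account.
type snapshot struct {
	prevBalance map[string]*big.Int
}

func (s *snapshot) onBalanceChange(addr string, prev *big.Int) {
	if _, ok := s.prevBalance[addr]; !ok {
		s.prevBalance[addr] = prev // only the oldest value matters for revert
	}
}

func (s *snapshot) revert(balances map[string]*big.Int) {
	for addr, prev := range s.prevBalance {
		balances[addr] = prev
	}
}

func main() {
	balances := map[string]*big.Int{"a": big.NewInt(10)}
	s := &snapshot{prevBalance: map[string]*big.Int{}}

	s.onBalanceChange("a", new(big.Int).Set(balances["a"]))
	balances["a"] = big.NewInt(42) // tx 1
	s.onBalanceChange("a", new(big.Int).Set(balances["a"]))
	balances["a"] = big.NewInt(7) // tx 2

	s.revert(balances)
	fmt.Println(balances["a"]) // 10
}
```

The real snapshot applies the same rule per field: balance, nonce, code, code hash, suicided/deleted flags, and individual storage slots.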
+type MultiTxSnapshot struct { + invalid bool + + numLogsAdded map[common.Hash]int + + prevObjects map[common.Address]*stateObject + + accountStorage map[common.Address]map[common.Hash]*common.Hash + accountBalance map[common.Address]*big.Int + accountNonce map[common.Address]uint64 + accountCode map[common.Address][]byte + accountCodeHash map[common.Address][]byte + + accountSuicided map[common.Address]bool + accountDeleted map[common.Address]bool + + accountNotPending map[common.Address]struct{} + accountNotDirty map[common.Address]struct{} + + // touched accounts are accounts that can be affected when snapshot is reverted + // we clear dirty storage for touched accounts when snapshot is reverted + touchedAccounts map[common.Address]struct{} + + // TODO: snapdestructs, snapaccount storage +} + +// NewMultiTxSnapshot creates a new MultiTxSnapshot +func NewMultiTxSnapshot() *MultiTxSnapshot { + multiTxSnapshot := newMultiTxSnapshot() + return &multiTxSnapshot +} + +func newMultiTxSnapshot() MultiTxSnapshot { + return MultiTxSnapshot{ + numLogsAdded: make(map[common.Hash]int), + prevObjects: make(map[common.Address]*stateObject), + accountStorage: make(map[common.Address]map[common.Hash]*common.Hash), + accountBalance: make(map[common.Address]*big.Int), + accountNonce: make(map[common.Address]uint64), + accountCode: make(map[common.Address][]byte), + accountCodeHash: make(map[common.Address][]byte), + accountSuicided: make(map[common.Address]bool), + accountDeleted: make(map[common.Address]bool), + accountNotPending: make(map[common.Address]struct{}), + accountNotDirty: make(map[common.Address]struct{}), + touchedAccounts: make(map[common.Address]struct{}), + } +} + +func (s MultiTxSnapshot) Copy() MultiTxSnapshot { + newSnapshot := newMultiTxSnapshot() + newSnapshot.invalid = s.invalid + + for txHash, numLogs := range s.numLogsAdded { + newSnapshot.numLogsAdded[txHash] = numLogs + } + + for address, object := range s.prevObjects { + newSnapshot.prevObjects[address] = object + } + + for address, storage := range s.accountStorage { + newSnapshot.accountStorage[address] = make(map[common.Hash]*common.Hash) + for key, value := range storage { + newSnapshot.accountStorage[address][key] = value + } + } + + for address, balance := range s.accountBalance { + newSnapshot.accountBalance[address] = balance + } + + for address, nonce := range s.accountNonce { + newSnapshot.accountNonce[address] = nonce + } + + for address, code := range s.accountCode { + newSnapshot.accountCode[address] = code + } + + for address, codeHash := range s.accountCodeHash { + newSnapshot.accountCodeHash[address] = codeHash + } + + for address, suicided := range s.accountSuicided { + newSnapshot.accountSuicided[address] = suicided + } + + for address, deleted := range s.accountDeleted { + newSnapshot.accountDeleted[address] = deleted + } + + for address := range s.accountNotPending { + newSnapshot.accountNotPending[address] = struct{}{} + } + + for address := range s.accountNotDirty { + newSnapshot.accountNotDirty[address] = struct{}{} + } + + for address := range s.touchedAccounts { + newSnapshot.touchedAccounts[address] = struct{}{} + } + + return newSnapshot +} + +// Equal returns true if the two MultiTxSnapshot are equal +func (s *MultiTxSnapshot) Equal(other *MultiTxSnapshot) bool { + if other == nil { + return false + } + if s.invalid != other.invalid { + return false + } + + visited := make(map[common.Address]bool) + for address, obj := range other.prevObjects { + current, exist := s.prevObjects[address] + if !exist { 
+ return false + } + if current == nil && obj != nil { + return false + } + + if current != nil && obj == nil { + return false + } + + visited[address] = true + } + + for address, obj := range s.prevObjects { + if visited[address] { + continue + } + + otherObject, exist := other.prevObjects[address] + if !exist { + return false + } + + if otherObject == nil && obj != nil { + return false + } + + if otherObject != nil && obj == nil { + return false + } + } + + return reflect.DeepEqual(s.numLogsAdded, other.numLogsAdded) && + reflect.DeepEqual(s.accountStorage, other.accountStorage) && + reflect.DeepEqual(s.accountBalance, other.accountBalance) && + reflect.DeepEqual(s.accountNonce, other.accountNonce) && + reflect.DeepEqual(s.accountCode, other.accountCode) && + reflect.DeepEqual(s.accountCodeHash, other.accountCodeHash) && + reflect.DeepEqual(s.accountSuicided, other.accountSuicided) && + reflect.DeepEqual(s.accountDeleted, other.accountDeleted) && + reflect.DeepEqual(s.accountNotPending, other.accountNotPending) && + reflect.DeepEqual(s.accountNotDirty, other.accountNotDirty) && + reflect.DeepEqual(s.touchedAccounts, other.touchedAccounts) +} + +// updateFromJournal updates the snapshot with the changes from the journal. +func (s *MultiTxSnapshot) updateFromJournal(journal *journal) { + for _, journalEntry := range journal.entries { + switch entry := journalEntry.(type) { + case balanceChange: + s.updateBalanceChange(entry) + case nonceChange: + s.updateNonceChange(entry) + case codeChange: + s.updateCodeChange(entry) + case addLogChange: + s.numLogsAdded[entry.txhash]++ + case createObjectChange: + s.updateCreateObjectChange(entry) + case resetObjectChange: + s.updateResetObjectChange(entry) + case suicideChange: + s.updateSuicideChange(entry) + } + } +} + +// objectChanged returns whether the object was changed (in the set of prevObjects), which can happen +// because of self-destructs and deployments. +func (s *MultiTxSnapshot) objectChanged(address common.Address) bool { + _, ok := s.prevObjects[address] + return ok +} + +// updateBalanceChange updates the snapshot with the balance change. +func (s *MultiTxSnapshot) updateBalanceChange(change balanceChange) { + s.touchedAccounts[*change.account] = struct{}{} + if s.objectChanged(*change.account) { + return + } + if _, ok := s.accountBalance[*change.account]; !ok { + s.accountBalance[*change.account] = change.prev + } +} + +// updateNonceChange updates the snapshot with the nonce change. +func (s *MultiTxSnapshot) updateNonceChange(change nonceChange) { + s.touchedAccounts[*change.account] = struct{}{} + if s.objectChanged(*change.account) { + return + } + if _, ok := s.accountNonce[*change.account]; !ok { + s.accountNonce[*change.account] = change.prev + } +} + +// updateCodeChange updates the snapshot with the code change. +func (s *MultiTxSnapshot) updateCodeChange(change codeChange) { + s.touchedAccounts[*change.account] = struct{}{} + if s.objectChanged(*change.account) { + return + } + if _, ok := s.accountCode[*change.account]; !ok { + s.accountCode[*change.account] = change.prevcode + s.accountCodeHash[*change.account] = change.prevhash + } +} + +// updateResetObjectChange updates the snapshot with the reset object change. 
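updateFromJournal above walks the StateDB journal once per Finalise and dispatches on the concrete change type; each recorder also skips accounts already captured in prevObjects, since restoring the whole previous object makes field-level restores redundant. A compact sketch of that dispatch-and-guard pattern, with invented, simplified entry types:

```go
package main

import "fmt"

// invented, simplified journal entry types
type balanceChange struct {
	account string
	prev    int64
}
type createObjectChange struct{ account string }

type snapshot struct {
	prevObjects    map[string]bool  // whole-object restores (creation/reset/self-destruct)
	accountBalance map[string]int64 // field-level restores
}

func (s *snapshot) updateFromJournal(entries []interface{}) {
	for _, e := range entries {
		switch c := e.(type) {
		case createObjectChange:
			s.prevObjects[c.account] = true
		case balanceChange:
			if s.prevObjects[c.account] {
				continue // object-level restore already covers this account
			}
			if _, ok := s.accountBalance[c.account]; !ok {
				s.accountBalance[c.account] = c.prev
			}
		default:
			// entries the snapshot does not care about are skipped
		}
	}
}

func main() {
	s := &snapshot{prevObjects: map[string]bool{}, accountBalance: map[string]int64{}}
	s.updateFromJournal([]interface{}{createObjectChange{"a"}, balanceChange{account: "a", prev: 10}})
	fmt.Println(s.accountBalance) // map[] - "a" is already covered by prevObjects
}
```

Here nothing is recorded for the balance of "a" because the object-level entry already covers it, which mirrors the objectChanged check used by the recorders above.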
+func (s *MultiTxSnapshot) updateResetObjectChange(change resetObjectChange) { + s.touchedAccounts[change.prev.address] = struct{}{} + address := change.prev.address + if _, ok := s.prevObjects[address]; !ok { + s.prevObjects[address] = change.prev + } +} + +// updateCreateObjectChange updates the snapshot with the createObjectChange. +func (s *MultiTxSnapshot) updateCreateObjectChange(change createObjectChange) { + s.touchedAccounts[*change.account] = struct{}{} + if _, ok := s.prevObjects[*change.account]; !ok { + s.prevObjects[*change.account] = nil + } +} + +// updateSuicideChange updates the snapshot with the suicide change. +func (s *MultiTxSnapshot) updateSuicideChange(change suicideChange) { + s.touchedAccounts[*change.account] = struct{}{} + if s.objectChanged(*change.account) { + return + } + if _, ok := s.accountSuicided[*change.account]; !ok { + s.accountSuicided[*change.account] = change.prev + } + if _, ok := s.accountBalance[*change.account]; !ok { + s.accountBalance[*change.account] = change.prevbalance + } +} + +// updatePendingStorage updates the snapshot with the pending storage change. +func (s *MultiTxSnapshot) updatePendingStorage(address common.Address, key, value common.Hash, ok bool) { + s.touchedAccounts[address] = struct{}{} + if s.objectChanged(address) { + return + } + if _, exists := s.accountStorage[address]; !exists { + s.accountStorage[address] = make(map[common.Hash]*common.Hash) + } + if _, exists := s.accountStorage[address][key]; exists { + return + } + if ok { + s.accountStorage[address][key] = &value + } else { + s.accountStorage[address][key] = nil + } +} + +// updatePendingStatus updates the snapshot with previous pending status. +func (s *MultiTxSnapshot) updatePendingStatus(address common.Address, pending, dirty bool) { + s.touchedAccounts[address] = struct{}{} + if !pending { + s.accountNotPending[address] = struct{}{} + } + if !dirty { + s.accountNotDirty[address] = struct{}{} + } +} + +// updateObjectDeleted updates the snapshot with the object deletion. +func (s *MultiTxSnapshot) updateObjectDeleted(address common.Address, deleted bool) { + s.touchedAccounts[address] = struct{}{} + if s.objectChanged(address) { + return + } + if _, ok := s.accountDeleted[address]; !ok { + s.accountDeleted[address] = deleted + } +} + +// Merge merges the changes from another snapshot into the current snapshot. +// The operation assumes that the other snapshot is later (newer) than the current snapshot. +// Changes are merged such that older state is retained and not overwritten. +// In other words, this method performs a union operation on two snapshots, where +// older values are retained and any new values are added to the current snapshot. 
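For example, if the older snapshot recorded a previous nonce of 1 for an account and the newer snapshot recorded 2 for the same account plus 5 for a second account, the merged snapshot keeps 1 and adds 5. A small sketch of that older-value-wins union on a single map (illustrative code, not from the patch):

```go
package main

import "fmt"

// mergeOlderWins copies into older only the keys it has not recorded yet,
// mirroring how Merge unions the per-account maps.
func mergeOlderWins(older, newer map[string]uint64) {
	for k, v := range newer {
		if _, ok := older[k]; !ok {
			older[k] = v
		}
	}
}

func main() {
	older := map[string]uint64{"a": 1}         // value of a before tx 1
	newer := map[string]uint64{"a": 2, "b": 5} // values before tx 2
	mergeOlderWins(older, newer)
	fmt.Println(older) // map[a:1 b:5] - the oldest previous values survive
}
```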
+func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { + if other.invalid || s.invalid { + return errors.New("failed to merge snapshots - invalid snapshot found") + } + + // each snapshot increments the number of logs per transaction hash + // when we merge snapshots, the number of logs added per transaction are appended to current snapshot + for txHash, numLogs := range other.numLogsAdded { + s.numLogsAdded[txHash] += numLogs + } + + // prevObjects contain mapping of address to state objects + // if the current snapshot has previous object for same address, retain previous object + // otherwise, add new object from other snapshot + for address, object := range other.prevObjects { + if _, exist := s.prevObjects[address]; !exist { + s.prevObjects[address] = object + } + } + + // merge account storage - + // we want to retain any existing storage values for a given account, + // update storage keys if they do not exist for a given account's storage, + // and update pending storage for accounts that don't already exist in current snapshot + for address, storage := range other.accountStorage { + if s.objectChanged(address) { + continue + } + + if _, exist := s.accountStorage[address]; !exist { + s.accountStorage[address] = make(map[common.Hash]*common.Hash) + s.accountStorage[address] = storage + continue + } + + for key, value := range storage { + if _, exists := s.accountStorage[address][key]; !exists { + s.accountStorage[address][key] = value + } + } + } + + // add previous balance(s) for any addresses that don't exist in current snapshot + for address, balance := range other.accountBalance { + if s.objectChanged(address) { + continue + } + + if _, exist := s.accountBalance[address]; !exist { + s.accountBalance[address] = balance + } + } + + // add previous nonce for accounts that don't exist in current snapshot + for address, nonce := range other.accountNonce { + if s.objectChanged(address) { + continue + } + if _, exist := s.accountNonce[address]; !exist { + s.accountNonce[address] = nonce + } + } + + // add previous code for accounts not found in current snapshot + for address, code := range other.accountCode { + if s.objectChanged(address) { + continue + } + if _, exist := s.accountCode[address]; !exist { + if _, found := other.accountCodeHash[address]; !found { + // every codeChange has code and code hash set - + // should never reach this point unless there is programming error + panic("snapshot merge found code but no code hash for account address") + } + + s.accountCode[address] = code + s.accountCodeHash[address] = other.accountCodeHash[address] + } + } + + // add previous suicide for addresses not in current snapshot + for address, suicided := range other.accountSuicided { + if s.objectChanged(address) { + continue + } + + if _, exist := s.accountSuicided[address]; !exist { + s.accountSuicided[address] = suicided + } else { + return errors.New("failed to merge snapshots - duplicate found for account suicide") + } + } + + // add previous account deletions if they don't exist + for address, deleted := range other.accountDeleted { + if s.objectChanged(address) { + continue + } + if _, exist := s.accountDeleted[address]; !exist { + s.accountDeleted[address] = deleted + } + } + + // add previous pending status if not found + for address := range other.accountNotPending { + if _, exist := s.accountNotPending[address]; !exist { + s.accountNotPending[address] = struct{}{} + } + } + + for address := range other.accountNotDirty { + if _, exist := s.accountNotDirty[address]; !exist 
{ + s.accountNotDirty[address] = struct{}{} + } + } + + for address := range other.touchedAccounts { + s.touchedAccounts[address] = struct{}{} + } + + return nil +} + +// revertState reverts the state to the snapshot. +func (s *MultiTxSnapshot) revertState(st *StateDB) { + // remove all the logs added + for txhash, numLogs := range s.numLogsAdded { + lens := len(st.logs[txhash]) + if lens == numLogs { + delete(st.logs, txhash) + } else { + st.logs[txhash] = st.logs[txhash][:lens-numLogs] + } + st.logSize -= uint(numLogs) + } + + // restore the objects + for address, object := range s.prevObjects { + if object == nil { + delete(st.stateObjects, address) + } else { + st.stateObjects[address] = object + } + } + + // restore storage + for address, storage := range s.accountStorage { + st.stateObjects[address].dirtyStorage = make(Storage) + for key, value := range storage { + if value == nil { + if _, ok := st.stateObjects[address].pendingStorage[key]; !ok { + panic(fmt.Sprintf("storage key %x not found in pending storage", key)) + } + delete(st.stateObjects[address].pendingStorage, key) + } else { + if _, ok := st.stateObjects[address].pendingStorage[key]; !ok { + panic(fmt.Sprintf("storage key %x not found in pending storage", key)) + } + st.stateObjects[address].pendingStorage[key] = *value + } + } + } + + // restore balance + for address, balance := range s.accountBalance { + st.stateObjects[address].setBalance(balance) + } + // restore nonce + for address, nonce := range s.accountNonce { + st.stateObjects[address].setNonce(nonce) + } + // restore code + for address, code := range s.accountCode { + st.stateObjects[address].setCode(common.BytesToHash(s.accountCodeHash[address]), code) + } + // restore suicided + for address, suicided := range s.accountSuicided { + st.stateObjects[address].suicided = suicided + } + // restore deleted + for address, deleted := range s.accountDeleted { + st.stateObjects[address].deleted = deleted + } + + // restore pending status + for address := range s.accountNotPending { + delete(st.stateObjectsPending, address) + } + for address := range s.accountNotDirty { + delete(st.stateObjectsDirty, address) + } + + // clean dirty state of touched accounts + for address := range s.touchedAccounts { + if obj, ok := st.stateObjects[address]; ok { + obj.dirtyStorage = make(Storage) + } + } +} + +// MultiTxSnapshotStack contains a list of snapshots for multiple transactions associated with a StateDB. +// Intended use is as follows: +// - Create a new snapshot and push on top of the stack +// - Apply transactions to state and update head snapshot with changes from journal +// - If any changes applied to state database are committed to trie, invalidate the head snapshot +// - If applied changes are not desired, revert the changes from the head snapshot and pop the snapshot from the stack +// - If applied changes are desired, commit the changes from the head snapshot by merging with previous entry +// and pop the snapshot from the stack +type MultiTxSnapshotStack struct { + snapshots []MultiTxSnapshot + state *StateDB +} + +// NewMultiTxSnapshotStack creates a new MultiTxSnapshotStack with a given StateDB. +func NewMultiTxSnapshotStack(state *StateDB) *MultiTxSnapshotStack { + return &MultiTxSnapshotStack{ + snapshots: make([]MultiTxSnapshot, 0), + state: state, + } +} + +// NewSnapshot creates a new snapshot and pushes it on top of the stack. 
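Putting the intended-use list above into code: the sketch below uses the StateDB wrappers this patch adds further down (NewMultiTxSnapshot, MultiTxSnapshotRevert, MultiTxSnapshotCommit) to apply one transaction under a fresh head snapshot and either keep or discard it. applyOrDiscard and commit are invented names and error handling is abbreviated:

```go
package miner

import (
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// applyOrDiscard applies one transaction under a fresh snapshot, keeping the
// changes on success and rolling the state back on failure.
func applyOrDiscard(statedb *state.StateDB, commit func(*types.Transaction) error, tx *types.Transaction) error {
	if err := statedb.NewMultiTxSnapshot(); err != nil { // push a new head snapshot
		return err
	}
	if err := commit(tx); err != nil {
		_ = statedb.MultiTxSnapshotRevert() // undo the changes and pop
		return err
	}
	return statedb.MultiTxSnapshotCommit() // merge into the snapshot below and pop
}
```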
+func (stack *MultiTxSnapshotStack) NewSnapshot() (*MultiTxSnapshot, error) { + if len(stack.snapshots) > 0 && stack.snapshots[len(stack.snapshots)-1].invalid { + return nil, errors.New("failed to create new multi-transaction snapshot - invalid snapshot found at head") + } + + snap := newMultiTxSnapshot() + stack.snapshots = append(stack.snapshots, snap) + return &snap, nil +} + +func (stack *MultiTxSnapshotStack) Copy(statedb *StateDB) *MultiTxSnapshotStack { + newStack := NewMultiTxSnapshotStack(statedb) + for _, snapshot := range stack.snapshots { + newStack.snapshots = append(newStack.snapshots, snapshot.Copy()) + } + return newStack +} + +// Peek returns the snapshot at the top of the stack. +func (stack *MultiTxSnapshotStack) Peek() *MultiTxSnapshot { + if len(stack.snapshots) == 0 { + return nil + } + return &stack.snapshots[len(stack.snapshots)-1] +} + +// Pop removes the snapshot at the top of the stack and returns it. +func (stack *MultiTxSnapshotStack) Pop() (*MultiTxSnapshot, error) { + size := len(stack.snapshots) + if size == 0 { + return nil, errors.New("failed to revert multi-transaction snapshot - does not exist") + } + + head := &stack.snapshots[size-1] + stack.snapshots = stack.snapshots[:size-1] + return head, nil +} + +// Revert rewinds the changes from the head snapshot and removes it from the stack. +func (stack *MultiTxSnapshotStack) Revert() (*MultiTxSnapshot, error) { + size := len(stack.snapshots) + if size == 0 { + return nil, errors.New("failed to revert multi-transaction snapshot - does not exist") + } + + head := &stack.snapshots[size-1] + if head.invalid { + return nil, errors.New("failed to revert multi-transaction snapshot - invalid snapshot found") + } + + head.revertState(stack.state) + stack.snapshots = stack.snapshots[:size-1] + return head, nil +} + +// RevertAll reverts all snapshots in the stack. +func (stack *MultiTxSnapshotStack) RevertAll() (snapshot *MultiTxSnapshot, err error) { + for len(stack.snapshots) > 0 { + if snapshot, err = stack.Revert(); err != nil { + break + } + } + return +} + +// Commit merges the changes from the head snapshot with the previous snapshot and removes it from the stack. +func (stack *MultiTxSnapshotStack) Commit() (*MultiTxSnapshot, error) { + if len(stack.snapshots) == 0 { + return nil, errors.New("failed to commit multi-transaction snapshot - does not exist") + } + + if len(stack.snapshots) == 1 { + return stack.Pop() + } + + var ( + head *MultiTxSnapshot + err error + ) + if head, err = stack.Pop(); err != nil { + return nil, err + } + + current := stack.Peek() + if err = current.Merge(head); err != nil { + return nil, err + } + + stack.snapshots[len(stack.snapshots)-1] = *current + return head, nil +} + +// Size returns the number of snapshots in the stack. +func (stack *MultiTxSnapshotStack) Size() int { + return len(stack.snapshots) +} + +// Invalidate invalidates the latest snapshot. This is used when state changes are committed to trie. +func (stack *MultiTxSnapshotStack) Invalidate() { + size := len(stack.snapshots) + if size == 0 { + return + } + + head := stack.snapshots[size-1] + head.invalid = true + stack.snapshots = stack.snapshots[:0] + stack.snapshots = append(stack.snapshots, head) +} + +// UpdatePendingStatus updates the pending status for an address. 
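Invalidate above is why callers must treat a trie write as a point of no return: IntermediateRoot (hooked later in this patch) invalidates the head snapshot, and subsequent snapshot calls return an error rather than reverting to a state the trie no longer matches. A short sketch of that failure mode, assuming a *state.StateDB; buildStep is an invented name:

```go
package miner

import "github.com/ethereum/go-ethereum/core/state"

// buildStep illustrates the invalidation point during block building.
func buildStep(statedb *state.StateDB) error {
	if err := statedb.NewMultiTxSnapshot(); err != nil {
		return err // an earlier trie write invalidated the head snapshot
	}
	// ... apply transactions, Finalise, etc. ...
	statedb.IntermediateRoot(true) // writes to the trie and invalidates the stack head
	// From here on, MultiTxSnapshotRevert returns an error instead of silently
	// producing in-memory state that disagrees with the trie.
	return nil
}
```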
+func (stack *MultiTxSnapshotStack) UpdatePendingStatus(address common.Address, pending, dirty bool) { + if len(stack.snapshots) == 0 { + return + } + + current := stack.Peek() + current.updatePendingStatus(address, pending, dirty) + stack.snapshots[len(stack.snapshots)-1] = *current +} + +// UpdatePendingStorage updates the pending storage for an address. +func (stack *MultiTxSnapshotStack) UpdatePendingStorage(address common.Address, key, value common.Hash, ok bool) { + if len(stack.snapshots) == 0 { + return + } + + current := stack.Peek() + current.updatePendingStorage(address, key, value, ok) + stack.snapshots[len(stack.snapshots)-1] = *current +} + +// UpdateFromJournal updates the snapshot with the changes from the journal. +func (stack *MultiTxSnapshotStack) UpdateFromJournal(journal *journal) { + if len(stack.snapshots) == 0 { + return + } + + current := stack.Peek() + current.updateFromJournal(journal) + stack.snapshots[len(stack.snapshots)-1] = *current +} + +// UpdateObjectDeleted updates the snapshot with the object deletion. +func (stack *MultiTxSnapshotStack) UpdateObjectDeleted(address common.Address, deleted bool) { + if len(stack.snapshots) == 0 { + return + } + + current := stack.Peek() + current.updateObjectDeleted(address, deleted) + stack.snapshots[len(stack.snapshots)-1] = *current +} diff --git a/core/state/multi_tx_snapshot_test.go b/core/state/multi_tx_snapshot_test.go new file mode 100644 index 0000000000..6b6b1d56a5 --- /dev/null +++ b/core/state/multi_tx_snapshot_test.go @@ -0,0 +1,929 @@ +package state + +import ( + "bytes" + "fmt" + "math/big" + "math/rand" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +var ( + addrs []common.Address + keys []common.Hash + + rng *rand.Rand +) + +func init() { + for i := 0; i < 20; i++ { + addrs = append(addrs, common.HexToAddress(fmt.Sprintf("0x%02x", i))) + } + for i := 0; i < 100; i++ { + keys = append(keys, common.HexToHash(fmt.Sprintf("0x%02x", i))) + } +} + +type observableAccountState struct { + address common.Address + balance *big.Int + nonce uint64 + code []byte + codeHash common.Hash + codeSize int + + state map[common.Hash]common.Hash + committedState map[common.Hash]common.Hash + + selfDestruct bool + exist bool + empty bool +} + +func getObservableAccountState(s *StateDB, address common.Address, storageKeys []common.Hash) *observableAccountState { + state := &observableAccountState{ + address: address, + balance: s.GetBalance(address), + nonce: s.GetNonce(address), + code: s.GetCode(address), + codeHash: s.GetCodeHash(address), + codeSize: s.GetCodeSize(address), + state: make(map[common.Hash]common.Hash), + committedState: make(map[common.Hash]common.Hash), + selfDestruct: s.HasSuicided(address), + exist: s.Exist(address), + empty: s.Empty(address), + } + + for _, key := range storageKeys { + state.state[key] = s.GetState(address, key) + state.committedState[key] = s.GetCommittedState(address, key) + } + + return state +} + +func verifyObservableAccountState(s *StateDB, state *observableAccountState) error { + if s.GetBalance(state.address).Cmp(state.balance) != 0 { + return fmt.Errorf("balance mismatch %v != %v", s.GetBalance(state.address), state.balance) + } + if s.GetNonce(state.address) != state.nonce { + return fmt.Errorf("nonce mismatch %v != %v", s.GetNonce(state.address), state.nonce) + } + if !bytes.Equal(s.GetCode(state.address), state.code) { + return fmt.Errorf("code mismatch %v != %v", s.GetCode(state.address), state.code) + } + if s.GetCodeHash(state.address) != 
state.codeHash { + return fmt.Errorf("code hash mismatch %v != %v", s.GetCodeHash(state.address), state.codeHash) + } + if s.GetCodeSize(state.address) != state.codeSize { + return fmt.Errorf("code size mismatch %v != %v", s.GetCodeSize(state.address), state.codeSize) + } + for key, value := range state.state { + if s.GetState(state.address, key) != value { + return fmt.Errorf("state mismatch %v != %v", s.GetState(state.address, key), value) + } + } + for key, value := range state.committedState { + if s.GetCommittedState(state.address, key) != value { + return fmt.Errorf("committed state mismatch %v != %v", s.GetCommittedState(state.address, key), value) + } + } + if s.HasSuicided(state.address) != state.selfDestruct { + return fmt.Errorf("self destruct mismatch %v != %v", s.HasSuicided(state.address), state.selfDestruct) + } + if s.Exist(state.address) != state.exist { + return fmt.Errorf("exist mismatch %v != %v", s.Exist(state.address), state.exist) + } + if s.Empty(state.address) != state.empty { + return fmt.Errorf("empty mismatch %v != %v", s.Empty(state.address), state.empty) + } + return nil +} + +func randomBytes(n int) []byte { + b := make([]byte, n) + _, err := rng.Read(b) + if err != nil { + panic(err) + } + return b +} + +func randomHash() common.Hash { + return common.BytesToHash(randomBytes(32)) +} + +func randFillAccountState(addr common.Address, s *StateDB) { + for i, key := range keys { + // Fill some keys with zero value, others with random value + if i%5 == 0 { + s.SetState(addr, key, common.BigToHash(common.Big0)) + } else { + s.SetState(addr, key, randomHash()) + } + } +} + +func genRandomAccountState(seed int64) map[common.Address]map[common.Hash]common.Hash { + rng = rand.New(rand.NewSource(seed)) + + state := make(map[common.Address]map[common.Hash]common.Hash) + + for _, addr := range addrs { + state[addr] = make(map[common.Hash]common.Hash) + for i, key := range keys { + if i%5 == 0 { + state[addr][key] = common.BigToHash(common.Big0) + } else { + state[addr][key] = randomHash() + } + } + } + + return state +} + +func randFillAccount(addr common.Address, s *StateDB) { + s.SetNonce(addr, rng.Uint64()) + s.SetBalance(addr, big.NewInt(rng.Int63())) + s.SetCode(addr, randomBytes(rng.Intn(100))) + randFillAccountState(addr, s) +} + +func prepareInitialState(s *StateDB) { + // We neet to create realistic state for statedb + // for this we apply some changes + // 1. Before calling intermediateRoot + // 2. 
After calling intermediateRoot but before calling Finalise + rng = rand.New(rand.NewSource(0)) + + var beforeCommitHooks, afterCommitHooks []func(addr common.Address, s *StateDB) + addAccount := func(beforeCommit, afterCommit func(addr common.Address, s *StateDB)) { + beforeCommitHooks = append(beforeCommitHooks, beforeCommit) + afterCommitHooks = append(afterCommitHooks, afterCommit) + } + + addAccount(func(addr common.Address, s *StateDB) { + s.SetNonce(addr, rng.Uint64()) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + s.SetNonce(addr, rng.Uint64()) + }) + addAccount(func(addr common.Address, s *StateDB) { + s.SetNonce(addr, rng.Uint64()) + }, func(addr common.Address, s *StateDB) { + s.SetNonce(addr, rng.Uint64()) + }) + + addAccount(func(addr common.Address, s *StateDB) { + s.SetBalance(addr, big.NewInt(rng.Int63())) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + s.SetBalance(addr, big.NewInt(rng.Int63())) + }) + addAccount(func(addr common.Address, s *StateDB) { + s.SetBalance(addr, big.NewInt(rng.Int63())) + }, func(addr common.Address, s *StateDB) { + s.SetBalance(addr, big.NewInt(rng.Int63())) + }) + + addAccount(func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rng.Intn(100))) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rng.Intn(100))) + }) + addAccount(func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rng.Intn(100))) + s.SetCode(addr, nil) + }, func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rng.Intn(100))) + }) + addAccount(func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rng.Intn(100))) + s.Suicide(addr) + }, func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rng.Intn(100))) + }) + + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + s.Suicide(addr) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + s.Suicide(addr) + }) + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + }, func(addr common.Address, s *StateDB) { + s.Suicide(addr) + }) + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + s.Suicide(addr) + }, func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + }) + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + s.Suicide(addr) + }, func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + // calling it twice is possible + s.Suicide(addr) + s.Suicide(addr) + }) + + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + }) + + for i, beforeHook := range beforeCommitHooks { + if beforeHook != nil { + beforeHook(addrs[i], s) + } + } + s.IntermediateRoot(true) + + for i, afterHook := range afterCommitHooks { + if afterHook != nil { + afterHook(addrs[i], s) + } + } + + s.Finalise(true) +} + +func testMultiTxSnapshot(t *testing.T, actions func(s *StateDB)) { + s := newStateTest() + prepareInitialState(s.state) + + previousRefund := s.state.GetRefund() + + var obsStates []*observableAccountState + for _, account := range addrs { + obsStates = append(obsStates, getObservableAccountState(s.state, account, keys)) + } + + pendingAddressesBefore := make(map[common.Address]struct{}) + for k, v := range s.state.stateObjectsPending { + pendingAddressesBefore[k] = v + } + 
dirtyAddressesBefore := make(map[common.Address]struct{}) + for k, v := range s.state.stateObjectsDirty { + dirtyAddressesBefore[k] = v + } + + err := s.state.NewMultiTxSnapshot() + if err != nil { + t.Fatal("MultiTxSnapshot failed", err) + } + + if actions != nil { + actions(s.state) + } + + err = s.state.MultiTxSnapshotRevert() + if err != nil { + t.Fatal("MultiTxSnapshotRevert failed", err) + } + + for _, obsState := range obsStates { + err := verifyObservableAccountState(s.state, obsState) + if err != nil { + t.Error("state mismatch", "account", obsState.address, err) + } + } + + if s.state.GetRefund() != previousRefund { + t.Error("refund mismatch", "got", s.state.GetRefund(), "expected", previousRefund) + } + + if len(s.state.stateObjectsPending) != len(pendingAddressesBefore) { + t.Error("pending state objects count mismatch", "got", len(s.state.stateObjectsPending), "expected", len(pendingAddressesBefore)) + } + for k := range s.state.stateObjectsPending { + if _, ok := pendingAddressesBefore[k]; !ok { + t.Error("stateObjectsPending mismatch, before was nil", "address", k) + } + } + if len(s.state.stateObjectsDirty) != len(dirtyAddressesBefore) { + t.Error("dirty state objects count mismatch", "got", len(s.state.stateObjectsDirty), "expected", len(dirtyAddressesBefore)) + } + for k := range s.state.stateObjectsDirty { + if _, ok := dirtyAddressesBefore[k]; !ok { + t.Error("stateObjectsDirty mismatch, before was nil", "address", k) + } + } + + root := s.state.IntermediateRoot(true) + + cleanState := newStateTest() + prepareInitialState(cleanState.state) + expectedRoot := cleanState.state.IntermediateRoot(true) + + if root != expectedRoot { + t.Error("root mismatch", "got", root, "expected", expectedRoot) + } +} + +func TestMultiTxSnapshotAccountChangesSimple(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + }) +} + +// This test verifies that dirty account storage is properly cleaned for accounts after revert +func TestMultiTxSnapshotAccountChangesRevertedByJournal(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0x03")) + } + s.Finalise(true) + for _, addr := range addrs { + // we use normal snapshot here because it + // 1. does not mark an account dirty (even though we applied changes) + // 2. 
changes dirty, uncommitted state of the account + snap := s.Snapshot() + s.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0x02")) + s.RevertToSnapshot(snap) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotRefund(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotAccountChangesMultiTx(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 79) + s.SetBalance(addr, big.NewInt(80)) + s.SetCode(addr, []byte{0x81}) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotAccountChangesSelfDestruct(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + + for _, addr := range addrs { + s.Suicide(addr) + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 79) + s.SetBalance(addr, big.NewInt(80)) + s.SetCode(addr, []byte{0x81}) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotAccountChangesEmptyAccount(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 0) + s.SetBalance(addr, common.Big0) + s.SetCode(addr, nil) + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 79) + s.SetBalance(addr, big.NewInt(80)) + s.SetCode(addr, []byte{0x81}) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotStateChanges(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + randFillAccountState(addr, s) + } + s.Finalise(true) + + for _, addr := range addrs { + randFillAccountState(addr, s) + } + s.Finalise(true) + }) +} + +func TestStackBasic(t *testing.T) { + for i := 0; i < 10; i++ { + testMultiTxSnapshot(t, func(s *StateDB) { + // when test starts, actions are performed after new snapshot is created + // we initialize additional snapshot on top of that + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + + seed := rand.Int63() + stateMap := genRandomAccountState(seed) + for account, accountKeys := range stateMap { + for key, value := range accountKeys { + s.SetState(account, key, value) + } + } + s.Finalise(true) + + stack := s.multiTxSnapshotStack + + // the test starts with 1 snapshot, and we just created new one above + startSize := stack.Size() + if startSize != 2 { + t.Errorf("expected stack size to be 2, got %d", startSize) + t.FailNow() + } + + for _, addr := range addrs { + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + randFillAccountState(addr, s) + s.Finalise(true) + } + afterAddrSize := stack.Size() + if afterAddrSize != startSize+len(addrs) { + t.Errorf("expected stack size to be %d, got %d", startSize+len(addrs), afterAddrSize) + t.FailNow() + } + + // the testMultiTxSnapshot subroutine calls MultiTxSnapshotRevert after applying actions + // we test here to make sure that the flattened commitments on the head of stack 
+ // yield the same final root hash + // this ensures that we are properly flattening the stack on commit + for stack.Size() > 1 { + if _, err := stack.Commit(); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + } + }) + } +} + +func TestStackSelfDestruct(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + s.Finalise(true) + } + + for _, addr := range addrs { + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + s.Suicide(addr) + } + stack := s.multiTxSnapshotStack + + // merge all the suicide operations + for stack.Size() > 1 { + if _, err := stack.Commit(); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 79) + s.SetBalance(addr, big.NewInt(80)) + s.SetCode(addr, []byte{0x81}) + } + s.Finalise(true) + }) +} + +func TestStackAgainstSingleSnap(t *testing.T) { + // we generate a random seed ten times to fuzz test multiple stack snapshots against single layer snapshot + for i := 0; i < 10; i++ { + testMultiTxSnapshot(t, func(s *StateDB) { + // Need to drop initial snapshot since copy requires empty snapshot stack + if err := s.MultiTxSnapshotRevert(); err != nil { + t.Fatalf("error reverting snapshot: %v", err) + } + original := s.Copy() + baselineStateDB := s.Copy() + + baselineRootHash, targetRootHash := baselineStateDB.originalRoot, s.originalRoot + + if !bytes.Equal(baselineRootHash.Bytes(), targetRootHash.Bytes()) { + t.Errorf("expected root hash to be %x, got %x", baselineRootHash, targetRootHash) + t.FailNow() + } + + // basic - add multiple snapshots and commit them, and compare them to single snapshot that has all + // state changes + + if err := baselineStateDB.NewMultiTxSnapshot(); err != nil { + t.Errorf("Error initializing snapshot: %v", err) + t.FailNow() + } + + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("Error initializing snapshot: %v", err) + t.FailNow() + } + + // we should be able to revert back to the same intermediate root hash + // for single snapshot and snapshot stack + seed := rand.Int63() + state := genRandomAccountState(seed) + for account, accountKeys := range state { + for key, value := range accountKeys { + baselineStateDB.SetState(account, key, value) + + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("Error initializing snapshot: %v", err) + t.FailNow() + } + s.SetState(account, key, value) + s.Finalise(true) + } + } + baselineStateDB.Finalise(true) + + // commit all but last snapshot + stack := s.multiTxSnapshotStack + for stack.Size() > 1 { + if _, err := stack.Commit(); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + } + + var ( + baselineSnapshot = baselineStateDB.multiTxSnapshotStack.Peek() + targetSnapshot = s.multiTxSnapshotStack.Peek() + ) + if !targetSnapshot.Equal(baselineSnapshot) { + CompareAndPrintSnapshotMismatches(t, targetSnapshot, baselineSnapshot) + t.Errorf("expected snapshots to be equal") + t.FailNow() + } + + // revert back to previously calculated root hash + if err := baselineStateDB.MultiTxSnapshotRevert(); err != nil { + t.Errorf("MultiTxSnapshotRevert failed: %v", err) + t.FailNow() + } + + if err := s.MultiTxSnapshotRevert(); err != nil { + 
t.Errorf("MultiTxSnapshotRevert failed: %v", err) + t.FailNow() + } + + var err error + if targetRootHash, err = s.Commit(true); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + + if baselineRootHash, err = baselineStateDB.Commit(true); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + if !bytes.Equal(baselineRootHash.Bytes(), targetRootHash.Bytes()) { + t.Errorf("expected root hash to be %x, got %x", baselineRootHash, targetRootHash) + t.FailNow() + } + + *s = *original + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("Error initializing snapshot: %v", err) + t.FailNow() + } + }) + } +} + +func CompareAndPrintSnapshotMismatches(t *testing.T, target, other *MultiTxSnapshot) { + var out bytes.Buffer + if target.Equal(other) { + t.Logf("Snapshots are equal") + return + } + + if target.invalid != other.invalid { + out.WriteString(fmt.Sprintf("invalid: %v != %v\n", target.invalid, other.invalid)) + return + } + + // check log mismatch + visited := make(map[common.Hash]bool) + for address, logCount := range other.numLogsAdded { + targetLogCount, exists := target.numLogsAdded[address] + if !exists { + out.WriteString(fmt.Sprintf("target<>other numLogsAdded[missing]: %v\n", address)) + continue + } + if targetLogCount != logCount { + out.WriteString(fmt.Sprintf("target<>other numLogsAdded[%x]: %v != %v\n", address, targetLogCount, logCount)) + } + } + + for address, logCount := range target.numLogsAdded { + if visited[address] { + continue + } + + otherLogCount, exists := other.numLogsAdded[address] + if !exists { + out.WriteString(fmt.Sprintf("other<>target numLogsAdded[missing]: %v\n", address)) + continue + } + + if otherLogCount != logCount { + out.WriteString(fmt.Sprintf("other<>target numLogsAdded[%x]: %v != %v\n", address, otherLogCount, logCount)) + } + } + + // check previous objects mismatch + for address := range other.prevObjects { + // TODO: we only check existence, need to add RLP comparison + _, exists := target.prevObjects[address] + if !exists { + out.WriteString(fmt.Sprintf("target<>other prevObjects[missing]: %v\n", address.String())) + continue + } + } + + for address, obj := range target.prevObjects { + otherObj, exists := other.prevObjects[address] + if !exists { + out.WriteString(fmt.Sprintf("other<>target prevObjects[missing]: %v\n", address)) + continue + } + if !reflect.DeepEqual(otherObj, obj) { + out.WriteString(fmt.Sprintf("other<>target prevObjects[%x]: %v != %v\n", address, otherObj, obj)) + } + } + + // check account storage mismatch + for account, storage := range other.accountStorage { + targetStorage, exists := target.accountStorage[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountStorage[missing]: %v\n", account)) + continue + } + + for key, value := range storage { + targetValue, exists := targetStorage[key] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountStorage[%s][missing]: %v\n", account.String(), key.String())) + continue + } + if !reflect.DeepEqual(targetValue, value) { + out.WriteString(fmt.Sprintf("target<>other accountStorage[%s][%s]: %v != %v\n", account.String(), key.String(), targetValue.String(), value.String())) + } + } + } + + for account, storage := range target.accountStorage { + otherStorage, exists := other.accountStorage[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountStorage[missing]: %v\n", account)) + continue + } + + for key, value := range storage { + otherValue, exists := otherStorage[key] + if !exists { + 
out.WriteString(fmt.Sprintf("other<>target accountStorage[%s][missing]: %v\n", account.String(), key.String())) + continue + } + if !reflect.DeepEqual(otherValue, value) { + out.WriteString(fmt.Sprintf("other<>target accountStorage[%s][%s]: %v != %v\n", account.String(), key.String(), otherValue.String(), value.String())) + } + } + } + + // check account balance mismatch + for account, balance := range other.accountBalance { + targetBalance, exists := target.accountBalance[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountBalance[missing]: %v\n", account)) + continue + } + if !reflect.DeepEqual(targetBalance, balance) { + out.WriteString(fmt.Sprintf("target<>other accountBalance[%x]: %v != %v\n", account, targetBalance, balance)) + } + } + + for account, balance := range target.accountBalance { + otherBalance, exists := other.accountBalance[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountBalance[missing]: %v\n", account)) + continue + } + if !bytes.Equal(otherBalance.Bytes(), balance.Bytes()) { + out.WriteString(fmt.Sprintf("other<>target accountBalance[%x]: %v != %v\n", account, otherBalance, balance)) + } + } + + // check account nonce mismatch + for account, nonce := range other.accountNonce { + targetNonce, exists := target.accountNonce[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountNonce[missing]: %v\n", account)) + continue + } + if targetNonce != nonce { + out.WriteString(fmt.Sprintf("target<>other accountNonce[%x]: %v != %v\n", account, targetNonce, nonce)) + } + } + + for account, nonce := range target.accountNonce { + otherNonce, exists := other.accountNonce[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountNonce[missing]: %v\n", account)) + continue + } + if otherNonce != nonce { + out.WriteString(fmt.Sprintf("other<>target accountNonce[%x]: %v != %v\n", account, otherNonce, nonce)) + } + } + + // check account code mismatch + for account, code := range other.accountCode { + targetCode, exists := target.accountCode[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountCode[missing]: %v\n", account)) + continue + } + if !bytes.Equal(targetCode, code) { + out.WriteString(fmt.Sprintf("target<>other accountCode[%x]: %v != %v\n", account, targetCode, code)) + } + } + + for account, code := range target.accountCode { + otherCode, exists := other.accountCode[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountCode[missing]: %v\n", account)) + continue + } + if !bytes.Equal(otherCode, code) { + out.WriteString(fmt.Sprintf("other<>target accountCode[%x]: %v != %v\n", account, otherCode, code)) + } + } + + // check account codeHash mismatch + for account, codeHash := range other.accountCodeHash { + targetCodeHash, exists := target.accountCodeHash[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountCodeHash[missing]: %v\n", account)) + continue + } + if !bytes.Equal(targetCodeHash, codeHash) { + out.WriteString(fmt.Sprintf("target<>other accountCodeHash[%x]: %v != %v\n", account, targetCodeHash, codeHash)) + } + } + + for account, codeHash := range target.accountCodeHash { + otherCodeHash, exists := other.accountCodeHash[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountCodeHash[missing]: %v\n", account)) + continue + } + if !bytes.Equal(otherCodeHash, codeHash) { + out.WriteString(fmt.Sprintf("other<>target accountCodeHash[%x]: %v != %v\n", account, otherCodeHash, codeHash)) + } + } + + 
// check account suicide mismatch + for account, suicide := range other.accountSuicided { + targetSuicide, exists := target.accountSuicided[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountSuicided[missing]: %v\n", account)) + continue + } + + if targetSuicide != suicide { + out.WriteString(fmt.Sprintf("target<>other accountSuicided[%x]: %t != %t\n", account, targetSuicide, suicide)) + } + } + + for account, suicide := range target.accountSuicided { + otherSuicide, exists := other.accountSuicided[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountSuicided[missing]: %v\n", account)) + continue + } + + if otherSuicide != suicide { + out.WriteString(fmt.Sprintf("other<>target accountSuicided[%x]: %t != %t\n", account, otherSuicide, suicide)) + } + } + + // check account deletion mismatch + for account, del := range other.accountDeleted { + targetDelete, exists := target.accountDeleted[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountDeleted[missing]: %v\n", account)) + continue + } + + if targetDelete != del { + out.WriteString(fmt.Sprintf("target<>other accountDeleted[%x]: %v != %v\n", account, targetDelete, del)) + } + } + + for account, del := range target.accountDeleted { + otherDelete, exists := other.accountDeleted[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountDeleted[missing]: %v\n", account)) + continue + } + + if otherDelete != del { + out.WriteString(fmt.Sprintf("other<>target accountDeleted[%x]: %v != %v\n", account, otherDelete, del)) + } + } + + // check account not pending mismatch + for account := range other.accountNotPending { + if _, exists := target.accountNotPending[account]; !exists { + out.WriteString(fmt.Sprintf("target<>other accountNotPending[missing]: %v\n", account)) + } + } + + for account := range target.accountNotPending { + if _, exists := other.accountNotPending[account]; !exists { + out.WriteString(fmt.Sprintf("other<>target accountNotPending[missing]: %v\n", account)) + } + } + + // check account not dirty mismatch + for account := range other.accountNotDirty { + if _, exists := target.accountNotDirty[account]; !exists { + out.WriteString(fmt.Sprintf("target<>other accountNotDirty[missing]: %v\n", account)) + } + } + + for account := range target.accountNotDirty { + if _, exists := other.accountNotDirty[account]; !exists { + out.WriteString(fmt.Sprintf("other<>target accountNotDirty[missing]: %v\n", account)) + } + } + + fmt.Println(out.String()) + out.Reset() +} diff --git a/core/state/state_object.go b/core/state/state_object.go index 7e34cba44a..cd720019db 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -247,6 +247,9 @@ func (s *stateObject) setState(key, value common.Hash) { func (s *stateObject) finalise(prefetch bool) { slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) for key, value := range s.dirtyStorage { + prev, ok := s.pendingStorage[key] + s.db.multiTxSnapshotStack.UpdatePendingStorage(s.address, key, prev, ok) + s.pendingStorage[key] = value if value != s.originStorage[key] { slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure diff --git a/core/state/statedb.go b/core/state/statedb.go index 256bd3e95f..8ec9de32cb 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -109,6 +109,9 @@ type StateDB struct { validRevisions []revision nextRevisionId int + // Multi-Transaction Snapshot Stack + multiTxSnapshotStack *MultiTxSnapshotStack + // Measurements 
gathered during execution for debugging purposes AccountReads time.Duration AccountHashes time.Duration @@ -151,6 +154,8 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) transientStorage: newTransientStorage(), hasher: crypto.NewKeccakState(), } + + sdb.multiTxSnapshotStack = NewMultiTxSnapshotStack(sdb) if sdb.snaps != nil { if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil { sdb.snapAccounts = make(map[common.Hash][]byte) @@ -712,6 +717,8 @@ func (s *StateDB) Copy() *StateDB { journal: newJournal(), hasher: crypto.NewKeccakState(), } + // Initialize copy of multi-transaction snapshot stack for the copied state + state.multiTxSnapshotStack = s.multiTxSnapshotStack.Copy(state) // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), @@ -842,6 +849,8 @@ func (s *StateDB) GetRefund() uint64 { // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { + s.multiTxSnapshotStack.UpdateFromJournal(s.journal) + addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] @@ -855,6 +864,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { continue } if obj.suicided || (deleteEmptyObjects && obj.empty()) { + s.multiTxSnapshotStack.UpdateObjectDeleted(obj.address, obj.deleted) + obj.deleted = true // We need to maintain account deletions explicitly (will remain @@ -872,6 +883,12 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } else { obj.finalise(true) // Prefetch slots in the background } + + if s.multiTxSnapshotStack.Size() > 0 { + _, wasPending := s.stateObjectsPending[addr] + _, wasDirty := s.stateObjectsDirty[addr] + s.multiTxSnapshotStack.UpdatePendingStatus(addr, wasPending, wasDirty) + } s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} @@ -894,6 +911,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) + // Intermediate root writes updates to the trie, which will cause + // in memory multi-transaction snapshot to be incompatible with the committed state, so we invalidate. + s.multiTxSnapshotStack.Invalidate() + // If there was a trie prefetcher operating, it gets aborted and irrevocably // modified after we start retrieving tries. Remove it from the statedb after // this round of use. @@ -1181,3 +1202,22 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common. 
} return ret } + +func (s *StateDB) NewMultiTxSnapshot() (err error) { + _, err = s.multiTxSnapshotStack.NewSnapshot() + return +} + +func (s *StateDB) MultiTxSnapshotRevert() (err error) { + _, err = s.multiTxSnapshotStack.Revert() + return +} + +func (s *StateDB) MultiTxSnapshotCommit() (err error) { + _, err = s.multiTxSnapshotStack.Commit() + return +} + +func (s *StateDB) MultiTxSnapshotStackSize() int { + return s.multiTxSnapshotStack.Size() +} diff --git a/miner/algo_common.go b/miner/algo_common.go index 7c83ccecfb..8e4cdfeab8 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -35,6 +34,7 @@ var ( defaultAlgorithmConfig = algorithmConfig{ DropRevertibleTxOnErr: false, EnforceProfit: false, + ExpectedProfit: nil, ProfitThresholdPercent: defaultProfitThresholdPercent, PriceCutoffPercent: defaultPriceCutoffPercent, } @@ -42,7 +42,11 @@ var ( var emptyCodeHash = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") -var errInterrupt = errors.New("miner worker interrupted") +var ( + ErrMevGasPriceNotSet = errors.New("mev gas price not set") + errInterrupt = errors.New("miner worker interrupted") + errNoPrivateKey = errors.New("no private key provided") +) // lowProfitError is returned when an order is not committed due to low profit or low effective gas price type lowProfitError struct { @@ -65,14 +69,13 @@ type algorithmConfig struct { // the transaction and continue processing the rest of a bundle or sbundle. // Revertible transactions are specified as hashes that can revert in a bundle or sbundle. DropRevertibleTxOnErr bool - // EnforceProfit is true if we want to enforce a minimum profit threshold // for committing a transaction based on ProfitThresholdPercent EnforceProfit bool - + // ExpectedProfit should be set on a per-transaction basis when profit is enforced + ExpectedProfit *big.Int // ProfitThresholdPercent is the minimum profit threshold for committing a transaction ProfitThresholdPercent int // 0-100, e.g. 70 means 70% - // PriceCutoffPercent is the minimum effective gas price threshold used for bucketing transactions by price. // For example if the top transaction in a list has an effective gas price of 1000 wei and PriceCutoffPercent // is 10 (i.e. 
10%), then the minimum effective gas price included in the same bucket as the top transaction @@ -86,51 +89,65 @@ type chainData struct { blacklist map[common.Address]struct{} } -type environmentDiff struct { - baseEnvironment *environment - header *types.Header - gasPool *core.GasPool // available gas used to pack transactions - state *state.StateDB // apply state changes here - newProfit *big.Int - newTxs []*types.Transaction - newReceipts []*types.Receipt +// PayoutTransactionParams holds parameters for committing a payout transaction, used in commitPayoutTx +type PayoutTransactionParams struct { + Amount *big.Int + BaseFee *big.Int + ChainData chainData + Gas uint64 + CommitFn CommitTxFunc + Receiver common.Address + Sender common.Address + SenderBalance *big.Int + SenderNonce uint64 + Signer types.Signer + PrivateKey *ecdsa.PrivateKey } -func newEnvironmentDiff(env *environment) *environmentDiff { - gasPool := new(core.GasPool).AddGas(env.gasPool.Gas()) - return &environmentDiff{ - baseEnvironment: env, - header: types.CopyHeader(env.header), - gasPool: gasPool, - state: env.state.Copy(), - newProfit: new(big.Int), +type ( + // BuildBlockFunc is the function signature for building a block + BuildBlockFunc func( + simBundles []types.SimulatedBundle, + simSBundles []*types.SimSBundle, + transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) + + // CommitTxFunc is the function signature for committing a transaction + CommitTxFunc func(*types.Transaction, chainData) (*types.Receipt, int, error) +) + +func ValidateGasPriceAndProfit(algoConf algorithmConfig, actualPrice, expectedPrice *big.Int, tolerablePriceDifferencePercent int, + actualProfit, expectedProfit *big.Int) error { + // allow tolerablePriceDifferencePercent % divergence + expectedPriceMultiple := new(big.Int).Mul(expectedPrice, big.NewInt(100-int64(tolerablePriceDifferencePercent))) + actualPriceMultiple := new(big.Int).Mul(actualPrice, common.Big100) + + var errLowProfit *lowProfitError = nil + if expectedPriceMultiple.Cmp(actualPriceMultiple) > 0 { + errLowProfit = &lowProfitError{ + ExpectedEffectiveGasPrice: expectedPrice, + ActualEffectiveGasPrice: actualPrice, + } } -} -func (e *environmentDiff) copy() *environmentDiff { - gasPool := new(core.GasPool).AddGas(e.gasPool.Gas()) - - return &environmentDiff{ - baseEnvironment: e.baseEnvironment.copy(), - header: types.CopyHeader(e.header), - gasPool: gasPool, - state: e.state.Copy(), - newProfit: new(big.Int).Set(e.newProfit), - newTxs: e.newTxs[:], - newReceipts: e.newReceipts[:], + if algoConf.EnforceProfit { + // We want to make expected profit smaller to allow for some leeway in cases where the actual profit is + // lower due to transaction ordering + expectedProfitMultiple := common.PercentOf(expectedProfit, algoConf.ProfitThresholdPercent) + actualProfitMultiple := new(big.Int).Mul(actualProfit, common.Big100) + + if expectedProfitMultiple.Cmp(actualProfitMultiple) > 0 { + if errLowProfit == nil { + errLowProfit = new(lowProfitError) + } + errLowProfit.ExpectedProfit = expectedProfit + errLowProfit.ActualProfit = actualProfit + } } -} -func (e *environmentDiff) applyToBaseEnv() { - env := e.baseEnvironment - env.gasPool = new(core.GasPool).AddGas(e.gasPool.Gas()) - env.header = e.header - env.state.StopPrefetcher() - env.state = e.state - env.profit.Add(env.profit, e.newProfit) - env.tcount += len(e.newTxs) - env.txs = append(env.txs, e.newTxs...) - env.receipts = append(env.receipts, e.newReceipts...) 
+ if errLowProfit != nil { // staticcheck linter complains if we don't check for nil here + return errLowProfit + } + return nil } func checkInterrupt(i *int32) bool { @@ -199,192 +216,6 @@ func applyTransactionWithBlacklist( return receipt, statedb, err } -// commit tx to envDiff -func (envDiff *environmentDiff) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) { - var ( - header = envDiff.header - coinbase = &envDiff.baseEnvironment.coinbase - signer = envDiff.baseEnvironment.signer - ) - - gasPrice, err := tx.EffectiveGasTip(header.BaseFee) - if err != nil { - return nil, shiftTx, err - } - - envDiff.state.SetTxContext(tx.Hash(), envDiff.baseEnvironment.tcount+len(envDiff.newTxs)) - receipt, newState, err := applyTransactionWithBlacklist(signer, chData.chainConfig, chData.chain, coinbase, - envDiff.gasPool, envDiff.state, header, tx, &header.GasUsed, *chData.chain.GetVMConfig(), chData.blacklist) - - envDiff.state = newState - if err != nil { - switch { - case errors.Is(err, core.ErrGasLimitReached): - // Pop the current out-of-gas transaction without shifting in the next from the account - from, _ := types.Sender(signer, tx) - log.Trace("Gas limit exceeded for current block", "sender", from) - return receipt, popTx, err - - case errors.Is(err, core.ErrNonceTooLow): - // New head notification data race between the transaction pool and miner, shift - from, _ := types.Sender(signer, tx) - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) - return receipt, shiftTx, err - - case errors.Is(err, core.ErrNonceTooHigh): - // Reorg notification data race between the transaction pool and miner, skip account = - from, _ := types.Sender(signer, tx) - log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) - return receipt, popTx, err - - case errors.Is(err, core.ErrTxTypeNotSupported): - // Pop the unsupported transaction without shifting in the next from the account - from, _ := types.Sender(signer, tx) - log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) - return receipt, popTx, err - - default: - // Strange error, discard the transaction and get the next in line (note, the - // nonce-too-high clause will prevent us from executing in vain). - log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) - return receipt, shiftTx, err - } - } - - envDiff.newProfit = envDiff.newProfit.Add(envDiff.newProfit, gasPrice.Mul(gasPrice, big.NewInt(int64(receipt.GasUsed)))) - envDiff.newTxs = append(envDiff.newTxs, tx) - envDiff.newReceipts = append(envDiff.newReceipts, receipt) - - return receipt, shiftTx, nil -} - -// Commit Bundle to env diff -func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chData chainData, interrupt *int32, algoConf algorithmConfig) error { - var ( - coinbase = envDiff.baseEnvironment.coinbase - tmpEnvDiff = envDiff.copy() - - coinbaseBalanceBefore = tmpEnvDiff.state.GetBalance(coinbase) - - profitBefore = new(big.Int).Set(tmpEnvDiff.newProfit) - - gasUsed uint64 - ) - - for _, tx := range bundle.OriginalBundle.Txs { - txHash := tx.Hash() - if tmpEnvDiff.header.BaseFee != nil && tx.Type() == types.DynamicFeeTxType { - // Sanity check for extremely large numbers - if tx.GasFeeCap().BitLen() > 256 { - return core.ErrFeeCapVeryHigh - } - if tx.GasTipCap().BitLen() > 256 { - return core.ErrTipVeryHigh - } - // Ensure gasFeeCap is greater than or equal to gasTipCap. 
- if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { - return core.ErrTipAboveFeeCap - } - } - - if tx.Value().Sign() == -1 { - return core.ErrNegativeValue - } - - _, err := tx.EffectiveGasTip(envDiff.header.BaseFee) - if err != nil { - return err - } - - _, err = types.Sender(envDiff.baseEnvironment.signer, tx) - if err != nil { - return err - } - - if checkInterrupt(interrupt) { - return errInterrupt - } - - receipt, _, err := tmpEnvDiff.commitTx(tx, chData) - - if err != nil { - isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) - // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one - if algoConf.DropRevertibleTxOnErr && isRevertibleTx { - log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", - "tx", txHash, "err", err) - continue - } - - log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) - return err - } - - if receipt != nil { - if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { - // if transaction reverted and isn't specified as reverting hash, return error - log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) - return errors.New("bundle tx revert") - } - } else { - // NOTE: The expectation is that a receipt is only nil if an error occurred. - // If there is no error but receipt is nil, there is likely a programming error. - return errors.New("invalid receipt when no error occurred") - } - - gasUsed += receipt.GasUsed - } - coinbaseBalanceAfter := tmpEnvDiff.state.GetBalance(coinbase) - coinbaseBalanceDelta := new(big.Int).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) - tmpEnvDiff.newProfit.Add(profitBefore, coinbaseBalanceDelta) - - bundleProfit := coinbaseBalanceDelta - - gasUsedBigInt := new(big.Int).SetUint64(gasUsed) - - var bundleActualEffGP *big.Int - if gasUsed == 0 { - bundleActualEffGP = big.NewInt(0) - } else { - bundleActualEffGP = bundleProfit.Div(bundleProfit, gasUsedBigInt) - } - bundleSimEffGP := new(big.Int).Set(bundle.MevGasPrice) - - // allow >-1% divergence - actualEGP := new(big.Int).Mul(bundleActualEffGP, common.Big100) // bundle actual effective gas price * 100 - simulatedEGP := new(big.Int).Mul(bundleSimEffGP, big.NewInt(99)) // bundle simulated effective gas price * 99 - - if simulatedEGP.Cmp(actualEGP) > 0 { - log.Trace("Bundle underpays after inclusion", "bundle", bundle.OriginalBundle.Hash) - return &lowProfitError{ - ExpectedEffectiveGasPrice: bundleSimEffGP, - ActualEffectiveGasPrice: bundleActualEffGP, - } - } - - if algoConf.EnforceProfit { - // if profit is enforced between simulation and actual commit, only allow ProfitThresholdPercent divergence - simulatedBundleProfit := new(big.Int).Set(bundle.TotalEth) - actualBundleProfit := new(big.Int).Mul(bundleActualEffGP, gasUsedBigInt) - - // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is - // lower due to transaction ordering - simulatedProfitMultiple := common.PercentOf(simulatedBundleProfit, algoConf.ProfitThresholdPercent) - actualProfitMultiple := new(big.Int).Mul(actualBundleProfit, common.Big100) - - if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { - log.Trace("Lower bundle profit found after inclusion", "bundle", bundle.OriginalBundle.Hash) - return &lowProfitError{ - ExpectedProfit: simulatedBundleProfit, - ActualProfit: actualBundleProfit, - } - } - } - - *envDiff = *tmpEnvDiff - return nil -} - 
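// Illustrative sketch (hypothetical, not taken from this patch): the per-bundle
// effective-gas-price and profit checks deleted above are consolidated into the
// exported ValidateGasPriceAndProfit helper added earlier in this file. Assuming the
// caller has already measured the bundle's actual effective gas price and coinbase
// profit after commitment (as the removed commitBundle did), the validation could be
// invoked roughly as follows; the 1% tolerance mirrors the old ">-1% divergence" rule,
// and validateBundleInclusionSketch is a made-up name used only for illustration.
func validateBundleInclusionSketch(algoConf algorithmConfig, bundle *types.SimulatedBundle, actualEffGasPrice, actualProfit *big.Int) error {
	// Allow the committed effective gas price to fall at most 1% below simulation and,
	// when EnforceProfit is set, the profit to fall at most ProfitThresholdPercent below.
	return ValidateGasPriceAndProfit(algoConf, actualEffGasPrice, bundle.MevGasPrice, 1, actualProfit, bundle.TotalEth)
}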
func estimatePayoutTxGas(env *environment, sender, receiver common.Address, prv *ecdsa.PrivateKey, chData chainData) (uint64, bool, error) { if codeHash := env.state.GetCodeHash(receiver); codeHash == (common.Hash{}) || codeHash == emptyCodeHash { return params.TxGas, true, nil @@ -418,6 +249,43 @@ func applyPayoutTx(envDiff *environmentDiff, sender, receiver common.Address, ga return rec, nil } +func commitPayoutTx(parameters PayoutTransactionParams) (*types.Receipt, error) { + if parameters.Gas < params.TxGas { + return nil, errors.New("not enough gas for intrinsic gas cost") + } + + requiredBalance := new(big.Int).Mul(parameters.BaseFee, new(big.Int).SetUint64(parameters.Gas)) + requiredBalance = requiredBalance.Add(requiredBalance, parameters.Amount) + if requiredBalance.Cmp(parameters.SenderBalance) > 0 { + return nil, errors.New("not enough balance") + } + + tx, err := types.SignNewTx(parameters.PrivateKey, parameters.Signer, &types.DynamicFeeTx{ + ChainID: parameters.ChainData.chainConfig.ChainID, + Nonce: parameters.SenderNonce, + GasTipCap: new(big.Int), + GasFeeCap: parameters.BaseFee, + Gas: parameters.Gas, + To: ¶meters.Receiver, + Value: parameters.Amount, + }) + if err != nil { + return nil, err + } + + txSender, err := types.Sender(parameters.Signer, tx) + if err != nil { + return nil, err + } + + if txSender != parameters.Sender { + return nil, errors.New("incorrect sender private key") + } + + receipt, _, err := parameters.CommitFn(tx, parameters.ChainData) + return receipt, err +} + func insertPayoutTx(env *environment, sender, receiver common.Address, gas uint64, isEOA bool, availableFunds *big.Int, prv *ecdsa.PrivateKey, chData chainData) (*types.Receipt, error) { if isEOA { diff := newEnvironmentDiff(env) @@ -461,216 +329,24 @@ func insertPayoutTx(env *environment, sender, receiver common.Address, gas uint6 return nil, err } -func (envDiff *environmentDiff) commitPayoutTx(amount *big.Int, sender, receiver common.Address, gas uint64, prv *ecdsa.PrivateKey, chData chainData) (*types.Receipt, error) { - senderBalance := envDiff.state.GetBalance(sender) - - if gas < params.TxGas { - return nil, errors.New("not enough gas for intrinsic gas cost") - } - - requiredBalance := new(big.Int).Mul(envDiff.header.BaseFee, new(big.Int).SetUint64(gas)) - requiredBalance = requiredBalance.Add(requiredBalance, amount) - if requiredBalance.Cmp(senderBalance) > 0 { - return nil, errors.New("not enough balance") - } - - signer := envDiff.baseEnvironment.signer - tx, err := types.SignNewTx(prv, signer, &types.DynamicFeeTx{ - ChainID: chData.chainConfig.ChainID, - Nonce: envDiff.state.GetNonce(sender), - GasTipCap: new(big.Int), - GasFeeCap: envDiff.header.BaseFee, - Gas: gas, - To: &receiver, - Value: amount, - }) - if err != nil { - return nil, err - } - - txSender, err := types.Sender(signer, tx) - if err != nil { - return nil, err - } - if txSender != sender { - return nil, errors.New("incorrect sender private key") - } - - receipt, _, err := envDiff.commitTx(tx, chData) - if err != nil { - return nil, err - } - - return receipt, nil -} - -func (envDiff *environmentDiff) commitSBundle(b *types.SimSBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { - if key == nil { - return errors.New("no private key provided") - } - - tmpEnvDiff := envDiff.copy() - - coinbaseBefore := tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) - gasBefore := tmpEnvDiff.gasPool.Gas() - - if err := tmpEnvDiff.commitSBundleInner(b.Bundle, chData, interrupt, 
key, algoConf); err != nil { - return err - } - - coinbaseAfter := tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) - gasAfter := tmpEnvDiff.gasPool.Gas() - - coinbaseDelta := new(big.Int).Sub(coinbaseAfter, coinbaseBefore) - gasDelta := new(big.Int).SetUint64(gasBefore - gasAfter) - - if coinbaseDelta.Cmp(common.Big0) < 0 { - return errors.New("coinbase balance decreased") - } - - gotEGP := new(big.Int).Div(coinbaseDelta, gasDelta) - simEGP := new(big.Int).Set(b.MevGasPrice) - - // allow > 1% difference - actualEGP := new(big.Int).Mul(gotEGP, big.NewInt(101)) - simulatedEGP := new(big.Int).Mul(simEGP, common.Big100) - - if simulatedEGP.Cmp(actualEGP) > 0 { - return &lowProfitError{ - ExpectedEffectiveGasPrice: simEGP, - ActualEffectiveGasPrice: gotEGP, - } - } - - if algoConf.EnforceProfit { - // if profit is enforced between simulation and actual commit, only allow ProfitThresholdPercent divergence - simulatedSbundleProfit := new(big.Int).Set(b.Profit) - actualSbundleProfit := new(big.Int).Set(coinbaseDelta) - - // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is - // lower due to transaction ordering - simulatedProfitMultiple := common.PercentOf(simulatedSbundleProfit, algoConf.ProfitThresholdPercent) - actualProfitMultiple := new(big.Int).Mul(actualSbundleProfit, common.Big100) - - if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { - log.Trace("Lower sbundle profit found after inclusion", "sbundle", b.Bundle.Hash()) - return &lowProfitError{ - ExpectedProfit: simulatedSbundleProfit, - ActualProfit: actualSbundleProfit, - } - } - } - - *envDiff = *tmpEnvDiff - return nil -} - -func (envDiff *environmentDiff) commitSBundleInner( - b *types.SBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey, algoConf algorithmConfig, -) error { - // check inclusion - minBlock := b.Inclusion.BlockNumber - maxBlock := b.Inclusion.MaxBlockNumber - if current := envDiff.header.Number.Uint64(); current < minBlock || current > maxBlock { - return fmt.Errorf("bundle inclusion block number out of range: %d <= %d <= %d", minBlock, current, maxBlock) - } - - // extract constraints into convenient format - refundIdx := make([]bool, len(b.Body)) - refundPercents := make([]int, len(b.Body)) - for _, el := range b.Validity.Refund { - refundIdx[el.BodyIdx] = true - refundPercents[el.BodyIdx] = el.Percent - } - - var ( - totalProfit *big.Int = new(big.Int) - refundableProfit *big.Int = new(big.Int) - - coinbaseDelta = new(big.Int) - coinbaseBefore *big.Int - ) - // insert body and check it - for i, el := range b.Body { - coinbaseDelta.Set(common.Big0) - coinbaseBefore = envDiff.state.GetBalance(envDiff.header.Coinbase) - - if el.Tx != nil { - receipt, _, err := envDiff.commitTx(el.Tx, chData) - - if err != nil { - // if drop enabled, and revertible tx has error on commit, - // we skip the transaction and continue with next one - if algoConf.DropRevertibleTxOnErr && el.CanRevert { - log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", - "tx", el.Tx.Hash(), "err", err) - continue - } - return err - } - if receipt.Status != types.ReceiptStatusSuccessful && !el.CanRevert { - return errors.New("tx failed") - } - } else if el.Bundle != nil { - err := envDiff.commitSBundleInner(el.Bundle, chData, interrupt, key, algoConf) - if err != nil { - return err - } - } else { - return errors.New("invalid body element") - } - - coinbaseDelta.Set(envDiff.state.GetBalance(envDiff.header.Coinbase)) - 
coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) - - totalProfit.Add(totalProfit, coinbaseDelta) - if !refundIdx[i] { - refundableProfit.Add(refundableProfit, coinbaseDelta) +// CheckRetryOrderAndReinsert checks if the order has been retried up to the retryLimit and if not, reinserts the order into the orders heap. +func CheckRetryOrderAndReinsert( + order *types.TxWithMinerFee, orders *types.TransactionsByPriceAndNonce, + retryMap map[*types.TxWithMinerFee]int, retryLimit int) bool { + var isRetryable bool = false + if retryCount, exists := retryMap[order]; exists { + if retryCount != retryLimit { + isRetryable = true + retryMap[order] = retryCount + 1 } + } else { + retryMap[order] = 0 + isRetryable = true } - // enforce constraints - coinbaseDelta.Set(common.Big0) - coinbaseBefore = envDiff.state.GetBalance(envDiff.header.Coinbase) - for i, el := range refundPercents { - if !refundIdx[i] { - continue - } - refundConfig, err := types.GetRefundConfig(&b.Body[i], envDiff.baseEnvironment.signer) - if err != nil { - return err - } - - maxPayoutCost := new(big.Int).Set(core.SbundlePayoutMaxCost) - maxPayoutCost.Mul(maxPayoutCost, big.NewInt(int64(len(refundConfig)))) - maxPayoutCost.Mul(maxPayoutCost, envDiff.header.BaseFee) - - allocatedValue := common.PercentOf(refundableProfit, el) - allocatedValue.Sub(allocatedValue, maxPayoutCost) - - if allocatedValue.Cmp(common.Big0) < 0 { - return fmt.Errorf("negative payout") - } - - for _, refund := range refundConfig { - refundValue := common.PercentOf(allocatedValue, refund.Percent) - refundReceiver := refund.Address - rec, err := envDiff.commitPayoutTx(refundValue, envDiff.header.Coinbase, refundReceiver, core.SbundlePayoutMaxCostInt, key, chData) - if err != nil { - return err - } - if rec.Status != types.ReceiptStatusSuccessful { - return fmt.Errorf("refund tx failed") - } - log.Trace("Committed kickback", "payout", ethIntToFloat(allocatedValue), "receiver", refundReceiver) - } + if isRetryable { + orders.Push(order) } - coinbaseDelta.Set(envDiff.state.GetBalance(envDiff.header.Coinbase)) - coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) - totalProfit.Add(totalProfit, coinbaseDelta) - if totalProfit.Cmp(common.Big0) < 0 { - return fmt.Errorf("negative profit") - } - return nil + return isRetryable } diff --git a/miner/algo_common_test.go b/miner/algo_common_test.go index c845b5e256..0ac114ebb2 100644 --- a/miner/algo_common_test.go +++ b/miner/algo_common_test.go @@ -41,7 +41,12 @@ type signerList struct { } func simulateBundle(env *environment, bundle types.MevBundle, chData chainData, interrupt *int32) (types.SimulatedBundle, error) { - stateDB := env.state.Copy() + // NOTE(wazzymandias): We are referencing the environment StateDB here - notice that it is not a copy. + // For test scenarios where bundles depend on previous bundle transactions to succeed, it is + // necessary to reference the same StateDB in order to avoid nonce too high errors. + // As a result, it is recommended that the caller make a copy before invoking this function, in order to + // ensure transaction serializability across bundles. 
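	// A minimal hypothetical usage sketch of that recommendation, using the copy
	// helper that already exists on environment, so simulation-side nonce and
	// balance changes never leak back into the caller's env:
	//
	//   envCopy := env.copy()
	//   simBundle, err := simulateBundle(envCopy, bundle, chData, nil)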
+ stateDB := env.state gasPool := new(core.GasPool).AddGas(env.header.GasLimit) var totalGasUsed uint64 @@ -184,21 +189,22 @@ func genGenesisAlloc(sign signerList, contractAddr []common.Address, contractCod return genesisAlloc } -func genTestSetup() (*state.StateDB, chainData, signerList) { +func genTestSetup(gasLimit uint64) (*state.StateDB, chainData, signerList) { config := params.AllEthashProtocolChanges signerList := genSignerList(10, params.AllEthashProtocolChanges) genesisAlloc := genGenesisAlloc(signerList, []common.Address{payProxyAddress, logContractAddress}, [][]byte{payProxyCode, logContractCode}) - stateDB, chainData := genTestSetupWithAlloc(config, genesisAlloc) + stateDB, chainData := genTestSetupWithAlloc(config, genesisAlloc, gasLimit) return stateDB, chainData, signerList } -func genTestSetupWithAlloc(config *params.ChainConfig, alloc core.GenesisAlloc) (*state.StateDB, chainData) { +func genTestSetupWithAlloc(config *params.ChainConfig, alloc core.GenesisAlloc, gasLimit uint64) (*state.StateDB, chainData) { db := rawdb.NewMemoryDatabase() gspec := &core.Genesis{ - Config: config, - Alloc: alloc, + Config: config, + Alloc: alloc, + GasLimit: gasLimit, } _ = gspec.MustCommit(db) @@ -234,7 +240,7 @@ func newEnvironment(data chainData, state *state.StateDB, coinbase common.Addres } func TestTxCommit(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -281,7 +287,7 @@ func TestTxCommit(t *testing.T) { func TestBundleCommit(t *testing.T) { algoConf := defaultAlgorithmConfig - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -316,7 +322,7 @@ func TestBundleCommit(t *testing.T) { } func TestErrorTxCommit(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -350,7 +356,7 @@ func TestErrorTxCommit(t *testing.T) { } func TestCommitTxOverGasLimit(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -378,7 +384,7 @@ func TestCommitTxOverGasLimit(t *testing.T) { } func TestErrorBundleCommit(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -440,7 +446,7 @@ func TestErrorBundleCommit(t *testing.T) { } func TestBlacklist(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -524,7 +530,7 @@ func TestGetSealingWorkAlgos(t *testing.T) { testConfig.AlgoType = ALGO_MEV_GETH }) - for _, algoType := range []AlgoType{ALGO_MEV_GETH, ALGO_GREEDY, ALGO_GREEDY_BUCKETS} { + for _, algoType := range []AlgoType{ALGO_MEV_GETH, ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP} { local := 
new(params.ChainConfig) *local = *ethashChainConfig local.TerminalTotalDifficulty = big.NewInt(0) @@ -539,12 +545,12 @@ func TestGetSealingWorkAlgosWithProfit(t *testing.T) { testConfig.BuilderTxSigningKey = nil }) - for _, algoType := range []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS} { + for _, algoType := range []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP} { var err error testConfig.BuilderTxSigningKey, err = crypto.GenerateKey() require.NoError(t, err) testConfig.AlgoType = algoType - t.Logf("running for %d", algoType) + t.Logf("running for %s", algoType.String()) testBundles(t) } } @@ -552,7 +558,7 @@ func TestGetSealingWorkAlgosWithProfit(t *testing.T) { func TestPayoutTxUtils(t *testing.T) { availableFunds := big.NewInt(50000000000000000) // 0.05 eth - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) diff --git a/miner/algo_greedy.go b/miner/algo_greedy.go index ae90d0a883..f40f5ff872 100644 --- a/miner/algo_greedy.go +++ b/miner/algo_greedy.go @@ -28,8 +28,9 @@ func newGreedyBuilder( blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, ) *greedyBuilder { if algoConf == nil { - algoConf = &defaultAlgorithmConfig + panic("algoConf cannot be nil") } + return &greedyBuilder{ inputEnvironment: env, chainData: chainData{chainConfig, chain, blacklist}, diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index c0c42a23a4..b3e410eb2a 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -32,11 +32,9 @@ func newGreedyBucketsBuilder( blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, ) *greedyBucketsBuilder { if algoConf == nil { - algoConf = &algorithmConfig{ - EnforceProfit: true, - ProfitThresholdPercent: defaultProfitThresholdPercent, - } + panic("algoConf cannot be nil") } + return &greedyBucketsBuilder{ inputEnvironment: env, chainData: chainData{chainConfig: chainConfig, chain: chain, blacklist: blacklist}, @@ -68,28 +66,6 @@ func (b *greedyBucketsBuilder) commit(envDiff *environmentDiff, usedBundles []types.SimulatedBundle usedSbundles []types.UsedSBundle - - CheckRetryOrderAndReinsert = func( - order *types.TxWithMinerFee, orders *types.TransactionsByPriceAndNonce, - retryMap map[*types.TxWithMinerFee]int, retryLimit int, - ) bool { - var isRetryable bool = false - if retryCount, exists := retryMap[order]; exists { - if retryCount != retryLimit { - isRetryable = true - retryMap[order] = retryCount + 1 - } - } else { - retryMap[order] = 0 - isRetryable = true - } - - if isRetryable { - orders.Push(order) - } - - return isRetryable - } ) for _, order := range transactions { @@ -139,7 +115,7 @@ func (b *greedyBucketsBuilder) commit(envDiff *environmentDiff, } log.Trace("Included bundle", "bundleEGP", bundle.MevGasPrice.String(), - "gasUsed", bundle.TotalGasUsed, "ethToCoinbase", ethIntToFloat(bundle.TotalEth)) + "gasUsed", bundle.TotalGasUsed, "ethToCoinbase", ethIntToFloat(bundle.EthSentToCoinbase)) usedBundles = append(usedBundles, *bundle) } else if sbundle := order.SBundle(); sbundle != nil { usedEntry := types.UsedSBundle{ diff --git a/miner/algo_greedy_buckets_multisnap.go b/miner/algo_greedy_buckets_multisnap.go new file mode 100644 index 0000000000..ac95c4c8d9 --- /dev/null +++ b/miner/algo_greedy_buckets_multisnap.go @@ -0,0 +1,241 @@ +package miner + +import 
( + "crypto/ecdsa" + "errors" + "math/big" + "sort" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +// / To use it: +// / 1. Copy relevant data from the worker +// / 2. Call buildBlock +// / 2. If new bundles, txs arrive, call buildBlock again +// / This struct lifecycle is tied to 1 block-building task +type greedyBucketsMultiSnapBuilder struct { + inputEnvironment *environment + chainData chainData + builderKey *ecdsa.PrivateKey + interrupt *int32 + gasUsedMap map[*types.TxWithMinerFee]uint64 + algoConf algorithmConfig +} + +func newGreedyBucketsMultiSnapBuilder( + chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, + blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, +) *greedyBucketsMultiSnapBuilder { + if algoConf == nil { + panic("algoConf cannot be nil") + } + + return &greedyBucketsMultiSnapBuilder{ + inputEnvironment: env, + chainData: chainData{chainConfig: chainConfig, chain: chain, blacklist: blacklist}, + builderKey: key, + interrupt: interrupt, + gasUsedMap: make(map[*types.TxWithMinerFee]uint64), + algoConf: *algoConf, + } +} + +func (b *greedyBucketsMultiSnapBuilder) commit(changes *envChanges, + transactions []*types.TxWithMinerFee, + orders *types.TransactionsByPriceAndNonce, + gasUsedMap map[*types.TxWithMinerFee]uint64, retryMap map[*types.TxWithMinerFee]int, retryLimit int, +) ([]types.SimulatedBundle, []types.UsedSBundle) { + var ( + algoConf = b.algoConf + + usedBundles []types.SimulatedBundle + usedSbundles []types.UsedSBundle + ) + + for _, order := range transactions { + if err := changes.env.state.NewMultiTxSnapshot(); err != nil { + log.Error("Failed to create new multi-tx snapshot", "err", err) + return usedBundles, usedSbundles + } + + orderFailed := false + + if tx := order.Tx(); tx != nil { + receipt, skip, err := changes.commitTx(tx, b.chainData) + orderFailed = err != nil + if err != nil { + log.Trace("could not apply tx", "hash", tx.Hash(), "err", err) + + // attempt to retry transaction commit up to retryLimit + // the gas used is set for the order to re-calculate profit of the transaction for subsequent retries + if receipt != nil { + // if the receipt is nil we don't attempt to retry the transaction - this is to mitigate abuse since + // without a receipt the default profit calculation for a transaction uses the gas limit which + // can cause the transaction to always be first in any profit-sorted transaction list + gasUsedMap[order] = receipt.GasUsed + CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit) + } + } else { + if skip == shiftTx { + orders.ShiftAndPushByAccountForTx(tx) + } + // we don't check for error here because if EGP returns error, it would have been caught and returned by commitTx + effGapPrice, _ := tx.EffectiveGasTip(changes.env.header.BaseFee) + log.Trace("Included tx", "EGP", effGapPrice.String(), "gasUsed", receipt.GasUsed) + } + } else if bundle := order.Bundle(); bundle != nil { + err := changes.commitBundle(bundle, b.chainData, algoConf) + orderFailed = err != nil + if err != nil { + log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) + + var e *lowProfitError + if errors.As(err, &e) { + if e.ActualEffectiveGasPrice != nil { + order.SetPrice(e.ActualEffectiveGasPrice) + } + + if e.ActualProfit != nil { + 
order.SetProfit(e.ActualProfit) + } + // if the bundle was not included due to low profit, we can retry the bundle + CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit) + } + } else { + log.Trace("Included bundle", "bundleEGP", bundle.MevGasPrice.String(), + "gasUsed", bundle.TotalGasUsed, "ethToCoinbase", ethIntToFloat(bundle.EthSentToCoinbase)) + usedBundles = append(usedBundles, *bundle) + } + } else if sbundle := order.SBundle(); sbundle != nil { + err := changes.CommitSBundle(sbundle, b.chainData, b.builderKey, algoConf) + orderFailed = err != nil + usedEntry := types.UsedSBundle{ + Bundle: sbundle.Bundle, + Success: err == nil, + } + + isValidOrNotRetried := true + if err != nil { + log.Trace("Could not apply sbundle", "bundle", sbundle.Bundle.Hash(), "err", err) + + var e *lowProfitError + if errors.As(err, &e) { + if e.ActualEffectiveGasPrice != nil { + order.SetPrice(e.ActualEffectiveGasPrice) + } + + if e.ActualProfit != nil { + order.SetProfit(e.ActualProfit) + } + + // if the sbundle was not included due to low profit, we can retry the bundle + if ok := CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit); ok { + isValidOrNotRetried = false + } + } + } else { + log.Trace("Included sbundle", "bundleEGP", sbundle.MevGasPrice.String(), "ethToCoinbase", ethIntToFloat(sbundle.Profit)) + } + + if isValidOrNotRetried { + usedSbundles = append(usedSbundles, usedEntry) + } + } else { + // note: this should never happen because we should not be inserting invalid transaction types into + // the orders heap + panic("unsupported order type found") + } + + if orderFailed { + if err := changes.env.state.MultiTxSnapshotRevert(); err != nil { + log.Error("Failed to revert snapshot", "err", err) + return usedBundles, usedSbundles + } + } else { + if err := changes.env.state.MultiTxSnapshotCommit(); err != nil { + log.Error("Failed to commit snapshot", "err", err) + return usedBundles, usedSbundles + } + } + } + return usedBundles, usedSbundles +} + +func (b *greedyBucketsMultiSnapBuilder) mergeOrdersAndApplyToEnv( + orders *types.TransactionsByPriceAndNonce) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + if orders.Peek() == nil { + return b.inputEnvironment, nil, nil + } + + changes, err := newEnvChanges(b.inputEnvironment) + if err != nil { + log.Error("Failed to create new environment changes", "err", err) + return b.inputEnvironment, nil, nil + } + + const retryLimit = 1 + + var ( + baseFee = changes.env.header.BaseFee + retryMap = make(map[*types.TxWithMinerFee]int) + usedBundles []types.SimulatedBundle + usedSbundles []types.UsedSBundle + transactions []*types.TxWithMinerFee + priceCutoffPercent = b.algoConf.PriceCutoffPercent + + SortInPlaceByProfit = func(baseFee *big.Int, transactions []*types.TxWithMinerFee, gasUsedMap map[*types.TxWithMinerFee]uint64) { + sort.SliceStable(transactions, func(i, j int) bool { + return transactions[i].Profit(baseFee, gasUsedMap[transactions[i]]).Cmp(transactions[j].Profit(baseFee, gasUsedMap[transactions[j]])) > 0 + }) + } + ) + + minPrice := CutoffPriceFromOrder(orders.Peek(), priceCutoffPercent) + for { + order := orders.Peek() + if order == nil { + if len(transactions) != 0 { + SortInPlaceByProfit(baseFee, transactions, b.gasUsedMap) + bundles, sbundles := b.commit(changes, transactions, orders, b.gasUsedMap, retryMap, retryLimit) + usedBundles = append(usedBundles, bundles...) + usedSbundles = append(usedSbundles, sbundles...) 
+ transactions = nil + // re-run since committing transactions may have pushed higher nonce transactions, or previously + // failed transactions back into orders heap + continue + } + break + } + + if ok := IsOrderInPriceRange(order, minPrice); ok { + orders.Pop() + transactions = append(transactions, order) + } else { + if len(transactions) != 0 { + SortInPlaceByProfit(baseFee, transactions, b.gasUsedMap) + bundles, sbundles := b.commit(changes, transactions, orders, b.gasUsedMap, retryMap, retryLimit) + usedBundles = append(usedBundles, bundles...) + usedSbundles = append(usedSbundles, sbundles...) + transactions = nil + } + minPrice = CutoffPriceFromOrder(order, priceCutoffPercent) + } + } + + if err := changes.apply(); err != nil { + log.Error("Failed to apply changes", "err", err) + return b.inputEnvironment, nil, nil + } + + return changes.env, usedBundles, usedSbundles +} + +func (b *greedyBucketsMultiSnapBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) + return b.mergeOrdersAndApplyToEnv(orders) +} diff --git a/miner/algo_greedy_multisnap.go b/miner/algo_greedy_multisnap.go new file mode 100644 index 0000000000..ca3ee3d3ed --- /dev/null +++ b/miner/algo_greedy_multisnap.go @@ -0,0 +1,134 @@ +package miner + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +// / To use it: +// / 1. Copy relevant data from the worker +// / 2. Call buildBlock +// / 2. 
If new bundles, txs arrive, call buildBlock again +// / This struct lifecycle is tied to 1 block-building task +type greedyMultiSnapBuilder struct { + inputEnvironment *environment + chainData chainData + builderKey *ecdsa.PrivateKey + interrupt *int32 + algoConf algorithmConfig +} + +func newGreedyMultiSnapBuilder( + chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, + blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, +) *greedyMultiSnapBuilder { + if algoConf == nil { + algoConf = &defaultAlgorithmConfig + } + return &greedyMultiSnapBuilder{ + inputEnvironment: env, + chainData: chainData{chainConfig, chain, blacklist}, + builderKey: key, + interrupt: interrupt, + algoConf: *algoConf, + } +} + +func (b *greedyMultiSnapBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) + + var ( + usedBundles []types.SimulatedBundle + usedSbundles []types.UsedSBundle + ) + + changes, err := newEnvChanges(b.inputEnvironment) + if err != nil { + log.Error("Failed to create new environment changes", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + + for { + order := orders.Peek() + if order == nil { + break + } + + orderFailed := false + if err := changes.env.state.NewMultiTxSnapshot(); err != nil { + log.Error("Failed to create snapshot", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + + if tx := order.Tx(); tx != nil { + receipt, skip, err := changes.commitTx(tx, b.chainData) + switch skip { + case shiftTx: + orders.Shift() + case popTx: + orders.Pop() + } + orderFailed = err != nil + + if err != nil { + log.Trace("could not apply tx", "hash", tx.Hash(), "err", err) + } else { + // we don't check for error here because if EGP returns error, it would have been caught and returned by commitTx + effGapPrice, _ := tx.EffectiveGasTip(changes.env.header.BaseFee) + log.Trace("Included tx", "EGP", effGapPrice.String(), "gasUsed", receipt.GasUsed) + } + } else if bundle := order.Bundle(); bundle != nil { + err := changes.commitBundle(bundle, b.chainData, b.algoConf) + orders.Pop() + orderFailed = err != nil + + if err != nil { + log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) + } else { + log.Trace("Included bundle", "bundleEGP", bundle.MevGasPrice.String(), + "gasUsed", bundle.TotalGasUsed, "ethToCoinbase", ethIntToFloat(bundle.EthSentToCoinbase)) + usedBundles = append(usedBundles, *bundle) + } + } else if sbundle := order.SBundle(); sbundle != nil { + err := changes.CommitSBundle(sbundle, b.chainData, b.builderKey, b.algoConf) + orders.Pop() + orderFailed = err != nil + usedEntry := types.UsedSBundle{ + Bundle: sbundle.Bundle, + Success: err == nil, + } + + if err != nil { + log.Trace("Could not apply sbundle", "bundle", sbundle.Bundle.Hash(), "err", err) + } else { + log.Trace("Included sbundle", "bundleEGP", sbundle.MevGasPrice.String(), "ethToCoinbase", ethIntToFloat(sbundle.Profit)) + } + + usedSbundles = append(usedSbundles, usedEntry) + } + + if orderFailed { + if err := changes.env.state.MultiTxSnapshotRevert(); err != nil { + log.Error("Failed to revert snapshot", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + } else 
{ + if err := changes.env.state.MultiTxSnapshotCommit(); err != nil { + log.Error("Failed to commit snapshot", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + } + } + + if err := changes.apply(); err != nil { + log.Error("Failed to apply changes", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + + return changes.env, usedBundles, usedSbundles +} diff --git a/miner/algo_greedy_test.go b/miner/algo_greedy_test.go index 404816bbc9..ba680ec059 100644 --- a/miner/algo_greedy_test.go +++ b/miner/algo_greedy_test.go @@ -11,9 +11,9 @@ import ( ) func TestBuildBlockGasLimit(t *testing.T) { - algos := []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS} + algos := []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP} for _, algo := range algos { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) txs := make(map[common.Address]types.Transactions) @@ -29,11 +29,17 @@ func TestBuildBlockGasLimit(t *testing.T) { var result *environment switch algo { + case ALGO_GREEDY: + builder := newGreedyBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) + case ALGO_GREEDY_MULTISNAP: + builder := newGreedyMultiSnapBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) case ALGO_GREEDY_BUCKETS: - builder := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, nil, nil, env, nil, nil) + builder := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) - case ALGO_GREEDY: - builder := newGreedyBuilder(chData.chain, chData.chainConfig, nil, nil, env, nil, nil) + case ALGO_GREEDY_BUCKETS_MULTISNAP: + builder := newGreedyBucketsMultiSnapBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) } @@ -45,7 +51,7 @@ func TestBuildBlockGasLimit(t *testing.T) { } func TestTxWithMinerFeeHeap(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) diff --git a/miner/algo_state_test.go b/miner/algo_state_test.go new file mode 100644 index 0000000000..3f51430d71 --- /dev/null +++ b/miner/algo_state_test.go @@ -0,0 +1,1056 @@ +package miner + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "fmt" + "math/big" + mathrand "math/rand" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/require" +) + +// NOTE(wazzymandias): Below is a FuzzTest contract written in Solidity and shown here as reference code +// for the generated abi and bytecode used for testing. 
+// The generated abi can be found in the `testdata` directory in `state_fuzz_test.abi`. +// The abi, bytecode, and Go bindings were generated using the following commands: +// - docker run -v ${STATE_FUZZ_TEST_CONTRACT_DIRECTORY}:/sources +// ethereum/solc:0.8.19 -o /sources/output --abi --bin /sources/StateFuzzTest.sol +// - go run ./cmd/abigen/ --bin ${TARGET_STATE_FUZZ_TEST_BIN_PATH} --abi ${TARGET_STATE_FUZZ_TEST_ABI_PATH} +// --pkg statefuzztest --out=state_fuzz_test_abigen_bindings.go +const StateFuzzTestSolidity = ` +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +contract StateFuzzTest { + mapping(address => uint256) public balances; + mapping(bytes32 => bytes) public storageData; + mapping(address => bool) public isSelfDestructed; + mapping(address => uint256) private refunds; + + function addThenWithdrawRefund(uint256 amount) external payable { + refunds[msg.sender] += amount; + payable(msg.sender).transfer(amount); + } + + function createObject(bytes32 key, bytes memory value) public { + storageData[key] = value; + } + + function resetObject(bytes32 key) public { + delete storageData[key]; + } + + function selfDestruct() public { + isSelfDestructed[msg.sender] = true; + selfdestruct(payable(msg.sender)); + } + + function changeBalance(address account, uint256 newBalance) public { + balances[account] = newBalance; + } + + function changeStorage(bytes32 key, bytes memory newValue) public { + storageData[key] = newValue; + } + + function touchContract(address contractAddress) public view returns (bytes32 codeHash) { + assembly { + codeHash := extcodehash(contractAddress) + } + return codeHash; + } +} +` + +func changeBalanceFuzzTestContract(nonce uint64, to, address common.Address, newBalance *big.Int) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("changeBalance", address, newBalance) + if err != nil { + return nil, err + } + + return &types.LegacyTx{ + Nonce: nonce, + GasPrice: big.NewInt(1), + Gas: 10_000_000, + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func changeStorageFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address, key [32]byte, value []byte) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("changeStorage", key, value) + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 100_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func createObjectFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address, key [32]byte, value []byte) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("createObject", key, value) + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 100_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func resetObjectFuzzTestContract(nonce uint64, address common.Address, key [32]byte) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("resetObject", key) + if err != nil { + return nil, err + } + + return &types.LegacyTx{ + Nonce: nonce, + GasPrice: big.NewInt(1), + Gas: 10_000_000, + To: 
(*common.Address)(address[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func selfDestructFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("selfDestruct") + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 500_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func touchAccountFuzzTestContract(chainID *big.Int, nonce uint64, address common.Address) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("touchContract", address) + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 100_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(address[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func addThenWithdrawRefundFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address, value *big.Int) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("addThenWithdrawRefund", value) + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 400_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: value, + Data: data, + }, nil +} + +const ( + Baseline = 0 + SingleSnapshot = 1 + MultiSnapshot = 2 +) + +type stateComparisonTestContext struct { + Name string + + statedb *state.StateDB + chainData chainData + signers signerList + + env *environment + + envDiff *environmentDiff + changes *envChanges + + transactions []*types.Transaction + + rootHash common.Hash +} + +type stateComparisonTestContexts []stateComparisonTestContext + +func (sc stateComparisonTestContexts) Init(t *testing.T, gasLimit uint64) stateComparisonTestContexts { + for i := range sc { + tc := stateComparisonTestContext{} + tc.statedb, tc.chainData, tc.signers = genTestSetup(gasLimit) + tc.env = newEnvironment(tc.chainData, tc.statedb, tc.signers.addresses[0], gasLimit, big.NewInt(1)) + var err error + switch i { + case Baseline: + tc.Name = "baseline" + tc.envDiff = newEnvironmentDiff(tc.env) + case SingleSnapshot: + tc.Name = "single-snapshot" + tc.changes, err = newEnvChanges(tc.env) + _ = tc.changes.env.state.MultiTxSnapshotCommit() + case MultiSnapshot: + tc.Name = "multi-snapshot" + tc.changes, err = newEnvChanges(tc.env) + _ = tc.changes.env.state.MultiTxSnapshotCommit() + } + + require.NoError(t, err, "failed to initialize test contexts: %v", err) + sc[i] = tc + } + return sc +} + +func (sc stateComparisonTestContexts) ApplyChanges(t *testing.T) { + for _, tc := range sc { + if tc.envDiff != nil { + tc.envDiff.applyToBaseEnv() + } + if tc.changes != nil { + require.NoError(t, tc.changes.apply()) + } + } +} + +func (sc stateComparisonTestContexts) SimulateBundle(testCtxIdx int, b types.MevBundle) (types.SimulatedBundle, error) { + tc := sc[testCtxIdx] + var env *environment + switch testCtxIdx { + case Baseline: + env = tc.envDiff.baseEnvironment + case SingleSnapshot, MultiSnapshot: + env = tc.changes.env + } + + return simulateBundle(env.copy(), b, tc.chainData, nil) +} + +func (sc stateComparisonTestContexts) ValidateRootHashes(t *testing.T, expected common.Hash) { + for _, tc := range sc { + require.Equal(t, 
expected.Bytes(), tc.rootHash.Bytes(), + "root hash mismatch for test context %s [expected: %s] [found: %s]", + tc.Name, expected.TerminalString(), tc.rootHash.TerminalString()) + } +} + +func (sc stateComparisonTestContexts) GenerateTransactions(t *testing.T, txCount int, failEveryN int) { + for tcIndex, tc := range sc { + signers := tc.signers + tc.transactions = sc.generateTransactions(txCount, failEveryN, signers) + tc.signers = signers + require.Len(t, tc.transactions, txCount) + + sc[tcIndex] = tc + } +} + +func (sc stateComparisonTestContexts) generateTransactions(txCount int, failEveryN int, signers signerList) []*types.Transaction { + transactions := make([]*types.Transaction, 0, txCount) + for i := 0; i < txCount; i++ { + var data []byte + if failEveryN != 0 && i%failEveryN == 0 { + data = []byte{0x01} + } else { + data = []byte{} + } + + from := i % len(signers.addresses) + tx := signers.signTx(from, params.TxGas, big.NewInt(0), big.NewInt(1), + signers.addresses[(i+1)%len(signers.addresses)], big.NewInt(0), data) + transactions = append(transactions, tx) + } + + return transactions +} + +func (sc stateComparisonTestContexts) UpdateRootHashes(t *testing.T) { + for tcIndex, tc := range sc { + if tc.envDiff != nil { + tc.rootHash = tc.envDiff.baseEnvironment.state.IntermediateRoot(true) + } else { + tc.rootHash = tc.env.state.IntermediateRoot(true) + } + sc[tcIndex] = tc + + require.NotEmpty(t, tc.rootHash.Bytes(), "root hash is empty for test context %s", tc.Name) + } +} + +func (sc stateComparisonTestContexts) ValidateTestCases(t *testing.T, reference int) { + expected := sc[reference] + var ( + expectedGasPool *core.GasPool = expected.envDiff.baseEnvironment.gasPool + expectedHeader *types.Header = expected.envDiff.baseEnvironment.header + expectedProfit *big.Int = expected.envDiff.baseEnvironment.profit + expectedTxCount int = expected.envDiff.baseEnvironment.tcount + expectedTransactions []*types.Transaction = expected.envDiff.baseEnvironment.txs + expectedReceipts types.Receipts = expected.envDiff.baseEnvironment.receipts + ) + for tcIndex, tc := range sc { + if tcIndex == reference { + continue + } + + var ( + actualGasPool *core.GasPool = tc.env.gasPool + actualHeader *types.Header = tc.env.header + actualProfit *big.Int = tc.env.profit + actualTxCount int = tc.env.tcount + actualTransactions []*types.Transaction = tc.env.txs + actualReceipts types.Receipts = tc.env.receipts + ) + if actualGasPool.Gas() != expectedGasPool.Gas() { + t.Errorf("gas pool mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedGasPool.Gas(), actualGasPool.Gas()) + } + + if actualHeader.Hash() != expectedHeader.Hash() { + t.Errorf("header hash mismatch for test context %s [expected: %s] [found: %s]", + tc.Name, expectedHeader.Hash().TerminalString(), actualHeader.Hash().TerminalString()) + } + + if actualProfit.Cmp(expectedProfit) != 0 { + t.Errorf("profit mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedProfit, actualProfit) + } + + if actualTxCount != expectedTxCount { + t.Errorf("transaction count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedTxCount, actualTxCount) + break + } + + if len(actualTransactions) != len(expectedTransactions) { + t.Errorf("transaction count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, len(expectedTransactions), len(actualTransactions)) + } + + for txIdx := 0; txIdx < len(actualTransactions); txIdx++ { + expectedTx := expectedTransactions[txIdx] + actualTx := 
actualTransactions[txIdx] + + expectedBytes, err := rlp.EncodeToBytes(expectedTx) + if err != nil { + t.Fatalf("failed to encode expected transaction #%d: %v", txIdx, err) + } + + actualBytes, err := rlp.EncodeToBytes(actualTx) + if err != nil { + t.Fatalf("failed to encode actual transaction #%d: %v", txIdx, err) + } + + if !bytes.Equal(expectedBytes, actualBytes) { + t.Errorf("transaction #%d mismatch for test context %s [expected: %v] [found: %v]", + txIdx, tc.Name, expectedTx, actualTx) + } + } + + if len(actualReceipts) != len(expectedReceipts) { + t.Errorf("receipt count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, len(expectedReceipts), len(actualReceipts)) + } + } +} + +func TestStateComparisons(t *testing.T) { + var testContexts = make(stateComparisonTestContexts, 3) + + // test commit tx + t.Run("state-compare-commit-tx", func(t *testing.T) { + testContexts = testContexts.Init(t, GasLimit) + for i := 0; i < 3; i++ { + tx1 := testContexts[i].signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), + testContexts[i].signers.addresses[2], big.NewInt(0), []byte{}) + var ( + receipt *types.Receipt + status int + err error + ) + switch i { + case Baseline: + receipt, status, err = testContexts[i].envDiff.commitTx(tx1, testContexts[i].chainData) + testContexts[i].envDiff.applyToBaseEnv() + + case SingleSnapshot: + require.NoError(t, testContexts[i].changes.env.state.NewMultiTxSnapshot(), "can't create multi tx snapshot: %v", err) + receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) + require.NoError(t, err, "can't commit single snapshot tx") + + err = testContexts[i].changes.apply() + case MultiSnapshot: + require.NoError(t, testContexts[i].changes.env.state.NewMultiTxSnapshot(), "can't create multi tx snapshot: %v", err) + receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) + require.NoError(t, err, "can't commit multi snapshot tx") + + err = testContexts[i].changes.apply() + } + require.NoError(t, err, "can't commit tx") + require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) + require.Equal(t, 21000, int(receipt.GasUsed)) + require.Equal(t, shiftTx, status) + } + + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) + + // test bundle + t.Run("state-compare-bundle", func(t *testing.T) { + testContexts = testContexts.Init(t, GasLimit) + for i, tc := range testContexts { + var ( + signers = tc.signers + header = tc.env.header + env = tc.env + chData = tc.chainData + ) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + mevBundle := types.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + BlockNumber: header.Number, + } + + envCopy := env.copy() + simBundle, err := simulateBundle(envCopy, mevBundle, chData, nil) + require.NoError(t, err, "can't simulate bundle: %v", err) + + switch i { + case Baseline: + err = tc.envDiff.commitBundle(&simBundle, chData, nil, defaultAlgorithmConfig) + if err != nil { + break + } + tc.envDiff.applyToBaseEnv() + + case SingleSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot: %v", err) + + err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) + if err != nil { + break + } + + err = 
tc.changes.apply() + + case MultiSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot: %v", err) + + err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) + if err != nil { + break + } + + err = tc.changes.apply() + } + + require.NoError(t, err, "can't commit bundle: %v", err) + } + + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, 0) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) + + // test failed transactions + t.Run("state-compare-failed-txs", func(t *testing.T) { + // generate 100 transactions, with 50% of them failing + var ( + txCount = 100 + failEveryN = 2 + ) + testContexts = testContexts.Init(t, GasLimit) + testContexts.GenerateTransactions(t, txCount, failEveryN) + require.Len(t, testContexts[Baseline].transactions, txCount) + + for txIdx := 0; txIdx < txCount; txIdx++ { + for ctxIdx, tc := range testContexts { + tx := tc.transactions[txIdx] + + var commitErr error + switch ctxIdx { + case Baseline: + _, _, commitErr = tc.envDiff.commitTx(tx, tc.chainData) + tc.envDiff.applyToBaseEnv() + + case SingleSnapshot: + err := tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot for tx %d: %v", txIdx, err) + + _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) + require.NoError(t, tc.changes.apply()) + case MultiSnapshot: + err := tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, + "can't create multi tx snapshot: %v", err) + + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, + "can't create multi tx snapshot: %v", err) + + _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) + require.NoError(t, tc.changes.apply()) + + // NOTE(wazzymandias): At the time of writing this, the changes struct does not reset after performing + // an apply - because the intended use of the changes struct is to create it and discard it + // after every commit->(discard||apply) loop. + // So for now to test multiple snapshots we apply the changes for the top of the stack and + // then pop the underlying state snapshot from the base of the stack. + // Otherwise, if changes are applied twice, then there can be double counting of transactions. 
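+                                       // A minimal sketch of that flow (not additional test logic; it simply restates
+                                       // the pattern used in this MultiSnapshot case, assuming the snapshot-stack API
+                                       // introduced in this change):
+                                       //
+                                       //   _ = tc.changes.env.state.NewMultiTxSnapshot()    // base snapshot
+                                       //   _ = tc.changes.env.state.NewMultiTxSnapshot()    // top snapshot
+                                       //   _, _, _ = tc.changes.commitTx(tx, tc.chainData)
+                                       //   _ = tc.changes.apply()                           // commits the top snapshot
+                                       //   _ = tc.changes.env.state.MultiTxSnapshotCommit() // pops/commits the base snapshot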
+ require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) + } + + if txIdx%failEveryN == 0 { + require.Errorf(t, commitErr, "tx %d should fail", txIdx) + } else { + require.NoError(t, commitErr, "tx %d should succeed, found: %v", txIdx, commitErr) + } + } + } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, 0) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) +} + +func TestBundles(t *testing.T) { + const maxGasLimit = 1_000_000_000_000 + + var testContexts = make(stateComparisonTestContexts, 3) + testContexts.Init(t, maxGasLimit) + + // Set up FuzzTest ABI and bytecode + abi, err := StatefuzztestMetaData.GetAbi() + require.NoError(t, err) + + fuzzTestSolBytecode := StatefuzztestMetaData.Bin + bytecodeBytes, err := hex.DecodeString(fuzzTestSolBytecode[2:]) + require.NoError(t, err) + + // FuzzTest constructor + deployData, err := abi.Pack("") + require.NoError(t, err) + + simulations := make([]*backends.SimulatedBackend, 3) + controlFuzzTestContracts := make(map[int][]*Statefuzztest, 3) + variantFuzzTestAddresses := make(map[int][]common.Address, 3) + + for tcIdx, tc := range testContexts { + disk := tc.env.state.Copy().Database().DiskDB() + db := rawdb.NewDatabase(disk) + + backend := backends.NewSimulatedBackendChain(db, tc.chainData.chain) + simulations[tcIdx] = backend + + s := tc.signers + controlFuzzTestContracts[tcIdx] = make([]*Statefuzztest, len(s.signers)) + variantFuzzTestAddresses[tcIdx] = make([]common.Address, len(s.signers)) + // commit transaction for deploying Fuzz Test contract + for i, pk := range s.signers { + deployTx := &types.LegacyTx{ + Nonce: s.nonces[i], + GasPrice: big.NewInt(1), + Gas: 10_000_000, + Value: big.NewInt(0), + To: nil, + Data: append(bytecodeBytes, deployData...), + } + + signTx := types.MustSignNewTx(pk, types.LatestSigner(s.config), deployTx) + + auth, err := bind.NewKeyedTransactorWithChainID(pk, tc.chainData.chainConfig.ChainID) + require.NoError(t, err) + + // deploy Fuzz Test contract to control chain (i.e, the chain we compare the test contexts against) + _, _, fuzz, err := DeployStatefuzztest(auth, backend) + require.NoError(t, err) + backend.Commit() + + controlFuzzTestContracts[tcIdx][i] = fuzz + + var receipt *types.Receipt + switch tcIdx { + case Baseline: + receipt, _, err = tc.envDiff.commitTx(signTx, tc.chainData) + require.NoError(t, err) + tc.envDiff.applyToBaseEnv() + + _, err = tc.envDiff.baseEnvironment.state.Commit(true) + case SingleSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + receipt, _, err = tc.changes.commitTx(signTx, tc.chainData) + require.NoError(t, err) + + err = tc.changes.apply() + require.NoError(t, err) + + _, err = tc.changes.env.state.Commit(true) + + case MultiSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + receipt, _, err = tc.changes.commitTx(signTx, tc.chainData) + require.NoError(t, err) + + err = tc.changes.apply() + require.NoError(t, err) + + _, err = tc.changes.env.state.Commit(true) + } + + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) + variantFuzzTestAddresses[tcIdx][i] = receipt.ContractAddress + + s.nonces[i]++ + } + } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + + // initialize fuzz test contract for each account with random objects through createObject function + const createObjectCount = 100 + var 
randCreateObjectKeys = [createObjectCount][32]byte{} + var randCreateObjectValues = [createObjectCount][32]byte{} + for i := 0; i < createObjectCount; i++ { + _, err := rand.Read(randCreateObjectKeys[i][:]) + require.NoError(t, err) + + _, err = rand.Read(randCreateObjectValues[i][:]) + require.NoError(t, err) + } + + for tcIdx, tc := range testContexts { + backend := simulations[tcIdx] + + // deploy fuzz test smart contract across all the account addresses we wish to test + t.Run(fmt.Sprintf("%s-create-object", tc.Name), func(t *testing.T) { + signers := tc.signers + for signerIdx, pk := range signers.signers { + var ( + actualTransactions = [createObjectCount]*types.Transaction{} + expectedTransactions = [createObjectCount]*types.Transaction{} + expectedReceipts = [createObjectCount]*types.Receipt{} + to = variantFuzzTestAddresses[tcIdx][signerIdx] + ) + auth, err := bind.NewKeyedTransactorWithChainID(pk, tc.chainData.chainConfig.ChainID) + require.NoError(t, err) + + for txIdx := 0; txIdx < createObjectCount; txIdx++ { + var ( + createObjKey = randCreateObjectKeys[txIdx] + createObjValue = randCreateObjectValues[txIdx] + ) + tx, err := createObjectFuzzTestContract( + tc.chainData.chainConfig.ChainID, signers.nonces[signerIdx], to, createObjKey, createObjValue[:]) + require.NoError(t, err) + + actualTx := types.MustSignNewTx(pk, types.LatestSigner(signers.config), tx) + actualTransactions[txIdx] = actualTx + + expectedTx, err := + controlFuzzTestContracts[tcIdx][signerIdx].CreateObject(auth, createObjKey, createObjValue[:]) + require.NoError(t, err) + + expectedTransactions[txIdx] = expectedTx + + require.Equal(t, expectedTx.Data(), actualTx.Data()) + require.Equal(t, expectedTx.Nonce(), actualTx.Nonce()) + require.Equal(t, expectedTx.To().String(), actualTx.To().String()) + + // commit transaction for control chain (i.e, what we compare the test contexts against) + backend.Commit() + expectedReceipt, err := backend.TransactionReceipt(context.Background(), expectedTransactions[txIdx].Hash()) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, expectedReceipt.Status) + + expectedReceipts[txIdx] = expectedReceipt + + // update nonce + signers.nonces[signerIdx]++ + } + + for txIdx := 0; txIdx < createObjectCount; txIdx++ { + actualTx := actualTransactions[txIdx] + var actualReceipt *types.Receipt + switch tcIdx { + case Baseline: + actualReceipt, _, err = tc.envDiff.commitTx(actualTx, tc.chainData) + tc.envDiff.applyToBaseEnv() + signer := tc.envDiff.baseEnvironment.signer + from, senderErr := types.Sender(signer, actualTx) + require.NoError(t, senderErr) + + if err == nil { + expectedNonce := actualTx.Nonce() + 1 + actualNonce := tc.envDiff.baseEnvironment.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } else { + expectedNonce := actualTx.Nonce() - 1 + actualNonce := tc.envDiff.baseEnvironment.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } + case SingleSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + var commitErr error + actualReceipt, _, commitErr = tc.changes.commitTx(actualTx, tc.chainData) + require.NoError(t, err) + + err = tc.changes.apply() + + signer := tc.changes.env.signer + from, senderErr := types.Sender(signer, actualTx) + require.NoError(t, senderErr) + if commitErr == nil { + expectedNonce := actualTx.Nonce() + 1 + actualNonce := tc.changes.env.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } else { + expectedNonce := actualTx.Nonce() - 1 + 
actualNonce := tc.changes.env.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } + case MultiSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + var commitErr error + actualReceipt, _, commitErr = tc.changes.commitTx(actualTx, tc.chainData) + require.NoError(t, commitErr) + + err = tc.changes.apply() + require.NoError(t, err) + + err = tc.changes.env.state.MultiTxSnapshotCommit() + + signer := tc.changes.env.signer + from, senderErr := types.Sender(signer, actualTx) + require.NoError(t, senderErr) + if commitErr == nil { + expectedNonce := actualTx.Nonce() + 1 + actualNonce := tc.changes.env.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } else { + expectedNonce := actualTx.Nonce() - 1 + actualNonce := tc.changes.env.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } + } + + require.NoError(t, err) + + expectedReceipt := expectedReceipts[txIdx] + require.Equal(t, expectedReceipt.PostState, actualReceipt.PostState) + require.Equal(t, expectedReceipt.ContractAddress.String(), actualReceipt.ContractAddress.String()) + require.Equal(t, types.ReceiptStatusSuccessful, actualReceipt.Status, "test %s, signer %d", tc.Name, signerIdx) + } + } + }) + } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + + // generate bundles of transactions, where each transaction will either: + // - change balance + // - create object + // - self-destruct + // - reset object + // - change storage + // - change transient storage + // - touch account + type TransactionOperation int + const ( + ChangeBalance TransactionOperation = iota + CreateObject + SelfDestruct + ResetObject + ChangeStorage + ChangeTransientStorage + TouchAccount + ) + operations := []TransactionOperation{ + ChangeBalance, + CreateObject, + SelfDestruct, + ResetObject, + ChangeStorage, + ChangeTransientStorage, + TouchAccount, + } + const ( + bundleCount = 10 + bundleSize = 100 + ) + + // NOTE(wazzymandias): We make a copy of the signer list before we craft the bundles of transactions. + // The reason is that the pre-bundle signer list will be used to simulate the bundles. + // Using the actual signer list will cause nonce mismatch errors, since we increment nonce + // as we craft the bundles of transactions. 
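+       // (Sketch of the intent behind the copy below, stated as an assumption about how
+       // genGenesisAlloc consumes the signer list: preBundleSigners gets its own backing
+       // slices so that the nonce increments performed while crafting the bundles do not
+       // leak into the signer snapshot later passed to genGenesisAlloc when simulating
+       // those same bundles.)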
+ var preBundleSigners = signerList{ + config: testContexts[0].signers.config, + addresses: make([]common.Address, len(testContexts[0].signers.addresses)), + signers: make([]*ecdsa.PrivateKey, len(testContexts[0].signers.signers)), + nonces: make([]uint64, len(testContexts[0].signers.nonces)), + } + copy(preBundleSigners.addresses, testContexts[0].signers.addresses) + copy(preBundleSigners.signers, testContexts[0].signers.signers) + copy(preBundleSigners.nonces, testContexts[0].signers.nonces) + + bundles := [bundleCount]types.MevBundle{} + for bundleIdx := 0; bundleIdx < bundleCount; bundleIdx++ { + transactions := [bundleSize]*types.Transaction{} + for txIdx := 0; txIdx < bundleSize; txIdx++ { + var ( + // pick a random operation that represents one of the transactions we will create + randomOperation = operations[mathrand.Intn(len(operations))] + s = testContexts[0].signers + chainID = s.config.ChainID + // choose a random To Address index + toAddressRandomIdx = mathrand.Intn(len(s.signers)) + // reference the correct nonce for the associated To Address + nonce = s.nonces[toAddressRandomIdx] + toAddress = s.addresses[toAddressRandomIdx] + + txData types.TxData + err error + ) + switch randomOperation { + case ChangeBalance: // change balance + balanceAddressRandomIdx := mathrand.Intn(len(s.signers)) + balanceAddress := s.addresses[balanceAddressRandomIdx] + + randomBalance := new(big.Int).SetUint64(mathrand.Uint64()) + + txData, err = changeBalanceFuzzTestContract(nonce, toAddress, balanceAddress, randomBalance) + + case CreateObject: // create object + var ( + key [32]byte + value [32]byte + ) + _, err = rand.Read(key[:]) + require.NoError(t, err) + + _, err = rand.Read(value[:]) + require.NoError(t, err) + + txData, err = createObjectFuzzTestContract(chainID, nonce, toAddress, key, value[:]) + + case SelfDestruct: // self-destruct + txData, err = selfDestructFuzzTestContract(chainID, nonce, toAddress) + + case ResetObject: // reset object + var ( + resetObjectRandomIdx = mathrand.Intn(createObjectCount) + resetObjectKey = randCreateObjectKeys[resetObjectRandomIdx] + fuzzContractAddress = variantFuzzTestAddresses[0][toAddressRandomIdx] + ) + txData, err = resetObjectFuzzTestContract(nonce, fuzzContractAddress, resetObjectKey) + + case ChangeStorage: // change storage + var ( + changeStorageRandomIdx = mathrand.Intn(createObjectCount) + changeStorageObjectKey = randCreateObjectKeys[changeStorageRandomIdx] + fuzzContractAddress = variantFuzzTestAddresses[0][toAddressRandomIdx] + value [32]byte + ) + _, err = rand.Read(value[:]) + require.NoError(t, err) + + txData, err = changeStorageFuzzTestContract(chainID, nonce, fuzzContractAddress, changeStorageObjectKey, value[:]) + + case ChangeTransientStorage: // change transient storage + value := new(big.Int).Rand( + mathrand.New(mathrand.NewSource(time.Now().UnixNano())), big.NewInt(1000000), + ) + require.NoError(t, err) + + txData, err = addThenWithdrawRefundFuzzTestContract(chainID, nonce, toAddress, value) + case TouchAccount: // touch random account + fuzzContractAddress := variantFuzzTestAddresses[0][toAddressRandomIdx] + + txData, err = touchAccountFuzzTestContract(chainID, nonce, fuzzContractAddress) + } + require.NotNilf(t, txData, "txData is nil for bundle %d, tx %d", bundleIdx, txIdx) + require.NoError(t, err) + + tx := types.MustSignNewTx(s.signers[toAddressRandomIdx], types.LatestSigner(s.config), txData) + transactions[txIdx] = tx + + // update nonce for all test contexts + base := testContexts[Baseline] + single := 
testContexts[SingleSnapshot] + multi := testContexts[MultiSnapshot] + + base.signers.nonces[toAddressRandomIdx]++ + testContexts[Baseline].signers = base.signers + + single.signers.nonces[toAddressRandomIdx]++ + testContexts[SingleSnapshot].signers = single.signers + + multi.signers.nonces[toAddressRandomIdx]++ + testContexts[MultiSnapshot].signers = multi.signers + } + + bundles[bundleIdx] = types.MevBundle{ + Txs: transactions[:], + } + } + + // prepare for bundle application + + // initialize new snapshot(s) + for tcIdx, tc := range testContexts { + switch tcIdx { + case SingleSnapshot, MultiSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + } + } + + // commit bundles to each test context, with intermittent bundle failures + const bundleFailEveryN = 2 + var ( + base = testContexts[0] + commitErrMap = map[int]error{ + Baseline: nil, + SingleSnapshot: nil, + MultiSnapshot: nil, + } + genesisAlloc = genGenesisAlloc(preBundleSigners, + []common.Address{payProxyAddress, logContractAddress}, [][]byte{payProxyCode, logContractCode}) + ) + simulatedBundleList, err := simulateBundles(base.chainData.chainConfig, + types.CopyHeader(base.env.header), genesisAlloc, bundles[:]) + require.NoError(t, err) + require.Len(t, simulatedBundleList, len(bundles)) + + // commit bundles one by one to each test context to make sure each bundle result is deterministic + // apply all to the underlying environment at the end + for bundleIdx, b := range simulatedBundleList { + algoConf := defaultAlgorithmConfig + algoConf.EnforceProfit = true + shouldRevert := bundleFailEveryN != 0 && bundleIdx%bundleFailEveryN == 0 + for tcIdx, tc := range testContexts { + var commitErr error + + switch tcIdx { + case Baseline: + // We don't commit bundle to Baseline if it's meant to fail, in order to ensure that the state + // for SingleSnapshot and MultiSnapshot matches on revert to the baseline state + if shouldRevert { + break + } + commitErr = tc.envDiff.commitBundle(&b, tc.chainData, nil, algoConf) + case SingleSnapshot, MultiSnapshot: + commitErr = tc.changes.commitBundle(&b, tc.chainData, algoConf) + + if commitErrMap[Baseline] != nil || shouldRevert { + require.NoError(t, tc.changes.env.state.MultiTxSnapshotRevert()) + } else { + require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) + } + require.NoError(t, tc.changes.env.state.NewMultiTxSnapshot()) + } + commitErrMap[tcIdx] = commitErr + } + } + testContexts.ApplyChanges(t) + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) +} diff --git a/miner/algo_test.go b/miner/algo_test.go index c67ad89edd..ab63031e48 100644 --- a/miner/algo_test.go +++ b/miner/algo_test.go @@ -38,7 +38,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(2 * 21_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, { @@ -65,7 +65,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(4 * 21_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, { @@ -84,7 +84,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(0), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, 
ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, { @@ -106,7 +106,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(50_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, { @@ -128,7 +128,7 @@ var algoTests = []*algoTest{ } }, WantProfit: common.Big0, - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: algorithmConfig{ DropRevertibleTxOnErr: true, EnforceProfit: defaultAlgorithmConfig.EnforceProfit, @@ -160,7 +160,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(21_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: algorithmConfig{ DropRevertibleTxOnErr: true, EnforceProfit: defaultAlgorithmConfig.EnforceProfit, @@ -191,7 +191,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(50_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, } @@ -205,6 +205,7 @@ func TestAlgo(t *testing.T) { for _, test := range algoTests { for _, algo := range test.SupportedAlgorithms { testName := fmt.Sprintf("%s-%s", test.Name, algo.String()) + t.Run(testName, func(t *testing.T) { alloc, txPool, bundles, err := test.build(signer, 1) if err != nil { @@ -214,7 +215,6 @@ func TestAlgo(t *testing.T) { if err != nil { t.Fatalf("Simulate Bundles: %v", err) } - gotProfit, err := runAlgoTest(algo, test.AlgorithmConfig, config, alloc, txPool, simBundles, test.Header, 1) if err != nil { t.Fatal(err) @@ -287,18 +287,24 @@ func runAlgoTest( txPool map[common.Address]types.Transactions, bundles []types.SimulatedBundle, header *types.Header, scale int, ) (gotProfit *big.Int, err error) { var ( - statedb, chData = genTestSetupWithAlloc(config, alloc) + statedb, chData = genTestSetupWithAlloc(config, alloc, GasLimit) env = newEnvironment(chData, statedb, header.Coinbase, header.GasLimit*uint64(scale), header.BaseFee) resultEnv *environment ) // build block switch algo { + case ALGO_GREEDY: + builder := newGreedyBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) + resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) + case ALGO_GREEDY_MULTISNAP: + builder := newGreedyMultiSnapBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) + resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) case ALGO_GREEDY_BUCKETS: builder := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) - case ALGO_GREEDY: - builder := newGreedyBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) + case ALGO_GREEDY_BUCKETS_MULTISNAP: + builder := newGreedyBucketsMultiSnapBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) } return resultEnv.profit, nil @@ 
-307,7 +313,7 @@ func runAlgoTest( // simulateBundles simulates bundles and returns the simulated bundles. func simulateBundles(config *params.ChainConfig, header *types.Header, alloc core.GenesisAlloc, bundles []types.MevBundle) ([]types.SimulatedBundle, error) { var ( - statedb, chData = genTestSetupWithAlloc(config, alloc) + statedb, chData = genTestSetupWithAlloc(config, alloc, GasLimit) env = newEnvironment(chData, statedb, header.Coinbase, header.GasLimit, header.BaseFee) simBundles = make([]types.SimulatedBundle, 0) diff --git a/miner/contract_simulator_test.go b/miner/contract_simulator_test.go index 973053fb83..c14c83983b 100644 --- a/miner/contract_simulator_test.go +++ b/miner/contract_simulator_test.go @@ -110,153 +110,167 @@ func parseAbi(t *testing.T, filename string) *abi.ABI { func TestSimulatorState(t *testing.T) { // enableLogging() - t.Cleanup(func() { - testConfig.AlgoType = ALGO_MEV_GETH - testConfig.BuilderTxSigningKey = nil - testConfig.Etherbase = common.Address{} - }) - - testConfig.AlgoType = ALGO_GREEDY - var err error - testConfig.BuilderTxSigningKey, err = crypto.GenerateKey() - require.NoError(t, err) - testConfig.Etherbase = crypto.PubkeyToAddress(testConfig.BuilderTxSigningKey.PublicKey) - - db := rawdb.NewMemoryDatabase() - chainConfig := *params.AllEthashProtocolChanges - chainConfig.ChainID = big.NewInt(31337) - engine := ethash.NewFaker() - - // (not needed I think) chainConfig.LondonBlock = big.NewInt(0) - deployerKey, err := crypto.ToECDSA(hexutil.MustDecode("0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")) - deployerAddress := crypto.PubkeyToAddress(deployerKey.PublicKey) - deployerTestAddress := common.HexToAddress("0x70997970C51812dc3A010C7d01b50e0d17dc79C8") - alloc := core.GenesisAlloc{deployerAddress: {Balance: new(big.Int).Mul(big.NewInt(10000), bigEther)}, deployerTestAddress: {Balance: new(big.Int).Mul(big.NewInt(10000), bigEther)}} - - testParticipants := NewTestParticipants(5, 5) - alloc = testParticipants.AppendToGenesisAlloc(alloc) - - var genesis = core.Genesis{ - Config: &chainConfig, - Alloc: alloc, - GasLimit: 30000000, - } + algorithmTable := []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP} + for _, algo := range algorithmTable { + t.Run(algo.String(), func(t *testing.T) { + t.Cleanup(func() { + testConfig.AlgoType = ALGO_MEV_GETH + testConfig.BuilderTxSigningKey = nil + testConfig.Etherbase = common.Address{} + }) + + testConfig.AlgoType = algo + var err error + testConfig.BuilderTxSigningKey, err = crypto.GenerateKey() + require.NoError(t, err) + testConfig.Etherbase = crypto.PubkeyToAddress(testConfig.BuilderTxSigningKey.PublicKey) + + db := rawdb.NewMemoryDatabase() + defer func() { + require.NoError(t, db.Close()) + }() + + chainConfig := *params.AllEthashProtocolChanges + chainConfig.ChainID = big.NewInt(31337) + engine := ethash.NewFaker() + defer func() { + require.NoError(t, engine.Close()) + }() + + // (not needed I think) chainConfig.LondonBlock = big.NewInt(0) + deployerKey, err := crypto.ToECDSA(hexutil.MustDecode("0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")) + require.NoError(t, err) - w, b := newTestWorkerGenesis(t, &chainConfig, engine, db, genesis, 0) - w.setEtherbase(crypto.PubkeyToAddress(testConfig.BuilderTxSigningKey.PublicKey)) + deployerAddress := crypto.PubkeyToAddress(deployerKey.PublicKey) + deployerTestAddress := common.HexToAddress("0x70997970C51812dc3A010C7d01b50e0d17dc79C8") + alloc := 
core.GenesisAlloc{deployerAddress: {Balance: new(big.Int).Mul(big.NewInt(10000), bigEther)}, deployerTestAddress: {Balance: new(big.Int).Mul(big.NewInt(10000), bigEther)}} - simBackend := backends.NewSimulatedBackendChain(db, b.chain) + testParticipants := NewTestParticipants(5, 5) + alloc = testParticipants.AppendToGenesisAlloc(alloc) - univ2FactoryA := NewTContract(t, simBackend, "testdata/univ2factory.abi", univ2FactoryA_Address) - univ2FactoryB := NewTContract(t, simBackend, "testdata/univ2factory.abi", univ2FactoryB_Address) + var genesis = core.Genesis{ + Config: &chainConfig, + Alloc: alloc, + GasLimit: 30000000, + } - wethContract := NewTContract(t, simBackend, "testdata/weth.abi", wethAddress) - daiContract := NewTContract(t, simBackend, "testdata/dai.abi", daiAddress) - atomicSwapContract := NewTContract(t, simBackend, "testdata/swap.abi", atomicSwapAddress) + w, b := newTestWorkerGenesis(t, &chainConfig, engine, db, genesis, 0) + w.setEtherbase(crypto.PubkeyToAddress(testConfig.BuilderTxSigningKey.PublicKey)) - testAddress1Key, _ := crypto.GenerateKey() - testAddress1 := crypto.PubkeyToAddress(testAddress1Key.PublicKey) + simBackend := backends.NewSimulatedBackendChain(db, b.chain) - rand.New(rand.NewSource(10)) + univ2FactoryA := NewTContract(t, simBackend, "testdata/univ2factory.abi", univ2FactoryA_Address) + univ2FactoryB := NewTContract(t, simBackend, "testdata/univ2factory.abi", univ2FactoryB_Address) - deploymentTxs := deployAllContracts(t, deployerKey, b.chain.CurrentHeader().BaseFee) + wethContract := NewTContract(t, simBackend, "testdata/weth.abi", wethAddress) + daiContract := NewTContract(t, simBackend, "testdata/dai.abi", daiAddress) + atomicSwapContract := NewTContract(t, simBackend, "testdata/swap.abi", atomicSwapAddress) - getBaseFee := func() *big.Int { - return new(big.Int).Mul(big.NewInt(2), b.chain.CurrentHeader().BaseFee) - } + testAddress1Key, _ := crypto.GenerateKey() + testAddress1 := crypto.PubkeyToAddress(testAddress1Key.PublicKey) - nonceModFor := big.NewInt(0) - nonceMod := make(map[common.Address]uint64) - getNonce := func(addr common.Address) uint64 { - if nonceModFor.Cmp(b.chain.CurrentHeader().Number) != 0 { - nonceMod = make(map[common.Address]uint64) - nonceModFor.Set(b.chain.CurrentHeader().Number) - } - - cm := nonceMod[addr] - nonceMod[addr] = cm + 1 - return b.txPool.Nonce(addr) + cm - } + rand.New(rand.NewSource(10)) - prepareContractCallTx := func(contract tConctract, signerKey *ecdsa.PrivateKey, method string, args ...interface{}) *types.Transaction { - callData, err := contract.abi.Pack(method, args...) - require.NoError(t, err) + deploymentTxs := deployAllContracts(t, deployerKey, b.chain.CurrentHeader().BaseFee) - fromAddress := crypto.PubkeyToAddress(signerKey.PublicKey) + getBaseFee := func() *big.Int { + return new(big.Int).Mul(big.NewInt(2), b.chain.CurrentHeader().BaseFee) + } - callRes, err := contract.doCall(fromAddress, method, args...) 
- if err != nil { - t.Errorf("Prepared smart contract call error %s with result %s", err.Error(), string(callRes)) - } + nonceModFor := big.NewInt(0) + nonceMod := make(map[common.Address]uint64) + getNonce := func(addr common.Address) uint64 { + if nonceModFor.Cmp(b.chain.CurrentHeader().Number) != 0 { + nonceMod = make(map[common.Address]uint64) + nonceModFor.Set(b.chain.CurrentHeader().Number) + } - tx, err := types.SignTx(types.NewTransaction(getNonce(fromAddress), contract.address, new(big.Int), 9000000, getBaseFee(), callData), types.HomesteadSigner{}, signerKey) - require.NoError(t, err) + cm := nonceMod[addr] + nonceMod[addr] = cm + 1 + return b.txPool.Nonce(addr) + cm + } - return tx - } + prepareContractCallTx := func(contract tConctract, signerKey *ecdsa.PrivateKey, method string, args ...interface{}) *types.Transaction { + callData, err := contract.abi.Pack(method, args...) + require.NoError(t, err) - buildBlock := func(txs []*types.Transaction, requireTx int) *types.Block { - errs := b.txPool.AddLocals(txs) - for _, err := range errs { - require.NoError(t, err) - } + fromAddress := crypto.PubkeyToAddress(signerKey.PublicKey) - block, _, err := w.getSealingBlock(b.chain.CurrentBlock().Hash(), b.chain.CurrentHeader().Time+12, testAddress1, 0, common.Hash{}, nil, false, nil) - require.NoError(t, err) - require.NotNil(t, block) - if requireTx != -1 { - require.Equal(t, requireTx, len(block.Transactions())) - } - _, err = b.chain.InsertChain([]*types.Block{block}) - require.NoError(t, err) - return block - } + callRes, err := contract.doCall(fromAddress, method, args...) + if err != nil { + t.Errorf("Prepared smart contract call error %s with result %s", err.Error(), string(callRes)) + } - buildBlock(deploymentTxs, len(deploymentTxs)+1) - require.Equal(t, uint64(18), b.txPool.Nonce(deployerAddress)) - require.Equal(t, uint64(3), b.txPool.Nonce(deployerTestAddress)) + tx, err := types.SignTx(types.NewTransaction(getNonce(fromAddress), contract.address, new(big.Int), 9000000, getBaseFee(), callData), types.HomesteadSigner{}, signerKey) + require.NoError(t, err) - // Mint tokens - require.NoError(t, err) + return tx + } - approveTxs := []*types.Transaction{} + buildBlock := func(txs []*types.Transaction, requireTx int) *types.Block { + errs := b.txPool.AddLocals(txs) + for _, err := range errs { + require.NoError(t, err) + } - adminApproveTxWeth := prepareContractCallTx(wethContract, deployerKey, "approve", atomicSwapContract.address, ethmath.MaxBig256) - approveTxs = append(approveTxs, adminApproveTxWeth) - adminApproveTxDai := prepareContractCallTx(daiContract, deployerKey, "approve", atomicSwapContract.address, ethmath.MaxBig256) - approveTxs = append(approveTxs, adminApproveTxDai) + block, _, err := w.getSealingBlock(b.chain.CurrentBlock().Hash(), b.chain.CurrentHeader().Time+12, testAddress1, 0, common.Hash{}, nil, false, nil) + require.NoError(t, err) + require.NotNil(t, block) + if requireTx != -1 { + require.Equal(t, requireTx, len(block.Transactions())) + } + _, err = b.chain.InsertChain([]*types.Block{block}) + require.NoError(t, err) + return block + } - for _, spender := range []TestParticipant{testParticipants.users[0], testParticipants.searchers[0]} { - mintTx := prepareContractCallTx(daiContract, deployerKey, "mint", spender.address, new(big.Int).Mul(bigEther, big.NewInt(50000))) - approveTxs = append(approveTxs, mintTx) + buildBlock(deploymentTxs, len(deploymentTxs)+1) + require.Equal(t, uint64(18), b.txPool.Nonce(deployerAddress)) + require.Equal(t, uint64(3), 
b.txPool.Nonce(deployerTestAddress)) - depositTx, err := types.SignTx(types.NewTransaction(getNonce(spender.address), wethContract.address, new(big.Int).Mul(bigEther, big.NewInt(1000)), 9000000, getBaseFee(), hexutil.MustDecode("0xd0e30db0")), types.HomesteadSigner{}, spender.key) - require.NoError(t, err) - approveTxs = append(approveTxs, depositTx) + // Mint tokens + require.NoError(t, err) - spenderApproveTxWeth := prepareContractCallTx(wethContract, spender.key, "approve", atomicSwapContract.address, ethmath.MaxBig256) - approveTxs = append(approveTxs, spenderApproveTxWeth) + approveTxs := []*types.Transaction{} - spenderApproveTxDai := prepareContractCallTx(daiContract, spender.key, "approve", atomicSwapContract.address, ethmath.MaxBig256) - approveTxs = append(approveTxs, spenderApproveTxDai) - } + adminApproveTxWeth := prepareContractCallTx(wethContract, deployerKey, "approve", atomicSwapContract.address, ethmath.MaxBig256) + approveTxs = append(approveTxs, adminApproveTxWeth) + adminApproveTxDai := prepareContractCallTx(daiContract, deployerKey, "approve", atomicSwapContract.address, ethmath.MaxBig256) + approveTxs = append(approveTxs, adminApproveTxDai) - buildBlock(approveTxs, len(approveTxs)+1) + for _, spender := range []TestParticipant{testParticipants.users[0], testParticipants.searchers[0]} { + mintTx := prepareContractCallTx(daiContract, deployerKey, "mint", spender.address, new(big.Int).Mul(bigEther, big.NewInt(50000))) + approveTxs = append(approveTxs, mintTx) - amtIn := new(big.Int).Mul(bigEther, big.NewInt(50)) + depositTx, err := types.SignTx(types.NewTransaction(getNonce(spender.address), wethContract.address, new(big.Int).Mul(bigEther, big.NewInt(1000)), 9000000, getBaseFee(), hexutil.MustDecode("0xd0e30db0")), types.HomesteadSigner{}, spender.key) + require.NoError(t, err) + approveTxs = append(approveTxs, depositTx) - userSwapTx := prepareContractCallTx(atomicSwapContract, testParticipants.users[0].key, "swap", []common.Address{wethContract.address, daiContract.address}, amtIn, univ2FactoryA.address, testParticipants.users[0].address, false) + spenderApproveTxWeth := prepareContractCallTx(wethContract, spender.key, "approve", atomicSwapContract.address, ethmath.MaxBig256) + approveTxs = append(approveTxs, spenderApproveTxWeth) - backrunTxData, err := atomicSwapContract.abi.Pack("backrun", daiContract.address, univ2FactoryB.address, univ2FactoryA.address, new(big.Int).Div(amtIn, big.NewInt(2))) - require.NoError(t, err) + spenderApproveTxDai := prepareContractCallTx(daiContract, spender.key, "approve", atomicSwapContract.address, ethmath.MaxBig256) + approveTxs = append(approveTxs, spenderApproveTxDai) + } - backrunTx, err := types.SignTx(types.NewTransaction(getNonce(testParticipants.searchers[0].address), atomicSwapContract.address, new(big.Int), 9000000, getBaseFee(), backrunTxData), types.HomesteadSigner{}, testParticipants.searchers[0].key) - require.NoError(t, err) + buildBlock(approveTxs, len(approveTxs)+1) + + amtIn := new(big.Int).Mul(bigEther, big.NewInt(50)) - targetBlockNumber := new(big.Int).Set(b.chain.CurrentHeader().Number) - targetBlockNumber.Add(targetBlockNumber, big.NewInt(1)) - b.txPool.AddMevBundle(types.Transactions{userSwapTx, backrunTx}, targetBlockNumber, uuid.UUID{}, common.Address{}, 0, 0, nil) - buildBlock([]*types.Transaction{}, 3) + userSwapTx := prepareContractCallTx(atomicSwapContract, testParticipants.users[0].key, "swap", []common.Address{wethContract.address, daiContract.address}, amtIn, univ2FactoryA.address, 
testParticipants.users[0].address, false) + + backrunTxData, err := atomicSwapContract.abi.Pack("backrun", daiContract.address, univ2FactoryB.address, univ2FactoryA.address, new(big.Int).Div(amtIn, big.NewInt(2))) + require.NoError(t, err) + + backrunTx, err := types.SignTx(types.NewTransaction(getNonce(testParticipants.searchers[0].address), atomicSwapContract.address, new(big.Int), 9000000, getBaseFee(), backrunTxData), types.HomesteadSigner{}, testParticipants.searchers[0].key) + require.NoError(t, err) + + targetBlockNumber := new(big.Int).Set(b.chain.CurrentHeader().Number) + targetBlockNumber.Add(targetBlockNumber, big.NewInt(1)) + b.txPool.AddMevBundle(types.Transactions{userSwapTx, backrunTx}, targetBlockNumber, uuid.UUID{}, common.Address{}, 0, 0, nil) + buildBlock([]*types.Transaction{}, 3) + }) + } } type tConctract struct { diff --git a/miner/env_changes.go b/miner/env_changes.go new file mode 100644 index 0000000000..3af63f0399 --- /dev/null +++ b/miner/env_changes.go @@ -0,0 +1,427 @@ +package miner + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// envChanges is a helper struct to apply and discard changes to the environment +type envChanges struct { + env *environment + gasPool *core.GasPool + usedGas uint64 + profit *big.Int + txs []*types.Transaction + receipts []*types.Receipt +} + +func newEnvChanges(env *environment) (*envChanges, error) { + if err := env.state.NewMultiTxSnapshot(); err != nil { + return nil, err + } + + return &envChanges{ + env: env, + gasPool: new(core.GasPool).AddGas(env.gasPool.Gas()), + usedGas: env.header.GasUsed, + profit: new(big.Int).Set(env.profit), + txs: make([]*types.Transaction, 0), + receipts: make([]*types.Receipt, 0), + }, nil +} + +func (c *envChanges) commitPayoutTx( + amount *big.Int, sender, receiver common.Address, + gas uint64, prv *ecdsa.PrivateKey, chData chainData) (*types.Receipt, error) { + return commitPayoutTx(PayoutTransactionParams{ + Amount: amount, + BaseFee: c.env.header.BaseFee, + ChainData: chData, + Gas: gas, + CommitFn: c.commitTx, + Receiver: receiver, + Sender: sender, + SenderBalance: c.env.state.GetBalance(sender), + SenderNonce: c.env.state.GetNonce(sender), + Signer: c.env.signer, + PrivateKey: prv, + }) +} + +func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) { + signer := c.env.signer + from, err := types.Sender(signer, tx) + if err != nil { + return nil, popTx, err + } + + gasPrice, err := tx.EffectiveGasTip(c.env.header.BaseFee) + if err != nil { + return nil, shiftTx, err + } + + c.env.state.SetTxContext(tx.Hash(), c.env.tcount+len(c.txs)) + receipt, _, err := applyTransactionWithBlacklist(signer, chData.chainConfig, chData.chain, &c.env.coinbase, c.gasPool, c.env.state, c.env.header, tx, &c.usedGas, *chData.chain.GetVMConfig(), chData.blacklist) + if err != nil { + switch { + case errors.Is(err, core.ErrGasLimitReached): + // Pop the current out-of-gas transaction without shifting in the next from the account + log.Trace("Gas limit exceeded for current block", "sender", from) + return receipt, popTx, err + + case errors.Is(err, core.ErrNonceTooLow): + // New head notification data race between the transaction pool and miner, shift + log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) + return receipt, shiftTx, err + + case 
errors.Is(err, core.ErrNonceTooHigh): + // Reorg notification data race between the transaction pool and miner, skip account = + log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) + return receipt, popTx, err + + case errors.Is(err, core.ErrTxTypeNotSupported): + // Pop the unsupported transaction without shifting in the next from the account + log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) + return receipt, popTx, err + + default: + // Strange error, discard the transaction and get the next in line (note, the + // nonce-too-high clause will prevent us from executing in vain). + log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + return receipt, shiftTx, err + } + } + + c.profit = c.profit.Add(c.profit, new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), gasPrice)) + c.txs = append(c.txs, tx) + c.receipts = append(c.receipts, receipt) + + return receipt, shiftTx, nil +} + +func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainData, algoConf algorithmConfig) error { + var ( + profitBefore = new(big.Int).Set(c.profit) + coinbaseBefore = new(big.Int).Set(c.env.state.GetBalance(c.env.coinbase)) + gasUsedBefore = c.usedGas + gasPoolBefore = new(core.GasPool).AddGas(c.gasPool.Gas()) + txsBefore = c.txs[:] + receiptsBefore = c.receipts[:] + hasBaseFee = c.env.header.BaseFee != nil + + bundleErr error + ) + + for _, tx := range bundle.OriginalBundle.Txs { + txHash := tx.Hash() + // TODO: Checks for base fee and dynamic fee txs should be moved to the transaction pool, + // similar to mev-share bundles. See SBundlesPool.validateTx() for reference. + if hasBaseFee && tx.Type() == types.DynamicFeeTxType { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + bundleErr = core.ErrFeeCapVeryHigh + break + } + if tx.GasTipCap().BitLen() > 256 { + bundleErr = core.ErrTipVeryHigh + break + } + + // Ensure gasFeeCap is greater than or equal to gasTipCap. + if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + bundleErr = core.ErrTipAboveFeeCap + break + } + } + receipt, _, err := c.commitTx(tx, chData) + + switch { + case err != nil: + isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", txHash, "err", err) + } else { + bundleErr = err + } + case receipt != nil: + if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { + // if transaction reverted and isn't specified as reverting hash, return error + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + bundleErr = errors.New("bundle tx revert") + } + case receipt == nil && err == nil: + // NOTE: The expectation is that a receipt is only nil if an error occurred. + // If there is no error but receipt is nil, there is likely a programming error. 
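+                       // Treating it as a bundle error rather than panicking keeps the control flow
+                       // uniform: the bundle is rejected through the same rollback/return path below
+                       // as any other failed commit.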
+ bundleErr = errors.New("invalid receipt when no error occurred") + } + + if bundleErr != nil { + break + } + } + + if bundleErr != nil { + c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return bundleErr + } + + if bundle.MevGasPrice == nil { + c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return ErrMevGasPriceNotSet + } + + var ( + bundleProfit = new(big.Int).Sub(c.env.state.GetBalance(c.env.coinbase), coinbaseBefore) + gasUsed = c.usedGas - gasUsedBefore + + // EGP = Effective Gas Price (Profit / GasUsed) + simulatedEGP = new(big.Int).Set(bundle.MevGasPrice) + actualEGP *big.Int + tolerablePriceDifferencePercent = 1 + + simulatedBundleProfit = new(big.Int).Set(bundle.TotalEth) + actualBundleProfit = new(big.Int).Set(bundleProfit) + ) + + if gasUsed == 0 { + c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return errors.New("bundle gas used is 0") + } else { + actualEGP = new(big.Int).Div(bundleProfit, big.NewInt(int64(gasUsed))) + } + + err := ValidateGasPriceAndProfit(algoConf, + actualEGP, simulatedEGP, tolerablePriceDifferencePercent, + actualBundleProfit, simulatedBundleProfit, + ) + if err != nil { + c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return err + } + + c.profit.Add(profitBefore, bundleProfit) + return nil +} + +func (c *envChanges) CommitSBundle(sbundle *types.SimSBundle, chData chainData, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + // TODO: Suggestion for future improvement: instead of checking if key is nil, panic. + // Discussed with @Ruteri, see PR#90 for details: https://github.com/flashbots/builder/pull/90#discussion_r1285567550 + if key == nil { + return errNoPrivateKey + } + + var ( + coinbaseBefore = new(big.Int).Set(c.env.state.GetBalance(c.env.coinbase)) + gasPoolBefore = new(core.GasPool).AddGas(c.gasPool.Gas()) + gasBefore = c.usedGas + txsBefore = c.txs[:] + receiptsBefore = c.receipts[:] + profitBefore = new(big.Int).Set(c.profit) + ) + + if err := c.commitSBundle(sbundle.Bundle, chData, key, algoConf); err != nil { + c.rollback(gasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return err + } + + var ( + coinbaseAfter = c.env.state.GetBalance(c.env.header.Coinbase) + gasAfter = c.usedGas + + coinbaseDelta = new(big.Int).Sub(coinbaseAfter, coinbaseBefore) + gasDelta = new(big.Int).SetUint64(gasAfter - gasBefore) + ) + if coinbaseDelta.Cmp(common.Big0) < 0 { + c.rollback(gasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return errors.New("coinbase balance decreased") + } + + gotEGP := new(big.Int).Div(coinbaseDelta, gasDelta) + simEGP := new(big.Int).Set(sbundle.MevGasPrice) + + // allow > 1% difference + actualEGP := new(big.Int).Mul(gotEGP, common.Big100) + simulatedEGP := new(big.Int).Mul(simEGP, big.NewInt(99)) + + if simulatedEGP.Cmp(actualEGP) > 0 { + c.rollback(gasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return &lowProfitError{ + ExpectedEffectiveGasPrice: simEGP, + ActualEffectiveGasPrice: gotEGP, + } + } + + if algoConf.EnforceProfit { + // if profit is enforced between simulation and actual commit, only allow >-1% divergence + simulatedProfit := new(big.Int).Set(sbundle.Profit) + actualProfit := new(big.Int).Set(coinbaseDelta) + + // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is + // lower due to transaction ordering + simulatedProfitMultiple := 
common.PercentOf(simulatedProfit, algoConf.ProfitThresholdPercent) + actualProfitMultiple := new(big.Int).Mul(actualProfit, common.Big100) + + if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { + log.Trace("Lower sbundle profit found after inclusion", "sbundle", sbundle.Bundle.Hash()) + c.rollback(gasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return &lowProfitError{ + ExpectedProfit: simulatedProfit, + ActualProfit: actualProfit, + } + } + } + + return nil +} + +func (c *envChanges) commitSBundle(sbundle *types.SBundle, chData chainData, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + var ( + // check inclusion + minBlock = sbundle.Inclusion.BlockNumber + maxBlock = sbundle.Inclusion.MaxBlockNumber + ) + if current := c.env.header.Number.Uint64(); current < minBlock || current > maxBlock { + return fmt.Errorf("bundle inclusion block number out of range: %d <= %d <= %d", minBlock, current, maxBlock) + } + + var ( + // extract constraints into convenient format + refundIdx = make([]bool, len(sbundle.Body)) + refundPercents = make([]int, len(sbundle.Body)) + ) + for _, el := range sbundle.Validity.Refund { + refundIdx[el.BodyIdx] = true + refundPercents[el.BodyIdx] = el.Percent + } + + var ( + totalProfit *big.Int = new(big.Int) + refundableProfit *big.Int = new(big.Int) + + coinbaseDelta = new(big.Int) + coinbaseBefore *big.Int + ) + + // insert body and check it + for i, el := range sbundle.Body { + coinbaseDelta.Set(common.Big0) + coinbaseBefore = c.env.state.GetBalance(c.env.coinbase) + + if el.Tx != nil { + receipt, _, err := c.commitTx(el.Tx, chData) + if err != nil { + // if drop enabled, and revertible tx has error on commit, + // we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && el.CanRevert { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", el.Tx.Hash(), "err", err) + continue + } + return err + } + if receipt.Status != types.ReceiptStatusSuccessful && !el.CanRevert { + return errors.New("tx failed") + } + } else if el.Bundle != nil { + err := c.commitSBundle(el.Bundle, chData, key, algoConf) + if err != nil { + return err + } + } else { + return errors.New("invalid body element") + } + + coinbaseDelta.Set(c.env.state.GetBalance(c.env.coinbase)) + coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) + + totalProfit.Add(totalProfit, coinbaseDelta) + if !refundIdx[i] { + refundableProfit.Add(refundableProfit, coinbaseDelta) + } + } + + // enforce constraints + coinbaseDelta.Set(common.Big0) + coinbaseBefore = c.env.state.GetBalance(c.env.header.Coinbase) + for i, el := range refundPercents { + if !refundIdx[i] { + continue + } + refundConfig, err := types.GetRefundConfig(&sbundle.Body[i], c.env.signer) + if err != nil { + return err + } + + maxPayoutCost := new(big.Int).Set(core.SbundlePayoutMaxCost) + maxPayoutCost.Mul(maxPayoutCost, big.NewInt(int64(len(refundConfig)))) + maxPayoutCost.Mul(maxPayoutCost, c.env.header.BaseFee) + + allocatedValue := common.PercentOf(refundableProfit, el) + allocatedValue.Sub(allocatedValue, maxPayoutCost) + + if allocatedValue.Cmp(common.Big0) < 0 { + return fmt.Errorf("negative payout") + } + + for _, refund := range refundConfig { + refundValue := common.PercentOf(allocatedValue, refund.Percent) + refundReceiver := refund.Address + rec, err := c.commitPayoutTx(refundValue, c.env.header.Coinbase, refundReceiver, core.SbundlePayoutMaxCostInt, key, chData) + if err != nil { + return err + } + if rec.Status != 
types.ReceiptStatusSuccessful { + return fmt.Errorf("refund tx failed") + } + log.Trace("Committed kickback", "payout", ethIntToFloat(allocatedValue), "receiver", refundReceiver) + } + } + coinbaseDelta.Set(c.env.state.GetBalance(c.env.header.Coinbase)) + coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) + totalProfit.Add(totalProfit, coinbaseDelta) + + if totalProfit.Cmp(common.Big0) < 0 { + return fmt.Errorf("negative profit") + } + return nil +} + +// discard reverts all changes to the environment - every commit operation must be followed by a discard or apply operation +func (c *envChanges) discard() error { + return c.env.state.MultiTxSnapshotRevert() +} + +// rollback reverts all changes to the environment - whereas apply and discard update the state, rollback only updates the environment +// the intended use is to call rollback after a commit operation has failed +func (c *envChanges) rollback( + gasUsedBefore uint64, gasPoolBefore *core.GasPool, profitBefore *big.Int, + txsBefore []*types.Transaction, receiptsBefore []*types.Receipt) { + c.usedGas = gasUsedBefore + c.gasPool = gasPoolBefore + c.txs = txsBefore + c.receipts = receiptsBefore + c.profit.Set(profitBefore) +} + +func (c *envChanges) apply() error { + if err := c.env.state.MultiTxSnapshotCommit(); err != nil { + return err + } + + c.env.gasPool.SetGas(c.gasPool.Gas()) + c.env.header.GasUsed = c.usedGas + c.env.profit.Set(c.profit) + c.env.tcount += len(c.txs) + c.env.txs = append(c.env.txs, c.txs...) + c.env.receipts = append(c.env.receipts, c.receipts...) + return nil +} diff --git a/miner/env_changes_test.go b/miner/env_changes_test.go new file mode 100644 index 0000000000..6396c9b94a --- /dev/null +++ b/miner/env_changes_test.go @@ -0,0 +1,390 @@ +package miner + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +func TestTxCommitSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + + env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + changes, err := newEnvChanges(env) + if err != nil { + t.Fatalf("Error creating changes: %v", err) + } + + receipt, i, err := changes.commitTx(tx, chData) + if err != nil { + t.Fatal("can't commit transaction:", err) + } + if receipt.Status != 1 { + t.Fatal("tx failed", receipt) + } + if i != shiftTx { + t.Fatal("incorrect shift value") + } + + if env.tcount != 0 { + t.Fatal("env tcount modified") + } + if len(env.receipts) != 0 { + t.Fatal("env receipts modified") + } + if len(env.txs) != 0 { + t.Fatal("env txs modified") + } + if env.gasPool.Gas() != GasLimit { + t.Fatal("env gas pool modified") + } + + if changes.gasPool.AddGas(receipt.GasUsed).Gas() != GasLimit { + t.Fatal("envDiff gas pool incorrect") + } + if changes.usedGas != receipt.GasUsed { + t.Fatal("envDiff gas used is incorrect") + } + if len(changes.receipts) != 1 { + t.Fatal("envDiff receipts incorrect") + } + if len(changes.txs) != 1 { + t.Fatal("envDiff txs incorrect") + } +} +func TestBundleCommitSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + + algoConf := defaultAlgorithmConfig + env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 
21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + bundle := types.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + BlockNumber: env.header.Number, + } + + envCopy := env.copy() + simBundle, err := simulateBundle(envCopy, bundle, chData, nil) + if err != nil { + t.Fatal("Failed to simulate bundle", err) + } + + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + err = changes.commitBundle(&simBundle, chData, algoConf) + if err != nil { + t.Fatal("Failed to commit bundle", err) + } + + if len(changes.txs) != 2 { + t.Fatal("Incorrect new txs") + } + if len(changes.receipts) != 2 { + t.Fatal("Incorrect receipts txs") + } + if changes.gasPool.AddGas(21000*2).Gas() != GasLimit { + t.Fatal("Gas pool incorrect update") + } +} + +func TestErrorTxCommitSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + + env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + signers.nonces[1] = 10 + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + _, i, err := changes.commitTx(tx, chData) + if err == nil { + t.Fatal("committed incorrect transaction:", err) + } + if i != popTx { + t.Fatal("incorrect shift value") + } + + if changes.gasPool.Gas() != GasLimit { + t.Fatal("envDiff gas pool incorrect") + } + if changes.usedGas != 0 { + t.Fatal("envDiff gas used incorrect") + } + if changes.profit.Sign() != 0 { + t.Fatal("envDiff new profit incorrect") + } + if len(changes.receipts) != 0 { + t.Fatal("envDiff receipts incorrect") + } + if len(changes.receipts) != 0 { + t.Fatal("envDiff txs incorrect") + } +} + +func TestCommitTxOverGasLimitSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + + env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + receipt, i, err := changes.commitTx(tx1, chData) + if err != nil { + t.Fatal("can't commit transaction:", err) + } + if receipt.Status != 1 { + t.Fatal("tx failed", receipt) + } + if i != shiftTx { + t.Fatal("incorrect shift value") + } + + if changes.gasPool.Gas() != 0 { + t.Fatal("Env diff gas pool is not drained") + } + + _, _, err = changes.commitTx(tx2, chData) + require.Error(t, err, "committed tx over gas limit") +} + +func TestErrorBundleCommitSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + + algoConf := defaultAlgorithmConfig + env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) + + // This tx will be included before bundle so bundle will fail because of gas limit + tx0 := signers.signTx(4, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + bundle := types.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + BlockNumber: env.header.Number, + } + + simBundle, err := 
simulateBundle(env, bundle, chData, nil) + if err != nil { + t.Fatal("Failed to simulate bundle", err) + } + + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + _, _, err = changes.commitTx(tx0, chData) + if err != nil { + t.Fatal("Failed to commit tx0", err) + } + + gasPoolBefore := *changes.gasPool + gasUsedBefore := changes.usedGas + newProfitBefore := new(big.Int).Set(changes.profit) + balanceBefore := changes.env.state.GetBalance(signers.addresses[2]) + + err = changes.commitBundle(&simBundle, chData, algoConf) + if err == nil { + t.Fatal("Committed failed bundle", err) + } + + if *changes.gasPool != gasPoolBefore { + t.Fatalf("gasPool changed [found: %d, expected: %d]", changes.gasPool.Gas(), gasPoolBefore.Gas()) + } + + if changes.usedGas != gasUsedBefore { + t.Fatal("gasUsed changed") + } + + balanceAfter := changes.env.state.GetBalance(signers.addresses[2]) + if balanceAfter.Cmp(balanceBefore) != 0 { + t.Fatal("balance changed") + } + + if changes.profit.Cmp(newProfitBefore) != 0 { + t.Fatal("newProfit changed") + } + + if len(changes.txs) != 1 { + t.Fatal("Incorrect new txs") + } + if len(changes.receipts) != 1 { + t.Fatal("Incorrect receipts txs") + } +} + +func TestErrorSBundleCommitSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + + env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + // This tx will be included before sbundle so sbundle will fail because of gas limit + tx0 := signers.signTx(4, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + sbundle := types.SimSBundle{ + Bundle: &types.SBundle{ + Inclusion: types.BundleInclusion{ + BlockNumber: env.header.Number.Uint64(), + MaxBlockNumber: env.header.Number.Uint64(), + }, + Body: []types.BundleBody{ + { + Tx: tx1, + }, + { + Tx: tx2, + }, + }, + }, + // with such small values this bundle will never be rejected based on insufficient profit + MevGasPrice: big.NewInt(1), + Profit: big.NewInt(1), + } + + _, _, err = changes.commitTx(tx0, chData) + if err != nil { + t.Fatal("Failed to commit tx0", err) + } + + gasPoolBefore := *changes.gasPool + gasUsedBefore := changes.usedGas + newProfitBefore := new(big.Int).Set(changes.profit) + balanceBefore := changes.env.state.GetBalance(signers.addresses[2]) + + err = changes.CommitSBundle(&sbundle, chData, builderPrivKey, defaultAlgorithmConfig) + if err == nil { + t.Fatal("Committed failed bundle", err) + } + + if *changes.gasPool != gasPoolBefore { + t.Fatalf("gasPool changed [found: %d, expected: %d]", changes.gasPool.Gas(), gasPoolBefore.Gas()) + } + + if changes.usedGas != gasUsedBefore { + t.Fatal("gasUsed changed") + } + + balanceAfter := changes.env.state.GetBalance(signers.addresses[2]) + if balanceAfter.Cmp(balanceBefore) != 0 { + t.Fatal("balance changed") + } + + if changes.profit.Cmp(newProfitBefore) != 0 { + t.Fatal("newProfit changed") + } + + if len(changes.txs) != 1 { + t.Fatal("Incorrect new txs") + } + if len(changes.receipts) != 1 { + t.Fatal("Incorrect receipts txs") + } +} + +func TestBlacklistSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + + // NOTE: intermediate 
root hash MUST be generated before env changes are instantiated, otherwise state.MultiTxSnapshot + // will be invalidated and the test will fail + beforeRoot := statedb.IntermediateRoot(true) + + env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + blacklist := map[common.Address]struct{}{ + signers.addresses[3]: {}, + } + chData.blacklist = blacklist + + gasPoolBefore := *changes.gasPool + gasUsedBefore := changes.usedGas + balanceBefore := changes.env.state.GetBalance(signers.addresses[3]) + + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[3], big.NewInt(77), []byte{}) + _, _, err = changes.commitTx(tx, chData) + if err == nil { + t.Fatal("committed blacklisted transaction: to") + } + + tx = signers.signTx(3, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[1], big.NewInt(88), []byte{}) + _, _, err = changes.commitTx(tx, chData) + if err == nil { + t.Fatal("committed blacklisted transaction: sender") + } + + calldata := make([]byte, 32-20, 20) + calldata = append(calldata, signers.addresses[3].Bytes()...) + + tx = signers.signTx(4, 40000, big.NewInt(0), big.NewInt(1), payProxyAddress, big.NewInt(99), calldata) + _, _, err = changes.commitTx(tx, chData) + t.Log("balance", changes.env.state.GetBalance(signers.addresses[3])) + + if err == nil { + t.Fatal("committed blacklisted transaction: trace") + } + + err = changes.discard() + if err != nil { + t.Fatal("failed reverting changes", err) + } + + if *changes.gasPool != gasPoolBefore { + t.Fatalf("gasPool changed [found: %d, expected: %d]", changes.gasPool.Gas(), gasPoolBefore.Gas()) + } + + if changes.usedGas != gasUsedBefore { + t.Fatal("gasUsed changed") + } + + if changes.profit.Sign() != 0 { + t.Fatal("newProfit changed") + } + + if changes.env.state.GetBalance(signers.addresses[3]).Cmp(balanceBefore) != 0 { + t.Fatalf("blacklisted balance changed [found: %d, expected: %d]", + changes.env.state.GetBalance(signers.addresses[3]), balanceBefore) + } + + if len(changes.txs) != 0 { + t.Fatal("newTxs changed") + } + + if len(changes.receipts) != 0 { + t.Fatal("newReceipts changed") + } + + afterRoot := statedb.IntermediateRoot(true) + if beforeRoot != afterRoot { + t.Fatal("statedb root changed") + } +} diff --git a/miner/environment_diff.go b/miner/environment_diff.go new file mode 100644 index 0000000000..d41dfd7a30 --- /dev/null +++ b/miner/environment_diff.go @@ -0,0 +1,417 @@ +package miner + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// environmentDiff is a helper struct used to apply transactions to a block using a copy of the state at that block +type environmentDiff struct { + baseEnvironment *environment + header *types.Header + gasPool *core.GasPool // available gas used to pack transactions + state *state.StateDB // apply state changes here + newProfit *big.Int + newTxs []*types.Transaction + newReceipts []*types.Receipt +} + +func newEnvironmentDiff(env *environment) *environmentDiff { + gasPool := new(core.GasPool).AddGas(env.gasPool.Gas()) + return &environmentDiff{ + baseEnvironment: env, + header: types.CopyHeader(env.header), + gasPool: gasPool, + state: env.state.Copy(), + newProfit: new(big.Int), + } +} + +func 
(envDiff *environmentDiff) copy() *environmentDiff { + gasPool := new(core.GasPool).AddGas(envDiff.gasPool.Gas()) + + return &environmentDiff{ + baseEnvironment: envDiff.baseEnvironment.copy(), + header: types.CopyHeader(envDiff.header), + gasPool: gasPool, + state: envDiff.state.Copy(), + newProfit: new(big.Int).Set(envDiff.newProfit), + newTxs: envDiff.newTxs[:], + newReceipts: envDiff.newReceipts[:], + } +} + +func (envDiff *environmentDiff) applyToBaseEnv() { + env := envDiff.baseEnvironment + env.gasPool = new(core.GasPool).AddGas(envDiff.gasPool.Gas()) + env.header = envDiff.header + env.state.StopPrefetcher() + env.state = envDiff.state + env.profit.Add(env.profit, envDiff.newProfit) + env.tcount += len(envDiff.newTxs) + env.txs = append(env.txs, envDiff.newTxs...) + env.receipts = append(env.receipts, envDiff.newReceipts...) +} + +// commit tx to envDiff +func (envDiff *environmentDiff) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) { + header := envDiff.header + coinbase := &envDiff.baseEnvironment.coinbase + signer := envDiff.baseEnvironment.signer + + gasPrice, err := tx.EffectiveGasTip(header.BaseFee) + if err != nil { + return nil, shiftTx, err + } + + envDiff.state.SetTxContext(tx.Hash(), envDiff.baseEnvironment.tcount+len(envDiff.newTxs)) + + receipt, newState, err := applyTransactionWithBlacklist(signer, chData.chainConfig, chData.chain, coinbase, + envDiff.gasPool, envDiff.state, header, tx, &header.GasUsed, *chData.chain.GetVMConfig(), chData.blacklist) + + envDiff.state = newState + if err != nil { + switch { + case errors.Is(err, core.ErrGasLimitReached): + // Pop the current out-of-gas transaction without shifting in the next from the account + from, _ := types.Sender(signer, tx) + log.Trace("Gas limit exceeded for current block", "sender", from) + return receipt, popTx, err + + case errors.Is(err, core.ErrNonceTooLow): + // New head notification data race between the transaction pool and miner, shift + from, _ := types.Sender(signer, tx) + log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) + return receipt, shiftTx, err + + case errors.Is(err, core.ErrNonceTooHigh): + // Reorg notification data race between the transaction pool and miner, skip account + from, _ := types.Sender(signer, tx) + log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce()) + return receipt, popTx, err + + case errors.Is(err, core.ErrTxTypeNotSupported): + // Pop the unsupported transaction without shifting in the next from the account + from, _ := types.Sender(signer, tx) + log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) + return receipt, popTx, err + + default: + // Strange error, discard the transaction and get the next in line (note, the + // nonce-too-high clause will prevent us from executing in vain).
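+ // Returning shiftTx here signals the caller to move on to this sender's next transaction, whereas popTx (returned above for the gas-limit, nonce-too-high, and unsupported-type cases) tells it to drop the sender for the rest of the block.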
+ log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + return receipt, shiftTx, err + } + } + + envDiff.newProfit = envDiff.newProfit.Add(envDiff.newProfit, gasPrice.Mul(gasPrice, big.NewInt(int64(receipt.GasUsed)))) + envDiff.newTxs = append(envDiff.newTxs, tx) + envDiff.newReceipts = append(envDiff.newReceipts, receipt) + + return receipt, shiftTx, nil +} + +// Commit Bundle to env diff +func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chData chainData, interrupt *int32, algoConf algorithmConfig) error { + coinbase := envDiff.baseEnvironment.coinbase + tmpEnvDiff := envDiff.copy() + + coinbaseBalanceBefore := tmpEnvDiff.state.GetBalance(coinbase) + + profitBefore := new(big.Int).Set(tmpEnvDiff.newProfit) + var gasUsed uint64 + + for _, tx := range bundle.OriginalBundle.Txs { + txHash := tx.Hash() + if tmpEnvDiff.header.BaseFee != nil && tx.Type() == types.DynamicFeeTxType { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + return core.ErrFeeCapVeryHigh + } + if tx.GasTipCap().BitLen() > 256 { + return core.ErrTipVeryHigh + } + // Ensure gasFeeCap is greater than or equal to gasTipCap. + if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + return core.ErrTipAboveFeeCap + } + } + + if tx.Value().Sign() == -1 { + return core.ErrNegativeValue + } + + _, err := tx.EffectiveGasTip(envDiff.header.BaseFee) + if err != nil { + return err + } + + _, err = types.Sender(envDiff.baseEnvironment.signer, tx) + if err != nil { + return err + } + + if checkInterrupt(interrupt) { + return errInterrupt + } + + receipt, _, err := tmpEnvDiff.commitTx(tx, chData) + + if err != nil { + isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", txHash, "err", err) + continue + } + log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + return err + } + + if receipt != nil { + if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { + // if transaction reverted and isn't specified as reverting hash, return error + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + return errors.New("bundle tx revert") + } + } else { + // NOTE: The expectation is that a receipt is only nil if an error occurred. + // If there is no error but receipt is nil, there is likely a programming error. 
+ return errors.New("invalid receipt when no error occurred") + } + + gasUsed += receipt.GasUsed + } + coinbaseBalanceAfter := tmpEnvDiff.state.GetBalance(coinbase) + coinbaseBalanceDelta := new(big.Int).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) + tmpEnvDiff.newProfit.Add(profitBefore, coinbaseBalanceDelta) + + if bundle.MevGasPrice == nil { + return ErrMevGasPriceNotSet + } + + var ( + bundleProfit = coinbaseBalanceDelta + // EGP = Effective Gas Price (Profit / GasUsed) + simulatedEGP = new(big.Int).Set(bundle.MevGasPrice) + actualEGP *big.Int + tolerablePriceDifferencePercent = 1 + + simulatedBundleProfit = new(big.Int).Set(bundle.TotalEth) + actualBundleProfit = new(big.Int).Set(bundleProfit) + ) + + if gasUsed == 0 { + return errors.New("bundle gas used is 0") + } else { + actualEGP = new(big.Int).Div(bundleProfit, big.NewInt(int64(gasUsed))) + } + + err := ValidateGasPriceAndProfit(algoConf, + actualEGP, simulatedEGP, tolerablePriceDifferencePercent, + actualBundleProfit, simulatedBundleProfit, + ) + if err != nil { + return err + } + + *envDiff = *tmpEnvDiff + return nil +} + +func (envDiff *environmentDiff) commitPayoutTx(amount *big.Int, sender, receiver common.Address, gas uint64, prv *ecdsa.PrivateKey, chData chainData) (*types.Receipt, error) { + return commitPayoutTx(PayoutTransactionParams{ + Amount: amount, + BaseFee: envDiff.header.BaseFee, + ChainData: chData, + Gas: gas, + CommitFn: envDiff.commitTx, + Receiver: receiver, + Sender: sender, + SenderBalance: envDiff.state.GetBalance(sender), + SenderNonce: envDiff.state.GetNonce(sender), + Signer: envDiff.baseEnvironment.signer, + PrivateKey: prv, + }) +} + +func (envDiff *environmentDiff) commitSBundle(b *types.SimSBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + // TODO: Suggestion for future improvement: instead of checking if key is nil, panic. 
+ // Discussed with @Ruteri, see PR#90 for details: https://github.com/flashbots/builder/pull/90#discussion_r1285567550 + if key == nil { + return errNoPrivateKey + } + + tmpEnvDiff := envDiff.copy() + + coinbaseBefore := tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) + gasBefore := tmpEnvDiff.gasPool.Gas() + + if err := tmpEnvDiff.commitSBundleInner(b.Bundle, chData, interrupt, key, algoConf); err != nil { + return err + } + + coinbaseAfter := tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) + gasAfter := tmpEnvDiff.gasPool.Gas() + + coinbaseDelta := new(big.Int).Sub(coinbaseAfter, coinbaseBefore) + gasDelta := new(big.Int).SetUint64(gasBefore - gasAfter) + + if coinbaseDelta.Cmp(common.Big0) < 0 { + return errors.New("coinbase balance decreased") + } + + gotEGP := new(big.Int).Div(coinbaseDelta, gasDelta) + simEGP := new(big.Int).Set(b.MevGasPrice) + + // allow > 1% difference + actualEGP := new(big.Int).Mul(gotEGP, big.NewInt(101)) + simulatedEGP := new(big.Int).Mul(simEGP, common.Big100) + + if simulatedEGP.Cmp(actualEGP) > 0 { + return &lowProfitError{ + ExpectedEffectiveGasPrice: simEGP, + ActualEffectiveGasPrice: gotEGP, + } + } + + if algoConf.EnforceProfit { + // if profit is enforced between simulation and actual commit, only allow >-1% divergence + simulatedSbundleProfit := new(big.Int).Set(b.Profit) + actualSbundleProfit := new(big.Int).Set(coinbaseDelta) + + // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is + // lower due to transaction ordering + simulatedProfitMultiple := common.PercentOf(simulatedSbundleProfit, algoConf.ProfitThresholdPercent) + actualProfitMultiple := new(big.Int).Mul(actualSbundleProfit, common.Big100) + + if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { + log.Trace("Lower sbundle profit found after inclusion", "sbundle", b.Bundle.Hash()) + return &lowProfitError{ + ExpectedProfit: simulatedSbundleProfit, + ActualProfit: actualSbundleProfit, + } + } + } + + *envDiff = *tmpEnvDiff + return nil +} + +func (envDiff *environmentDiff) commitSBundleInner(b *types.SBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + // check inclusion + minBlock := b.Inclusion.BlockNumber + maxBlock := b.Inclusion.MaxBlockNumber + if current := envDiff.header.Number.Uint64(); current < minBlock || current > maxBlock { + return fmt.Errorf("bundle inclusion block number out of range: %d <= %d <= %d", minBlock, current, maxBlock) + } + + // extract constraints into convenient format + refundIdx := make([]bool, len(b.Body)) + refundPercents := make([]int, len(b.Body)) + for _, el := range b.Validity.Refund { + refundIdx[el.BodyIdx] = true + refundPercents[el.BodyIdx] = el.Percent + } + + var ( + totalProfit *big.Int = new(big.Int) + refundableProfit *big.Int = new(big.Int) + ) + + var ( + coinbaseDelta = new(big.Int) + coinbaseBefore *big.Int + ) + // insert body and check it + for i, el := range b.Body { + coinbaseDelta.Set(common.Big0) + coinbaseBefore = envDiff.state.GetBalance(envDiff.header.Coinbase) + + if el.Tx != nil { + receipt, _, err := envDiff.commitTx(el.Tx, chData) + if err != nil { + // if drop enabled, and revertible tx has error on commit, + // we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && el.CanRevert { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", el.Tx.Hash(), "err", err) + continue + } + return err + } + if receipt.Status != 
types.ReceiptStatusSuccessful && !el.CanRevert { + return errors.New("tx failed") + } + } else if el.Bundle != nil { + err := envDiff.commitSBundleInner(el.Bundle, chData, interrupt, key, algoConf) + if err != nil { + return err + } + } else { + return errors.New("invalid body element") + } + + coinbaseDelta.Set(envDiff.state.GetBalance(envDiff.header.Coinbase)) + coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) + + totalProfit.Add(totalProfit, coinbaseDelta) + if !refundIdx[i] { + refundableProfit.Add(refundableProfit, coinbaseDelta) + } + } + + // enforce constraints + coinbaseDelta.Set(common.Big0) + coinbaseBefore = envDiff.state.GetBalance(envDiff.header.Coinbase) + for i, el := range refundPercents { + if !refundIdx[i] { + continue + } + refundConfig, err := types.GetRefundConfig(&b.Body[i], envDiff.baseEnvironment.signer) + if err != nil { + return err + } + + maxPayoutCost := new(big.Int).Set(core.SbundlePayoutMaxCost) + maxPayoutCost.Mul(maxPayoutCost, big.NewInt(int64(len(refundConfig)))) + maxPayoutCost.Mul(maxPayoutCost, envDiff.header.BaseFee) + + allocatedValue := common.PercentOf(refundableProfit, el) + allocatedValue.Sub(allocatedValue, maxPayoutCost) + + if allocatedValue.Cmp(common.Big0) < 0 { + return fmt.Errorf("negative payout") + } + + for _, refund := range refundConfig { + refundValue := common.PercentOf(allocatedValue, refund.Percent) + refundReceiver := refund.Address + rec, err := envDiff.commitPayoutTx(refundValue, envDiff.header.Coinbase, refundReceiver, core.SbundlePayoutMaxCostInt, key, chData) + if err != nil { + return err + } + if rec.Status != types.ReceiptStatusSuccessful { + return fmt.Errorf("refund tx failed") + } + log.Trace("Committed kickback", "payout", ethIntToFloat(allocatedValue), "receiver", refundReceiver) + } + } + coinbaseDelta.Set(envDiff.state.GetBalance(envDiff.header.Coinbase)) + coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) + totalProfit.Add(totalProfit, coinbaseDelta) + + if totalProfit.Cmp(common.Big0) < 0 { + return fmt.Errorf("negative profit") + } + return nil +} diff --git a/miner/miner.go b/miner/miner.go index a01aa5d88f..203132fc7d 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -54,16 +54,22 @@ const ( ALGO_MEV_GETH AlgoType = iota ALGO_GREEDY ALGO_GREEDY_BUCKETS + ALGO_GREEDY_MULTISNAP + ALGO_GREEDY_BUCKETS_MULTISNAP ) func (a AlgoType) String() string { switch a { case ALGO_GREEDY: return "greedy" + case ALGO_GREEDY_MULTISNAP: + return "greedy-multi-snap" case ALGO_MEV_GETH: return "mev-geth" case ALGO_GREEDY_BUCKETS: return "greedy-buckets" + case ALGO_GREEDY_BUCKETS_MULTISNAP: + return "greedy-buckets-multi-snap" default: return "unsupported" } @@ -77,6 +83,10 @@ func AlgoTypeFlagToEnum(algoString string) (AlgoType, error) { return ALGO_GREEDY_BUCKETS, nil case ALGO_GREEDY.String(): return ALGO_GREEDY, nil + case ALGO_GREEDY_MULTISNAP.String(): + return ALGO_GREEDY_MULTISNAP, nil + case ALGO_GREEDY_BUCKETS_MULTISNAP.String(): + return ALGO_GREEDY_BUCKETS_MULTISNAP, nil default: return ALGO_MEV_GETH, errors.New("algo not recognized") } @@ -98,8 +108,8 @@ type Config struct { MaxMergedBundles int Blocklist []common.Address `toml:",omitempty"` NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload - DiscardRevertibleTxOnErr bool // Whether to discard revertible transactions on error PriceCutoffPercent int // Effective gas price cutoff % used for bucketing transactions by price (only useful in greedy-buckets AlgoType) + DiscardRevertibleTxOnErr bool // When enabled, if bundle 
revertible transaction has error on commit, builder will discard the transaction } // DefaultConfig contains default settings for miner. diff --git a/miner/multi_worker.go b/miner/multi_worker.go index 93cb8aadae..ab33a84ee8 100644 --- a/miner/multi_worker.go +++ b/miner/multi_worker.go @@ -141,7 +141,7 @@ func newMultiWorker(config *Config, chainConfig *params.ChainConfig, engine cons switch config.AlgoType { case ALGO_MEV_GETH: return newMultiWorkerMevGeth(config, chainConfig, engine, eth, mux, isLocalBlock, init) - case ALGO_GREEDY, ALGO_GREEDY_BUCKETS: + case ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP: return newMultiWorkerGreedy(config, chainConfig, engine, eth, mux, isLocalBlock, init) default: panic("unsupported builder algorithm found") diff --git a/miner/sbundle_test.go b/miner/sbundle_test.go index 4278db2461..6cf34a2b6b 100644 --- a/miner/sbundle_test.go +++ b/miner/sbundle_test.go @@ -452,7 +452,7 @@ func TestSBundles(t *testing.T) { var ( config = params.TestChainConfig signer = types.LatestSigner(config) - statedb, chData = genTestSetupWithAlloc(config, testSuite.GenesisAlloc) + statedb, chData = genTestSetupWithAlloc(config, testSuite.GenesisAlloc, GasLimit) env = newEnvironment(chData, statedb, testSuite.Header.Coinbase, testSuite.Header.GasLimit, testSuite.Header.BaseFee) envDiff = newEnvironmentDiff(env) diff --git a/miner/state_fuzz_test_abigen_bindings.go b/miner/state_fuzz_test_abigen_bindings.go new file mode 100644 index 0000000000..06a1a70039 --- /dev/null +++ b/miner/state_fuzz_test_abigen_bindings.go @@ -0,0 +1,453 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package miner + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// StatefuzztestMetaData contains all meta data concerning the Statefuzztest contract. 
+var StatefuzztestMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"addThenWithdrawRefund\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"balances\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"changeBalance\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"newValue\",\"type\":\"bytes\"}],\"name\":\"changeStorage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"value\",\"type\":\"bytes\"}],\"name\":\"createObject\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"isSelfDestructed\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"}],\"name\":\"resetObject\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"selfDestruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"storageData\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"contractAddress\",\"type\":\"address\"}],\"name\":\"touchContract\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"codeHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610d4e806100206000396000f3fe6080604052600436106100915760003560e01c8063a2601e0a11610059578063a2601e0a1461016c578063b0d50e3814610195578063c522de44146101d2578063d58010651461020f578063f529d4481461023857610091565b806327e235e3146100965780633e978173146100d3578063798d40e3146100ef5780637a5ae62e1461012c5780639cb8a26a14610155575b600080fd5b3480156100a257600080fd5b506100bd60048036038101906100b891906105d7565b610261565b6040516100ca919061061d565b60405180910390f35b6100ed60048036038101906100e89190610664565b610279565b005b3480156100fb57600080fd5b50610116600480360381019061011191906105d7565b610319565b60405161012391906106aa565b60405180910390f35b34801561013857600080fd5b50610153600480360381019061014e91906106f1565b610324565b005b34801561016157600080fd5b5061016a610346565b005b34801561017857600080fd5b50610193600480360381019061018e9190610864565b6103b7565b005b3480156101a157600080fd5b506101bc60048036038101906101b791906105d7565b6103dc565b6040516101c991906108db565b60405180910390f35b3480156101de57600080fd5b506101f960048036038101906101f491906106f1565b6103fc565b6040516102069190610975565b60405180910390f35b34801561021b57600080fd5b5061023660048036038101906102319190610864565b61049c565b005b34801561024457600080fd5b5061025f600480360381019061025a9190610997565b6104c1565b005b60006020528060005260406000206000915090505481565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282546102c89190610a06565b925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015610315573d6000803e3d6000fd5b5050565b6000813f9050919050565b6001600082815260200190815260200160002060006103439190610508565b50565b6001600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055503373ffffffffffffffffffffffffffffffffffffffff16ff5b806001600084815260200190815260200160002090816103d79190610c46565b505050565b60026020528060005260406000206000915054906101000a900460ff1681565b6001602052806000526040600020600091509050805461041b90610a69565b80601f016020809104026020016040519081016040528092919081815260200182805461044790610a69565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b505050505081565b806001600084815260200190815260200160002090816104bc9190610c46565b505050565b806000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055505050565b50805461051490610a69565b6000825580601f106105265750610545565b601f0160209004906000526020600020908101906105449190610548565b5b50565b5b80821115610561576000816000905550600101610549565b5090565b6000604051905090565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006105a482610579565b9050919050565b6105b481610599565b81146105bf57600080fd5b50565b6000813590506105d1816105ab565b92915050565b6000602082840312156105ed576105ec61056f565b5b60006105fb848285016105c2565b91505092915050565b6000819050919050565b61061781610604565b82525050565b6000602082019050610632600083018461060e565b92915050565b61064181610604565b811461064c57600080fd5b50565b60008135905061065e81610638565b92915050565b60006020828403121561067a5761067961056f565b5b60006106888482850161064f565b91505092915050565b6000819050919050565b6106a481610691565b82525050565b60006020820190506106bf600083018461069b565b92915050565b6106ce81610691565b81
146106d957600080fd5b50565b6000813590506106eb816106c5565b92915050565b6000602082840312156107075761070661056f565b5b6000610715848285016106dc565b91505092915050565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61077182610728565b810181811067ffffffffffffffff821117156107905761078f610739565b5b80604052505050565b60006107a3610565565b90506107af8282610768565b919050565b600067ffffffffffffffff8211156107cf576107ce610739565b5b6107d882610728565b9050602081019050919050565b82818337600083830152505050565b6000610807610802846107b4565b610799565b90508281526020810184848401111561082357610822610723565b5b61082e8482856107e5565b509392505050565b600082601f83011261084b5761084a61071e565b5b813561085b8482602086016107f4565b91505092915050565b6000806040838503121561087b5761087a61056f565b5b6000610889858286016106dc565b925050602083013567ffffffffffffffff8111156108aa576108a9610574565b5b6108b685828601610836565b9150509250929050565b60008115159050919050565b6108d5816108c0565b82525050565b60006020820190506108f060008301846108cc565b92915050565b600081519050919050565b600082825260208201905092915050565b60005b83811015610930578082015181840152602081019050610915565b60008484015250505050565b6000610947826108f6565b6109518185610901565b9350610961818560208601610912565b61096a81610728565b840191505092915050565b6000602082019050818103600083015261098f818461093c565b905092915050565b600080604083850312156109ae576109ad61056f565b5b60006109bc858286016105c2565b92505060206109cd8582860161064f565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610a1182610604565b9150610a1c83610604565b9250828201905080821115610a3457610a336109d7565b5b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b60006002820490506001821680610a8157607f821691505b602082108103610a9457610a93610a3a565b5b50919050565b60008190508160005260206000209050919050565b60006020601f8301049050919050565b600082821b905092915050565b600060088302610afc7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82610abf565b610b068683610abf565b95508019841693508086168417925050509392505050565b6000819050919050565b6000610b43610b3e610b3984610604565b610b1e565b610604565b9050919050565b6000819050919050565b610b5d83610b28565b610b71610b6982610b4a565b848454610acc565b825550505050565b600090565b610b86610b79565b610b91818484610b54565b505050565b5b81811015610bb557610baa600082610b7e565b600181019050610b97565b5050565b601f821115610bfa57610bcb81610a9a565b610bd484610aaf565b81016020851015610be3578190505b610bf7610bef85610aaf565b830182610b96565b50505b505050565b600082821c905092915050565b6000610c1d60001984600802610bff565b1980831691505092915050565b6000610c368383610c0c565b9150826002028217905092915050565b610c4f826108f6565b67ffffffffffffffff811115610c6857610c67610739565b5b610c728254610a69565b610c7d828285610bb9565b600060209050601f831160018114610cb05760008415610c9e578287015190505b610ca88582610c2a565b865550610d10565b601f198416610cbe86610a9a565b60005b82811015610ce657848901518255600182019150602085019450602081019050610cc1565b86831015610d035784890151610cff601f891682610c0c565b8355505b6001600288020188555050505b50505050505056fea2646970667358221220bf0fddc0e0582d2115c83591396205edb56de333d7cc4ef10f8a3d740b137fc464736f6c63430008130033", +} + +// StatefuzztestABI is the input ABI used to generate the binding from. +// Deprecated: Use StatefuzztestMetaData.ABI instead. 
+var StatefuzztestABI = StatefuzztestMetaData.ABI + +// StatefuzztestBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use StatefuzztestMetaData.Bin instead. +var StatefuzztestBin = StatefuzztestMetaData.Bin + +// DeployStatefuzztest deploys a new Ethereum contract, binding an instance of Statefuzztest to it. +func DeployStatefuzztest(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Statefuzztest, error) { + parsed, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(StatefuzztestBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Statefuzztest{StatefuzztestCaller: StatefuzztestCaller{contract: contract}, StatefuzztestTransactor: StatefuzztestTransactor{contract: contract}, StatefuzztestFilterer: StatefuzztestFilterer{contract: contract}}, nil +} + +// Statefuzztest is an auto generated Go binding around an Ethereum contract. +type Statefuzztest struct { + StatefuzztestCaller // Read-only binding to the contract + StatefuzztestTransactor // Write-only binding to the contract + StatefuzztestFilterer // Log filterer for contract events +} + +// StatefuzztestCaller is an auto generated read-only Go binding around an Ethereum contract. +type StatefuzztestCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// StatefuzztestTransactor is an auto generated write-only Go binding around an Ethereum contract. +type StatefuzztestTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// StatefuzztestFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type StatefuzztestFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// StatefuzztestSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type StatefuzztestSession struct { + Contract *Statefuzztest // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// StatefuzztestCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type StatefuzztestCallerSession struct { + Contract *StatefuzztestCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// StatefuzztestTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type StatefuzztestTransactorSession struct { + Contract *StatefuzztestTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// StatefuzztestRaw is an auto generated low-level Go binding around an Ethereum contract. +type StatefuzztestRaw struct { + Contract *Statefuzztest // Generic contract binding to access the raw methods on +} + +// StatefuzztestCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. 
+type StatefuzztestCallerRaw struct { + Contract *StatefuzztestCaller // Generic read-only contract binding to access the raw methods on +} + +// StatefuzztestTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type StatefuzztestTransactorRaw struct { + Contract *StatefuzztestTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewStatefuzztest creates a new instance of Statefuzztest, bound to a specific deployed contract. +func NewStatefuzztest(address common.Address, backend bind.ContractBackend) (*Statefuzztest, error) { + contract, err := bindStatefuzztest(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Statefuzztest{StatefuzztestCaller: StatefuzztestCaller{contract: contract}, StatefuzztestTransactor: StatefuzztestTransactor{contract: contract}, StatefuzztestFilterer: StatefuzztestFilterer{contract: contract}}, nil +} + +// NewStatefuzztestCaller creates a new read-only instance of Statefuzztest, bound to a specific deployed contract. +func NewStatefuzztestCaller(address common.Address, caller bind.ContractCaller) (*StatefuzztestCaller, error) { + contract, err := bindStatefuzztest(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &StatefuzztestCaller{contract: contract}, nil +} + +// NewStatefuzztestTransactor creates a new write-only instance of Statefuzztest, bound to a specific deployed contract. +func NewStatefuzztestTransactor(address common.Address, transactor bind.ContractTransactor) (*StatefuzztestTransactor, error) { + contract, err := bindStatefuzztest(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &StatefuzztestTransactor{contract: contract}, nil +} + +// NewStatefuzztestFilterer creates a new log filterer instance of Statefuzztest, bound to a specific deployed contract. +func NewStatefuzztestFilterer(address common.Address, filterer bind.ContractFilterer) (*StatefuzztestFilterer, error) { + contract, err := bindStatefuzztest(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &StatefuzztestFilterer{contract: contract}, nil +} + +// bindStatefuzztest binds a generic wrapper to an already deployed contract. +func bindStatefuzztest(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Statefuzztest *StatefuzztestRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Statefuzztest.Contract.StatefuzztestCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Statefuzztest *StatefuzztestRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Statefuzztest.Contract.StatefuzztestTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. 
+func (_Statefuzztest *StatefuzztestRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Statefuzztest.Contract.StatefuzztestTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Statefuzztest *StatefuzztestCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Statefuzztest.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Statefuzztest *StatefuzztestTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Statefuzztest.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Statefuzztest *StatefuzztestTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Statefuzztest.Contract.contract.Transact(opts, method, params...) +} + +// Balances is a free data retrieval call binding the contract method 0x27e235e3. +// +// Solidity: function balances(address ) view returns(uint256) +func (_Statefuzztest *StatefuzztestCaller) Balances(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) { + var out []interface{} + err := _Statefuzztest.contract.Call(opts, &out, "balances", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Balances is a free data retrieval call binding the contract method 0x27e235e3. +// +// Solidity: function balances(address ) view returns(uint256) +func (_Statefuzztest *StatefuzztestSession) Balances(arg0 common.Address) (*big.Int, error) { + return _Statefuzztest.Contract.Balances(&_Statefuzztest.CallOpts, arg0) +} + +// Balances is a free data retrieval call binding the contract method 0x27e235e3. +// +// Solidity: function balances(address ) view returns(uint256) +func (_Statefuzztest *StatefuzztestCallerSession) Balances(arg0 common.Address) (*big.Int, error) { + return _Statefuzztest.Contract.Balances(&_Statefuzztest.CallOpts, arg0) +} + +// IsSelfDestructed is a free data retrieval call binding the contract method 0xb0d50e38. +// +// Solidity: function isSelfDestructed(address ) view returns(bool) +func (_Statefuzztest *StatefuzztestCaller) IsSelfDestructed(opts *bind.CallOpts, arg0 common.Address) (bool, error) { + var out []interface{} + err := _Statefuzztest.contract.Call(opts, &out, "isSelfDestructed", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// IsSelfDestructed is a free data retrieval call binding the contract method 0xb0d50e38. +// +// Solidity: function isSelfDestructed(address ) view returns(bool) +func (_Statefuzztest *StatefuzztestSession) IsSelfDestructed(arg0 common.Address) (bool, error) { + return _Statefuzztest.Contract.IsSelfDestructed(&_Statefuzztest.CallOpts, arg0) +} + +// IsSelfDestructed is a free data retrieval call binding the contract method 0xb0d50e38. 
+// +// Solidity: function isSelfDestructed(address ) view returns(bool) +func (_Statefuzztest *StatefuzztestCallerSession) IsSelfDestructed(arg0 common.Address) (bool, error) { + return _Statefuzztest.Contract.IsSelfDestructed(&_Statefuzztest.CallOpts, arg0) +} + +// StorageData is a free data retrieval call binding the contract method 0xc522de44. +// +// Solidity: function storageData(bytes32 ) view returns(bytes) +func (_Statefuzztest *StatefuzztestCaller) StorageData(opts *bind.CallOpts, arg0 [32]byte) ([]byte, error) { + var out []interface{} + err := _Statefuzztest.contract.Call(opts, &out, "storageData", arg0) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +// StorageData is a free data retrieval call binding the contract method 0xc522de44. +// +// Solidity: function storageData(bytes32 ) view returns(bytes) +func (_Statefuzztest *StatefuzztestSession) StorageData(arg0 [32]byte) ([]byte, error) { + return _Statefuzztest.Contract.StorageData(&_Statefuzztest.CallOpts, arg0) +} + +// StorageData is a free data retrieval call binding the contract method 0xc522de44. +// +// Solidity: function storageData(bytes32 ) view returns(bytes) +func (_Statefuzztest *StatefuzztestCallerSession) StorageData(arg0 [32]byte) ([]byte, error) { + return _Statefuzztest.Contract.StorageData(&_Statefuzztest.CallOpts, arg0) +} + +// TouchContract is a free data retrieval call binding the contract method 0x798d40e3. +// +// Solidity: function touchContract(address contractAddress) view returns(bytes32 codeHash) +func (_Statefuzztest *StatefuzztestCaller) TouchContract(opts *bind.CallOpts, contractAddress common.Address) ([32]byte, error) { + var out []interface{} + err := _Statefuzztest.contract.Call(opts, &out, "touchContract", contractAddress) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +// TouchContract is a free data retrieval call binding the contract method 0x798d40e3. +// +// Solidity: function touchContract(address contractAddress) view returns(bytes32 codeHash) +func (_Statefuzztest *StatefuzztestSession) TouchContract(contractAddress common.Address) ([32]byte, error) { + return _Statefuzztest.Contract.TouchContract(&_Statefuzztest.CallOpts, contractAddress) +} + +// TouchContract is a free data retrieval call binding the contract method 0x798d40e3. +// +// Solidity: function touchContract(address contractAddress) view returns(bytes32 codeHash) +func (_Statefuzztest *StatefuzztestCallerSession) TouchContract(contractAddress common.Address) ([32]byte, error) { + return _Statefuzztest.Contract.TouchContract(&_Statefuzztest.CallOpts, contractAddress) +} + +// AddThenWithdrawRefund is a paid mutator transaction binding the contract method 0x3e978173. +// +// Solidity: function addThenWithdrawRefund(uint256 amount) payable returns() +func (_Statefuzztest *StatefuzztestTransactor) AddThenWithdrawRefund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "addThenWithdrawRefund", amount) +} + +// AddThenWithdrawRefund is a paid mutator transaction binding the contract method 0x3e978173. 
+// +// Solidity: function addThenWithdrawRefund(uint256 amount) payable returns() +func (_Statefuzztest *StatefuzztestSession) AddThenWithdrawRefund(amount *big.Int) (*types.Transaction, error) { + return _Statefuzztest.Contract.AddThenWithdrawRefund(&_Statefuzztest.TransactOpts, amount) +} + +// AddThenWithdrawRefund is a paid mutator transaction binding the contract method 0x3e978173. +// +// Solidity: function addThenWithdrawRefund(uint256 amount) payable returns() +func (_Statefuzztest *StatefuzztestTransactorSession) AddThenWithdrawRefund(amount *big.Int) (*types.Transaction, error) { + return _Statefuzztest.Contract.AddThenWithdrawRefund(&_Statefuzztest.TransactOpts, amount) +} + +// ChangeBalance is a paid mutator transaction binding the contract method 0xf529d448. +// +// Solidity: function changeBalance(address account, uint256 newBalance) returns() +func (_Statefuzztest *StatefuzztestTransactor) ChangeBalance(opts *bind.TransactOpts, account common.Address, newBalance *big.Int) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "changeBalance", account, newBalance) +} + +// ChangeBalance is a paid mutator transaction binding the contract method 0xf529d448. +// +// Solidity: function changeBalance(address account, uint256 newBalance) returns() +func (_Statefuzztest *StatefuzztestSession) ChangeBalance(account common.Address, newBalance *big.Int) (*types.Transaction, error) { + return _Statefuzztest.Contract.ChangeBalance(&_Statefuzztest.TransactOpts, account, newBalance) +} + +// ChangeBalance is a paid mutator transaction binding the contract method 0xf529d448. +// +// Solidity: function changeBalance(address account, uint256 newBalance) returns() +func (_Statefuzztest *StatefuzztestTransactorSession) ChangeBalance(account common.Address, newBalance *big.Int) (*types.Transaction, error) { + return _Statefuzztest.Contract.ChangeBalance(&_Statefuzztest.TransactOpts, account, newBalance) +} + +// ChangeStorage is a paid mutator transaction binding the contract method 0xa2601e0a. +// +// Solidity: function changeStorage(bytes32 key, bytes newValue) returns() +func (_Statefuzztest *StatefuzztestTransactor) ChangeStorage(opts *bind.TransactOpts, key [32]byte, newValue []byte) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "changeStorage", key, newValue) +} + +// ChangeStorage is a paid mutator transaction binding the contract method 0xa2601e0a. +// +// Solidity: function changeStorage(bytes32 key, bytes newValue) returns() +func (_Statefuzztest *StatefuzztestSession) ChangeStorage(key [32]byte, newValue []byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.ChangeStorage(&_Statefuzztest.TransactOpts, key, newValue) +} + +// ChangeStorage is a paid mutator transaction binding the contract method 0xa2601e0a. +// +// Solidity: function changeStorage(bytes32 key, bytes newValue) returns() +func (_Statefuzztest *StatefuzztestTransactorSession) ChangeStorage(key [32]byte, newValue []byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.ChangeStorage(&_Statefuzztest.TransactOpts, key, newValue) +} + +// CreateObject is a paid mutator transaction binding the contract method 0xd5801065. 
+// +// Solidity: function createObject(bytes32 key, bytes value) returns() +func (_Statefuzztest *StatefuzztestTransactor) CreateObject(opts *bind.TransactOpts, key [32]byte, value []byte) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "createObject", key, value) +} + +// CreateObject is a paid mutator transaction binding the contract method 0xd5801065. +// +// Solidity: function createObject(bytes32 key, bytes value) returns() +func (_Statefuzztest *StatefuzztestSession) CreateObject(key [32]byte, value []byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.CreateObject(&_Statefuzztest.TransactOpts, key, value) +} + +// CreateObject is a paid mutator transaction binding the contract method 0xd5801065. +// +// Solidity: function createObject(bytes32 key, bytes value) returns() +func (_Statefuzztest *StatefuzztestTransactorSession) CreateObject(key [32]byte, value []byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.CreateObject(&_Statefuzztest.TransactOpts, key, value) +} + +// ResetObject is a paid mutator transaction binding the contract method 0x7a5ae62e. +// +// Solidity: function resetObject(bytes32 key) returns() +func (_Statefuzztest *StatefuzztestTransactor) ResetObject(opts *bind.TransactOpts, key [32]byte) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "resetObject", key) +} + +// ResetObject is a paid mutator transaction binding the contract method 0x7a5ae62e. +// +// Solidity: function resetObject(bytes32 key) returns() +func (_Statefuzztest *StatefuzztestSession) ResetObject(key [32]byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.ResetObject(&_Statefuzztest.TransactOpts, key) +} + +// ResetObject is a paid mutator transaction binding the contract method 0x7a5ae62e. +// +// Solidity: function resetObject(bytes32 key) returns() +func (_Statefuzztest *StatefuzztestTransactorSession) ResetObject(key [32]byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.ResetObject(&_Statefuzztest.TransactOpts, key) +} + +// SelfDestruct is a paid mutator transaction binding the contract method 0x9cb8a26a. +// +// Solidity: function selfDestruct() returns() +func (_Statefuzztest *StatefuzztestTransactor) SelfDestruct(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "selfDestruct") +} + +// SelfDestruct is a paid mutator transaction binding the contract method 0x9cb8a26a. +// +// Solidity: function selfDestruct() returns() +func (_Statefuzztest *StatefuzztestSession) SelfDestruct() (*types.Transaction, error) { + return _Statefuzztest.Contract.SelfDestruct(&_Statefuzztest.TransactOpts) +} + +// SelfDestruct is a paid mutator transaction binding the contract method 0x9cb8a26a. 
+// +// Solidity: function selfDestruct() returns() +func (_Statefuzztest *StatefuzztestTransactorSession) SelfDestruct() (*types.Transaction, error) { + return _Statefuzztest.Contract.SelfDestruct(&_Statefuzztest.TransactOpts) +} diff --git a/miner/testdata/state_fuzz_test.abi b/miner/testdata/state_fuzz_test.abi new file mode 100644 index 0000000000..92bdef0182 --- /dev/null +++ b/miner/testdata/state_fuzz_test.abi @@ -0,0 +1 @@ +[{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"addThenWithdrawRefund","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balances","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"uint256","name":"newBalance","type":"uint256"}],"name":"changeBalance","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"},{"internalType":"bytes","name":"newValue","type":"bytes"}],"name":"changeStorage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"},{"internalType":"bytes","name":"value","type":"bytes"}],"name":"createObject","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"isSelfDestructed","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"}],"name":"resetObject","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"selfDestruct","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"name":"storageData","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"}],"name":"touchContract","outputs":[{"internalType":"bytes32","name":"codeHash","type":"bytes32"}],"stateMutability":"view","type":"function"}] \ No newline at end of file diff --git a/miner/verify_bundles_test.go b/miner/verify_bundles_test.go index 426ba1d8b4..c57f401bff 100644 --- a/miner/verify_bundles_test.go +++ b/miner/verify_bundles_test.go @@ -532,7 +532,7 @@ func TestVerifyBundlesAtomicity(t *testing.T) { } func TestExtractBundleDataFromUsedBundles(t *testing.T) { - _, _, signers := genTestSetup() + _, _, signers := genTestSetup(GasLimit) tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) @@ -580,7 +580,7 @@ func TestExtractBundleDataFromUsedBundles(t *testing.T) { } func TestExtractIncludedTxDataFromEnv(t *testing.T) { - _, _, signers := genTestSetup() + _, _, signers := genTestSetup(GasLimit) tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) @@ -638,7 +638,7 @@ func TestExtractPrivateTxData(t *testing.T) { } func BenchmarkVerifyBundlesAtomicity(b *testing.B) { - _, _, signers := genTestSetup() + _, _, signers := 
genTestSetup(GasLimit) var ( env = &environment{} diff --git a/miner/worker.go b/miner/worker.go index d9fb0e465b..ef31510ba2 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1312,7 +1312,7 @@ func (w *worker) fillTransactionsSelectAlgo(interrupt *int32, env *environment) err error ) switch w.flashbots.algoType { - case ALGO_GREEDY, ALGO_GREEDY_BUCKETS: + case ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP: blockBundles, allBundles, usedSbundles, mempoolTxHashes, err = w.fillTransactionsAlgoWorker(interrupt, env) case ALGO_MEV_GETH: blockBundles, allBundles, mempoolTxHashes, err = w.fillTransactions(interrupt, env) @@ -1432,6 +1432,37 @@ func (w *worker) fillTransactionsAlgoWorker(interrupt *int32, env *environment) w.config.BuilderTxSigningKey, interrupt, ) + newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) + case ALGO_GREEDY_BUCKETS_MULTISNAP: + priceCutoffPercent := w.config.PriceCutoffPercent + if !(priceCutoffPercent >= 0 && priceCutoffPercent <= 100) { + return nil, nil, nil, nil, errors.New("invalid price cutoff percent - must be between 0 and 100") + } + + algoConf := &algorithmConfig{ + DropRevertibleTxOnErr: w.config.DiscardRevertibleTxOnErr, + EnforceProfit: true, + ProfitThresholdPercent: defaultProfitThresholdPercent, + PriceCutoffPercent: priceCutoffPercent, + } + builder := newGreedyBucketsMultiSnapBuilder( + w.chain, w.chainConfig, algoConf, w.blockList, env, + w.config.BuilderTxSigningKey, interrupt, + ) + newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) + case ALGO_GREEDY_MULTISNAP: + // For greedy multi-snap builder, set algorithm configuration to default values, + // except DropRevertibleTxOnErr which is passed in from worker config + algoConf := &algorithmConfig{ + DropRevertibleTxOnErr: w.config.DiscardRevertibleTxOnErr, + EnforceProfit: defaultAlgorithmConfig.EnforceProfit, + ProfitThresholdPercent: defaultAlgorithmConfig.ProfitThresholdPercent, + } + + builder := newGreedyMultiSnapBuilder( + w.chain, w.chainConfig, algoConf, w.blockList, env, + w.config.BuilderTxSigningKey, interrupt, + ) newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) case ALGO_GREEDY: fallthrough @@ -1443,11 +1474,11 @@ func (w *worker) fillTransactionsAlgoWorker(interrupt *int32, env *environment) EnforceProfit: defaultAlgorithmConfig.EnforceProfit, ProfitThresholdPercent: defaultAlgorithmConfig.ProfitThresholdPercent, } + builder := newGreedyBuilder( - w.chain, w.chainConfig, algoConf, w.blockList, env, - w.config.BuilderTxSigningKey, interrupt, + w.chain, w.chainConfig, algoConf, w.blockList, + env, w.config.BuilderTxSigningKey, interrupt, ) - newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) } @@ -1517,7 +1548,8 @@ func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, e totalSbundles++ } - log.Info("Block finalized and assembled", "blockProfit", ethIntToFloat(profit), + log.Info("Block finalized and assembled", + "height", block.Number().String(), "blockProfit", ethIntToFloat(profit), "txs", len(env.txs), "bundles", len(blockBundles), "okSbundles", okSbundles, "totalSbundles", totalSbundles, "gasUsed", block.GasUsed(), "time", time.Since(start)) if metrics.EnabledBuilder {