diff --git a/common/errors.go b/common/errors.go index 6342eb4f..4bb40d12 100644 --- a/common/errors.go +++ b/common/errors.go @@ -2,6 +2,7 @@ package common import ( "errors" + "github.com/multiversx/mx-chain-core-go/core" ) @@ -50,9 +51,6 @@ var ErrFailedCacheEviction = errors.New("failed eviction within cache") // ErrImmuneItemsCapacityReached signals that capacity for immune items is reached var ErrImmuneItemsCapacityReached = errors.New("capacity reached for immune items") -// ErrItemAlreadyInCache signals that an item is already in cache -var ErrItemAlreadyInCache = errors.New("item already in cache") - // ErrCacheSizeInvalid signals that size of cache is less than 1 var ErrCacheSizeInvalid = errors.New("cache size is less than 1") @@ -71,9 +69,6 @@ var ErrNegativeSizeInBytes = errors.New("negative size in bytes") // ErrNilTimeCache signals that a nil time cache has been provided var ErrNilTimeCache = errors.New("nil time cache") -// ErrNilTxGasHandler signals that a nil tx gas handler was provided -var ErrNilTxGasHandler = errors.New("nil tx gas handler") - // ErrNilStoredDataFactory signals that a nil stored data factory has been provided var ErrNilStoredDataFactory = errors.New("nil stored data factory") diff --git a/go.mod b/go.mod index 5e658ad6..f01532d9 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/hashicorp/golang-lru v0.6.0 github.com/multiversx/concurrent-map v0.1.4 - github.com/multiversx/mx-chain-core-go v1.2.21 + github.com/multiversx/mx-chain-core-go v1.2.23 github.com/multiversx/mx-chain-logger-go v1.0.15 github.com/stretchr/testify v1.7.2 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d diff --git a/go.sum b/go.sum index 7f61e942..f98609ea 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiversx/concurrent-map 
v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-core-go v1.2.21 h1:+XVKznPTlUU5EFS1A8chtS8fStW60upRIyF4Pgml19I= -github.com/multiversx/mx-chain-core-go v1.2.21/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= +github.com/multiversx/mx-chain-core-go v1.2.23 h1:8WlCGqJHR2HQ0vN4feJwb7W4VrCwBGIzPPHunOOg5Wc= +github.com/multiversx/mx-chain-core-go v1.2.23/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= github.com/multiversx/mx-chain-logger-go v1.0.15 h1:HlNdK8etyJyL9NQ+6mIXyKPEBo+wRqOwi3n+m2QIHXc= github.com/multiversx/mx-chain-logger-go v1.0.15/go.mod h1:t3PRKaWB1M+i6gUfD27KXgzLJJC+mAQiN+FLlL1yoGQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= diff --git a/testscommon/txcachemocks/mempoolHostMock.go b/testscommon/txcachemocks/mempoolHostMock.go new file mode 100644 index 00000000..d8d797b0 --- /dev/null +++ b/testscommon/txcachemocks/mempoolHostMock.go @@ -0,0 +1,65 @@ +package txcachemocks + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" +) + +// MempoolHostMock - +type MempoolHostMock struct { + minGasLimit uint64 + minGasPrice uint64 + gasPerDataByte uint64 + gasPriceModifier float64 + + ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + GetTransferredValueCalled func(tx data.TransactionHandler) *big.Int +} + +// NewMempoolHostMock - +func NewMempoolHostMock() *MempoolHostMock { + return &MempoolHostMock{ + minGasLimit: 50000, + minGasPrice: 1000000000, + gasPerDataByte: 1500, + gasPriceModifier: 0.01, + } +} + +// ComputeTxFee - +func (mock *MempoolHostMock) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int { + if mock.ComputeTxFeeCalled != nil { + return mock.ComputeTxFeeCalled(tx) + } + + dataLength := uint64(len(tx.GetData())) + gasPriceForMovement := tx.GetGasPrice() + 
gasPriceForProcessing := uint64(float64(gasPriceForMovement) * mock.gasPriceModifier) + + gasLimitForMovement := mock.minGasLimit + dataLength*mock.gasPerDataByte + if tx.GetGasLimit() < gasLimitForMovement { + panic("tx.GetGasLimit() < gasLimitForMovement") + } + + gasLimitForProcessing := tx.GetGasLimit() - gasLimitForMovement + feeForMovement := core.SafeMul(gasPriceForMovement, gasLimitForMovement) + feeForProcessing := core.SafeMul(gasPriceForProcessing, gasLimitForProcessing) + fee := big.NewInt(0).Add(feeForMovement, feeForProcessing) + return fee +} + +// GetTransferredValue - +func (mock *MempoolHostMock) GetTransferredValue(tx data.TransactionHandler) *big.Int { + if mock.GetTransferredValueCalled != nil { + return mock.GetTransferredValueCalled(tx) + } + + return tx.GetValue() +} + +// IsInterfaceNil - +func (mock *MempoolHostMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/txcachemocks/selectionSessionMock.go b/testscommon/txcachemocks/selectionSessionMock.go new file mode 100644 index 00000000..db789b51 --- /dev/null +++ b/testscommon/txcachemocks/selectionSessionMock.go @@ -0,0 +1,91 @@ +package txcachemocks + +import ( + "math/big" + "sync" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-storage-go/types" +) + +// SelectionSessionMock - +type SelectionSessionMock struct { + mutex sync.Mutex + + AccountStateByAddress map[string]*types.AccountState + GetAccountStateCalled func(address []byte) (*types.AccountState, error) + IsIncorrectlyGuardedCalled func(tx data.TransactionHandler) bool +} + +// NewSelectionSessionMock - +func NewSelectionSessionMock() *SelectionSessionMock { + return &SelectionSessionMock{ + AccountStateByAddress: make(map[string]*types.AccountState), + } +} + +// SetNonce - +func (mock *SelectionSessionMock) SetNonce(address []byte, nonce uint64) { + mock.mutex.Lock() + defer mock.mutex.Unlock() + + key := string(address) + + if mock.AccountStateByAddress[key] == nil { 
+ mock.AccountStateByAddress[key] = newDefaultAccountState() + } + + mock.AccountStateByAddress[key].Nonce = nonce +} + +// SetBalance - +func (mock *SelectionSessionMock) SetBalance(address []byte, balance *big.Int) { + mock.mutex.Lock() + defer mock.mutex.Unlock() + + key := string(address) + + if mock.AccountStateByAddress[key] == nil { + mock.AccountStateByAddress[key] = newDefaultAccountState() + } + + mock.AccountStateByAddress[key].Balance = balance +} + +// GetAccountState - +func (mock *SelectionSessionMock) GetAccountState(address []byte) (*types.AccountState, error) { + mock.mutex.Lock() + defer mock.mutex.Unlock() + + if mock.GetAccountStateCalled != nil { + return mock.GetAccountStateCalled(address) + } + + state, ok := mock.AccountStateByAddress[string(address)] + if ok { + return state, nil + } + + return newDefaultAccountState(), nil +} + +// IsIncorrectlyGuarded - +func (mock *SelectionSessionMock) IsIncorrectlyGuarded(tx data.TransactionHandler) bool { + if mock.IsIncorrectlyGuardedCalled != nil { + return mock.IsIncorrectlyGuardedCalled(tx) + } + + return false +} + +// IsInterfaceNil - +func (mock *SelectionSessionMock) IsInterfaceNil() bool { + return mock == nil +} + +func newDefaultAccountState() *types.AccountState { + return &types.AccountState{ + Nonce: 0, + Balance: big.NewInt(1000000000000000000), + } +} diff --git a/testscommon/txcachemocks/txGasHandlerMock.go b/testscommon/txcachemocks/txGasHandlerMock.go deleted file mode 100644 index b26c8823..00000000 --- a/testscommon/txcachemocks/txGasHandlerMock.go +++ /dev/null @@ -1,59 +0,0 @@ -package txcachemocks - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// TxGasHandler - -type TxGasHandler interface { - SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 - MinGasPrice() uint64 - MinGasLimit() uint64 - 
MinGasPriceForProcessing() uint64 - IsInterfaceNil() bool -} - -// TxGasHandlerMock - -type TxGasHandlerMock struct { - MinimumGasMove uint64 - MinimumGasPrice uint64 - GasProcessingDivisor uint64 -} - -// SplitTxGasInCategories - -func (ghm *TxGasHandlerMock) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) { - moveGas := ghm.MinimumGasMove - return moveGas, tx.GetGasLimit() - moveGas -} - -// GasPriceForProcessing - -func (ghm *TxGasHandlerMock) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 { - return tx.GetGasPrice() / ghm.GasProcessingDivisor -} - -// GasPriceForMove - -func (ghm *TxGasHandlerMock) GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 { - return tx.GetGasPrice() -} - -// MinGasPrice - -func (ghm *TxGasHandlerMock) MinGasPrice() uint64 { - return ghm.MinimumGasPrice -} - -// MinGasLimit - -func (ghm *TxGasHandlerMock) MinGasLimit() uint64 { - return ghm.MinimumGasMove -} - -// MinGasPriceProcessing - -func (ghm *TxGasHandlerMock) MinGasPriceForProcessing() uint64 { - return ghm.MinimumGasPrice / ghm.GasProcessingDivisor -} - -// IsInterfaceNil - -func (ghm *TxGasHandlerMock) IsInterfaceNil() bool { - return ghm == nil -} diff --git a/txcache/README.md b/txcache/README.md new file mode 100644 index 00000000..cb6a564c --- /dev/null +++ b/txcache/README.md @@ -0,0 +1,210 @@ +## Mempool + +### Glossary + +1. **selection session:** an ephemeral session during which the mempool selects transactions for a proposer. A session starts when a proposer asks the mempool for transactions and ends when the mempool returns the transactions. The most important part of a session is the _selection loop_. +2. **transaction PPU:** the price per unit of computation, for a transaction. It's computed as `initiallyPaidFee / gasLimit`. +3. **initially paid transaction fee:** the fee for processing a transaction, as known before its actual processing. That is, without knowing the _refund_ component. 
+ +### Configuration + +1. **SelectTransactions::gasRequested:** `10_000_000_000`, the maximum total gas limit of the transactions to be returned to a proposer (one _selection session_). This value is provided by the Protocol. +2. **SelectTransactions::maxNum:** `30_000`, the maximum number of transactions to be returned to a proposer (one _selection session_). This value is provided by the Protocol. + +### Transactions selection + +### Paragraph 1 + +When a proposer asks the mempool for transactions, it provides the following parameters: + + - `gasRequested`: the maximum total gas limit of the transactions to be returned + - `maxNum`: the maximum number of transactions to be returned + +### Paragraph 2 + +The PPU (price per gas unit) of a transaction, is computed (once it enters the mempool) as follows: + +``` +ppu = initiallyPaidFee / gasLimit +``` + +In the formula above, + +``` +initiallyPaidFee = + dataCost * gasPrice + + executionCost * gasPrice * network.gasPriceModifier + +dataCost = network.minGasLimit + len(data) * network.gasPerDataByte + +executionCost = gasLimit - dataCost +``` + +Network parameters (as of November of 2024): + +``` +gasPriceModifier = 0.01 +minGasLimit = 50_000 +gasPerDataByte = 1_500 +``` + +#### Examples + +**(a)** A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_000_000_000`: + +``` +initiallyPaidFee = 50_000_000_000_000 atoms +ppu = 1_000_000_000 atoms +``` + +**(b)** A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_500_000_000`: + +``` +initiallyPaidFee = gasLimit * gasPrice = 75_000_000_000_000 atoms +ppu = 75_000_000_000_000 / 50_000 = 1_500_000_000 atoms +``` + +**(c)** A simple native transfer with a data payload of 7 bytes, with `gasLimit = 50_000 + 7 * 1500` and `gasPrice = 1_000_000_000`: + +``` +initiallyPaidFee = 60_500_000_000_000 atoms +ppu = 60_500_000_000_000 / 60_500 = 1_000_000_000 atoms +``` + +That is, for simple native transfers (whether they hold a data payload or not), the PPU is equal
to the gas price. + +**(d)** A contract call with `gasLimit = 75_000_000` and `gasPrice = 1_000_000_000`, with a data payload of `42` bytes: + +``` +initiallyPaidFee = 861_870_000_000_000 atoms +ppu = 11_491_600 atoms +``` + +**(e)** Similar to **(d)**, but with `gasPrice = 2_000_000_000`: + +``` +initiallyPaidFee = 1_723_740_000_000_000 atoms +ppu = 22_983_200 atoms +``` + +That is, for contract calls, the PPU is not equal to the gas price, but much lower, due to the contract call _cost subsidy_. **A higher gas price will result in a higher PPU.** + +### Paragraph 3 + +Transaction **A** is considered **more valuable (for the Network)** than transaction **B** if **it has a higher PPU**. + +If two transactions have the same PPU, they are ordered by gas limit (higher is better, promoting less "execution fragmentation"). In the end, they are ordered using an arbitrary, but deterministic rule: the transaction with the "lower" transaction hash "wins" the comparison. + +Pseudo-code: + +``` +func isTransactionMoreValuableForNetwork(A, B): + if A.ppu > B.ppu: + return true + if A.ppu < B.ppu: + return false + + if A.gasLimit > B.gasLimit: + return true + if A.gasLimit < B.gasLimit: + return false + + return A.hash < B.hash +``` + +### Paragraph 4 + +The mempool selects transactions as follows (pseudo-code): + +``` +func selectTransactions(gasRequested, maxNum): + // Setup phase + senders := list of all current senders in the mempool, in an arbitrary order + bunchesOfTransactions := sourced from senders, nicely sorted by nonce + + // Holds selected transactions + selectedTransactions := empty + + // Holds not-yet-selected transactions, ordered by PPU + competitionHeap := empty + + for each bunch in bunchesOfTransactions: + competitionHeap.push(next available transaction from bunch) + + // Selection loop + while competitionHeap is not empty: + mostValuableTransaction := competitionHeap.pop() + + // Check if adding the next transaction exceeds limits + if 
selectedTransactions.totalGasLimit + mostValuableTransaction.gasLimit > gasRequested: + break + if selectedTransactions.length + 1 > maxNum: + break + + selectedTransactions.append(mostValuableTransaction) + + nextTransaction := next available transaction from the bunch of mostValuableTransaction + if nextTransaction exists: + competitionHeap.push(nextTransaction) + + return selectedTransactions +``` + +Thus, the mempool selects transactions using an efficient and value-driven algorithm that ensures the most valuable transactions (in terms of PPU) are prioritized while maintaining correct nonce sequencing per sender. The selection process is as follows: + +**Setup phase:** + + - **Snapshot of senders:** + - Before starting the selection loop, obtain a snapshot of all current senders in the mempool in an arbitrary order. + + - **Organize transactions into bunches:** + - For each sender, collect all their pending transactions and organize them into a "bunch." + - Each bunch is: + - **Sorted by nonce:** Transactions are ordered in ascending order based on their nonce values. + + - **Prepare the heap:** + - Extract the first transaction (lowest nonce) from each sender's bunch. + - Place these transactions onto a max heap, which is ordered based on the transaction's PPU. + +**Selection loop:** + + - **Iterative selection:** + - Continue the loop until either the total gas of selected transactions meets or exceeds `gasRequested`, or the number of selected transactions reaches `maxNum`. + - In each iteration: + - **Select the most valuable transaction:** + - Pop the transaction with the highest PPU from the heap. + - Append this transaction to the list of `selectedTransactions`. + - **Update the sender's bunch:** + - If the sender of the selected transaction has more transactions in their bunch: + - Take the next transaction (next higher nonce) from the bunch. + - Push this transaction onto the heap to compete in subsequent iterations. 
+ - This process ensures that at each step, the most valuable transaction across all senders is selected while maintaining proper nonce order for each sender. + + - **Early termination:** + - The selection loop can terminate early if either of the following conditions is satisfied before all transactions are processed: + - The accumulated gas of selected transactions meets or exceeds `gasRequested`. + - The number of selected transactions reaches `maxNum`. + +**Additional notes:** + - Within the selection loop, the current nonce of the sender is queried from the blockchain, lazily (when needed). + - If an initial nonce gap is detected, the sender is (completely) skipped in the current selection session. + - If a middle nonce gap is detected, the sender is skipped (from now on) in the current selection session. + - Transactions with nonces lower than the current nonce of the sender are skipped. + - Transactions having the same nonce as a previously selected one (in the scope of a sender) are skipped. Also see paragraph 5. + - Incorrectly guarded transactions are skipped. + - Once the accumulated fees of selected transactions of a given sender exceed the sender's balance, the sender is skipped (from now on). + + +### Paragraph 5 + +On the node's side, the selected transactions are shuffled using a deterministic algorithm. This shuffling ensures that the transaction order remains unpredictable to the proposer, effectively preventing _front-running attacks_. Therefore, being selected first by the mempool does not guarantee that a transaction will be included first in the block. Additionally, selection by the mempool does not ensure inclusion in the very next block, as the proposer has the final authority on which transactions to include, based on **the remaining space available** in the block.
+ +### Order of transactions of the same sender + +Transactions from the same sender are organized based on specific rules to ensure proper sequencing for the selection flow: + +1. **Nonce ascending**: transactions are primarily sorted by their nonce values in ascending order. This sequence ensures that the transactions are processed in the order intended by the sender, as the nonce represents the transaction number in the sender's sequence. + +2. **Gas price descending (same nonce)**: if multiple transactions share the same nonce, they are sorted by their gas prices in descending order - transactions offering higher gas prices are prioritized. This mechanism allows one to easily override a pending transaction with a higher gas price. + +3. **Hash ascending (same nonce and gas price)**: for transactions that have identical nonce and gas price, the tie is broken by sorting them based on their transaction hash in ascending order. This provides a consistent and deterministic ordering when other factors are equal. While this ordering isn't a critical aspect of the mempool's operation, it ensures logical consistency. 
diff --git a/txcache/benchmarks.sh b/txcache/benchmarks.sh deleted file mode 100644 index a3f9fa36..00000000 --- a/txcache/benchmarks.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -go test -bench="BenchmarkSendersMap_GetSnapshotAscending$" -benchtime=1x diff --git a/txcache/config.go b/txcache/config.go index 40236d3d..1e371d41 100644 --- a/txcache/config.go +++ b/txcache/config.go @@ -15,19 +15,18 @@ const maxNumBytesUpperBound = 1_073_741_824 // one GB const maxNumItemsPerSenderLowerBound = 1 const maxNumBytesPerSenderLowerBound = maxNumItemsPerSenderLowerBound * 1 const maxNumBytesPerSenderUpperBound = 33_554_432 // 32 MB -const numTxsToPreemptivelyEvictLowerBound = 1 -const numSendersToPreemptivelyEvictLowerBound = 1 +const numItemsToPreemptivelyEvictLowerBound = uint32(1) // ConfigSourceMe holds cache configuration type ConfigSourceMe struct { - Name string - NumChunks uint32 - EvictionEnabled bool - NumBytesThreshold uint32 - NumBytesPerSenderThreshold uint32 - CountThreshold uint32 - CountPerSenderThreshold uint32 - NumSendersToPreemptivelyEvict uint32 + Name string + NumChunks uint32 + EvictionEnabled bool + NumBytesThreshold uint32 + NumBytesPerSenderThreshold uint32 + CountThreshold uint32 + CountPerSenderThreshold uint32 + NumItemsToPreemptivelyEvict uint32 } type senderConstraints struct { @@ -35,7 +34,6 @@ type senderConstraints struct { maxNumBytes uint32 } -// TODO: Upon further analysis and brainstorming, add some sensible minimum accepted values for the appropriate fields. 
func (config *ConfigSourceMe) verify() error { if len(config.Name) == 0 { return fmt.Errorf("%w: config.Name is invalid", common.ErrInvalidConfig) @@ -49,16 +47,15 @@ func (config *ConfigSourceMe) verify() error { if config.CountPerSenderThreshold < maxNumItemsPerSenderLowerBound { return fmt.Errorf("%w: config.CountPerSenderThreshold is invalid", common.ErrInvalidConfig) } - if config.EvictionEnabled { - if config.NumBytesThreshold < maxNumBytesLowerBound || config.NumBytesThreshold > maxNumBytesUpperBound { - return fmt.Errorf("%w: config.NumBytesThreshold is invalid", common.ErrInvalidConfig) - } - if config.CountThreshold < maxNumItemsLowerBound { - return fmt.Errorf("%w: config.CountThreshold is invalid", common.ErrInvalidConfig) - } - if config.NumSendersToPreemptivelyEvict < numSendersToPreemptivelyEvictLowerBound { - return fmt.Errorf("%w: config.NumSendersToPreemptivelyEvict is invalid", common.ErrInvalidConfig) - } + + if config.NumBytesThreshold < maxNumBytesLowerBound || config.NumBytesThreshold > maxNumBytesUpperBound { + return fmt.Errorf("%w: config.NumBytesThreshold is invalid", common.ErrInvalidConfig) + } + if config.CountThreshold < maxNumItemsLowerBound { + return fmt.Errorf("%w: config.CountThreshold is invalid", common.ErrInvalidConfig) + } + if config.NumItemsToPreemptivelyEvict < numItemsToPreemptivelyEvictLowerBound { + return fmt.Errorf("%w: config.NumItemsToPreemptivelyEvict is invalid", common.ErrInvalidConfig) } return nil @@ -75,7 +72,7 @@ func (config *ConfigSourceMe) getSenderConstraints() senderConstraints { func (config *ConfigSourceMe) String() string { bytes, err := json.Marshal(config) if err != nil { - log.Error("ConfigSourceMe.String()", "err", err) + log.Error("ConfigSourceMe.String", "err", err) } return string(bytes) @@ -90,7 +87,6 @@ type ConfigDestinationMe struct { NumItemsToPreemptivelyEvict uint32 } -// TODO: Upon further analysis and brainstorming, add some sensible minimum accepted values for the appropriate fields. 
func (config *ConfigDestinationMe) verify() error { if len(config.Name) == 0 { return fmt.Errorf("%w: config.Name is invalid", common.ErrInvalidConfig) @@ -104,7 +100,7 @@ func (config *ConfigDestinationMe) verify() error { if config.MaxNumBytes < maxNumBytesLowerBound || config.MaxNumBytes > maxNumBytesUpperBound { return fmt.Errorf("%w: config.MaxNumBytes is invalid", common.ErrInvalidConfig) } - if config.NumItemsToPreemptivelyEvict < numTxsToPreemptivelyEvictLowerBound { + if config.NumItemsToPreemptivelyEvict < numItemsToPreemptivelyEvictLowerBound { return fmt.Errorf("%w: config.NumItemsToPreemptivelyEvict is invalid", common.ErrInvalidConfig) } @@ -115,7 +111,7 @@ func (config *ConfigDestinationMe) verify() error { func (config *ConfigDestinationMe) String() string { bytes, err := json.Marshal(config) if err != nil { - log.Error("ConfigDestinationMe.String()", "err", err) + log.Error("ConfigDestinationMe.String", "err", err) } return string(bytes) diff --git a/txcache/constants.go b/txcache/constants.go index a76fb3d3..fe5f1993 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -1,9 +1,5 @@ package txcache -const estimatedNumOfSweepableSendersPerSelection = 100 - -const senderGracePeriodLowerBound = 2 - -const senderGracePeriodUpperBound = 2 - -const numEvictedTxsToDisplay = 3 +const diagnosisMaxTransactionsToDisplay = 10000 +const initialCapacityOfSelectionSlice = 30000 +const selectionLoopDurationCheckInterval = 10 diff --git a/txcache/crossTxCache.go b/txcache/crossTxCache.go index 0c21e0d9..1a64e77b 100644 --- a/txcache/crossTxCache.go +++ b/txcache/crossTxCache.go @@ -46,7 +46,7 @@ func NewCrossTxCache(config ConfigDestinationMe) (*CrossTxCache, error) { // ImmunizeTxsAgainstEviction marks items as non-evictable func (cache *CrossTxCache) ImmunizeTxsAgainstEviction(keys [][]byte) { numNow, numFuture := cache.ImmunityCache.ImmunizeKeys(keys) - log.Trace("CrossTxCache.ImmunizeTxsAgainstEviction()", + 
log.Trace("CrossTxCache.ImmunizeTxsAgainstEviction", "name", cache.config.Name, "len(keys)", len(keys), "numNow", numNow, @@ -57,6 +57,7 @@ func (cache *CrossTxCache) ImmunizeTxsAgainstEviction(keys [][]byte) { // AddTx adds a transaction in the cache func (cache *CrossTxCache) AddTx(tx *WrappedTransaction) (has, added bool) { + log.Trace("CrossTxCache.AddTx", "name", cache.config.Name, "txHash", tx.TxHash) return cache.HasOrAdd(tx.TxHash, tx, int(tx.Size)) } @@ -93,6 +94,7 @@ func (cache *CrossTxCache) Peek(key []byte) (value interface{}, ok bool) { // RemoveTxByHash removes tx by hash func (cache *CrossTxCache) RemoveTxByHash(txHash []byte) bool { + log.Trace("CrossTxCache.RemoveTxByHash", "name", cache.config.Name, "txHash", txHash) return cache.RemoveWithResult(txHash) } diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go new file mode 100644 index 00000000..df2a99fe --- /dev/null +++ b/txcache/diagnosis.go @@ -0,0 +1,120 @@ +package txcache + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + logger "github.com/multiversx/mx-chain-logger-go" +) + +type printedTransaction struct { + Hash string `json:"hash"` + PPU uint64 `json:"ppu"` + Nonce uint64 `json:"nonce"` + Sender string `json:"sender"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` +} + +// Diagnose checks the state of the cache for inconsistencies and displays a summary, senders and transactions. 
+func (cache *TxCache) Diagnose(_ bool) { + cache.diagnoseCounters() + cache.diagnoseTransactions() +} + +func (cache *TxCache) diagnoseCounters() { + if log.GetLevel() > logger.LogDebug { + return + } + + sizeInBytes := cache.NumBytes() + numTxsEstimate := int(cache.CountTx()) + numTxsInChunks := cache.txByHash.backingMap.Count() + txsKeys := cache.txByHash.backingMap.Keys() + numSendersEstimate := int(cache.CountSenders()) + numSendersInChunks := cache.txListBySender.backingMap.Count() + sendersKeys := cache.txListBySender.backingMap.Keys() + + fine := numSendersEstimate == numSendersInChunks + fine = fine && (int(numSendersEstimate) == len(sendersKeys)) + fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) + + log.Debug("diagnoseCounters", + "fine", fine, + "numTxsEstimate", numTxsEstimate, + "numTxsInChunks", numTxsInChunks, + "len(txsKeys)", len(txsKeys), + "sizeInBytes", sizeInBytes, + "numBytesThreshold", cache.config.NumBytesThreshold, + "numSendersEstimate", numSendersEstimate, + "numSendersInChunks", numSendersInChunks, + "len(sendersKeys)", len(sendersKeys), + ) +} + +func (cache *TxCache) diagnoseTransactions() { + if logDiagnoseTransactions.GetLevel() > logger.LogTrace { + return + } + + transactions := cache.getAllTransactions() + if len(transactions) == 0 { + return + } + + numToDisplay := core.MinInt(diagnosisMaxTransactionsToDisplay, len(transactions)) + logDiagnoseTransactions.Trace("diagnoseTransactions", "numTransactions", len(transactions), "numToDisplay", numToDisplay) + logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJSON(transactions[:numToDisplay], "diagnoseTransactions")) +} + +// marshalTransactionsToNewlineDelimitedJSON converts a list of transactions to a newline-delimited JSON string. +// Note: each line is indexed, to improve readability. The index is easily removable if separate analysis is needed. 
+func marshalTransactionsToNewlineDelimitedJSON(transactions []*WrappedTransaction, linePrefix string) string { + builder := strings.Builder{} + builder.WriteString("\n") + + for i, wrappedTx := range transactions { + printedTx := convertWrappedTransactionToPrintedTransaction(wrappedTx) + printedTxJSON, _ := json.Marshal(printedTx) + + builder.WriteString(fmt.Sprintf("%s#%d: ", linePrefix, i)) + builder.WriteString(string(printedTxJSON)) + builder.WriteString("\n") + } + + builder.WriteString("\n") + return builder.String() +} + +func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction) *printedTransaction { + transaction := wrappedTx.Tx + + return &printedTransaction{ + Hash: hex.EncodeToString(wrappedTx.TxHash), + Nonce: transaction.GetNonce(), + Receiver: hex.EncodeToString(transaction.GetRcvAddr()), + Sender: hex.EncodeToString(transaction.GetSndAddr()), + GasPrice: transaction.GetGasPrice(), + GasLimit: transaction.GetGasLimit(), + DataLength: len(transaction.GetData()), + PPU: wrappedTx.PricePerUnit, + } +} + +func displaySelectionOutcome(contextualLogger logger.Logger, linePrefix string, transactions []*WrappedTransaction) { + if contextualLogger.GetLevel() > logger.LogTrace { + return + } + + if len(transactions) > 0 { + contextualLogger.Trace("displaySelectionOutcome - transactions (as newline-separated JSON):") + contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJSON(transactions, linePrefix)) + } else { + contextualLogger.Trace("displaySelectionOutcome - transactions: none") + } +} diff --git a/txcache/disabledCache.go b/txcache/disabledCache.go index 5a5473e5..d448ba59 100644 --- a/txcache/disabledCache.go +++ b/txcache/disabledCache.go @@ -25,9 +25,9 @@ func (cache *DisabledCache) GetByTxHash(_ []byte) (*WrappedTransaction, bool) { return nil, false } -// SelectTransactionsWithBandwidth returns an empty slice -func (cache *DisabledCache) SelectTransactionsWithBandwidth(_ int, _ int, _ uint64) []*WrappedTransaction { - 
return make([]*WrappedTransaction, 0) +// SelectTransactions returns an empty slice +func (cache *DisabledCache) SelectTransactions(uint64, int) ([]*WrappedTransaction, uint64) { + return make([]*WrappedTransaction, 0), 0 } // RemoveTxByHash does nothing @@ -105,10 +105,6 @@ func (cache *DisabledCache) RegisterHandler(func(key []byte, value interface{}), func (cache *DisabledCache) UnRegisterHandler(string) { } -// NotifyAccountNonce does nothing -func (cache *DisabledCache) NotifyAccountNonce(_ []byte, _ uint64) { -} - // ImmunizeTxsAgainstEviction does nothing func (cache *DisabledCache) ImmunizeTxsAgainstEviction(_ [][]byte) { } diff --git a/txcache/disabledCache_test.go b/txcache/disabledCache_test.go index a19e947a..9725a01e 100644 --- a/txcache/disabledCache_test.go +++ b/txcache/disabledCache_test.go @@ -1,7 +1,6 @@ package txcache import ( - "math" "testing" "github.com/stretchr/testify/require" @@ -18,8 +17,9 @@ func TestDisabledCache_DoesNothing(t *testing.T) { require.Nil(t, tx) require.False(t, ok) - selection := cache.SelectTransactionsWithBandwidth(42, 42, math.MaxUint64) + selection, accumulatedGas := cache.SelectTransactions(42, 42) require.Equal(t, 0, len(selection)) + require.Equal(t, uint64(0), accumulatedGas) removed := cache.RemoveTxByHash([]byte{}) require.False(t, removed) diff --git a/txcache/errors.go b/txcache/errors.go new file mode 100644 index 00000000..71ee0169 --- /dev/null +++ b/txcache/errors.go @@ -0,0 +1,8 @@ +package txcache + +import "errors" + +var errNilMempoolHost = errors.New("nil mempool host") +var errNilSelectionSession = errors.New("nil selection session") +var errItemAlreadyInCache = errors.New("item already in cache") +var errEmptyBunchOfTransactions = errors.New("empty bunch of transactions") diff --git a/txcache/eviction.go b/txcache/eviction.go index 985a1986..61d09cfb 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -1,18 +1,27 @@ package txcache import ( + "container/heap" + 
"github.com/multiversx/mx-chain-core-go/core" ) -// doEviction does cache eviction -// We do not allow more evictions to start concurrently -func (cache *TxCache) doEviction() { +// evictionJournal keeps a short journal about the eviction process +// This is useful for debugging and reasoning about the eviction +type evictionJournal struct { + numEvicted int + numEvictedByPass []int +} + +// doEviction does cache eviction. +// We do not allow more evictions to start concurrently. +func (cache *TxCache) doEviction() *evictionJournal { if cache.isEvictionInProgress.IsSet() { - return + return nil } if !cache.isCapacityExceeded() { - return + return nil } cache.evictionMutex.Lock() @@ -22,31 +31,37 @@ func (cache *TxCache) doEviction() { defer cache.isEvictionInProgress.Reset() if !cache.isCapacityExceeded() { - return + return nil } - stopWatch := cache.monitorEvictionStart() - cache.makeSnapshotOfSenders() + logRemove.Debug("doEviction: before eviction", + "num bytes", cache.NumBytes(), + "num txs", cache.CountTx(), + "num senders", cache.CountSenders(), + ) - journal := evictionJournal{} - journal.passOneNumSteps, journal.passOneNumTxs, journal.passOneNumSenders = cache.evictSendersInLoop() - journal.evictionPerformed = true - cache.evictionJournal = journal + stopWatch := core.NewStopWatch() + stopWatch.Start("eviction") - cache.monitorEvictionEnd(stopWatch) - cache.destroySnapshotOfSenders() -} + evictionJournal := cache.evictLeastLikelyToSelectTransactions() -func (cache *TxCache) makeSnapshotOfSenders() { - cache.evictionSnapshotOfSenders = cache.txListBySender.getSnapshotAscending() -} + stopWatch.Stop("eviction") -func (cache *TxCache) destroySnapshotOfSenders() { - cache.evictionSnapshotOfSenders = nil + logRemove.Debug( + "doEviction: after eviction", + "num bytes", cache.NumBytes(), + "num now", cache.CountTx(), + "num senders", cache.CountSenders(), + "duration", stopWatch.GetMeasurement("eviction"), + "evicted txs", evictionJournal.numEvicted, + ) + + 
return evictionJournal } func (cache *TxCache) isCapacityExceeded() bool { - return cache.areThereTooManyBytes() || cache.areThereTooManySenders() || cache.areThereTooManyTxs() + exceeded := cache.areThereTooManyBytes() || cache.areThereTooManySenders() || cache.areThereTooManyTxs() + return exceeded } func (cache *TxCache) areThereTooManyBytes() bool { @@ -67,62 +82,88 @@ func (cache *TxCache) areThereTooManyTxs() bool { return tooManyTxs } -// This is called concurrently by two goroutines: the eviction one and the sweeping one -func (cache *TxCache) doEvictItems(txsToEvict [][]byte, sendersToEvict []string) (countTxs uint32, countSenders uint32) { - countTxs = cache.txByHash.RemoveTxsBulk(txsToEvict) - countSenders = cache.txListBySender.RemoveSendersBulk(sendersToEvict) - return -} +// Eviction tolerates concurrent transaction additions / removals. +func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { + senders := cache.getSenders() + bunches := make([]bunchOfTransactions, 0, len(senders)) -func (cache *TxCache) evictSendersInLoop() (uint32, uint32, uint32) { - return cache.evictSendersWhile(cache.isCapacityExceeded) -} - -// evictSendersWhileTooManyTxs removes transactions in a loop, as long as "shouldContinue" is true -// One batch of senders is removed in each step -func (cache *TxCache) evictSendersWhile(shouldContinue func() bool) (step uint32, numTxs uint32, numSenders uint32) { - if !shouldContinue() { - return + for _, sender := range senders { + // Include transactions after gaps, as well (important), unlike when selecting transactions for processing. + // Reverse the order of transactions (will come in handy later, when creating the min-heap). 
+ bunch := sender.getTxsReversed() + bunches = append(bunches, bunch) } - snapshot := cache.evictionSnapshotOfSenders - snapshotLength := uint32(len(snapshot)) - batchSize := cache.config.NumSendersToPreemptivelyEvict - batchStart := uint32(0) + journal := &evictionJournal{} - for step = 0; shouldContinue(); step++ { - batchEnd := batchStart + batchSize - batchEndBounded := core.MinUint32(batchEnd, snapshotLength) - batch := snapshot[batchStart:batchEndBounded] + // Heap is reused among passes. + // Items popped from the heap are added to "transactionsToEvict" (slice is re-created in each pass). + transactionsHeap := newMinTransactionsHeap(len(bunches)) + heap.Init(transactionsHeap) - numTxsEvictedInStep, numSendersEvictedInStep := cache.evictSendersAndTheirTxs(batch) + // Initialize the heap with the first transaction of each bunch + for _, bunch := range bunches { + item, err := newTransactionsHeapItem(bunch) + if err != nil { + continue + } - numTxs += numTxsEvictedInStep - numSenders += numSendersEvictedInStep - batchStart += batchSize + // Items will be reused (see below). Each sender gets one (and only one) item in the heap. + heap.Push(transactionsHeap, item) + } - reachedEnd := batchStart >= snapshotLength - noTxsEvicted := numTxsEvictedInStep == 0 - incompleteBatch := numSendersEvictedInStep < batchSize + for pass := 0; cache.isCapacityExceeded(); pass++ { + transactionsToEvict := make(bunchOfTransactions, 0, cache.config.NumItemsToPreemptivelyEvict) + transactionsToEvictHashes := make([][]byte, 0, cache.config.NumItemsToPreemptivelyEvict) + + // Select transactions (sorted). + for transactionsHeap.Len() > 0 { + // Always pick the "worst" transaction. + item := heap.Pop(transactionsHeap).(*transactionsHeapItem) + + if len(transactionsToEvict) >= int(cache.config.NumItemsToPreemptivelyEvict) { + // We have enough transactions to evict in this pass. 
+ break + } + + transactionsToEvict = append(transactionsToEvict, item.currentTransaction) + transactionsToEvictHashes = append(transactionsToEvictHashes, item.currentTransaction.TxHash) + + // If there are more transactions in the same bunch (same sender as the popped item), + // add the next one to the heap (to compete with the others in being "the worst"). + // Item is reused (same originating sender), pushed back on the heap. + if item.gotoNextTransaction() { + heap.Push(transactionsHeap, item) + } + } - shouldBreak := noTxsEvicted || incompleteBatch || reachedEnd - if shouldBreak { + if len(transactionsToEvict) == 0 { + // No more transactions to evict. break } - } - return -} + // For each sender, find the "lowest" (in nonce) transaction to evict, + // so that we can remove all transactions with higher or equal nonces (of a sender) in one go (see below). + lowestToEvictBySender := make(map[string]uint64) + + for _, tx := range transactionsToEvict { + sender := string(tx.Tx.GetSndAddr()) + lowestToEvictBySender[sender] = tx.Tx.GetNonce() + } + + // Remove those transactions from "txListBySender". + for sender, nonce := range lowestToEvictBySender { + cache.txListBySender.removeTransactionsWithHigherOrEqualNonce([]byte(sender), nonce) + } + + // Remove those transactions from "txByHash". + _ = cache.txByHash.RemoveTxsBulk(transactionsToEvictHashes) -// This is called concurrently by two goroutines: the eviction one and the sweeping one -func (cache *TxCache) evictSendersAndTheirTxs(listsToEvict []*txListForSender) (uint32, uint32) { - sendersToEvict := make([]string, 0, len(listsToEvict)) - txsToEvict := make([][]byte, 0, approximatelyCountTxInLists(listsToEvict)) + journal.numEvictedByPass = append(journal.numEvictedByPass, len(transactionsToEvict)) + journal.numEvicted += len(transactionsToEvict) - for _, txList := range listsToEvict { - sendersToEvict = append(sendersToEvict, txList.sender) - txsToEvict = append(txsToEvict, txList.getTxHashes()...) 
+ logRemove.Debug("evictLeastLikelyToSelectTransactions", "pass", pass, "num evicted", len(transactionsToEvict)) } - return cache.doEvictItems(txsToEvict, sendersToEvict) + return journal } diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index f5555ec0..9b435d8e 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -1,307 +1,227 @@ package txcache import ( + "fmt" "math" - "sync" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) -func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { +func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - CountThreshold: 100, - CountPerSenderThreshold: math.MaxUint32, - NumSendersToPreemptivelyEvict: 20, - NumBytesThreshold: maxNumBytesUpperBound, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 4, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, } - txGasHandler, _ := dummyParams() + host := txcachemocks.NewMempoolHostMock() - cache, err := NewTxCache(config, txGasHandler) + cache, err := NewTxCache(config, host) require.Nil(t, err) require.NotNil(t, cache) - // 200 senders, each with 1 transaction - for index := 0; index < 200; index++ { - sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTx([]byte{byte(index)}, sender, uint64(1))) - } - - require.Equal(t, int64(200), cache.txListBySender.counter.Get()) - require.Equal(t, int64(200), cache.txByHash.counter.Get()) + cache.AddTx(createTx([]byte("hash-alice"), "alice", 1).withGasPrice(1 * oneBillion)) + cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withGasPrice(2 * oneBillion)) + 
cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withGasPrice(3 * oneBillion)) + cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withGasPrice(4 * oneBillion)) + cache.AddTx(createTx([]byte("hash-dan"), "dan", 1).withGasPrice(5 * oneBillion)) - cache.makeSnapshotOfSenders() - steps, nTxs, nSenders := cache.evictSendersInLoop() + journal := cache.doEviction() + require.Equal(t, 1, journal.numEvicted) + require.Equal(t, []int{1}, journal.numEvictedByPass) - require.Equal(t, uint32(5), steps) - require.Equal(t, uint32(100), nTxs) - require.Equal(t, uint32(100), nSenders) - require.Equal(t, int64(100), cache.txListBySender.counter.Get()) - require.Equal(t, int64(100), cache.txByHash.counter.Get()) + // Alice and Bob evicted. Carol still there (better score). + _, ok := cache.GetByTxHash([]byte("hash-carol")) + require.True(t, ok) + require.Equal(t, uint64(4), cache.CountSenders()) + require.Equal(t, uint64(4), cache.CountTx()) } -func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { - numBytesPerTx := uint32(1000) - +func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - CountThreshold: math.MaxUint32, - CountPerSenderThreshold: math.MaxUint32, - NumBytesThreshold: numBytesPerTx * 100, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - NumSendersToPreemptivelyEvict: 20, + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, } - txGasHandler, _ := dummyParams() - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) - - // 200 senders, each with 1 transaction - for index := 0; index < 200; index++ { - sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTxWithParams([]byte{byte(index)}, sender, uint64(1), 
uint64(numBytesPerTx), 10000, 100*oneBillion)) - } - - require.Equal(t, int64(200), cache.txListBySender.counter.Get()) - require.Equal(t, int64(200), cache.txByHash.counter.Get()) - - cache.makeSnapshotOfSenders() - steps, nTxs, nSenders := cache.evictSendersInLoop() - - require.Equal(t, uint32(5), steps) - require.Equal(t, uint32(100), nTxs) - require.Equal(t, uint32(100), nSenders) - require.Equal(t, int64(100), cache.txListBySender.counter.Get()) - require.Equal(t, int64(100), cache.txByHash.counter.Get()) -} + host := txcachemocks.NewMempoolHostMock() -func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - NumBytesThreshold: maxNumBytesUpperBound, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: 2, - CountPerSenderThreshold: math.MaxUint32, - NumSendersToPreemptivelyEvict: 2, - } - txGasHandler, _ := dummyParamsWithGasPrice(100 * oneBillion) - cache, err := NewTxCache(config, txGasHandler) + cache, err := NewTxCache(config, host) require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 1000, 100000, 100*oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 1000, 100000, 100*oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 1000, 100000, 700*oneBillion)) + cache.AddTx(createTx([]byte("hash-alice"), "alice", 1).withSize(256).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withSize(256).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withSize(256).withGasLimit(500000).withGasPrice(1.5 * oneBillion)) + cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withSize(256).withGasLimit(500000).withGasPrice(3 * oneBillion)) - cache.doEviction() - require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) - require.Equal(t, uint32(2), 
cache.evictionJournal.passOneNumSenders) - require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) + journal := cache.doEviction() + require.Equal(t, 1, journal.numEvicted) + require.Equal(t, []int{1}, journal.numEvictedByPass) - // Alice and Bob evicted. Carol still there. + // Alice and Bob evicted (lower score). Carol and Eve still there. _, ok := cache.GetByTxHash([]byte("hash-carol")) require.True(t, ok) - require.Equal(t, uint64(1), cache.CountSenders()) - require.Equal(t, uint64(1), cache.CountTx()) + _, ok = cache.GetByTxHash([]byte("hash-eve")) + require.True(t, ok) + require.Equal(t, uint64(3), cache.CountSenders()) + require.Equal(t, uint64(3), cache.CountTx()) } -func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { +func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - CountThreshold: math.MaxUint32, - CountPerSenderThreshold: math.MaxUint32, - NumBytesThreshold: 1000, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - NumSendersToPreemptivelyEvict: 2, + Name: "untitled", + NumChunks: 1, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 4, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, } - txGasHandler, _ := dummyParamsWithGasPrice(oneBillion) - cache, err := NewTxCache(config, txGasHandler) + host := txcachemocks.NewMempoolHostMock() + + cache, err := NewTxCache(config, host) require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 128, 100000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 128, 100000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-dave1"), "dave", uint64(3), 128, 40000000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-dave2"), "dave", uint64(1), 128, 
50000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-dave3"), "dave", uint64(2), 128, 50000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-chris"), "chris", uint64(1), 128, 50000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-richard"), "richard", uint64(1), 128, 50000, uint64(1.2*oneBillion))) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 128, 100000, 7*oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-eve"), "eve", uint64(1), 128, 50000, 4*oneBillion)) - - scoreAlice := cache.getScoreOfSender("alice") - scoreBob := cache.getScoreOfSender("bob") - scoreDave := cache.getScoreOfSender("dave") - scoreCarol := cache.getScoreOfSender("carol") - scoreEve := cache.getScoreOfSender("eve") - scoreChris := cache.getScoreOfSender("chris") - scoreRichard := cache.getScoreOfSender("richard") - - require.Equal(t, uint32(23), scoreAlice) - require.Equal(t, uint32(23), scoreBob) - require.Equal(t, uint32(7), scoreDave) - require.Equal(t, uint32(100), scoreCarol) - require.Equal(t, uint32(100), scoreEve) - require.Equal(t, uint32(33), scoreChris) - require.Equal(t, uint32(54), scoreRichard) - - cache.doEviction() - require.Equal(t, uint32(4), cache.evictionJournal.passOneNumTxs) - require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) - require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) + _ = cache.isEvictionInProgress.SetReturningPrevious() - // Alice and Bob evicted (lower score). Carol and Eve still there. 
- _, ok := cache.GetByTxHash([]byte("hash-carol")) - require.True(t, ok) - require.Equal(t, uint64(5), cache.CountSenders()) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", uint64(1))) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", uint64(2))) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", uint64(3))) + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", uint64(4))) + cache.AddTx(createTx([]byte("hash-alice-5"), "alice", uint64(5))) + + // Nothing is evicted because eviction is already in progress. + journal := cache.doEviction() + require.Nil(t, journal) require.Equal(t, uint64(5), cache.CountTx()) + + cache.isEvictionInProgress.Reset() + + // Now eviction can happen. + journal = cache.doEviction() + require.NotNil(t, journal) + require.Equal(t, 1, journal.numEvicted) + require.Equal(t, 4, int(cache.CountTx())) } -func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { +func TestBenchmarkTxCache_DoEviction(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 1, - CountThreshold: 0, - NumSendersToPreemptivelyEvict: 1, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 300000, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 50000, } - txGasHandler, _ := dummyParams() - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + host := txcachemocks.NewMempoolHostMock() - cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) + sw := core.NewStopWatch() - _ = cache.isEvictionInProgress.SetReturningPrevious() - cache.doEviction() + t.Run("numSenders = 35000, numTransactions = 10", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) - require.False(t, cache.evictionJournal.evictionPerformed) -} + 
cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 35000, 10) + cache.config.EvictionEnabled = true -func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) { - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 1, - CountThreshold: 0, - NumSendersToPreemptivelyEvict: 42, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - } + require.Equal(t, uint64(350000), cache.CountTx()) - txGasHandler, _ := dummyParams() - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) - cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) + require.Equal(t, 50000, journal.numEvicted) + require.Equal(t, 1, len(journal.numEvictedByPass)) + }) - cache.makeSnapshotOfSenders() + t.Run("numSenders = 100000, numTransactions = 5", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) - steps, nTxs, nSenders := cache.evictSendersInLoop() - require.Equal(t, uint32(0), steps) - require.Equal(t, uint32(1), nTxs) - require.Equal(t, uint32(1), nSenders) -} + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 100000, 5) + cache.config.EvictionEnabled = true -func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 1, - CountThreshold: 0, - NumSendersToPreemptivelyEvict: 1, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - } + require.Equal(t, uint64(500000), cache.CountTx()) - txGasHandler, _ := dummyParams() - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) - cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) - 
cache.AddTx(createTx([]byte("hash-bob"), "bob", uint64(1))) + require.Equal(t, 200000, journal.numEvicted) + require.Equal(t, 4, len(journal.numEvictedByPass)) + }) - cache.makeSnapshotOfSenders() + t.Run("numSenders = 400000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) - steps, nTxs, nSenders := cache.evictSendersWhile(func() bool { - return false - }) + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 400000, 1) + cache.config.EvictionEnabled = true - require.Equal(t, uint32(0), steps) - require.Equal(t, uint32(0), nTxs) - require.Equal(t, uint32(0), nSenders) -} + require.Equal(t, uint64(400000), cache.CountTx()) -// This seems to be the most reasonable "bad-enough" (not worst) scenario to benchmark: -// 25000 senders with 10 transactions each, with default "NumSendersToPreemptivelyEvict". -// ~1 second on average laptop. -func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: 1000000000, - CountThreshold: 240000, - NumSendersToPreemptivelyEvict: 1000, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - } + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 100000, journal.numEvicted) + require.Equal(t, 2, len(journal.numEvictedByPass)) + }) - txGasHandler, _ := dummyParams() - numSenders := 25000 - numTxsPerSender := 10 + t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 10000, 100) + cache.config.EvictionEnabled = true - addManyTransactionsWithUniformDistribution(cache, 
numSenders, numTxsPerSender) + require.Equal(t, uint64(1000000), cache.CountTx()) - // Sometimes (due to map iteration non-determinism), more eviction happens - one more step of 100 senders. - require.LessOrEqual(t, uint32(cache.CountTx()), config.CountThreshold) - require.GreaterOrEqual(t, uint32(cache.CountTx()), config.CountThreshold-config.NumSendersToPreemptivelyEvict*uint32(numTxsPerSender)) -} + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 700000, journal.numEvicted) + require.Equal(t, 14, len(journal.numEvictedByPass)) + }) -func Test_EvictSendersAndTheirTxs_Concurrently(t *testing.T) { - cache := newUnconstrainedCacheToTest() - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(3) - - go func() { - cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) - cache.AddTx(createTx([]byte("alice-y"), "alice", 43)) - cache.AddTx(createTx([]byte("bob-x"), "bob", 42)) - cache.AddTx(createTx([]byte("bob-y"), "bob", 43)) - cache.Remove([]byte("alice-x")) - cache.Remove([]byte("bob-x")) - wg.Done() - }() - - go func() { - snapshot := cache.txListBySender.getSnapshotAscending() - cache.evictSendersAndTheirTxs(snapshot) - wg.Done() - }() - - go func() { - snapshot := cache.txListBySender.getSnapshotAscending() - cache.evictSendersAndTheirTxs(snapshot) - wg.Done() - }() + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) } - wg.Wait() + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.119274s (TestBenchmarkTxCache_DoEviction/numSenders_=_35000,_numTransactions_=_10) + // 0.484147s (TestBenchmarkTxCache_DoEviction/numSenders_=_100000,_numTransactions_=_5) + // 0.504588s (TestBenchmarkTxCache_DoEviction/numSenders_=_10000,_numTransactions_=_100) + // 0.571885s 
(TestBenchmarkTxCache_DoEviction/numSenders_=_400000,_numTransactions_=_1) } diff --git a/txcache/feeComputationHelper.go b/txcache/feeComputationHelper.go deleted file mode 100644 index 66e365dc..00000000 --- a/txcache/feeComputationHelper.go +++ /dev/null @@ -1,80 +0,0 @@ -package txcache - -type feeHelper interface { - gasLimitShift() uint64 - gasPriceShift() uint64 - minPricePerUnit() uint64 - normalizedMinFee() uint64 - minGasPriceFactor() uint64 - IsInterfaceNil() bool -} - -type feeComputationHelper struct { - gasShiftingFactor uint64 - priceShiftingFactor uint64 - minFeeNormalized uint64 - minPPUNormalized uint64 - minPriceFactor uint64 -} - -const priceBinaryResolution = 10 -const gasBinaryResolution = 4 - -func newFeeComputationHelper(minPrice, minGasLimit, minPriceProcessing uint64) *feeComputationHelper { - feeComputeHelper := &feeComputationHelper{} - feeComputeHelper.initializeHelperParameters(minPrice, minGasLimit, minPriceProcessing) - return feeComputeHelper -} - -func (fch *feeComputationHelper) gasLimitShift() uint64 { - return fch.gasShiftingFactor -} - -func (fch *feeComputationHelper) gasPriceShift() uint64 { - return fch.priceShiftingFactor -} - -func (fch *feeComputationHelper) normalizedMinFee() uint64 { - return fch.minFeeNormalized -} - -func (fch *feeComputationHelper) minPricePerUnit() uint64 { - return fch.minPPUNormalized -} - -func (fch *feeComputationHelper) minGasPriceFactor() uint64 { - return fch.minPriceFactor -} - -func (fch *feeComputationHelper) initializeHelperParameters(minPrice, minGasLimit, minPriceProcessing uint64) { - fch.priceShiftingFactor = computeShiftMagnitude(minPrice, priceBinaryResolution) - x := minPriceProcessing >> fch.priceShiftingFactor - for x == 0 && fch.priceShiftingFactor > 0 { - fch.priceShiftingFactor-- - x = minPriceProcessing >> fch.priceShiftingFactor - } - - fch.gasShiftingFactor = computeShiftMagnitude(minGasLimit, gasBinaryResolution) - - fch.minPPUNormalized = minPriceProcessing >> 
fch.priceShiftingFactor - fch.minFeeNormalized = (minGasLimit >> fch.gasLimitShift()) * (minPrice >> fch.priceShiftingFactor) - fch.minPriceFactor = minPrice / minPriceProcessing -} - -// returns the maximum shift magnitude of the number in order to maintain the given binary resolution -func computeShiftMagnitude(x uint64, resolution uint8) uint64 { - m := uint64(0) - stopCondition := uint64(1) << resolution - shiftStep := uint64(1) - - for i := x; i > stopCondition; i >>= shiftStep { - m += shiftStep - } - - return m -} - -// IsInterfaceNil returns nil if the underlying object is nil -func (fch *feeComputationHelper) IsInterfaceNil() bool { - return fch == nil -} diff --git a/txcache/feeComputationHelper_test.go b/txcache/feeComputationHelper_test.go deleted file mode 100644 index 9a015ccf..00000000 --- a/txcache/feeComputationHelper_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package txcache - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func Test_initializeHelperParameters(t *testing.T) { - fch := &feeComputationHelper{ - gasShiftingFactor: 0, - priceShiftingFactor: 0, - minFeeNormalized: 0, - minPPUNormalized: 0, - minPriceFactor: 0, - } - - fch.initializeHelperParameters(1<<20, 1<<10, 1<<10) - require.Equal(t, uint64(10), fch.priceShiftingFactor) - require.Equal(t, uint64(6), fch.gasShiftingFactor) - require.Equal(t, uint64(1<<10), fch.minPriceFactor) - require.Equal(t, uint64((1<<4)*(1<<10)), fch.minFeeNormalized) - require.Equal(t, uint64(1), fch.minPPUNormalized) - - fch.initializeHelperParameters(1<<22, 1<<17, 1<<7) - require.Equal(t, uint64(7), fch.priceShiftingFactor) - require.Equal(t, uint64(13), fch.gasShiftingFactor) - require.Equal(t, uint64(1<<15), fch.minPriceFactor) - require.Equal(t, uint64((1<<4)*(1<<15)), fch.minFeeNormalized) - require.Equal(t, uint64(1), fch.minPPUNormalized) - - fch.initializeHelperParameters(1<<20, 1<<3, 1<<15) - require.Equal(t, uint64(10), fch.priceShiftingFactor) - require.Equal(t, uint64(0), 
fch.gasShiftingFactor) - require.Equal(t, uint64(1<<5), fch.minPriceFactor) - require.Equal(t, uint64((1<<3)*(1<<10)), fch.minFeeNormalized) - require.Equal(t, uint64(1<<5), fch.minPPUNormalized) -} - -func Test_newFeeComputationHelper(t *testing.T) { - fch := newFeeComputationHelper(1<<20, 1<<10, 1<<10) - require.Equal(t, uint64(10), fch.priceShiftingFactor) - require.Equal(t, uint64(6), fch.gasShiftingFactor) - require.Equal(t, uint64(1<<10), fch.minPriceFactor) - require.Equal(t, uint64((1<<4)*(1<<10)), fch.minFeeNormalized) - require.Equal(t, uint64(1), fch.minPPUNormalized) -} - -func Test_getters(t *testing.T) { - fch := newFeeComputationHelper(1<<20, 1<<10, 1<<10) - gasShift := fch.gasLimitShift() - gasPriceShift := fch.gasPriceShift() - minFeeNormalized := fch.normalizedMinFee() - minPPUNormalized := fch.minPricePerUnit() - minGasPriceFactor := fch.minGasPriceFactor() - - require.Equal(t, uint64(10), gasPriceShift) - require.Equal(t, uint64(6), gasShift) - require.Equal(t, uint64(1<<10), minGasPriceFactor) - require.Equal(t, uint64((1<<4)*(1<<10)), minFeeNormalized) - require.Equal(t, uint64(1), minPPUNormalized) -} - -func Test_computeShiftMagnitude(t *testing.T) { - shift := computeShiftMagnitude(1<<20, 10) - require.Equal(t, uint64(10), shift) - - shift = computeShiftMagnitude(1<<12, 10) - require.Equal(t, uint64(2), shift) - - shift = computeShiftMagnitude(1<<8, 10) - require.Equal(t, uint64(0), shift) -} diff --git a/txcache/interface.go b/txcache/interface.go index 73624759..b6d0aee5 100644 --- a/txcache/interface.go +++ b/txcache/interface.go @@ -1,21 +1,23 @@ package txcache import ( + "math/big" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-storage-go/types" ) -type scoreComputer interface { - computeScore(scoreParams senderScoreParams) uint32 +// MempoolHost provides blockchain information for mempool operations +type MempoolHost interface { + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int + 
GetTransferredValue(tx data.TransactionHandler) *big.Int + IsInterfaceNil() bool } -// TxGasHandler handles a transaction gas and gas cost -type TxGasHandler interface { - SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 - MinGasPrice() uint64 - MinGasLimit() uint64 - MinGasPriceForProcessing() uint64 +// SelectionSession provides blockchain information for transaction selection +type SelectionSession interface { + GetAccountState(accountKey []byte) (*types.AccountState, error) + IsIncorrectlyGuarded(tx data.TransactionHandler) bool IsInterfaceNil() bool } diff --git a/txcache/loggers.go b/txcache/loggers.go new file mode 100644 index 00000000..ecedbfec --- /dev/null +++ b/txcache/loggers.go @@ -0,0 +1,9 @@ +package txcache + +import logger "github.com/multiversx/mx-chain-logger-go" + +var log = logger.GetOrCreate("txcache/main") +var logAdd = logger.GetOrCreate("txcache/add") +var logRemove = logger.GetOrCreate("txcache/remove") +var logSelect = logger.GetOrCreate("txcache/select") +var logDiagnoseTransactions = logger.GetOrCreate("txcache/diagnose/transactions") diff --git a/txcache/maps/bucketSortedMap.go b/txcache/maps/bucketSortedMap.go deleted file mode 100644 index 90a94162..00000000 --- a/txcache/maps/bucketSortedMap.go +++ /dev/null @@ -1,342 +0,0 @@ -package maps - -import ( - "sync" -) - -// BucketSortedMap is -type BucketSortedMap struct { - mutex sync.RWMutex - nChunks uint32 - nScoreChunks uint32 - maxScore uint32 - chunks []*MapChunk - scoreChunks []*MapChunk -} - -// MapChunk is -type MapChunk struct { - items map[string]BucketSortedMapItem - mutex sync.RWMutex -} - -// NewBucketSortedMap creates a new map. 
-func NewBucketSortedMap(nChunks uint32, nScoreChunks uint32) *BucketSortedMap { - if nChunks == 0 { - nChunks = 1 - } - if nScoreChunks == 0 { - nScoreChunks = 1 - } - - sortedMap := BucketSortedMap{ - nChunks: nChunks, - nScoreChunks: nScoreChunks, - maxScore: nScoreChunks - 1, - } - - sortedMap.initializeChunks() - - return &sortedMap -} - -func (sortedMap *BucketSortedMap) initializeChunks() { - // Assignment is not an atomic operation, so we have to wrap this in a critical section - sortedMap.mutex.Lock() - defer sortedMap.mutex.Unlock() - - sortedMap.chunks = make([]*MapChunk, sortedMap.nChunks) - sortedMap.scoreChunks = make([]*MapChunk, sortedMap.nScoreChunks) - - for i := uint32(0); i < sortedMap.nChunks; i++ { - sortedMap.chunks[i] = &MapChunk{ - items: make(map[string]BucketSortedMapItem), - } - } - - for i := uint32(0); i < sortedMap.nScoreChunks; i++ { - sortedMap.scoreChunks[i] = &MapChunk{ - items: make(map[string]BucketSortedMapItem), - } - } -} - -// Set puts the item in the map -// This doesn't add the item to the score chunks (not necessary) -func (sortedMap *BucketSortedMap) Set(item BucketSortedMapItem) { - chunk := sortedMap.getChunk(item.GetKey()) - chunk.setItem(item) -} - -// NotifyScoreChange moves or adds the item to the corresponding score chunk -func (sortedMap *BucketSortedMap) NotifyScoreChange(item BucketSortedMapItem, newScore uint32) { - if newScore > sortedMap.maxScore { - newScore = sortedMap.maxScore - } - - newScoreChunk := sortedMap.getScoreChunks()[newScore] - if newScoreChunk != item.GetScoreChunk() { - removeFromScoreChunk(item) - newScoreChunk.setItem(item) - item.SetScoreChunk(newScoreChunk) - } -} - -func removeFromScoreChunk(item BucketSortedMapItem) { - currentScoreChunk := item.GetScoreChunk() - if currentScoreChunk != nil { - currentScoreChunk.removeItem(item) - } -} - -// Get retrieves an element from map under given key. 
-func (sortedMap *BucketSortedMap) Get(key string) (BucketSortedMapItem, bool) { - chunk := sortedMap.getChunk(key) - chunk.mutex.RLock() - val, ok := chunk.items[key] - chunk.mutex.RUnlock() - return val, ok -} - -// Has looks up an item under specified key -func (sortedMap *BucketSortedMap) Has(key string) bool { - chunk := sortedMap.getChunk(key) - chunk.mutex.RLock() - _, ok := chunk.items[key] - chunk.mutex.RUnlock() - return ok -} - -// Remove removes an element from the map -func (sortedMap *BucketSortedMap) Remove(key string) (interface{}, bool) { - chunk := sortedMap.getChunk(key) - item := chunk.removeItemByKey(key) - if item != nil { - removeFromScoreChunk(item) - } - - return item, item != nil -} - -// getChunk returns the chunk holding the given key. -func (sortedMap *BucketSortedMap) getChunk(key string) *MapChunk { - sortedMap.mutex.RLock() - defer sortedMap.mutex.RUnlock() - return sortedMap.chunks[fnv32Hash(key)%sortedMap.nChunks] -} - -// fnv32Hash implements https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function for 32 bits -func fnv32Hash(key string) uint32 { - hash := uint32(2166136261) - const prime32 = uint32(16777619) - for i := 0; i < len(key); i++ { - hash *= prime32 - hash ^= uint32(key[i]) - } - return hash -} - -// Clear clears the map -func (sortedMap *BucketSortedMap) Clear() { - // There is no need to explicitly remove each item for each chunk - // The garbage collector will remove the data from memory - sortedMap.initializeChunks() -} - -// Count returns the number of elements within the map -func (sortedMap *BucketSortedMap) Count() uint32 { - count := uint32(0) - for _, chunk := range sortedMap.getChunks() { - count += chunk.countItems() - } - return count -} - -// CountSorted returns the number of sorted elements within the map -func (sortedMap *BucketSortedMap) CountSorted() uint32 { - count := uint32(0) - for _, chunk := range sortedMap.getScoreChunks() { - count += chunk.countItems() - } - return count -} - -// 
ChunksCounts returns the number of elements by chunk -func (sortedMap *BucketSortedMap) ChunksCounts() []uint32 { - counts := make([]uint32, sortedMap.nChunks) - for i, chunk := range sortedMap.getChunks() { - counts[i] = chunk.countItems() - } - return counts -} - -// ScoreChunksCounts returns the number of elements by chunk -func (sortedMap *BucketSortedMap) ScoreChunksCounts() []uint32 { - counts := make([]uint32, sortedMap.nScoreChunks) - for i, chunk := range sortedMap.getScoreChunks() { - counts[i] = chunk.countItems() - } - return counts -} - -// SortedMapIterCb is an iterator callback -type SortedMapIterCb func(key string, value BucketSortedMapItem) - -// GetSnapshotAscending gets a snapshot of the items -func (sortedMap *BucketSortedMap) GetSnapshotAscending() []BucketSortedMapItem { - return sortedMap.getSortedSnapshot(sortedMap.fillSnapshotAscending) -} - -// GetSnapshotDescending gets a snapshot of the items -func (sortedMap *BucketSortedMap) GetSnapshotDescending() []BucketSortedMapItem { - return sortedMap.getSortedSnapshot(sortedMap.fillSnapshotDescending) -} - -// This applies a read lock on all chunks, so that they aren't mutated during snapshot -func (sortedMap *BucketSortedMap) getSortedSnapshot(fillSnapshot func(scoreChunks []*MapChunk, snapshot []BucketSortedMapItem)) []BucketSortedMapItem { - counter := uint32(0) - scoreChunks := sortedMap.getScoreChunks() - - for _, chunk := range scoreChunks { - chunk.mutex.RLock() - counter += uint32(len(chunk.items)) - } - - snapshot := make([]BucketSortedMapItem, counter) - fillSnapshot(scoreChunks, snapshot) - - for _, chunk := range scoreChunks { - chunk.mutex.RUnlock() - } - - return snapshot -} - -// This function should only be called under already read-locked score chunks -func (sortedMap *BucketSortedMap) fillSnapshotAscending(scoreChunks []*MapChunk, snapshot []BucketSortedMapItem) { - i := 0 - for _, chunk := range scoreChunks { - for _, item := range chunk.items { - snapshot[i] = item - i++ - } 
- } -} - -// This function should only be called under already read-locked score chunks -func (sortedMap *BucketSortedMap) fillSnapshotDescending(scoreChunks []*MapChunk, snapshot []BucketSortedMapItem) { - i := 0 - for chunkIndex := len(scoreChunks) - 1; chunkIndex >= 0; chunkIndex-- { - chunk := scoreChunks[chunkIndex] - for _, item := range chunk.items { - snapshot[i] = item - i++ - } - } -} - -// IterCbSortedAscending iterates over the sorted elements in the map -func (sortedMap *BucketSortedMap) IterCbSortedAscending(callback SortedMapIterCb) { - for _, chunk := range sortedMap.getScoreChunks() { - chunk.forEachItem(callback) - } -} - -// IterCbSortedDescending iterates over the sorted elements in the map -func (sortedMap *BucketSortedMap) IterCbSortedDescending(callback SortedMapIterCb) { - chunks := sortedMap.getScoreChunks() - for i := len(chunks) - 1; i >= 0; i-- { - chunk := chunks[i] - chunk.forEachItem(callback) - } -} - -// Keys returns all keys as []string -func (sortedMap *BucketSortedMap) Keys() []string { - count := sortedMap.Count() - // count is not exact anymore, since we are in a different lock than the one aquired by Count() (but is a good approximation) - keys := make([]string, 0, count) - - for _, chunk := range sortedMap.getChunks() { - keys = chunk.appendKeys(keys) - } - - return keys -} - -// KeysSorted returns all keys of the sorted items as []string -func (sortedMap *BucketSortedMap) KeysSorted() []string { - count := sortedMap.CountSorted() - // count is not exact anymore, since we are in a different lock than the one aquired by CountSorted() (but is a good approximation) - keys := make([]string, 0, count) - - for _, chunk := range sortedMap.getScoreChunks() { - keys = chunk.appendKeys(keys) - } - - return keys -} - -func (sortedMap *BucketSortedMap) getChunks() []*MapChunk { - sortedMap.mutex.RLock() - defer sortedMap.mutex.RUnlock() - return sortedMap.chunks -} - -func (sortedMap *BucketSortedMap) getScoreChunks() []*MapChunk { - 
sortedMap.mutex.RLock() - defer sortedMap.mutex.RUnlock() - return sortedMap.scoreChunks -} - -func (chunk *MapChunk) removeItem(item BucketSortedMapItem) { - chunk.mutex.Lock() - defer chunk.mutex.Unlock() - - key := item.GetKey() - delete(chunk.items, key) -} - -func (chunk *MapChunk) removeItemByKey(key string) BucketSortedMapItem { - chunk.mutex.Lock() - defer chunk.mutex.Unlock() - - item := chunk.items[key] - delete(chunk.items, key) - return item -} - -func (chunk *MapChunk) setItem(item BucketSortedMapItem) { - chunk.mutex.Lock() - defer chunk.mutex.Unlock() - - key := item.GetKey() - chunk.items[key] = item -} - -func (chunk *MapChunk) countItems() uint32 { - chunk.mutex.RLock() - defer chunk.mutex.RUnlock() - - return uint32(len(chunk.items)) -} - -func (chunk *MapChunk) forEachItem(callback SortedMapIterCb) { - chunk.mutex.RLock() - defer chunk.mutex.RUnlock() - - for key, value := range chunk.items { - callback(key, value) - } -} - -func (chunk *MapChunk) appendKeys(keysAccumulator []string) []string { - chunk.mutex.RLock() - defer chunk.mutex.RUnlock() - - for key := range chunk.items { - keysAccumulator = append(keysAccumulator, key) - } - - return keysAccumulator -} diff --git a/txcache/maps/bucketSortedMapItem.go b/txcache/maps/bucketSortedMapItem.go deleted file mode 100644 index 4ba55181..00000000 --- a/txcache/maps/bucketSortedMapItem.go +++ /dev/null @@ -1,8 +0,0 @@ -package maps - -// BucketSortedMapItem defines an item of the bucket sorted map -type BucketSortedMapItem interface { - GetKey() string - GetScoreChunk() *MapChunk - SetScoreChunk(*MapChunk) -} diff --git a/txcache/maps/bucketSortedMap_test.go b/txcache/maps/bucketSortedMap_test.go deleted file mode 100644 index 2bb4a7ee..00000000 --- a/txcache/maps/bucketSortedMap_test.go +++ /dev/null @@ -1,421 +0,0 @@ -package maps - -import ( - "fmt" - "sync" - "testing" - - "github.com/multiversx/mx-chain-core-go/core/atomic" - "github.com/stretchr/testify/require" -) - -type dummyItem struct { 
- score atomic.Uint32 - key string - chunk *MapChunk - chunkMutex sync.RWMutex - mutex sync.Mutex -} - -func newDummyItem(key string) *dummyItem { - return &dummyItem{ - key: key, - } -} - -func newScoredDummyItem(key string, score uint32) *dummyItem { - item := &dummyItem{ - key: key, - } - item.score.Set(score) - return item -} - -func (item *dummyItem) GetKey() string { - return item.key -} - -func (item *dummyItem) GetScoreChunk() *MapChunk { - item.chunkMutex.RLock() - defer item.chunkMutex.RUnlock() - - return item.chunk -} - -func (item *dummyItem) SetScoreChunk(chunk *MapChunk) { - item.chunkMutex.Lock() - defer item.chunkMutex.Unlock() - - item.chunk = chunk -} - -func (item *dummyItem) simulateMutationThatChangesScore(myMap *BucketSortedMap) { - item.mutex.Lock() - myMap.NotifyScoreChange(item, item.score.Get()) - item.mutex.Unlock() -} - -func simulateMutationThatChangesScore(myMap *BucketSortedMap, key string) { - item, ok := myMap.Get(key) - if !ok { - return - } - - itemAsDummy := item.(*dummyItem) - itemAsDummy.simulateMutationThatChangesScore(myMap) -} - -func TestNewBucketSortedMap(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - require.Equal(t, uint32(4), myMap.nChunks) - require.Equal(t, 4, len(myMap.chunks)) - require.Equal(t, uint32(100), myMap.nScoreChunks) - require.Equal(t, 100, len(myMap.scoreChunks)) - - // 1 is minimum number of chunks - myMap = NewBucketSortedMap(0, 0) - require.Equal(t, uint32(1), myMap.nChunks) - require.Equal(t, uint32(1), myMap.nScoreChunks) -} - -func TestBucketSortedMap_Count(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newScoredDummyItem("a", 0)) - myMap.Set(newScoredDummyItem("b", 1)) - myMap.Set(newScoredDummyItem("c", 2)) - myMap.Set(newScoredDummyItem("d", 3)) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - simulateMutationThatChangesScore(myMap, "d") - - require.Equal(t, uint32(4), 
myMap.Count()) - require.Equal(t, uint32(4), myMap.CountSorted()) - - counts := myMap.ChunksCounts() - require.Equal(t, uint32(1), counts[0]) - require.Equal(t, uint32(1), counts[1]) - require.Equal(t, uint32(1), counts[2]) - require.Equal(t, uint32(1), counts[3]) - - counts = myMap.ScoreChunksCounts() - require.Equal(t, uint32(1), counts[0]) - require.Equal(t, uint32(1), counts[1]) - require.Equal(t, uint32(1), counts[2]) - require.Equal(t, uint32(1), counts[3]) -} - -func TestBucketSortedMap_Keys(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newDummyItem("a")) - myMap.Set(newDummyItem("b")) - myMap.Set(newDummyItem("c")) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - require.Equal(t, 3, len(myMap.Keys())) - require.Equal(t, 3, len(myMap.KeysSorted())) -} - -func TestBucketSortedMap_KeysSorted(t *testing.T) { - myMap := NewBucketSortedMap(1, 4) - - myMap.Set(newScoredDummyItem("d", 3)) - myMap.Set(newScoredDummyItem("a", 0)) - myMap.Set(newScoredDummyItem("c", 2)) - myMap.Set(newScoredDummyItem("b", 1)) - myMap.Set(newScoredDummyItem("f", 5)) - myMap.Set(newScoredDummyItem("e", 4)) - - simulateMutationThatChangesScore(myMap, "d") - simulateMutationThatChangesScore(myMap, "e") - simulateMutationThatChangesScore(myMap, "f") - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - keys := myMap.KeysSorted() - require.Equal(t, "a", keys[0]) - require.Equal(t, "b", keys[1]) - require.Equal(t, "c", keys[2]) - - counts := myMap.ScoreChunksCounts() - require.Equal(t, uint32(1), counts[0]) - require.Equal(t, uint32(1), counts[1]) - require.Equal(t, uint32(1), counts[2]) - require.Equal(t, uint32(3), counts[3]) -} - -func TestBucketSortedMap_ItemMovesOnNotifyScoreChange(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - - a := newScoredDummyItem("a", 1) - b := 
newScoredDummyItem("b", 42) - myMap.Set(a) - myMap.Set(b) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - - require.Equal(t, myMap.scoreChunks[1], a.GetScoreChunk()) - require.Equal(t, myMap.scoreChunks[42], b.GetScoreChunk()) - - a.score.Set(2) - b.score.Set(43) - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - - require.Equal(t, myMap.scoreChunks[2], a.GetScoreChunk()) - require.Equal(t, myMap.scoreChunks[43], b.GetScoreChunk()) -} - -func TestBucketSortedMap_Has(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newDummyItem("a")) - myMap.Set(newDummyItem("b")) - - require.True(t, myMap.Has("a")) - require.True(t, myMap.Has("b")) - require.False(t, myMap.Has("c")) -} - -func TestBucketSortedMap_Remove(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newDummyItem("a")) - myMap.Set(newDummyItem("b")) - - _, ok := myMap.Remove("b") - require.True(t, ok) - _, ok = myMap.Remove("x") - require.False(t, ok) - - require.True(t, myMap.Has("a")) - require.False(t, myMap.Has("b")) -} - -func TestBucketSortedMap_Clear(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newDummyItem("a")) - myMap.Set(newDummyItem("b")) - - myMap.Clear() - - require.Equal(t, uint32(0), myMap.Count()) - require.Equal(t, uint32(0), myMap.CountSorted()) -} - -func TestBucketSortedMap_IterCb(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - - myMap.Set(newScoredDummyItem("a", 15)) - myMap.Set(newScoredDummyItem("b", 101)) - myMap.Set(newScoredDummyItem("c", 3)) - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - sorted := []string{"c", "a", "b"} - - i := 0 - myMap.IterCbSortedAscending(func(key string, value BucketSortedMapItem) { - require.Equal(t, sorted[i], key) - i++ - }) - - require.Equal(t, 3, i) - - i = len(sorted) - 1 - 
myMap.IterCbSortedDescending(func(key string, value BucketSortedMapItem) { - require.Equal(t, sorted[i], key) - i-- - }) - - require.Equal(t, 0, i+1) -} - -func TestBucketSortedMap_GetSnapshotAscending(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - - snapshot := myMap.GetSnapshotAscending() - require.Equal(t, []BucketSortedMapItem{}, snapshot) - - a := newScoredDummyItem("a", 15) - b := newScoredDummyItem("b", 101) - c := newScoredDummyItem("c", 3) - - myMap.Set(a) - myMap.Set(b) - myMap.Set(c) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - snapshot = myMap.GetSnapshotAscending() - require.Equal(t, []BucketSortedMapItem{c, a, b}, snapshot) -} - -func TestBucketSortedMap_GetSnapshotDescending(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - - snapshot := myMap.GetSnapshotDescending() - require.Equal(t, []BucketSortedMapItem{}, snapshot) - - a := newScoredDummyItem("a", 15) - b := newScoredDummyItem("b", 101) - c := newScoredDummyItem("c", 3) - - myMap.Set(a) - myMap.Set(b) - myMap.Set(c) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - snapshot = myMap.GetSnapshotDescending() - require.Equal(t, []BucketSortedMapItem{b, a, c}, snapshot) -} - -func TestBucketSortedMap_AddManyItems(t *testing.T) { - numGoroutines := 42 - numItemsPerGoroutine := 1000 - numScoreChunks := 100 - numItemsInScoreChunkPerGoroutine := numItemsPerGoroutine / numScoreChunks - numItemsInScoreChunk := numItemsInScoreChunkPerGoroutine * numGoroutines - - myMap := NewBucketSortedMap(16, uint32(numScoreChunks)) - - var waitGroup sync.WaitGroup - waitGroup.Add(numGoroutines) - - for i := 0; i < numGoroutines; i++ { - go func(i int) { - for j := 0; j < numItemsPerGoroutine; j++ { - key := fmt.Sprintf("%d_%d", i, j) - item := newScoredDummyItem(key, uint32(j%numScoreChunks)) - myMap.Set(item) 
- simulateMutationThatChangesScore(myMap, key) - } - - waitGroup.Done() - }(i) - } - - waitGroup.Wait() - - require.Equal(t, uint32(numGoroutines*numItemsPerGoroutine), myMap.CountSorted()) - - counts := myMap.ScoreChunksCounts() - for i := 0; i < numScoreChunks; i++ { - require.Equal(t, uint32(numItemsInScoreChunk), counts[i]) - } -} - -func TestBucketSortedMap_ClearConcurrentWithRead(t *testing.T) { - numChunks := uint32(4) - numScoreChunks := uint32(4) - myMap := NewBucketSortedMap(numChunks, numScoreChunks) - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - - for j := 0; j < 1000; j++ { - myMap.Clear() - } - }() - - go func() { - defer wg.Done() - - for j := 0; j < 1000; j++ { - require.Equal(t, uint32(0), myMap.Count()) - require.Equal(t, uint32(0), myMap.CountSorted()) - require.Len(t, myMap.ChunksCounts(), int(numChunks)) - require.Len(t, myMap.ScoreChunksCounts(), int(numScoreChunks)) - require.Len(t, myMap.Keys(), 0) - require.Len(t, myMap.KeysSorted(), 0) - require.Equal(t, false, myMap.Has("foobar")) - item, ok := myMap.Get("foobar") - require.Nil(t, item) - require.False(t, ok) - require.Len(t, myMap.GetSnapshotAscending(), 0) - myMap.IterCbSortedAscending(func(key string, item BucketSortedMapItem) { - }) - myMap.IterCbSortedDescending(func(key string, item BucketSortedMapItem) { - }) - } - }() - - wg.Wait() -} - -func TestBucketSortedMap_ClearConcurrentWithWrite(t *testing.T) { - myMap := NewBucketSortedMap(4, 4) - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - for j := 0; j < 10000; j++ { - myMap.Clear() - } - - wg.Done() - }() - - go func() { - for j := 0; j < 10000; j++ { - myMap.Set(newDummyItem("foobar")) - _, _ = myMap.Remove("foobar") - myMap.NotifyScoreChange(newDummyItem("foobar"), 42) - simulateMutationThatChangesScore(myMap, "foobar") - } - - wg.Done() - }() - - wg.Wait() -} - -func TestBucketSortedMap_NoForgottenItemsOnConcurrentScoreChanges(t *testing.T) { - // This test helped us to find a memory leak 
occuring on concurrent score changes (concurrent movements across buckets) - - for i := 0; i < 1000; i++ { - myMap := NewBucketSortedMap(16, 16) - a := newScoredDummyItem("a", 0) - myMap.Set(a) - simulateMutationThatChangesScore(myMap, "a") - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - a.score.Set(1) - simulateMutationThatChangesScore(myMap, "a") - wg.Done() - }() - - go func() { - a.score.Set(2) - simulateMutationThatChangesScore(myMap, "a") - wg.Done() - }() - - wg.Wait() - - require.Equal(t, uint32(1), myMap.CountSorted()) - require.Equal(t, uint32(1), myMap.Count()) - - _, _ = myMap.Remove("a") - - require.Equal(t, uint32(0), myMap.CountSorted()) - require.Equal(t, uint32(0), myMap.Count()) - } -} diff --git a/txcache/monitoring.go b/txcache/monitoring.go deleted file mode 100644 index 7d8ad284..00000000 --- a/txcache/monitoring.go +++ /dev/null @@ -1,247 +0,0 @@ -package txcache - -import ( - "encoding/hex" - "fmt" - "strings" - - "github.com/multiversx/mx-chain-core-go/core" - logger "github.com/multiversx/mx-chain-logger-go" -) - -var log = logger.GetOrCreate("txcache") - -func (cache *TxCache) monitorEvictionWrtSenderLimit(sender []byte, evicted [][]byte) { - log.Trace("TxCache.AddTx() evict transactions wrt. limit by sender", "name", cache.name, "sender", sender, "num", len(evicted)) - - for i := 0; i < core.MinInt(len(evicted), numEvictedTxsToDisplay); i++ { - log.Trace("TxCache.AddTx() evict transactions wrt. 
limit by sender", "name", cache.name, "sender", sender, "tx", evicted[i]) - } -} - -func (cache *TxCache) monitorEvictionStart() *core.StopWatch { - log.Debug("TxCache: eviction started", "name", cache.name, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) - cache.displaySendersHistogram() - sw := core.NewStopWatch() - sw.Start("eviction") - return sw -} - -func (cache *TxCache) monitorEvictionEnd(stopWatch *core.StopWatch) { - stopWatch.Stop("eviction") - duration := stopWatch.GetMeasurement("eviction") - log.Debug("TxCache: eviction ended", "name", cache.name, "duration", duration, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) - cache.evictionJournal.display() - cache.displaySendersHistogram() -} - -func (cache *TxCache) monitorSelectionStart() *core.StopWatch { - log.Debug("TxCache: selection started", "name", cache.name, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) - cache.displaySendersHistogram() - sw := core.NewStopWatch() - sw.Start("selection") - return sw -} - -func (cache *TxCache) monitorSelectionEnd(selection []*WrappedTransaction, stopWatch *core.StopWatch) { - stopWatch.Stop("selection") - duration := stopWatch.GetMeasurement("selection") - numSendersSelected := cache.numSendersSelected.Reset() - numSendersWithInitialGap := cache.numSendersWithInitialGap.Reset() - numSendersWithMiddleGap := cache.numSendersWithMiddleGap.Reset() - numSendersInGracePeriod := cache.numSendersInGracePeriod.Reset() - - log.Debug("TxCache: selection ended", "name", cache.name, "duration", duration, - "numTxSelected", len(selection), - "numSendersSelected", numSendersSelected, - "numSendersWithInitialGap", numSendersWithInitialGap, - "numSendersWithMiddleGap", numSendersWithMiddleGap, - "numSendersInGracePeriod", numSendersInGracePeriod, - ) -} - -type batchSelectionJournal struct { - copied int - isFirstBatch bool - hasInitialGap bool - hasMiddleGap bool 
- isGracePeriod bool -} - -func (cache *TxCache) monitorBatchSelectionEnd(journal batchSelectionJournal) { - if !journal.isFirstBatch { - return - } - - if journal.hasInitialGap { - cache.numSendersWithInitialGap.Increment() - } else if journal.hasMiddleGap { - // Currently, we only count middle gaps on first batch (for simplicity) - cache.numSendersWithMiddleGap.Increment() - } - - if journal.isGracePeriod { - cache.numSendersInGracePeriod.Increment() - } else if journal.copied > 0 { - cache.numSendersSelected.Increment() - } -} - -func (cache *TxCache) monitorSweepingStart() *core.StopWatch { - sw := core.NewStopWatch() - sw.Start("sweeping") - return sw -} - -func (cache *TxCache) monitorSweepingEnd(numTxs uint32, numSenders uint32, stopWatch *core.StopWatch) { - stopWatch.Stop("sweeping") - duration := stopWatch.GetMeasurement("sweeping") - log.Debug("TxCache: swept senders:", "name", cache.name, "duration", duration, "txs", numTxs, "senders", numSenders) - cache.displaySendersHistogram() -} - -func (cache *TxCache) displaySendersHistogram() { - backingMap := cache.txListBySender.backingMap - log.Debug("TxCache.sendersHistogram:", "chunks", backingMap.ChunksCounts(), "scoreChunks", backingMap.ScoreChunksCounts()) -} - -// evictionJournal keeps a short journal about the eviction process -// This is useful for debugging and reasoning about the eviction -type evictionJournal struct { - evictionPerformed bool - passOneNumTxs uint32 - passOneNumSenders uint32 - passOneNumSteps uint32 -} - -func (journal *evictionJournal) display() { - log.Debug("Eviction.pass1:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders, "steps", journal.passOneNumSteps) -} - -// Diagnose checks the state of the cache for inconsistencies and displays a summary -func (cache *TxCache) Diagnose(deep bool) { - cache.diagnoseShallowly() - if deep { - cache.diagnoseDeeply() - } -} - -func (cache *TxCache) diagnoseShallowly() { - sw := core.NewStopWatch() - sw.Start("diagnose") - 
- sizeInBytes := cache.NumBytes() - numTxsEstimate := int(cache.CountTx()) - numTxsInChunks := cache.txByHash.backingMap.Count() - txsKeys := cache.txByHash.backingMap.Keys() - numSendersEstimate := uint32(cache.CountSenders()) - numSendersInChunks := cache.txListBySender.backingMap.Count() - numSendersInScoreChunks := cache.txListBySender.backingMap.CountSorted() - sendersKeys := cache.txListBySender.backingMap.Keys() - sendersKeysSorted := cache.txListBySender.backingMap.KeysSorted() - sendersSnapshot := cache.txListBySender.getSnapshotAscending() - - sw.Stop("diagnose") - duration := sw.GetMeasurement("diagnose") - - fine := numSendersEstimate == numSendersInChunks && numSendersEstimate == numSendersInScoreChunks - fine = fine && (len(sendersKeys) == len(sendersKeysSorted) && len(sendersKeys) == len(sendersSnapshot)) - fine = fine && (int(numSendersEstimate) == len(sendersKeys)) - fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) - - log.Debug("TxCache.diagnoseShallowly()", "name", cache.name, "duration", duration, "fine", fine) - log.Debug("TxCache.Size:", "current", sizeInBytes, "max", cache.config.NumBytesThreshold) - log.Debug("TxCache.NumSenders:", "estimate", numSendersEstimate, "inChunks", numSendersInChunks, "inScoreChunks", numSendersInScoreChunks) - log.Debug("TxCache.NumSenders (continued):", "keys", len(sendersKeys), "keysSorted", len(sendersKeysSorted), "snapshot", len(sendersSnapshot)) - log.Debug("TxCache.NumTxs:", "estimate", numTxsEstimate, "inChunks", numTxsInChunks, "keys", len(txsKeys)) -} - -func (cache *TxCache) diagnoseDeeply() { - sw := core.NewStopWatch() - sw.Start("diagnose") - - journal := cache.checkInternalConsistency() - cache.displaySendersSummary() - - sw.Stop("diagnose") - duration := sw.GetMeasurement("diagnose") - - log.Debug("TxCache.diagnoseDeeply()", "name", cache.name, "duration", duration) - journal.display() - cache.displaySendersHistogram() -} - -type internalConsistencyJournal struct { 
- numInMapByHash int - numInMapBySender int - numMissingInMapByHash int -} - -func (journal *internalConsistencyJournal) isFine() bool { - return (journal.numInMapByHash == journal.numInMapBySender) && (journal.numMissingInMapByHash == 0) -} - -func (journal *internalConsistencyJournal) display() { - log.Debug("TxCache.internalConsistencyJournal:", "fine", journal.isFine(), "numInMapByHash", journal.numInMapByHash, "numInMapBySender", journal.numInMapBySender, "numMissingInMapByHash", journal.numMissingInMapByHash) -} - -func (cache *TxCache) checkInternalConsistency() internalConsistencyJournal { - internalMapByHash := cache.txByHash - internalMapBySender := cache.txListBySender - - senders := internalMapBySender.getSnapshotAscending() - numInMapByHash := len(internalMapByHash.keys()) - numInMapBySender := 0 - numMissingInMapByHash := 0 - - for _, sender := range senders { - numInMapBySender += int(sender.countTx()) - - for _, hash := range sender.getTxHashes() { - _, ok := internalMapByHash.getTx(string(hash)) - if !ok { - numMissingInMapByHash++ - } - } - } - - return internalConsistencyJournal{ - numInMapByHash: numInMapByHash, - numInMapBySender: numInMapBySender, - numMissingInMapByHash: numMissingInMapByHash, - } -} - -func (cache *TxCache) displaySendersSummary() { - if log.GetLevel() != logger.LogTrace { - return - } - - senders := cache.txListBySender.getSnapshotAscending() - if len(senders) == 0 { - return - } - - var builder strings.Builder - builder.WriteString("\n[#index (score)] address [nonce known / nonce vs lowestTxNonce] txs = numTxs, !numFailedSelections\n") - - for i, sender := range senders { - address := hex.EncodeToString([]byte(sender.sender)) - accountNonce := sender.accountNonce.Get() - accountNonceKnown := sender.accountNonceKnown.IsSet() - numFailedSelections := sender.numFailedSelections.Get() - score := sender.getLastComputedScore() - numTxs := sender.countTxWithLock() - - lowestTxNonce := -1 - lowestTx := sender.getLowestNonceTx() - 
if lowestTx != nil { - lowestTxNonce = int(lowestTx.Tx.GetNonce()) - } - - _, _ = fmt.Fprintf(&builder, "[#%d (%d)] %s [%t / %d vs %d] txs = %d, !%d\n", i, score, address, accountNonceKnown, accountNonce, lowestTxNonce, numTxs, numFailedSelections) - } - - summary := builder.String() - log.Debug("TxCache.displaySendersSummary()", "name", cache.name, "summary\n", summary) -} diff --git a/txcache/score.go b/txcache/score.go deleted file mode 100644 index 06bde537..00000000 --- a/txcache/score.go +++ /dev/null @@ -1,67 +0,0 @@ -package txcache - -import ( - "math" -) - -var _ scoreComputer = (*defaultScoreComputer)(nil) - -// TODO (continued): The score formula should work even if minGasPrice = 0. -type senderScoreParams struct { - count uint64 - // Fee score is normalized - feeScore uint64 - gas uint64 -} - -type defaultScoreComputer struct { - txFeeHelper feeHelper - ppuDivider uint64 -} - -func newDefaultScoreComputer(txFeeHelper feeHelper) *defaultScoreComputer { - ppuScoreDivider := txFeeHelper.minGasPriceFactor() - ppuScoreDivider = ppuScoreDivider * ppuScoreDivider * ppuScoreDivider - - return &defaultScoreComputer{ - txFeeHelper: txFeeHelper, - ppuDivider: ppuScoreDivider, - } -} - -// computeScore computes the score of the sender, as an integer 0-100 -func (computer *defaultScoreComputer) computeScore(scoreParams senderScoreParams) uint32 { - rawScore := computer.computeRawScore(scoreParams) - truncatedScore := uint32(rawScore) - return truncatedScore -} - -// TODO (optimization): switch to integer operations (as opposed to float operations). 
-func (computer *defaultScoreComputer) computeRawScore(params senderScoreParams) float64 { - allParamsDefined := params.feeScore > 0 && params.gas > 0 && params.count > 0 - if !allParamsDefined { - return 0 - } - - ppuMin := computer.txFeeHelper.minPricePerUnit() - normalizedGas := params.gas >> computer.txFeeHelper.gasLimitShift() - if normalizedGas == 0 { - normalizedGas = 1 - } - ppuAvg := params.feeScore / normalizedGas - // (<< 3)^3 and >> 9 cancel each other; used to preserve a bit more resolution - ppuRatio := ppuAvg << 3 / ppuMin - ppuScore := ppuRatio * ppuRatio * ppuRatio >> 9 - ppuScoreAdjusted := float64(ppuScore) / float64(computer.ppuDivider) - - countPow2 := params.count * params.count - countScore := math.Log(float64(countPow2)+1) + 1 - - rawScore := ppuScoreAdjusted / countScore - // We apply the logistic function, - // and then subtract 0.5, since we only deal with positive scores, - // and then we multiply by 2, to have full [0..1] range. - asymptoticScore := (1/(1+math.Exp(-rawScore)) - 0.5) * 2 - score := asymptoticScore * float64(numberOfScoreChunks) - return score -} diff --git a/txcache/score_test.go b/txcache/score_test.go deleted file mode 100644 index 51e438e1..00000000 --- a/txcache/score_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package txcache - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDefaultScoreComputer_computeRawScore(t *testing.T) { - _, txFeeHelper := dummyParamsWithGasPrice(oneBillion) - computer := newDefaultScoreComputer(txFeeHelper) - - // 50k moveGas, 100Bil minPrice -> normalizedFee 8940 - score := computer.computeRawScore(senderScoreParams{count: 1, feeScore: 18000, gas: 100000}) - assert.InDelta(t, float64(16.8753739025), score, delta) - - score = computer.computeRawScore(senderScoreParams{count: 1, feeScore: 1500000, gas: 10000000}) - assert.InDelta(t, float64(9.3096887100), score, delta) - - score = 
computer.computeRawScore(senderScoreParams{count: 1, feeScore: 5000000, gas: 30000000}) - assert.InDelta(t, float64(12.7657690638), score, delta) - - score = computer.computeRawScore(senderScoreParams{count: 2, feeScore: 36000, gas: 200000}) - assert.InDelta(t, float64(11.0106052638), score, delta) - - score = computer.computeRawScore(senderScoreParams{count: 1000, feeScore: 18000000, gas: 100000000}) - assert.InDelta(t, float64(1.8520698299), score, delta) - - score = computer.computeRawScore(senderScoreParams{count: 10000, feeScore: 180000000, gas: 1000000000}) - assert.InDelta(t, float64(1.4129614707), score, delta) -} - -func BenchmarkScoreComputer_computeRawScore(b *testing.B) { - _, txFeeHelper := dummyParams() - computer := newDefaultScoreComputer(txFeeHelper) - - for i := 0; i < b.N; i++ { - for j := uint64(0); j < 10000000; j++ { - computer.computeRawScore(senderScoreParams{count: j, feeScore: uint64(float64(8000) * float64(j)), gas: 100000 * j}) - } - } -} - -func TestDefaultScoreComputer_computeRawScoreOfTxListForSender(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPrice(oneBillion) - computer := newDefaultScoreComputer(txFeeHelper) - list := newUnconstrainedListToTest() - - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 1000, 50000, oneBillion), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("b"), ".", 1, 500, 100000, oneBillion), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("c"), ".", 1, 500, 100000, oneBillion), txGasHandler, txFeeHelper) - - require.Equal(t, uint64(3), list.countTx()) - require.Equal(t, int64(2000), list.totalBytes.Get()) - require.Equal(t, int64(250000), list.totalGas.Get()) - require.Equal(t, int64(51588), list.totalFeeScore.Get()) - - scoreParams := list.getScoreParams() - rawScore := computer.computeRawScore(scoreParams) - require.InDelta(t, float64(12.4595615805), rawScore, delta) -} - -func 
TestDefaultScoreComputer_scoreFluctuatesDeterministicallyWhileTxListForSenderMutates(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPrice(oneBillion) - computer := newDefaultScoreComputer(txFeeHelper) - list := newUnconstrainedListToTest() - - A := createTxWithParams([]byte("A"), ".", 1, 1000, 200000, oneBillion) - B := createTxWithParams([]byte("b"), ".", 1, 500, 100000, oneBillion) - C := createTxWithParams([]byte("c"), ".", 1, 500, 100000, oneBillion) - D := createTxWithParams([]byte("d"), ".", 1, 128, 50000, oneBillion) - - scoreNone := int(computer.computeScore(list.getScoreParams())) - list.AddTx(A, txGasHandler, txFeeHelper) - scoreA := int(computer.computeScore(list.getScoreParams())) - list.AddTx(B, txGasHandler, txFeeHelper) - scoreAB := int(computer.computeScore(list.getScoreParams())) - list.AddTx(C, txGasHandler, txFeeHelper) - scoreABC := int(computer.computeScore(list.getScoreParams())) - list.AddTx(D, txGasHandler, txFeeHelper) - scoreABCD := int(computer.computeScore(list.getScoreParams())) - - require.Equal(t, 0, scoreNone) - require.Equal(t, 18, scoreA) - require.Equal(t, 12, scoreAB) - require.Equal(t, 10, scoreABC) - require.Equal(t, 9, scoreABCD) - - list.RemoveTx(D) - scoreABC = int(computer.computeScore(list.getScoreParams())) - list.RemoveTx(C) - scoreAB = int(computer.computeScore(list.getScoreParams())) - list.RemoveTx(B) - scoreA = int(computer.computeScore(list.getScoreParams())) - list.RemoveTx(A) - scoreNone = int(computer.computeScore(list.getScoreParams())) - - require.Equal(t, 0, scoreNone) - require.Equal(t, 18, scoreA) - require.Equal(t, 12, scoreAB) - require.Equal(t, 10, scoreABC) -} - -func TestDefaultScoreComputer_DifferentSenders(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPrice(oneBillion) - computer := newDefaultScoreComputer(txFeeHelper) - - A := createTxWithParams([]byte("a"), "a", 1, 128, 50000, oneBillion) // min value normal tx - B := createTxWithParams([]byte("b"), "b", 1, 128, 
50000, uint64(1.5*oneBillion)) // 50% higher value normal tx - C := createTxWithParams([]byte("c"), "c", 1, 128, 10000000, oneBillion) // min value SC call - D := createTxWithParams([]byte("d"), "d", 1, 128, 10000000, uint64(1.5*oneBillion)) // 50% higher value SC call - - listA := newUnconstrainedListToTest() - listA.AddTx(A, txGasHandler, txFeeHelper) - scoreA := int(computer.computeScore(listA.getScoreParams())) - - listB := newUnconstrainedListToTest() - listB.AddTx(B, txGasHandler, txFeeHelper) - scoreB := int(computer.computeScore(listB.getScoreParams())) - - listC := newUnconstrainedListToTest() - listC.AddTx(C, txGasHandler, txFeeHelper) - scoreC := int(computer.computeScore(listC.getScoreParams())) - - listD := newUnconstrainedListToTest() - listD.AddTx(D, txGasHandler, txFeeHelper) - scoreD := int(computer.computeScore(listD.getScoreParams())) - - require.Equal(t, 33, scoreA) - require.Equal(t, 82, scoreB) - require.Equal(t, 15, scoreC) - require.Equal(t, 16, scoreD) - - // adding same type of transactions for each sender decreases the score - for i := 2; i < 1000; i++ { - A = createTxWithParams([]byte("a"+strconv.Itoa(i)), "a", uint64(i), 128, 50000, oneBillion) // min value normal tx - listA.AddTx(A, txGasHandler, txFeeHelper) - B = createTxWithParams([]byte("b"+strconv.Itoa(i)), "b", uint64(i), 128, 50000, uint64(1.5*oneBillion)) // 50% higher value normal tx - listB.AddTx(B, txGasHandler, txFeeHelper) - C = createTxWithParams([]byte("c"+strconv.Itoa(i)), "c", uint64(i), 128, 10000000, oneBillion) // min value SC call - listC.AddTx(C, txGasHandler, txFeeHelper) - D = createTxWithParams([]byte("d"+strconv.Itoa(i)), "d", uint64(i), 128, 10000000, uint64(1.5*oneBillion)) // 50% higher value SC call - listD.AddTx(D, txGasHandler, txFeeHelper) - } - - scoreA = int(computer.computeScore(listA.getScoreParams())) - scoreB = int(computer.computeScore(listB.getScoreParams())) - scoreC = int(computer.computeScore(listC.getScoreParams())) - scoreD = 
int(computer.computeScore(listD.getScoreParams())) - - require.Equal(t, 3, scoreA) - require.Equal(t, 12, scoreB) - require.Equal(t, 1, scoreC) - require.Equal(t, 1, scoreD) -} diff --git a/txcache/selection.go b/txcache/selection.go new file mode 100644 index 00000000..f889a3a9 --- /dev/null +++ b/txcache/selection.go @@ -0,0 +1,123 @@ +package txcache + +import ( + "container/heap" + "time" +) + +func (cache *TxCache) doSelectTransactions(session SelectionSession, gasRequested uint64, maxNum int, selectionLoopMaximumDuration time.Duration) (bunchOfTransactions, uint64) { + bunches := cache.acquireBunchesOfTransactions() + + return selectTransactionsFromBunches(session, bunches, gasRequested, maxNum, selectionLoopMaximumDuration) +} + +func (cache *TxCache) acquireBunchesOfTransactions() []bunchOfTransactions { + senders := cache.getSenders() + bunches := make([]bunchOfTransactions, 0, len(senders)) + + for _, sender := range senders { + bunches = append(bunches, sender.getTxs()) + } + + return bunches +} + +// Selection tolerates concurrent transaction additions / removals. +func selectTransactionsFromBunches(session SelectionSession, bunches []bunchOfTransactions, gasRequested uint64, maxNum int, selectionLoopMaximumDuration time.Duration) (bunchOfTransactions, uint64) { + selectedTransactions := make(bunchOfTransactions, 0, initialCapacityOfSelectionSlice) + + // Items popped from the heap are added to "selectedTransactions". + transactionsHeap := newMaxTransactionsHeap(len(bunches)) + heap.Init(transactionsHeap) + + // Initialize the heap with the first transaction of each bunch + for _, bunch := range bunches { + item, err := newTransactionsHeapItem(bunch) + if err != nil { + continue + } + + // Items will be reused (see below). Each sender gets one (and only one) item in the heap. + heap.Push(transactionsHeap, item) + } + + accumulatedGas := uint64(0) + selectionLoopStartTime := time.Now() + + // Select transactions (sorted). 
+ for transactionsHeap.Len() > 0 { + // Always pick the best transaction. + item := heap.Pop(transactionsHeap).(*transactionsHeapItem) + gasLimit := item.currentTransaction.Tx.GetGasLimit() + + if accumulatedGas+gasLimit > gasRequested { + break + } + if len(selectedTransactions) >= maxNum { + break + } + if len(selectedTransactions)%selectionLoopDurationCheckInterval == 0 { + if time.Since(selectionLoopStartTime) > selectionLoopMaximumDuration { + logSelect.Debug("TxCache.selectTransactionsFromBunches, selection loop timeout", "duration", time.Since(selectionLoopStartTime)) + break + } + } + + err := item.requestAccountStateIfNecessary(session) + if err != nil { + // Skip this sender. + logSelect.Debug("TxCache.selectTransactionsFromBunches, could not retrieve account state", "sender", item.sender, "err", err) + continue + } + + shouldSkipSender := detectSkippableSender(item) + if shouldSkipSender { + // Item was popped from the heap, but not used downstream. + // Therefore, the sender is completely ignored (from now on) in the current selection session. + continue + } + + shouldSkipTransaction := detectSkippableTransaction(session, item) + if !shouldSkipTransaction { + accumulatedGas += gasLimit + selectedTransactions = append(selectedTransactions, item.selectCurrentTransaction()) + } + + // If there are more transactions in the same bunch (same sender as the popped item), + // add the next one to the heap (to compete with the others). + // Heap item is reused (same originating sender), pushed back on the heap. 
+ if item.gotoNextTransaction() { + heap.Push(transactionsHeap, item) + } + } + + return selectedTransactions, accumulatedGas +} + +func detectSkippableSender(item *transactionsHeapItem) bool { + if item.detectInitialGap() { + return true + } + if item.detectMiddleGap() { + return true + } + if item.detectWillFeeExceedBalance() { + return true + } + + return false +} + +func detectSkippableTransaction(session SelectionSession, item *transactionsHeapItem) bool { + if item.detectLowerNonce() { + return true + } + if item.detectIncorrectlyGuarded(session) { + return true + } + if item.detectNonceDuplicate() { + return true + } + + return false +} diff --git a/txcache/selection_test.go b/txcache/selection_test.go new file mode 100644 index 00000000..798876cd --- /dev/null +++ b/txcache/selection_test.go @@ -0,0 +1,579 @@ +package txcache + +import ( + "bytes" + "fmt" + "math" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" + "github.com/stretchr/testify/require" +) + +func TestTxCache_SelectTransactions_Dummy(t *testing.T) { + t.Run("all having same PPU", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 5) + session.SetNonce([]byte("carol"), 1) + + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) + + selected, accumulatedGas := cache.SelectTransactions(session, 
math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, selected, 8) + require.Equal(t, 400000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) + require.Equal(t, "hash-alice-2", string(selected[1].TxHash)) + require.Equal(t, "hash-alice-3", string(selected[2].TxHash)) + require.Equal(t, "hash-alice-4", string(selected[3].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[4].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[5].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[6].TxHash)) + require.Equal(t, "hash-carol-1", string(selected[7].TxHash)) + }) + + t.Run("alice > carol > bob", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 5) + session.SetNonce([]byte("carol"), 3) + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasPrice(100)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasPrice(50)) + cache.AddTx(createTx([]byte("hash-carol-3"), "carol", 3).withGasPrice(75)) + + selected, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, selected, 3) + require.Equal(t, 150000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) + require.Equal(t, "hash-carol-3", string(selected[1].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[2].TxHash)) + }) +} + +func TestTxCache_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { + t.Run("transactions with no data field", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 5) + session.SetNonce([]byte("carol"), 1) + + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4).withGasLimit(100000)) + 
cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasLimit(200000)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7).withGasLimit(400000)) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) + + selected, accumulatedGas := cache.SelectTransactions(session, 760000, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, selected, 5) + require.Equal(t, 750000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-bob-5", string(selected[0].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[1].TxHash)) + require.Equal(t, "hash-carol-1", string(selected[2].TxHash)) + require.Equal(t, "hash-alice-1", string(selected[3].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[4].TxHash)) + }) +} + +func TestTxCache_SelectTransactions_HandlesNotExecutableTransactions(t *testing.T) { + t.Run("with middle gaps", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 42) + session.SetNonce([]byte("carol"), 7) + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-5"), "alice", 5)) // gap + cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 42)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 44)) // gap + cache.AddTx(createTx([]byte("hash-bob-45"), "bob", 45)) + cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) + cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) + 
cache.AddTx(createTx([]byte("hash-carol-10"), "carol", 10)) // gap + cache.AddTx(createTx([]byte("hash-carol-11"), "carol", 11)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + expectedNumSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol + require.Len(t, sorted, expectedNumSelected) + require.Equal(t, 300000, int(accumulatedGas)) + }) + + t.Run("with initial gaps", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 42) + session.SetNonce([]byte("carol"), 7) + + // Good + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + + // Initial gap + cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 44)) + cache.AddTx(createTx([]byte("hash-bob-43"), "bob", 45)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 46)) + + // Good + cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) + cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + expectedNumSelected := 3 + 0 + 2 // 3 alice + 0 bob + 2 carol + require.Len(t, sorted, expectedNumSelected) + require.Equal(t, 250000, int(accumulatedGas)) + }) + + t.Run("with lower nonces", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 42) + session.SetNonce([]byte("carol"), 7) + + // Good + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + + // A few with lower nonce + 
cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 40)) + cache.AddTx(createTx([]byte("hash-bob-43"), "bob", 41)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 42)) + + // Good + cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) + cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + expectedNumSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol + require.Len(t, sorted, expectedNumSelected) + require.Equal(t, 300000, int(accumulatedGas)) + }) + + t.Run("with duplicated nonces", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3a"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-3b"), "alice", 3).withGasPrice(oneBillion * 2)) + cache.AddTx(createTx([]byte("hash-alice-3c"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, sorted, 4) + require.Equal(t, 200000, int(accumulatedGas)) + + require.Equal(t, "hash-alice-1", string(sorted[0].TxHash)) + require.Equal(t, "hash-alice-2", string(sorted[1].TxHash)) + require.Equal(t, "hash-alice-3b", string(sorted[2].TxHash)) + require.Equal(t, "hash-alice-4", string(sorted[3].TxHash)) + }) + + t.Run("with fee exceeding balance", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetBalance([]byte("alice"), big.NewInt(150000000000000)) + session.SetNonce([]byte("bob"), 42) + session.SetBalance([]byte("bob"), big.NewInt(70000000000000)) + + // Enough 
balance + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + + // Not enough balance + cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 40)) + cache.AddTx(createTx([]byte("hash-bob-43"), "bob", 41)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 42)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + expectedNumSelected := 3 + 1 // 3 alice + 1 bob + require.Len(t, sorted, expectedNumSelected) + require.Equal(t, 200000, int(accumulatedGas)) + }) + + t.Run("with incorrectly guarded", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 42) + + session.IsIncorrectlyGuardedCalled = func(tx data.TransactionHandler) bool { + return bytes.Equal(tx.GetData(), []byte("t")) + } + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withData([]byte("x")).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-bob-42a"), "bob", 42).withData([]byte("y")).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-bob-43a"), "bob", 43).withData([]byte("z")).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-bob-43b"), "bob", 43).withData([]byte("t")).withGasLimit(100000)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, sorted, 3) + require.Equal(t, 300000, int(accumulatedGas)) + + require.Equal(t, "hash-alice-1", string(sorted[0].TxHash)) + require.Equal(t, "hash-bob-42a", string(sorted[1].TxHash)) + require.Equal(t, "hash-bob-43a", string(sorted[2].TxHash)) + }) +} + +func TestTxCache_SelectTransactions_WhenTransactionsAddedInReversedNonceOrder(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := 
txcachemocks.NewSelectionSessionMock() + + // Add "nSenders" * "nTransactionsPerSender" transactions in the cache (in reversed nonce order) + nSenders := 1000 + nTransactionsPerSender := 100 + nTotalTransactions := nSenders * nTransactionsPerSender + + for senderTag := 0; senderTag < nSenders; senderTag++ { + sender := fmt.Sprintf("sender:%d", senderTag) + + for txNonce := nTransactionsPerSender - 1; txNonce >= 0; txNonce-- { + txHash := fmt.Sprintf("hash:%d:%d", senderTag, txNonce) + tx := createTx([]byte(txHash), sender, uint64(txNonce)) + cache.AddTx(tx) + } + } + + require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, sorted, nTotalTransactions) + require.Equal(t, 5_000_000_000, int(accumulatedGas)) + + // Check order + nonces := make(map[string]uint64, nSenders) + + for _, tx := range sorted { + nonce := tx.Tx.GetNonce() + sender := string(tx.Tx.GetSndAddr()) + previousNonce := nonces[sender] + + require.LessOrEqual(t, previousNonce, nonce) + nonces[sender] = nonce + } +} + +func TestTxCache_selectTransactionsFromBunches(t *testing.T) { + t.Run("empty cache", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + selected, accumulatedGas := selectTransactionsFromBunches(session, []bunchOfTransactions{}, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + + require.Equal(t, 0, len(selected)) + require.Equal(t, uint64(0), accumulatedGas) + }) +} + +func TestBenchmarkTxCache_acquireBunchesOfTransactions(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 300001, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + } + + host := txcachemocks.NewMempoolHostMock() + + sw := core.NewStopWatch() + + 
t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 10000, 100) + + require.Equal(t, 1000000, int(cache.CountTx())) + + sw.Start(t.Name()) + bunches := cache.acquireBunchesOfTransactions() + sw.Stop(t.Name()) + + require.Len(t, bunches, 10000) + require.Len(t, bunches[0], 100) + require.Len(t, bunches[len(bunches)-1], 100) + }) + + t.Run("numSenders = 50000, numTransactions = 2", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 50000, 2) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + bunches := cache.acquireBunchesOfTransactions() + sw.Stop(t.Name()) + + require.Len(t, bunches, 50000) + require.Len(t, bunches[0], 2) + require.Len(t, bunches[len(bunches)-1], 2) + }) + + t.Run("numSenders = 100000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 100000, 1) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + bunches := cache.acquireBunchesOfTransactions() + sw.Stop(t.Name()) + + require.Len(t, bunches, 100000) + require.Len(t, bunches[0], 1) + require.Len(t, bunches[len(bunches)-1], 1) + }) + + t.Run("numSenders = 300000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 300000, 1) + + require.Equal(t, 300000, int(cache.CountTx())) + + sw.Start(t.Name()) + bunches := cache.acquireBunchesOfTransactions() + sw.Stop(t.Name()) + + require.Len(t, bunches, 300000) + require.Len(t, bunches[0], 1) + require.Len(t, bunches[len(bunches)-1], 1) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: 
GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.014468s (TestBenchmarkTxCache_acquireBunchesOfTransactions/numSenders_=_10000,_numTransactions_=_100) + // 0.019183s (TestBenchmarkTxCache_acquireBunchesOfTransactions/numSenders_=_50000,_numTransactions_=_2) + // 0.013876s (TestBenchmarkTxCache_acquireBunchesOfTransactions/numSenders_=_100000,_numTransactions_=_1) + // 0.056631s (TestBenchmarkTxCache_acquireBunchesOfTransactions/numSenders_=_300000,_numTransactions_=_1) +} + +func TestBenchmarkTxCache_selectTransactionsFromBunches(t *testing.T) { + sw := core.NewStopWatch() + + t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + + sw.Start(t.Name()) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(selected)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + + sw.Start(t.Name()) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(selected)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + t.Run("numSenders = 100000, numTransactions = 3", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(100000, 3) + + sw.Start(t.Name()) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 
10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(selected)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + t.Run("numSenders = 300000, numTransactions = 1", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(300000, 1) + + sw.Start(t.Name()) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(selected)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.057519s (TestBenchmarkTxCache_selectTransactionsFromBunches/numSenders_=_1000,_numTransactions_=_1000) + // 0.048023s (TestBenchmarkTxCache_selectTransactionsFromBunches/numSenders_=_10000,_numTransactions_=_100) + // 0.289515s (TestBenchmarkTxCache_selectTransactionsFromBunches/numSenders_=_100000,_numTransactions_=_3) + // 0.460242s (TestBenchmarkTxCache_selectTransactionsFromBunches/numSenders_=_300000,_numTransactions_=_1) +} + +func TestTxCache_selectTransactionsFromBunches_loopBreaks_whenTakesTooLong(t *testing.T) { + t.Run("numSenders = 300000, numTransactions = 1", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(300000, 1) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, 50_000, 1*time.Millisecond) + + require.Less(t, len(selected), 50_000) + require.Less(t, int(accumulatedGas), 10_000_000_000) + }) +} + +func 
TestBenchmarkTxCache_doSelectTransactions(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 300001, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + } + + host := txcachemocks.NewMempoolHostMock() + session := txcachemocks.NewSelectionSessionMock() + + sw := core.NewStopWatch() + + t.Run("numSenders = 10000, numTransactions = 100, maxNum = 30_000", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 10000, 100) + + require.Equal(t, 1000000, int(cache.CountTx())) + + sw.Start(t.Name()) + selected, accumulatedGas := cache.SelectTransactions(session, 10_000_000_000, 30_000, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 30_000, len(selected)) + require.Equal(t, uint64(1_500_000_000), accumulatedGas) + }) + + t.Run("numSenders = 50000, numTransactions = 2, maxNum = 30_000", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 50000, 2) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + selected, accumulatedGas := cache.SelectTransactions(session, 10_000_000_000, 30_000, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 30_000, len(selected)) + require.Equal(t, uint64(1_500_000_000), accumulatedGas) + }) + + t.Run("numSenders = 100000, numTransactions = 1, maxNum = 30_000", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 100000, 1) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + selected, accumulatedGas := cache.SelectTransactions(session, 10_000_000_000, 30_000, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + 
require.Equal(t, 30_000, len(selected)) + require.Equal(t, uint64(1_500_000_000), accumulatedGas) + }) + + t.Run("numSenders = 300000, numTransactions = 1, maxNum = 30_000", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 300000, 1) + + require.Equal(t, 300000, int(cache.CountTx())) + + sw.Start(t.Name()) + selected, accumulatedGas := cache.SelectTransactions(session, 10_000_000_000, 30_000, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 30_000, len(selected)) + require.Equal(t, uint64(1_500_000_000), accumulatedGas) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.042209s (TestBenchmarkTxCache_doSelectTransactions/numSenders_=_10000,_numTransactions_=_100,_maxNum_=_30_000) + // 0.055784s (TestBenchmarkTxCache_doSelectTransactions/numSenders_=_50000,_numTransactions_=_2,_maxNum_=_30_000) + // 0.078637s (TestBenchmarkTxCache_doSelectTransactions/numSenders_=_100000,_numTransactions_=_1,_maxNum_=_30_000) + // 0.222669s (TestBenchmarkTxCache_doSelectTransactions/numSenders_=_300000,_numTransactions_=_1,_maxNum_=_30_000) +} diff --git a/txcache/sweeping.go b/txcache/sweeping.go deleted file mode 100644 index 92255309..00000000 --- a/txcache/sweeping.go +++ /dev/null @@ -1,29 +0,0 @@ -package txcache - -func (cache *TxCache) initSweepable() { - cache.sweepingListOfSenders = make([]*txListForSender, 0, estimatedNumOfSweepableSendersPerSelection) -} - -func (cache *TxCache) collectSweepable(list *txListForSender) { - if !list.sweepable.IsSet() { - return - } - - cache.sweepingMutex.Lock() - cache.sweepingListOfSenders = append(cache.sweepingListOfSenders, list) - cache.sweepingMutex.Unlock() 
-} - -func (cache *TxCache) sweepSweepable() { - cache.sweepingMutex.Lock() - defer cache.sweepingMutex.Unlock() - - if len(cache.sweepingListOfSenders) == 0 { - return - } - - stopWatch := cache.monitorSweepingStart() - numTxs, numSenders := cache.evictSendersAndTheirTxs(cache.sweepingListOfSenders) - cache.initSweepable() - cache.monitorSweepingEnd(numTxs, numSenders, stopWatch) -} diff --git a/txcache/sweeping_test.go b/txcache/sweeping_test.go deleted file mode 100644 index a700f7a8..00000000 --- a/txcache/sweeping_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package txcache - -import ( - "math" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSweeping_CollectSweepable(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) - cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) - cache.AddTx(createTx([]byte("carol-42"), "carol", 42)) - - // Senders have no initial gaps - selection := cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - - // Alice and Bob have initial gaps, Carol doesn't - cache.NotifyAccountNonce([]byte("alice"), 10) - cache.NotifyAccountNonce([]byte("bob"), 20) - - // 1st fail - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 1, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 1, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 1, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) - - // 2nd fail, grace period, one grace transaction for Alice and Bob - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 2, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 2, 
cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) - - // 3nd fail, collect Alice and Bob as sweepables - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 1, len(selection)) - require.Equal(t, 2, len(cache.sweepingListOfSenders)) - require.True(t, cache.isSenderSweepable("alice")) - require.True(t, cache.isSenderSweepable("bob")) - require.Equal(t, 3, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 3, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) -} - -func TestSweeping_WhenSendersEscapeCollection(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) - cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) - cache.AddTx(createTx([]byte("carol-42"), "carol", 42)) - - // Senders have no initial gaps - selection := cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - - // Alice and Bob have initial gaps, Carol doesn't - cache.NotifyAccountNonce([]byte("alice"), 10) - cache.NotifyAccountNonce([]byte("bob"), 20) - - // 1st fail - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 1, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 1, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 1, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) - - // 2nd fail, grace period, one grace transaction for Alice and Bob - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 2, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 2, 
cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) - - // 3rd attempt, but with gaps resolved - // Alice and Bob escape and won't be collected as sweepables - cache.NotifyAccountNonce([]byte("alice"), 42) - cache.NotifyAccountNonce([]byte("bob"), 42) - - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) -} - -func TestSweeping_SweepSweepable(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) - cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) - cache.AddTx(createTx([]byte("carol-42"), "carol", 42)) - - // Fake "Alice" and "Bob" as sweepable - cache.sweepingListOfSenders = []*txListForSender{ - cache.getListForSender("alice"), - cache.getListForSender("bob"), - } - - require.Equal(t, uint64(3), cache.CountTx()) - require.Equal(t, uint64(3), cache.CountSenders()) - - cache.sweepSweepable() - - require.Equal(t, uint64(1), cache.CountTx()) - require.Equal(t, uint64(1), cache.CountSenders()) -} diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 5911f434..49dc0e85 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -2,22 +2,48 @@ package txcache import ( "encoding/binary" + "math/big" + "math/rand" "sync" - "testing" "time" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" ) const oneMilion = 1000000 const oneBillion = oneMilion * 1000 -const delta = 0.00000001 +const oneQuintillion = 1_000_000_000_000_000_000 const estimatedSizeOfBoundedTxFields = uint64(128) +const 
hashLength = 32 + +var oneQuintillionBig = big.NewInt(oneQuintillion) + +// The GitHub Actions runners are (extremely) slow. +const selectionLoopMaximumDuration = 30 * time.Second func (cache *TxCache) areInternalMapsConsistent() bool { - journal := cache.checkInternalConsistency() - return journal.isFine() + internalMapByHash := cache.txByHash + internalMapBySender := cache.txListBySender + + senders := internalMapBySender.getSenders() + numInMapByHash := len(internalMapByHash.keys()) + numInMapBySender := 0 + numMissingInMapByHash := 0 + + for _, sender := range senders { + numInMapBySender += int(sender.countTx()) + + for _, hash := range sender.getTxsHashes() { + _, ok := internalMapByHash.getTx(string(hash)) + if !ok { + numMissingInMapByHash++ + } + } + } + + isFine := (numInMapByHash == numInMapBySender) && (numMissingInMapByHash == 0) + return isFine } func (cache *TxCache) getHashesForSender(sender string) []string { @@ -37,30 +63,23 @@ func (txMap *txListBySenderMap) testGetListForSender(sender string) *txListForSe return list } -func (cache *TxCache) getScoreOfSender(sender string) uint32 { - list := cache.getListForSender(sender) - scoreParams := list.getScoreParams() - computer := cache.txListBySender.scoreComputer - return computer.computeScore(scoreParams) +func (listForSender *txListForSender) getTxHashesAsStrings() []string { + hashes := listForSender.getTxsHashes() + return hashesAsStrings(hashes) } -func (cache *TxCache) getNumFailedSelectionsOfSender(sender string) int { - return int(cache.getListForSender(sender).numFailedSelections.Get()) -} +func (listForSender *txListForSender) getTxsHashes() [][]byte { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() -func (cache *TxCache) isSenderSweepable(sender string) bool { - for _, item := range cache.sweepingListOfSenders { - if item.sender == sender { - return true - } - } + result := make([][]byte, 0, listForSender.countTx()) - return false -} + for element := 
listForSender.items.Front(); element != nil; element = element.Next() { + value := element.Value.(*WrappedTransaction) + result = append(result, value.TxHash) + } -func (listForSender *txListForSender) getTxHashesAsStrings() []string { - hashes := listForSender.getTxHashes() - return hashesAsStrings(hashes) + return result } func hashesAsStrings(hashes [][]byte) []string { @@ -85,31 +104,45 @@ func addManyTransactionsWithUniformDistribution(cache *TxCache, nSenders int, nT for senderTag := 0; senderTag < nSenders; senderTag++ { sender := createFakeSenderAddress(senderTag) - for txNonce := nTransactionsPerSender; txNonce > 0; txNonce-- { - txHash := createFakeTxHash(sender, txNonce) - tx := createTx(txHash, string(sender), uint64(txNonce)) - cache.AddTx(tx) + for nonce := nTransactionsPerSender - 1; nonce >= 0; nonce-- { + transactionHash := createFakeTxHash(sender, nonce) + gasPrice := oneBillion + rand.Intn(3*oneBillion) + transaction := createTx(transactionHash, string(sender), uint64(nonce)).withGasPrice(uint64(gasPrice)) + + cache.AddTx(transaction) } } } -func createTx(hash []byte, sender string, nonce uint64) *WrappedTransaction { - tx := &transaction.Transaction{ - SndAddr: []byte(sender), - Nonce: nonce, - } +func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []bunchOfTransactions { + bunches := make([]bunchOfTransactions, 0, nSenders) + host := txcachemocks.NewMempoolHostMock() - return &WrappedTransaction{ - Tx: tx, - TxHash: hash, - Size: int64(estimatedSizeOfBoundedTxFields), + for senderTag := 0; senderTag < nSenders; senderTag++ { + bunch := make(bunchOfTransactions, 0, nTransactionsPerSender) + sender := createFakeSenderAddress(senderTag) + + for nonce := 0; nonce < nTransactionsPerSender; nonce++ { + transactionHash := createFakeTxHash(sender, nonce) + gasPrice := oneBillion + rand.Intn(3*oneBillion) + transaction := createTx(transactionHash, string(sender), uint64(nonce)).withGasPrice(uint64(gasPrice)) 
+ transaction.precomputeFields(host) + + bunch = append(bunch, transaction) + } + + bunches = append(bunches, bunch) } + + return bunches } -func createTxWithGasLimit(hash []byte, sender string, nonce uint64, gasLimit uint64) *WrappedTransaction { + +func createTx(hash []byte, sender string, nonce uint64) *WrappedTransaction { tx := &transaction.Transaction{ SndAddr: []byte(sender), Nonce: nonce, - GasLimit: gasLimit, + GasLimit: 50000, + GasPrice: oneBillion, } return &WrappedTransaction{ @@ -119,25 +152,44 @@ func createTxWithGasLimit(hash []byte, sender string, nonce uint64, gasLimit uin } } -func createTxWithParams(hash []byte, sender string, nonce uint64, size uint64, gasLimit uint64, gasPrice uint64) *WrappedTransaction { - dataLength := int(size) - int(estimatedSizeOfBoundedTxFields) - if dataLength < 0 { - panic("createTxWithData(): invalid length for dummy tx") - } +func (wrappedTx *WrappedTransaction) withSize(size uint64) *WrappedTransaction { + dataLength := size - estimatedSizeOfBoundedTxFields + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Data = make([]byte, dataLength) + wrappedTx.Size = int64(size) + return wrappedTx +} - tx := &transaction.Transaction{ - SndAddr: []byte(sender), - Nonce: nonce, - Data: make([]byte, dataLength), - GasLimit: gasLimit, - GasPrice: gasPrice, - } +func (wrappedTx *WrappedTransaction) withData(data []byte) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Data = data + wrappedTx.Size = int64(len(data)) + int64(estimatedSizeOfBoundedTxFields) + return wrappedTx +} - return &WrappedTransaction{ - Tx: tx, - TxHash: hash, - Size: int64(size), - } +func (wrappedTx *WrappedTransaction) withDataLength(dataLength int) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Data = make([]byte, dataLength) + wrappedTx.Size = int64(dataLength) + int64(estimatedSizeOfBoundedTxFields) + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withGasPrice(gasPrice uint64) 
*WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.GasPrice = gasPrice + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withGasLimit(gasLimit uint64) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.GasLimit = gasLimit + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withValue(value *big.Int) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Value = value + return wrappedTx } func createFakeSenderAddress(senderTag int) []byte { @@ -155,16 +207,6 @@ func createFakeTxHash(fakeSenderAddress []byte, nonce int) []byte { return bytes } -func measureWithStopWatch(b *testing.B, function func()) { - sw := core.NewStopWatch() - sw.Start("time") - function() - sw.Stop("time") - - duration := sw.GetMeasurementsMap()["time"] - b.ReportMetric(duration, "time@stopWatch") -} - // waitTimeout waits for the waitgroup for the specified max timeout. // Returns true if waiting timed out. // Reference: https://stackoverflow.com/a/32843750/1475331 @@ -181,12 +223,3 @@ func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { return true // timed out } } - -var _ scoreComputer = (*disabledScoreComputer)(nil) - -type disabledScoreComputer struct { -} - -func (computer *disabledScoreComputer) computeScore(_ senderScoreParams) uint32 { - return 0 -} diff --git a/txcache/transactionsHeap.go b/txcache/transactionsHeap.go new file mode 100644 index 00000000..28b4e072 --- /dev/null +++ b/txcache/transactionsHeap.go @@ -0,0 +1,59 @@ +package txcache + +type transactionsHeap struct { + items []*transactionsHeapItem + less func(i, j int) bool +} + +func newMinTransactionsHeap(capacity int) *transactionsHeap { + h := transactionsHeap{ + items: make([]*transactionsHeapItem, 0, capacity), + } + + h.less = func(i, j int) bool { + return h.items[j].isCurrentTransactionMoreValuableForNetwork(h.items[i]) + } + + return &h +} + +func newMaxTransactionsHeap(capacity int) *transactionsHeap { + h := 
transactionsHeap{ + items: make([]*transactionsHeapItem, 0, capacity), + } + + h.less = func(i, j int) bool { + return h.items[i].isCurrentTransactionMoreValuableForNetwork(h.items[j]) + } + + return &h +} + +// Len returns the number of elements in the heap. +func (h *transactionsHeap) Len() int { return len(h.items) } + +// Less reports whether the element with index i should sort before the element with index j. +func (h *transactionsHeap) Less(i, j int) bool { + return h.less(i, j) +} + +// Swap swaps the elements with indexes i and j. +func (h *transactionsHeap) Swap(i, j int) { + h.items[i], h.items[j] = h.items[j], h.items[i] +} + +// Push pushes the element x onto the heap. +func (h *transactionsHeap) Push(x interface{}) { + h.items = append(h.items, x.(*transactionsHeapItem)) +} + +// Pop removes and returns the minimum element (according to "h.less") from the heap. +func (h *transactionsHeap) Pop() interface{} { + // Standard code when storing the heap in a slice: + // https://pkg.go.dev/container/heap + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} diff --git a/txcache/transactionsHeapItem.go b/txcache/transactionsHeapItem.go new file mode 100644 index 00000000..5d09dd59 --- /dev/null +++ b/txcache/transactionsHeapItem.go @@ -0,0 +1,209 @@ +package txcache + +import ( + "math/big" + + "github.com/multiversx/mx-chain-storage-go/types" +) + +type transactionsHeapItem struct { + sender []byte + bunch bunchOfTransactions + + // The sender's state, as fetched in "requestAccountStateIfNecessary". 
+ senderState *types.AccountState + + currentTransactionIndex int + currentTransaction *WrappedTransaction + currentTransactionNonce uint64 + latestSelectedTransaction *WrappedTransaction + latestSelectedTransactionNonce uint64 + + consumedBalance *big.Int +} + +func newTransactionsHeapItem(bunch bunchOfTransactions) (*transactionsHeapItem, error) { + if len(bunch) == 0 { + return nil, errEmptyBunchOfTransactions + } + + firstTransaction := bunch[0] + + return &transactionsHeapItem{ + sender: firstTransaction.Tx.GetSndAddr(), + bunch: bunch, + + senderState: nil, + + currentTransactionIndex: 0, + currentTransaction: firstTransaction, + currentTransactionNonce: firstTransaction.Tx.GetNonce(), + latestSelectedTransaction: nil, + + consumedBalance: big.NewInt(0), + }, nil +} + +func (item *transactionsHeapItem) selectCurrentTransaction() *WrappedTransaction { + item.accumulateConsumedBalance() + + item.latestSelectedTransaction = item.currentTransaction + item.latestSelectedTransactionNonce = item.currentTransactionNonce + + return item.currentTransaction +} + +func (item *transactionsHeapItem) accumulateConsumedBalance() { + fee := item.currentTransaction.Fee + if fee != nil { + item.consumedBalance.Add(item.consumedBalance, fee) + } + + transferredValue := item.currentTransaction.TransferredValue + if transferredValue != nil { + item.consumedBalance.Add(item.consumedBalance, transferredValue) + } +} + +func (item *transactionsHeapItem) gotoNextTransaction() bool { + if item.currentTransactionIndex+1 >= len(item.bunch) { + return false + } + + item.currentTransactionIndex++ + item.currentTransaction = item.bunch[item.currentTransactionIndex] + item.currentTransactionNonce = item.currentTransaction.Tx.GetNonce() + return true +} + +func (item *transactionsHeapItem) detectInitialGap() bool { + if item.latestSelectedTransaction != nil { + return false + } + if item.senderState == nil { + return false + } + + hasInitialGap := item.currentTransactionNonce > 
item.senderState.Nonce + if hasInitialGap { + logSelect.Trace("transactionsHeapItem.detectInitialGap, initial gap", + "tx", item.currentTransaction.TxHash, + "nonce", item.currentTransactionNonce, + "sender", item.sender, + "senderState.Nonce", item.senderState.Nonce, + ) + } + + return hasInitialGap +} + +func (item *transactionsHeapItem) detectMiddleGap() bool { + if item.latestSelectedTransaction == nil { + return false + } + + // Detect middle gap. + hasMiddleGap := item.currentTransactionNonce > item.latestSelectedTransactionNonce+1 + if hasMiddleGap { + logSelect.Trace("transactionsHeapItem.detectMiddleGap, middle gap", + "tx", item.currentTransaction.TxHash, + "nonce", item.currentTransactionNonce, + "sender", item.sender, + "previousSelectedNonce", item.latestSelectedTransactionNonce, + ) + } + + return hasMiddleGap +} + +func (item *transactionsHeapItem) detectWillFeeExceedBalance() bool { + if item.senderState == nil { + return false + } + + fee := item.currentTransaction.Fee + if fee == nil { + return false + } + + // Here, we are not interested into an eventual transfer of value (we only check if there's enough balance to pay the transaction fee). 
+ futureConsumedBalance := new(big.Int).Add(item.consumedBalance, fee) + senderBalance := item.senderState.Balance + + willFeeExceedBalance := futureConsumedBalance.Cmp(senderBalance) > 0 + if willFeeExceedBalance { + logSelect.Trace("transactionsHeapItem.detectWillFeeExceedBalance", + "tx", item.currentTransaction.TxHash, + "sender", item.sender, + "balance", item.senderState.Balance, + "consumedBalance", item.consumedBalance, + ) + } + + return willFeeExceedBalance +} + +func (item *transactionsHeapItem) detectLowerNonce() bool { + if item.senderState == nil { + return false + } + + isLowerNonce := item.currentTransactionNonce < item.senderState.Nonce + if isLowerNonce { + logSelect.Trace("transactionsHeapItem.detectLowerNonce", + "tx", item.currentTransaction.TxHash, + "nonce", item.currentTransactionNonce, + "sender", item.sender, + "senderState.Nonce", item.senderState.Nonce, + ) + } + + return isLowerNonce +} + +func (item *transactionsHeapItem) detectIncorrectlyGuarded(session SelectionSession) bool { + IsIncorrectlyGuarded := session.IsIncorrectlyGuarded(item.currentTransaction.Tx) + if IsIncorrectlyGuarded { + logSelect.Trace("transactionsHeapItem.detectIncorrectlyGuarded", + "tx", item.currentTransaction.TxHash, + "sender", item.sender, + ) + } + + return IsIncorrectlyGuarded +} + +func (item *transactionsHeapItem) detectNonceDuplicate() bool { + if item.latestSelectedTransaction == nil { + return false + } + + isDuplicate := item.currentTransactionNonce == item.latestSelectedTransactionNonce + if isDuplicate { + logSelect.Trace("transactionsHeapItem.detectNonceDuplicate", + "tx", item.currentTransaction.TxHash, + "sender", item.sender, + "nonce", item.currentTransactionNonce, + ) + } + + return isDuplicate +} + +func (item *transactionsHeapItem) requestAccountStateIfNecessary(session SelectionSession) error { + if item.senderState != nil { + return nil + } + + senderState, err := session.GetAccountState(item.sender) + if err != nil { + return err + } + + 
item.senderState = senderState + return nil +} + +func (item *transactionsHeapItem) isCurrentTransactionMoreValuableForNetwork(other *transactionsHeapItem) bool { + return item.currentTransaction.isTransactionMoreValuableForNetwork(other.currentTransaction) +} diff --git a/txcache/transactionsHeapItem_test.go b/txcache/transactionsHeapItem_test.go new file mode 100644 index 00000000..55199472 --- /dev/null +++ b/txcache/transactionsHeapItem_test.go @@ -0,0 +1,327 @@ +package txcache + +import ( + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" + "github.com/multiversx/mx-chain-storage-go/types" + "github.com/stretchr/testify/require" +) + +func TestNewTransactionsHeapItem(t *testing.T) { + t.Run("empty bunch", func(t *testing.T) { + item, err := newTransactionsHeapItem(nil) + require.Nil(t, item) + require.Equal(t, errEmptyBunchOfTransactions, err) + }) + + t.Run("non-empty bunch", func(t *testing.T) { + bunch := bunchOfTransactions{ + createTx([]byte("tx-1"), "alice", 42), + } + + item, err := newTransactionsHeapItem(bunch) + require.NotNil(t, item) + require.Nil(t, err) + + require.Equal(t, []byte("alice"), item.sender) + require.Equal(t, bunch, item.bunch) + require.Nil(t, item.senderState) + require.Equal(t, 0, item.currentTransactionIndex) + require.Equal(t, bunch[0], item.currentTransaction) + require.Equal(t, uint64(42), item.currentTransactionNonce) + require.Nil(t, item.latestSelectedTransaction) + require.Equal(t, big.NewInt(0), item.consumedBalance) + }) +} + +func TestTransactionsHeapItem_selectTransaction(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + a.precomputeFields(host) + b.precomputeFields(host) + + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + selected := item.selectCurrentTransaction() + 
require.Equal(t, a, selected) + require.Equal(t, a, item.latestSelectedTransaction) + require.Equal(t, 42, int(item.latestSelectedTransactionNonce)) + require.Equal(t, "50000000000000", item.consumedBalance.String()) + + ok := item.gotoNextTransaction() + require.True(t, ok) + + selected = item.selectCurrentTransaction() + require.Equal(t, b, selected) + require.Equal(t, b, item.latestSelectedTransaction) + require.Equal(t, 43, int(item.latestSelectedTransactionNonce)) + require.Equal(t, "100000000000000", item.consumedBalance.String()) + + ok = item.gotoNextTransaction() + require.False(t, ok) +} + +func TestTransactionsHeapItem_detectInitialGap(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + + t.Run("unknown", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + require.False(t, item.detectInitialGap()) + }) + + t.Run("known, without gap", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + item.senderState = &types.AccountState{ + Nonce: 42, + } + + require.False(t, item.detectInitialGap()) + }) + + t.Run("known, without gap", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + item.senderState = &types.AccountState{ + Nonce: 41, + } + + require.True(t, item.detectInitialGap()) + }) +} + +func TestTransactionsHeapItem_detectMiddleGap(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + c := createTx([]byte("tx-3"), "alice", 44) + + t.Run("unknown", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = nil + require.False(t, item.detectInitialGap()) + }) + + t.Run("known, without gap", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = a + item.latestSelectedTransactionNonce = 42 + 
item.currentTransaction = b + item.currentTransactionNonce = 43 + + require.False(t, item.detectMiddleGap()) + }) + + t.Run("known, without gap", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = a + item.latestSelectedTransactionNonce = 42 + item.currentTransaction = c + item.currentTransactionNonce = 44 + + require.True(t, item.detectMiddleGap()) + }) +} + +func TestTransactionsHeapItem_detectWillFeeExceedBalance(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + c := createTx([]byte("tx-3"), "alice", 44).withValue(big.NewInt(1000000000000000000)) + d := createTx([]byte("tx-4"), "alice", 45) + + a.precomputeFields(host) + b.precomputeFields(host) + c.precomputeFields(host) + d.precomputeFields(host) + + t.Run("unknown", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + + require.NoError(t, err) + require.False(t, item.detectWillFeeExceedBalance()) + }) + + t.Run("known, not exceeded, then exceeded (a)", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + item.senderState = &types.AccountState{ + Balance: big.NewInt(50000000000001), + } + + require.False(t, item.detectWillFeeExceedBalance()) + + _ = item.selectCurrentTransaction() + _ = item.gotoNextTransaction() + + require.Equal(t, "50000000000000", item.consumedBalance.String()) + require.True(t, item.detectWillFeeExceedBalance()) + }) + + t.Run("known, not exceeded, then exceeded (b)", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b, c, d}) + require.NoError(t, err) + + item.senderState = &types.AccountState{ + Balance: big.NewInt(1000000000000000000 + 2*50000000000000 + 1), + } + + require.False(t, item.detectWillFeeExceedBalance()) + + // Select "a", move to "b". 
+ _ = item.selectCurrentTransaction() + _ = item.gotoNextTransaction() + + require.Equal(t, "50000000000000", item.consumedBalance.String()) + require.False(t, item.detectWillFeeExceedBalance()) + + // Select "b", move to "c". + _ = item.selectCurrentTransaction() + _ = item.gotoNextTransaction() + + require.Equal(t, "100000000000000", item.consumedBalance.String()) + require.False(t, item.detectWillFeeExceedBalance()) + + // Select "c", move to "d". + _ = item.selectCurrentTransaction() + _ = item.gotoNextTransaction() + + require.Equal(t, "1000150000000000000", item.consumedBalance.String()) + require.True(t, item.detectWillFeeExceedBalance()) + }) +} + +func TestTransactionsHeapItem_detectLowerNonce(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + + t.Run("unknown", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + require.False(t, item.detectInitialGap()) + }) + + t.Run("known, good", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + item.senderState = &types.AccountState{ + Nonce: 42, + } + + require.False(t, item.detectLowerNonce()) + }) + + t.Run("known, lower", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + item.senderState = &types.AccountState{ + Nonce: 44, + } + + require.True(t, item.detectLowerNonce()) + }) +} + +func TestTransactionsHeapItem_detectNonceDuplicate(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + c := createTx([]byte("tx-3"), "alice", 42) + + t.Run("unknown", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = nil + require.False(t, item.detectNonceDuplicate()) + }) + + t.Run("no duplicates", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = a + 
item.latestSelectedTransactionNonce = 42 + item.currentTransaction = b + item.currentTransactionNonce = 43 + + require.False(t, item.detectNonceDuplicate()) + }) + + t.Run("duplicates", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = a + item.latestSelectedTransactionNonce = 42 + item.currentTransaction = c + item.currentTransactionNonce = 42 + + require.True(t, item.detectNonceDuplicate()) + }) +} + +func TestTransactionsHeapItem_detectIncorrectlyGuarded(t *testing.T) { + t.Run("is correctly guarded", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + session.IsIncorrectlyGuardedCalled = func(tx data.TransactionHandler) bool { + return false + } + + item, err := newTransactionsHeapItem(bunchOfTransactions{createTx([]byte("tx-1"), "alice", 42)}) + require.NoError(t, err) + + require.False(t, item.detectIncorrectlyGuarded(session)) + }) + + t.Run("is incorrectly guarded", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + session.IsIncorrectlyGuardedCalled = func(tx data.TransactionHandler) bool { + return true + } + + item, err := newTransactionsHeapItem(bunchOfTransactions{createTx([]byte("tx-1"), "alice", 42)}) + require.NoError(t, err) + + require.True(t, item.detectIncorrectlyGuarded(session)) + }) +} + +func TestTransactionsHeapItem_requestAccountStateIfNecessary(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + + noncesByAddress := session.AccountStateByAddress + noncesByAddress["alice"] = &types.AccountState{ + Nonce: 7, + Balance: big.NewInt(1000000000000000000), + } + noncesByAddress["bob"] = &types.AccountState{ + Nonce: 42, + Balance: big.NewInt(1000000000000000000), + } + + a := &transactionsHeapItem{ + sender: []byte("alice"), + } + + b := &transactionsHeapItem{ + sender: []byte("bob"), + } + + c := &transactionsHeapItem{} + + _ = a.requestAccountStateIfNecessary(session) + _ = b.requestAccountStateIfNecessary(session) + + require.Equal(t, 
uint64(7), a.senderState.Nonce) + require.Equal(t, uint64(42), b.senderState.Nonce) + require.Nil(t, c.senderState) +} diff --git a/txcache/txCache.go b/txcache/txCache.go index d938b976..df69c7ec 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -2,10 +2,11 @@ package txcache import ( "sync" + "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-storage-go/common" "github.com/multiversx/mx-chain-storage-go/monitoring" "github.com/multiversx/mx-chain-storage-go/types" ) @@ -14,25 +15,18 @@ var _ types.Cacher = (*TxCache)(nil) // TxCache represents a cache-like structure (it has a fixed capacity and implements an eviction mechanism) for holding transactions type TxCache struct { - name string - txListBySender *txListBySenderMap - txByHash *txByHashMap - config ConfigSourceMe - evictionMutex sync.Mutex - evictionJournal evictionJournal - evictionSnapshotOfSenders []*txListForSender - isEvictionInProgress atomic.Flag - numSendersSelected atomic.Counter - numSendersWithInitialGap atomic.Counter - numSendersWithMiddleGap atomic.Counter - numSendersInGracePeriod atomic.Counter - sweepingMutex sync.Mutex - sweepingListOfSenders []*txListForSender - mutTxOperation sync.Mutex + name string + txListBySender *txListBySenderMap + txByHash *txByHashMap + config ConfigSourceMe + host MempoolHost + evictionMutex sync.Mutex + isEvictionInProgress atomic.Flag + mutTxOperation sync.Mutex } // NewTxCache creates a new transaction cache -func NewTxCache(config ConfigSourceMe, txGasHandler TxGasHandler) (*TxCache, error) { +func NewTxCache(config ConfigSourceMe, host MempoolHost) (*TxCache, error) { log.Debug("NewTxCache", "config", config.String()) monitoring.MonitorNewCache(config.Name, uint64(config.NumBytesThreshold)) @@ -40,25 +34,22 @@ func NewTxCache(config ConfigSourceMe, txGasHandler TxGasHandler) (*TxCache, err if err != nil { 
return nil, err } - if check.IfNil(txGasHandler) { - return nil, common.ErrNilTxGasHandler + if check.IfNil(host) { + return nil, errNilMempoolHost } // Note: for simplicity, we use the same "numChunks" for both internal concurrent maps numChunks := config.NumChunks senderConstraintsObj := config.getSenderConstraints() - txFeeHelper := newFeeComputationHelper(txGasHandler.MinGasPrice(), txGasHandler.MinGasLimit(), txGasHandler.MinGasPriceForProcessing()) - scoreComputerObj := newDefaultScoreComputer(txFeeHelper) txCache := &TxCache{ - name: config.Name, - txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj, scoreComputerObj, txGasHandler, txFeeHelper), - txByHash: newTxByHashMap(numChunks), - config: config, - evictionJournal: evictionJournal{}, + name: config.Name, + txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj), + txByHash: newTxByHashMap(numChunks), + config: config, + host: host, } - txCache.initSweepable() return txCache, nil } @@ -69,13 +60,17 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { return false, false } + logAdd.Trace("TxCache.AddTx", "tx", tx.TxHash, "nonce", tx.Tx.GetNonce(), "sender", tx.Tx.GetSndAddr()) + + tx.precomputeFields(cache.host) + if cache.config.EvictionEnabled { - cache.doEviction() + _ = cache.doEviction() } cache.mutTxOperation.Lock() addedInByHash := cache.txByHash.addTx(tx) - addedInBySender, evicted := cache.txListBySender.addTx(tx) + addedInBySender, evicted := cache.txListBySender.addTxReturnEvicted(tx) cache.mutTxOperation.Unlock() if addedInByHash != addedInBySender { // This can happen when two go-routines concur to add the same transaction: @@ -83,11 +78,11 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { // - B won't add to "txByHash" (duplicate) // - B adds to "txListBySender" // - A won't add to "txListBySender" (duplicate) - log.Trace("TxCache.AddTx(): slight inconsistency detected:", "name", cache.name, "tx", 
tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) + logAdd.Debug("TxCache.AddTx: slight inconsistency detected:", "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) } if len(evicted) > 0 { - cache.monitorEvictionWrtSenderLimit(tx.Tx.GetSndAddr(), evicted) + logRemove.Trace("TxCache.AddTx with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } @@ -102,91 +97,62 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { return tx, ok } -// SelectTransactionsWithBandwidth selects a reasonably fair list of transactions to be included in the next miniblock -// It returns at most "numRequested" transactions -// Each sender gets the chance to give at least bandwidthPerSender gas worth of transactions, unless "numRequested" limit is reached before iterating over all senders -func (cache *TxCache) SelectTransactionsWithBandwidth(numRequested int, batchSizePerSender int, bandwidthPerSender uint64) []*WrappedTransaction { - result := cache.doSelectTransactions(numRequested, batchSizePerSender, bandwidthPerSender) - go cache.doAfterSelection() - return result -} - -func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender int, bandwidthPerSender uint64) []*WrappedTransaction { - stopWatch := cache.monitorSelectionStart() - - result := make([]*WrappedTransaction, numRequested) - resultFillIndex := 0 - resultIsFull := false - - snapshotOfSenders := cache.getSendersEligibleForSelection() +// SelectTransactions selects the best transactions to be included in the next miniblock. +// It returns up to "maxNum" transactions, with total gas <= "gasRequested". 
+func (cache *TxCache) SelectTransactions(session SelectionSession, gasRequested uint64, maxNum int, selectionLoopMaximumDuration time.Duration) ([]*WrappedTransaction, uint64) { + if check.IfNil(session) { + log.Error("TxCache.SelectTransactions", "err", errNilSelectionSession) + return nil, 0 + } - for pass := 0; !resultIsFull; pass++ { - copiedInThisPass := 0 + stopWatch := core.NewStopWatch() + stopWatch.Start("selection") - for _, txList := range snapshotOfSenders { - batchSizeWithScoreCoefficient := batchSizePerSender * int(txList.getLastComputedScore()+1) - // Reset happens on first pass only - isFirstBatch := pass == 0 - journal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSizeWithScoreCoefficient, bandwidthPerSender) - cache.monitorBatchSelectionEnd(journal) + logSelect.Debug( + "TxCache.SelectTransactions: begin", + "num bytes", cache.NumBytes(), + "num txs", cache.CountTx(), + "num senders", cache.CountSenders(), + ) - if isFirstBatch { - cache.collectSweepable(txList) - } + transactions, accumulatedGas := cache.doSelectTransactions(session, gasRequested, maxNum, selectionLoopMaximumDuration) - resultFillIndex += journal.copied - copiedInThisPass += journal.copied - resultIsFull = resultFillIndex == numRequested - if resultIsFull { - break - } - } + stopWatch.Stop("selection") - nothingCopiedThisPass := copiedInThisPass == 0 + logSelect.Debug( + "TxCache.SelectTransactions: end", + "duration", stopWatch.GetMeasurement("selection"), + "num txs selected", len(transactions), + "gas", accumulatedGas, + ) - // No more passes needed - if nothingCopiedThisPass { - break - } - } - - result = result[:resultFillIndex] - cache.monitorSelectionEnd(result, stopWatch) - return result -} + go cache.diagnoseCounters() + go displaySelectionOutcome(logSelect, "selection", transactions) -func (cache *TxCache) getSendersEligibleForSelection() []*txListForSender { - return cache.txListBySender.getSnapshotDescending() + return transactions, 
accumulatedGas } -func (cache *TxCache) doAfterSelection() { - cache.sweepSweepable() - cache.Diagnose(false) +func (cache *TxCache) getSenders() []*txListForSender { + return cache.txListBySender.getSenders() } -// RemoveTxByHash removes tx by hash +// RemoveTxByHash removes transactions with nonces lower or equal to the given transaction's nonce func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { cache.mutTxOperation.Lock() defer cache.mutTxOperation.Unlock() tx, foundInByHash := cache.txByHash.removeTx(string(txHash)) if !foundInByHash { + // Transaction might have been removed in the meantime. return false } - foundInBySender := cache.txListBySender.removeTx(tx) - if !foundInBySender { - // This condition can arise often at high load & eviction, when two go-routines concur to remove the same transaction: - // - A = remove transactions upon commit / final - // - B = remove transactions due to high load (eviction) - // - // - A reaches "RemoveTxByHash()", then "cache.txByHash.removeTx()". 
- // - B reaches "cache.txByHash.RemoveTxsBulk()" - // - B reaches "cache.txListBySender.RemoveSendersBulk()" - // - A reaches "cache.txListBySender.removeTx()", but sender does not exist anymore - log.Trace("TxCache.RemoveTxByHash(): slight inconsistency detected: !foundInBySender", "name", cache.name, "tx", txHash) + evicted := cache.txListBySender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx) + if len(evicted) > 0 { + cache.txByHash.RemoveTxsBulk(evicted) } + logRemove.Trace("TxCache.RemoveTxByHash", "tx", txHash, "len(evicted)", len(evicted)) return true } @@ -220,6 +186,17 @@ func (cache *TxCache) ForEachTransaction(function ForEachTransaction) { cache.txByHash.forEach(function) } +// getAllTransactions returns all transactions in the cache +func (cache *TxCache) getAllTransactions() []*WrappedTransaction { + transactions := make([]*WrappedTransaction, 0, cache.Len()) + + cache.ForEachTransaction(func(_ []byte, tx *WrappedTransaction) { + transactions = append(transactions, tx) + }) + + return transactions +} + // GetTransactionsPoolForSender returns the list of transaction hashes for the sender func (cache *TxCache) GetTransactionsPoolForSender(sender string) []*WrappedTransaction { listForSender, ok := cache.txListBySender.getListForSender(sender) @@ -227,13 +204,7 @@ func (cache *TxCache) GetTransactionsPoolForSender(sender string) []*WrappedTran return nil } - wrappedTxs := make([]*WrappedTransaction, listForSender.items.Len()) - for element, i := listForSender.items.Front(), 0; element != nil; element, i = element.Next(), i+1 { - tx := element.Value.(*WrappedTransaction) - wrappedTxs[i] = tx - } - - return wrappedTxs + return listForSender.getTxs() } // Clear clears the cache @@ -292,9 +263,9 @@ func (cache *TxCache) Keys() [][]byte { return cache.txByHash.keys() } -// MaxSize is not implemented +// MaxSize returns the maximum number of transactions that can be stored in the cache. 
+// See: https://github.com/multiversx/mx-chain-go/blob/v1.8.4/dataRetriever/txpool/shardedTxPool.go#L55 func (cache *TxCache) MaxSize() int { - // TODO: Should be analyzed if the returned value represents the max size of one cache in sharded cache configuration return int(cache.config.CountThreshold) } @@ -308,12 +279,6 @@ func (cache *TxCache) UnRegisterHandler(string) { log.Error("TxCache.UnRegisterHandler is not implemented") } -// NotifyAccountNonce should be called by external components (such as interceptors and transactions processor) -// in order to inform the cache about initial nonce gap phenomena -func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { - cache.txListBySender.notifyAccountNonce(accountKey, nonce) -} - // ImmunizeTxsAgainstEviction does nothing for this type of cache func (cache *TxCache) ImmunizeTxsAgainstEviction(_ [][]byte) { } diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 3a8b41c4..321f7b25 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -1,82 +1,72 @@ package txcache import ( + "crypto/rand" "errors" "fmt" "math" "sort" "sync" "testing" - "time" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-storage-go/common" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/multiversx/mx-chain-storage-go/types" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func Test_NewTxCache(t *testing.T) { config := ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, } - 
withEvictionConfig := ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - CountThreshold: math.MaxUint32, - NumSendersToPreemptivelyEvict: 100, - } - txGasHandler, _ := dummyParams() + host := txcachemocks.NewMempoolHostMock() - cache, err := NewTxCache(config, txGasHandler) + cache, err := NewTxCache(config, host) require.Nil(t, err) require.NotNil(t, cache) badConfig := config badConfig.Name = "" - requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.Name", txGasHandler) + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.Name", host) badConfig = config badConfig.NumChunks = 0 - requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumChunks", txGasHandler) + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumChunks", host) badConfig = config badConfig.NumBytesPerSenderThreshold = 0 - requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumBytesPerSenderThreshold", txGasHandler) + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumBytesPerSenderThreshold", host) badConfig = config badConfig.CountPerSenderThreshold = 0 - requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.CountPerSenderThreshold", txGasHandler) + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.CountPerSenderThreshold", host) badConfig = config cache, err = NewTxCache(config, nil) require.Nil(t, cache) - require.Equal(t, common.ErrNilTxGasHandler, err) + require.Equal(t, errNilMempoolHost, err) - badConfig = withEvictionConfig + badConfig = config badConfig.NumBytesThreshold = 0 - requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumBytesThreshold", txGasHandler) + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, 
"config.NumBytesThreshold", host) - badConfig = withEvictionConfig + badConfig = config badConfig.CountThreshold = 0 - requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.CountThreshold", txGasHandler) - - badConfig = withEvictionConfig - badConfig.NumSendersToPreemptivelyEvict = 0 - requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumSendersToPreemptivelyEvict", txGasHandler) + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.CountThreshold", host) } -func requireErrorOnNewTxCache(t *testing.T, config ConfigSourceMe, errExpected error, errPartialMessage string, txGasHandler TxGasHandler) { - cache, errReceived := NewTxCache(config, txGasHandler) +func requireErrorOnNewTxCache(t *testing.T, config ConfigSourceMe, errExpected error, errPartialMessage string, host MempoolHost) { + cache, errReceived := NewTxCache(config, host) require.Nil(t, cache) require.True(t, errors.Is(errReceived, errExpected)) require.Contains(t, errReceived.Error(), errPartialMessage) @@ -138,18 +128,18 @@ func Test_AddTx_AppliesSizeConstraintsPerSenderForNumTransactions(t *testing.T) func Test_AddTx_AppliesSizeConstraintsPerSenderForNumBytes(t *testing.T) { cache := newCacheToTest(1024, math.MaxUint32) - cache.AddTx(createTxWithParams([]byte("tx-alice-1"), "alice", 1, 128, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-alice-2"), "alice", 2, 512, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-alice-4"), "alice", 3, 256, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-1"), "bob", 1, 512, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 2, 513, 42, 42)) + cache.AddTx(createTx([]byte("tx-alice-1"), "alice", 1).withSize(128).withGasLimit(50000)) + cache.AddTx(createTx([]byte("tx-alice-2"), "alice", 2).withSize(512).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-alice-4"), "alice", 3).withSize(256).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-1"), "bob", 
1).withSize(512).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 2).withSize(513).withGasLimit(1500000)) require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) require.Equal(t, []string{"tx-bob-1"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) - cache.AddTx(createTxWithParams([]byte("tx-alice-3"), "alice", 3, 256, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 3, 512, 42, 42)) + cache.AddTx(createTx([]byte("tx-alice-3"), "alice", 3).withSize(256).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 3).withSize(512).withGasLimit(1500000)) require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) @@ -163,7 +153,12 @@ func Test_RemoveByTxHash(t *testing.T) { removed := cache.RemoveTxByHash([]byte("hash-1")) require.True(t, removed) - cache.Remove([]byte("hash-2")) + + removed = cache.RemoveTxByHash([]byte("hash-2")) + require.True(t, removed) + + removed = cache.RemoveTxByHash([]byte("hash-3")) + require.False(t, removed) foundTx, ok := cache.GetByTxHash([]byte("hash-1")) require.False(t, ok) @@ -172,6 +167,8 @@ func Test_RemoveByTxHash(t *testing.T) { foundTx, ok = cache.GetByTxHash([]byte("hash-2")) require.False(t, ok) require.Nil(t, foundTx) + + require.Equal(t, uint64(0), cache.CountTx()) } func Test_CountTx_And_Len(t *testing.T) { @@ -227,7 +224,7 @@ func Test_RemoveByTxHash_RemovesFromByHash_WhenMapsInconsistency(t *testing.T) { cache.AddTx(tx) // Cause an inconsistency between the two internal maps (theoretically possible in case of misbehaving eviction) - cache.txListBySender.removeTx(tx) + _ = cache.txListBySender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx) _ = cache.RemoveTxByHash(txHash) require.Equal(t, 0, 
cache.txByHash.backingMap.Count()) @@ -292,103 +289,12 @@ func Test_GetTransactionsPoolForSender(t *testing.T) { txs = cache.GetTransactionsPoolForSender(txSender2) require.Equal(t, wrappedTxs2, txs) - cache.RemoveTxByHash(txHashes2[0]) + _ = cache.RemoveTxByHash(txHashes2[0]) expectedTxs := wrappedTxs2[1:] txs = cache.GetTransactionsPoolForSender(txSender2) require.Equal(t, expectedTxs, txs) } -func Test_SelectTransactions_Dummy(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) - cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) - cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) - cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) - cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) - cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) - cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) - - sorted := cache.SelectTransactionsWithBandwidth(10, 2, math.MaxUint64) - require.Len(t, sorted, 8) -} - -func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { - cache := newUnconstrainedCacheToTest() - cache.AddTx(createTxWithGasLimit([]byte("hash-alice-4"), "alice", 4, 100000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-alice-3"), "alice", 3, 100000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-alice-2"), "alice", 2, 500000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-alice-1"), "alice", 1, 200000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-bob-7"), "bob", 7, 100000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-bob-6"), "bob", 6, 50000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-bob-5"), "bob", 5, 50000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-carol-1"), "carol", 1, 50000)) - - sorted := cache.SelectTransactionsWithBandwidth(5, 2, 200000) - numSelected := 1 + 1 + 3 // 1 alice, 1 carol, 3 bob - - require.Len(t, sorted, numSelected) -} - -func 
Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) - cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) - cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) - cache.AddTx(createTx([]byte("hash-alice-5"), "alice", 5)) - cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 42)) - cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 44)) - cache.AddTx(createTx([]byte("hash-bob-45"), "bob", 45)) - cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) - cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) - cache.AddTx(createTx([]byte("hash-carol-10"), "carol", 10)) - cache.AddTx(createTx([]byte("hash-carol-11"), "carol", 11)) - - numSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol - - sorted := cache.SelectTransactionsWithBandwidth(10, 2, math.MaxUint64) - require.Len(t, sorted, numSelected) -} - -func Test_SelectTransactions(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - // Add "nSenders" * "nTransactionsPerSender" transactions in the cache (in reversed nonce order) - nSenders := 1000 - nTransactionsPerSender := 100 - nTotalTransactions := nSenders * nTransactionsPerSender - nRequestedTransactions := math.MaxInt16 - - for senderTag := 0; senderTag < nSenders; senderTag++ { - sender := fmt.Sprintf("sender:%d", senderTag) - - for txNonce := nTransactionsPerSender; txNonce > 0; txNonce-- { - txHash := fmt.Sprintf("hash:%d:%d", senderTag, txNonce) - tx := createTx([]byte(txHash), sender, uint64(txNonce)) - cache.AddTx(tx) - } - } - - require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) - - sorted := cache.SelectTransactionsWithBandwidth(nRequestedTransactions, 2, math.MaxUint64) - - require.Len(t, sorted, core.MinInt(nRequestedTransactions, nTotalTransactions)) - - // Check order - nonces := make(map[string]uint64, nSenders) - for _, tx := range sorted { - nonce := tx.Tx.GetNonce() - sender := string(tx.Tx.GetSndAddr()) 
- previousNonce := nonces[sender] - - require.LessOrEqual(t, previousNonce, nonce) - nonces[sender] = nonce - } -} - func Test_Keys(t *testing.T) { cache := newUnconstrainedCacheToTest() @@ -406,44 +312,111 @@ func Test_Keys(t *testing.T) { } func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { - txGasHandler, _ := dummyParams() - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - CountThreshold: 100, - NumSendersToPreemptivelyEvict: 1, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - } + host := txcachemocks.NewMempoolHostMock() + + t.Run("numSenders = 11, numTransactions = 10, countThreshold = 100, numItemsToPreemptivelyEvict = 1", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 1, + } - // 11 * 10 - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) - addManyTransactionsWithUniformDistribution(cache, 11, 10) - require.LessOrEqual(t, cache.CountTx(), uint64(100)) - - config = ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - CountThreshold: 250000, - NumSendersToPreemptivelyEvict: 1, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - } + addManyTransactionsWithUniformDistribution(cache, 11, 10) - // 100 * 1000 - cache, err = NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + // Eviction happens if the cache capacity is already exceeded, + // but not 
if the capacity will be exceeded after the addition. + // Thus, for the given value of "NumItemsToPreemptivelyEvict", there will be "countThreshold" + 1 transactions in the cache. + require.Equal(t, 101, int(cache.CountTx())) + }) + + t.Run("numSenders = 3, numTransactions = 5, countThreshold = 4, numItemsToPreemptivelyEvict = 3", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 4, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 3, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 3, 5) + require.Equal(t, 3, int(cache.CountTx())) + }) + + t.Run("numSenders = 11, numTransactions = 10, countThreshold = 100, numItemsToPreemptivelyEvict = 2", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 2, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 11, 10) + require.Equal(t, 100, int(cache.CountTx())) + }) + + t.Run("numSenders = 100, numTransactions = 1000, countThreshold = 250000 (no eviction)", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 250000, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 1, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + 
addManyTransactionsWithUniformDistribution(cache, 100, 1000) + require.Equal(t, 100000, int(cache.CountTx())) + }) - addManyTransactionsWithUniformDistribution(cache, 100, 1000) - require.LessOrEqual(t, cache.CountTx(), uint64(250000)) + t.Run("numSenders = 1000, numTransactions = 500, countThreshold = 250000, NumItemsToPreemptivelyEvict = 50000", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 250000, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 10000, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 1000, 500) + require.Equal(t, 250000, int(cache.CountTx())) + }) } func Test_NotImplementedFunctions(t *testing.T) { @@ -457,7 +430,6 @@ func Test_NotImplementedFunctions(t *testing.T) { require.False(t, added) require.NotPanics(t, func() { cache.RegisterHandler(nil, "") }) - require.Zero(t, cache.MaxSize()) err := cache.Close() require.Nil(t, err) @@ -475,44 +447,6 @@ func Test_IsInterfaceNil(t *testing.T) { require.True(t, check.IfNil(thisIsNil)) } -func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - // Alice will quickly move between two score buckets (chunks) - cheapTransaction := createTxWithParams([]byte("alice-x-o"), "alice", 0, 128, 50000, 100*oneBillion) - expensiveTransaction := createTxWithParams([]byte("alice-x-1"), "alice", 1, 128, 50000, 300*oneBillion) - cache.AddTx(cheapTransaction) - cache.AddTx(expensiveTransaction) - - wg := sync.WaitGroup{} - - // Simulate selection - wg.Add(1) - go func() { - for i := 0; i < 100; i++ { - fmt.Println("Selection", i) - cache.SelectTransactionsWithBandwidth(100, 100, math.MaxUint64) - } - - wg.Done() - }() - - // Simulate add / remove transactions - 
wg.Add(1) - go func() { - for i := 0; i < 100; i++ { - fmt.Println("Add / remove", i) - cache.Remove([]byte("alice-x-1")) - cache.AddTx(expensiveTransaction) - } - - wg.Done() - }() - - timedOut := waitTimeout(&wg, 1*time.Second) - require.False(t, timedOut, "Timed out. Perhaps deadlock?") -} - func TestTxCache_TransactionIsAdded_EvenWhenInternalMapsAreInconsistent(t *testing.T) { cache := newUnconstrainedCacheToTest() @@ -529,7 +463,7 @@ func TestTxCache_TransactionIsAdded_EvenWhenInternalMapsAreInconsistent(t *testi cache.Clear() // Setup inconsistency: transaction already exists in map by sender, but not in map by hash - cache.txListBySender.addTx(createTx([]byte("alice-x"), "alice", 42)) + cache.txListBySender.addTxReturnEvicted(createTx([]byte("alice-x"), "alice", 42)) require.False(t, cache.Has([]byte("alice-x"))) ok, added = cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) @@ -543,7 +477,7 @@ func TestTxCache_TransactionIsAdded_EvenWhenInternalMapsAreInconsistent(t *testi func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *testing.T) { cache := newUnconstrainedCacheToTest() - // A lot of routines concur to add & remove THE FIRST transaction of a sender + // A lot of routines concur to add & remove a transaction for try := 0; try < 100; try++ { var wg sync.WaitGroup @@ -579,61 +513,108 @@ func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *t require.True(t, cache.Has([]byte("alice-x"))) require.Equal(t, []string{"alice-x"}, cache.getHashesForSender("alice")) } +} - cache.Clear() +func TestBenchmarkTxCache_addManyTransactionsWithSameNonce(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 419_430_400, + NumBytesPerSenderThreshold: 12_288_000, + CountThreshold: 300_000, + CountPerSenderThreshold: 5_000, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 50_000, + } - // A lot of routines concur to add & remove subsequent transactions of a sender - 
cache.AddTx(createTx([]byte("alice-w"), "alice", 41)) + host := txcachemocks.NewMempoolHostMock() + randomBytes := make([]byte, math.MaxUint16*hashLength) + _, err := rand.Read(randomBytes) + require.Nil(t, err) - for try := 0; try < 100; try++ { - var wg sync.WaitGroup + sw := core.NewStopWatch() - for i := 0; i < 50; i++ { - wg.Add(1) - go func() { - cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) - _ = cache.RemoveTxByHash([]byte("alice-x")) - wg.Done() - }() + t.Run("numTransactions = 100 (worst case)", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + numTransactions := 100 + + sw.Start(t.Name()) + + for i := 0; i < numTransactions; i++ { + cache.AddTx(createTx(randomBytes[i*hashLength:(i+1)*hashLength], "alice", 42).withGasPrice(oneBillion + uint64(i))) } - wg.Wait() + sw.Stop(t.Name()) - // In this case, there is the slight chance that: - // go A: add to map by hash - // go B: won't add in map by hash, already there - // go A: add to map by sender (existing sender/list) - // go A: remove from map by hash - // go A: remove from map by sender - // go B: add to map by sender (existing sender/list) - // go B: can't remove from map by hash, not found - // go B: won't remove from map by sender (sender unknown) + require.Equal(t, numTransactions, int(cache.CountTx())) + }) - // Therefore, Alice may have one or two transactions in her list. 
- require.Equal(t, 1, cache.txByHash.backingMap.Count()) - expectedTxsConsistent := []string{"alice-w"} - expectedTxsSlightlyInconsistent := []string{"alice-w", "alice-x"} - actualTxs := cache.getHashesForSender("alice") - require.True(t, assert.ObjectsAreEqual(expectedTxsConsistent, actualTxs) || assert.ObjectsAreEqual(expectedTxsSlightlyInconsistent, actualTxs)) + t.Run("numTransactions = 1000 (worst case)", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) - // A further addition works: - cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) - require.True(t, cache.Has([]byte("alice-w"))) - require.True(t, cache.Has([]byte("alice-x"))) - require.Equal(t, []string{"alice-w", "alice-x"}, cache.getHashesForSender("alice")) + numTransactions := 1000 + + sw.Start(t.Name()) + + for i := 0; i < numTransactions; i++ { + cache.AddTx(createTx(randomBytes[i*hashLength:(i+1)*hashLength], "alice", 42).withGasPrice(oneBillion + uint64(i))) + } + + sw.Stop(t.Name()) + + require.Equal(t, numTransactions, int(cache.CountTx())) + }) + + t.Run("numTransactions = 5_000 (worst case)", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + numTransactions := 5_000 + + sw.Start(t.Name()) + + for i := 0; i < numTransactions; i++ { + cache.AddTx(createTx(randomBytes[i*hashLength:(i+1)*hashLength], "alice", 42).withGasPrice(oneBillion + uint64(i))) + } + + sw.Stop(t.Name()) + + require.Equal(t, numTransactions, int(cache.CountTx())) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) } - cache.Clear() + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.000117s (TestBenchmarkTxCache_addManyTransactionsWithSameNonce/numTransactions_=_100) + // 0.003117s 
(TestBenchmarkTxCache_addManyTransactionsWithSameNonce/numTransactions_=_1000) + // 0.056481s (TestBenchmarkTxCache_addManyTransactionsWithSameNonce/numTransactions_=_5_000) } func newUnconstrainedCacheToTest() *TxCache { - txGasHandler, _ := dummyParams() + host := txcachemocks.NewMempoolHostMock() + cache, err := NewTxCache(ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - }, txGasHandler) + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + }, host) if err != nil { panic(fmt.Sprintf("newUnconstrainedCacheToTest(): %s", err)) } @@ -642,13 +623,18 @@ func newUnconstrainedCacheToTest() *TxCache { } func newCacheToTest(numBytesPerSenderThreshold uint32, countPerSenderThreshold uint32) *TxCache { - txGasHandler, _ := dummyParams() + host := txcachemocks.NewMempoolHostMock() + cache, err := NewTxCache(ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: numBytesPerSenderThreshold, - CountPerSenderThreshold: countPerSenderThreshold, - }, txGasHandler) + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: numBytesPerSenderThreshold, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: countPerSenderThreshold, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + }, host) if err != nil { panic(fmt.Sprintf("newCacheToTest(): %s", err)) } diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index ccda1ce0..50993268 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -7,16 +7,11 @@ import ( "github.com/multiversx/mx-chain-storage-go/txcache/maps" ) -const numberOfScoreChunks = uint32(100) - // 
txListBySenderMap is a map-like structure for holding and accessing transactions by sender type txListBySenderMap struct { - backingMap *maps.BucketSortedMap + backingMap *maps.ConcurrentMap senderConstraints senderConstraints counter atomic.Counter - scoreComputer scoreComputer - txGasHandler TxGasHandler - txFeeHelper feeHelper mutex sync.Mutex } @@ -24,26 +19,23 @@ type txListBySenderMap struct { func newTxListBySenderMap( nChunksHint uint32, senderConstraints senderConstraints, - scoreComputer scoreComputer, - txGasHandler TxGasHandler, - txFeeHelper feeHelper, ) *txListBySenderMap { - backingMap := maps.NewBucketSortedMap(nChunksHint, numberOfScoreChunks) + backingMap := maps.NewConcurrentMap(nChunksHint) return &txListBySenderMap{ backingMap: backingMap, senderConstraints: senderConstraints, - scoreComputer: scoreComputer, - txGasHandler: txGasHandler, - txFeeHelper: txFeeHelper, } } -// addTx adds a transaction in the map, in the corresponding list (selected by its sender) -func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) (bool, [][]byte) { +// addTxReturnEvicted adds a transaction in the map, in the corresponding list (selected by its sender). +// This function returns a boolean indicating whether the transaction was added, and a slice of evicted transaction hashes (upon applying sender-level constraints). 
+func (txMap *txListBySenderMap) addTxReturnEvicted(tx *WrappedTransaction) (bool, [][]byte) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) - return listForSender.AddTx(tx, txMap.txGasHandler, txMap.txFeeHelper) + + added, evictedHashes := listForSender.AddTx(tx) + return added, evictedHashes } // getOrAddListForSender gets or lazily creates a list (using double-checked locking pattern) @@ -75,43 +67,41 @@ func (txMap *txListBySenderMap) getListForSender(sender string) (*txListForSende } func (txMap *txListBySenderMap) addSender(sender string) *txListForSender { - listForSender := newTxListForSender(sender, &txMap.senderConstraints, txMap.notifyScoreChange) + listForSender := newTxListForSender(sender, &txMap.senderConstraints) - txMap.backingMap.Set(listForSender) + txMap.backingMap.Set(sender, listForSender) txMap.counter.Increment() return listForSender } -// This function should only be called in a critical section managed by a "txListForSender" -func (txMap *txListBySenderMap) notifyScoreChange(txList *txListForSender, scoreParams senderScoreParams) { - score := txMap.scoreComputer.computeScore(scoreParams) - txList.setLastComputedScore(score) - txMap.backingMap.NotifyScoreChange(txList, score) -} - -// removeTx removes a transaction from the map -func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { +// removeTransactionsWithLowerOrEqualNonceReturnHashes removes transactions with nonces lower or equal to the given transaction's nonce. +func (txMap *txListBySenderMap) removeTransactionsWithLowerOrEqualNonceReturnHashes(tx *WrappedTransaction) [][]byte { sender := string(tx.Tx.GetSndAddr()) listForSender, ok := txMap.getListForSender(sender) if !ok { // This happens when a sender whose transactions were selected for processing is removed from cache in the meantime. // When it comes to remove one if its transactions due to processing (commited / finalized block), they don't exist in cache anymore. 
- log.Trace("txListBySenderMap.removeTx() detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) - return false + log.Trace("txListBySenderMap.removeTxReturnEvicted detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) + return nil } - isFound := listForSender.RemoveTx(tx) - isEmpty := listForSender.IsEmpty() - if isEmpty { - txMap.removeSender(sender) - } + evicted := listForSender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx.Tx.GetNonce()) + txMap.removeSenderIfEmpty(listForSender) + return evicted +} - return isFound +func (txMap *txListBySenderMap) removeSenderIfEmpty(listForSender *txListForSender) { + if listForSender.IsEmpty() { + txMap.removeSender(listForSender.sender) + } } +// Important note: this doesn't remove the transactions from txCache.txByHash. That is the responsibility of the caller (of this function). func (txMap *txListBySenderMap) removeSender(sender string) bool { + logRemove.Trace("txListBySenderMap.removeSender", "sender", sender) + _, removed := txMap.backingMap.Remove(sender) if removed { txMap.counter.Decrement() @@ -133,36 +123,28 @@ func (txMap *txListBySenderMap) RemoveSendersBulk(senders []string) uint32 { return numRemoved } -func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint64) { +// removeTransactionsWithHigherOrEqualNonce removes transactions with nonces higher or equal to the given nonce. +// Useful for the eviction flow. 
+func (txMap *txListBySenderMap) removeTransactionsWithHigherOrEqualNonce(accountKey []byte, nonce uint64) { sender := string(accountKey) listForSender, ok := txMap.getListForSender(sender) if !ok { return } - listForSender.notifyAccountNonce(nonce) + listForSender.removeTransactionsWithHigherOrEqualNonce(nonce) + txMap.removeSenderIfEmpty(listForSender) } -func (txMap *txListBySenderMap) getSnapshotAscending() []*txListForSender { - itemsSnapshot := txMap.backingMap.GetSnapshotAscending() - listsSnapshot := make([]*txListForSender, len(itemsSnapshot)) - - for i, item := range itemsSnapshot { - listsSnapshot[i] = item.(*txListForSender) - } +func (txMap *txListBySenderMap) getSenders() []*txListForSender { + senders := make([]*txListForSender, 0, txMap.counter.Get()) - return listsSnapshot -} - -func (txMap *txListBySenderMap) getSnapshotDescending() []*txListForSender { - itemsSnapshot := txMap.backingMap.GetSnapshotDescending() - listsSnapshot := make([]*txListForSender, len(itemsSnapshot)) - - for i, item := range itemsSnapshot { - listsSnapshot[i] = item.(*txListForSender) - } + txMap.backingMap.IterCb(func(key string, item interface{}) { + listForSender := item.(*txListForSender) + senders = append(senders, listForSender) + }) - return listsSnapshot + return senders } func (txMap *txListBySenderMap) clear() { diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index d3393225..b7f8998d 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -1,7 +1,6 @@ package txcache import ( - "fmt" "math" "sync" "testing" @@ -12,38 +11,38 @@ import ( func TestSendersMap_AddTx_IncrementsCounter(t *testing.T) { myMap := newSendersMapToTest() - myMap.addTx(createTx([]byte("a"), "alice", uint64(1))) - myMap.addTx(createTx([]byte("aa"), "alice", uint64(2))) - myMap.addTx(createTx([]byte("b"), "bob", uint64(1))) + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", 1)) + 
myMap.addTxReturnEvicted(createTx([]byte("aa"), "alice", 2)) + myMap.addTxReturnEvicted(createTx([]byte("b"), "bob", 1)) // There are 2 senders require.Equal(t, int64(2), myMap.counter.Get()) } -func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T) { +func TestSendersMap_removeTransactionsWithLowerOrEqualNonceReturnHashes_alsoRemovesSenderWhenNoTransactionLeft(t *testing.T) { myMap := newSendersMapToTest() - txAlice1 := createTx([]byte("a1"), "alice", uint64(1)) - txAlice2 := createTx([]byte("a2"), "alice", uint64(2)) - txBob := createTx([]byte("b"), "bob", uint64(1)) + txAlice1 := createTx([]byte("a1"), "alice", 1) + txAlice2 := createTx([]byte("a2"), "alice", 2) + txBob := createTx([]byte("b"), "bob", 1) - myMap.addTx(txAlice1) - myMap.addTx(txAlice2) - myMap.addTx(txBob) + myMap.addTxReturnEvicted(txAlice1) + myMap.addTxReturnEvicted(txAlice2) + myMap.addTxReturnEvicted(txBob) require.Equal(t, int64(2), myMap.counter.Get()) require.Equal(t, uint64(2), myMap.testGetListForSender("alice").countTx()) require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) - myMap.removeTx(txAlice1) + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txAlice1) require.Equal(t, int64(2), myMap.counter.Get()) require.Equal(t, uint64(1), myMap.testGetListForSender("alice").countTx()) require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) - myMap.removeTx(txAlice2) + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txAlice2) // All alice's transactions have been removed now require.Equal(t, int64(1), myMap.counter.Get()) - myMap.removeTx(txBob) + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txBob) // Also Bob has no more transactions require.Equal(t, int64(0), myMap.counter.Get()) } @@ -51,7 +50,7 @@ func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T func TestSendersMap_RemoveSender(t *testing.T) { myMap := newSendersMapToTest() - 
myMap.addTx(createTx([]byte("a"), "alice", uint64(1))) + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", 1)) require.Equal(t, int64(1), myMap.counter.Get()) // Bob is unknown @@ -86,9 +85,9 @@ func TestSendersMap_RemoveSendersBulk_ConcurrentWithAddition(t *testing.T) { wg.Add(100) for i := 0; i < 100; i++ { go func(i int) { - myMap.addTx(createTx([]byte("a"), "alice", uint64(i))) - myMap.addTx(createTx([]byte("b"), "bob", uint64(i))) - myMap.addTx(createTx([]byte("c"), "carol", uint64(i))) + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", uint64(i))) + myMap.addTxReturnEvicted(createTx([]byte("b"), "bob", uint64(i))) + myMap.addTxReturnEvicted(createTx([]byte("c"), "carol", uint64(i))) wg.Done() }(i) @@ -97,88 +96,9 @@ func TestSendersMap_RemoveSendersBulk_ConcurrentWithAddition(t *testing.T) { wg.Wait() } -func TestSendersMap_notifyAccountNonce(t *testing.T) { - myMap := newSendersMapToTest() - - // Discarded notification, since sender not added yet - myMap.notifyAccountNonce([]byte("alice"), 42) - - myMap.addTx(createTx([]byte("tx-42"), "alice", uint64(42))) - alice, _ := myMap.getListForSender("alice") - require.Equal(t, uint64(0), alice.accountNonce.Get()) - require.False(t, alice.accountNonceKnown.IsSet()) - - myMap.notifyAccountNonce([]byte("alice"), 42) - require.Equal(t, uint64(42), alice.accountNonce.Get()) - require.True(t, alice.accountNonceKnown.IsSet()) -} - -func BenchmarkSendersMap_GetSnapshotAscending(b *testing.B) { - if b.N > 10 { - fmt.Println("impractical benchmark: b.N too high") - return - } - - numSenders := 250000 - maps := make([]*txListBySenderMap, b.N) - for i := 0; i < b.N; i++ { - maps[i] = createTxListBySenderMap(numSenders) - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - measureWithStopWatch(b, func() { - snapshot := maps[i].getSnapshotAscending() - require.Len(b, snapshot, numSenders) - }) - } -} - -func TestSendersMap_GetSnapshots_NoPanic_IfAlsoConcurrentMutation(t *testing.T) { - myMap := 
newSendersMapToTest() - - var wg sync.WaitGroup - - for i := 0; i < 100; i++ { - wg.Add(2) - - go func() { - for j := 0; j < 100; j++ { - myMap.getSnapshotAscending() - } - - wg.Done() - }() - - go func() { - for j := 0; j < 1000; j++ { - sender := fmt.Sprintf("Sender-%d", j) - myMap.removeSender(sender) - } - - wg.Done() - }() - } - - wg.Wait() -} - -func createTxListBySenderMap(numSenders int) *txListBySenderMap { - myMap := newSendersMapToTest() - for i := 0; i < numSenders; i++ { - sender := fmt.Sprintf("Sender-%d", i) - hash := createFakeTxHash([]byte(sender), 1) - myMap.addTx(createTx(hash, sender, uint64(1))) - } - - return myMap -} - func newSendersMapToTest() *txListBySenderMap { - txGasHandler, txFeeHelper := dummyParams() return newTxListBySenderMap(4, senderConstraints{ maxNumBytes: math.MaxUint32, maxNumTxs: math.MaxUint32, - }, &disabledScoreComputer{}, txGasHandler, txFeeHelper) + }) } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index a12a91d1..67e4e8b6 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -6,50 +6,30 @@ import ( "sync" "github.com/multiversx/mx-chain-core-go/core/atomic" - "github.com/multiversx/mx-chain-storage-go/common" - "github.com/multiversx/mx-chain-storage-go/txcache/maps" ) -var _ maps.BucketSortedMapItem = (*txListForSender)(nil) - // txListForSender represents a sorted list of transactions of a particular sender type txListForSender struct { - copyDetectedGap bool - lastComputedScore atomic.Uint32 - accountNonceKnown atomic.Flag - sweepable atomic.Flag - copyPreviousNonce uint64 - sender string - items *list.List - copyBatchIndex *list.Element - constraints *senderConstraints - scoreChunk *maps.MapChunk - accountNonce atomic.Uint64 - totalBytes atomic.Counter - totalGas atomic.Counter - totalFeeScore atomic.Counter - numFailedSelections atomic.Counter - onScoreChange scoreChangeCallback - - scoreChunkMutex sync.RWMutex - mutex sync.RWMutex -} + sender string + items 
*list.List + totalBytes atomic.Counter + constraints *senderConstraints -type scoreChangeCallback func(value *txListForSender, scoreParams senderScoreParams) + mutex sync.RWMutex +} // newTxListForSender creates a new (sorted) list of transactions -func newTxListForSender(sender string, constraints *senderConstraints, onScoreChange scoreChangeCallback) *txListForSender { +func newTxListForSender(sender string, constraints *senderConstraints) *txListForSender { return &txListForSender{ - items: list.New(), - sender: sender, - constraints: constraints, - onScoreChange: onScoreChange, + items: list.New(), + sender: sender, + constraints: constraints, } } // AddTx adds a transaction in sender's list // This is a "sorted" insert -func (listForSender *txListForSender) AddTx(tx *WrappedTransaction, gasHandler TxGasHandler, txFeeHelper feeHelper) (bool, [][]byte) { +func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) (bool, [][]byte) { // We don't allow concurrent interceptor goroutines to mutate a given sender's list listForSender.mutex.Lock() defer listForSender.mutex.Unlock() @@ -65,9 +45,9 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction, gasHandler T listForSender.items.InsertAfter(tx, insertionPlace) } - listForSender.onAddedTransaction(tx, gasHandler, txFeeHelper) + listForSender.onAddedTransaction(tx) + evicted := listForSender.applySizeConstraints() - listForSender.triggerScoreChange() return true, evicted } @@ -101,55 +81,53 @@ func (listForSender *txListForSender) isCapacityExceeded() bool { return tooManyBytes || tooManyTxs } -func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction, gasHandler TxGasHandler, txFeeHelper feeHelper) { +func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction) { listForSender.totalBytes.Add(tx.Size) - listForSender.totalGas.Add(int64(estimateTxGas(tx))) - listForSender.totalFeeScore.Add(int64(estimateTxFeeScore(tx, gasHandler, txFeeHelper))) } -func 
(listForSender *txListForSender) triggerScoreChange() { - scoreParams := listForSender.getScoreParams() - listForSender.onScoreChange(listForSender, scoreParams) -} - -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) getScoreParams() senderScoreParams { - fee := listForSender.totalFeeScore.GetUint64() - gas := listForSender.totalGas.GetUint64() - count := listForSender.countTx() - - return senderScoreParams{count: count, feeScore: fee, gas: gas} -} - -// This function should only be used in critical section (listForSender.mutex) +// This function should only be used in critical section (listForSender.mutex). +// When searching for the insertion place, we consider the following rules: +// - transactions are sorted by nonce in ascending order. +// - transactions with the same nonce are sorted by gas price in descending order. +// - transactions with the same nonce and gas price are sorted by hash in ascending order. +// - duplicates are not allowed. +// - "PPU" measurement is not relevant in this context. Competition among transactions of the same sender (and nonce) is based on gas price. func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTransaction) (*list.Element, error) { incomingNonce := incomingTx.Tx.GetNonce() incomingGasPrice := incomingTx.Tx.GetGasPrice() + // The loop iterates from the back to the front of the list. + // Starting from the back allows the function to quickly find the insertion point for transactions with higher nonces, which are more likely to be added. 
for element := listForSender.items.Back(); element != nil; element = element.Prev() { currentTx := element.Value.(*WrappedTransaction) currentTxNonce := currentTx.Tx.GetNonce() currentTxGasPrice := currentTx.Tx.GetGasPrice() - if incomingTx.sameAs(currentTx) { - // The incoming transaction will be discarded - return nil, common.ErrItemAlreadyInCache - } - if currentTxNonce == incomingNonce { if currentTxGasPrice > incomingGasPrice { - // The incoming transaction will be placed right after the existing one, which has same nonce but higher price. - // If the nonces are the same, but the incoming gas price is higher or equal, the search loop continues. + // The case of same nonce, lower gas price. + // We've found an insertion place: right after "element". return element, nil } + if currentTxGasPrice == incomingGasPrice { - // The incoming transaction will be placed right after the existing one, which has same nonce and the same price. - // (but different hash, because of some other fields like receiver, value or data) - // This will order out the transactions having the same nonce and gas price - if bytes.Compare(currentTx.TxHash, incomingTx.TxHash) < 0 { + // The case of same nonce, same gas price. + + comparison := bytes.Compare(currentTx.TxHash, incomingTx.TxHash) + if comparison == 0 { + // The incoming transaction will be discarded, since it's already in the cache. + return nil, errItemAlreadyInCache + } + if comparison < 0 { + // We've found an insertion place: right after "element". return element, nil } + + // We allow the search loop to continue, since the incoming transaction has a "higher hash". } + + // We allow the search loop to continue, since the incoming transaction has a higher gas price. + continue } if currentTxNonce < incomingNonce { @@ -157,56 +135,17 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran // thus the incoming transaction will be placed right after this one. 
return element, nil } + + // We allow the search loop to continue, since the incoming transaction has a higher nonce. } // The incoming transaction will be inserted at the head of the list. return nil, nil } -// RemoveTx removes a transaction from the sender's list -func (listForSender *txListForSender) RemoveTx(tx *WrappedTransaction) bool { - // We don't allow concurrent interceptor goroutines to mutate a given sender's list - listForSender.mutex.Lock() - defer listForSender.mutex.Unlock() - - marker := listForSender.findListElementWithTx(tx) - isFound := marker != nil - if isFound { - listForSender.items.Remove(marker) - listForSender.onRemovedListElement(marker) - listForSender.triggerScoreChange() - } - - return isFound -} - func (listForSender *txListForSender) onRemovedListElement(element *list.Element) { - value := element.Value.(*WrappedTransaction) - - listForSender.totalBytes.Subtract(value.Size) - listForSender.totalGas.Subtract(int64(estimateTxGas(value))) - listForSender.totalFeeScore.Subtract(int64(value.TxFeeScoreNormalized)) -} - -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) findListElementWithTx(txToFind *WrappedTransaction) *list.Element { - txToFindHash := txToFind.TxHash - txToFindNonce := txToFind.Tx.GetNonce() - - for element := listForSender.items.Front(); element != nil; element = element.Next() { - value := element.Value.(*WrappedTransaction) - - if bytes.Equal(value.TxHash, txToFindHash) { - return element - } - - // Optimization: stop search at this point, since the list is sorted by nonce - if value.Tx.GetNonce() > txToFindNonce { - break - } - } - - return nil + tx := element.Value.(*WrappedTransaction) + listForSender.totalBytes.Subtract(tx.Size) } // IsEmpty checks whether the list is empty @@ -214,84 +153,31 @@ func (listForSender *txListForSender) IsEmpty() bool { return listForSender.countTxWithLock() == 0 } -// selectBatchTo copies a batch (usually small) of 
transactions of a limited gas bandwidth and limited number of transactions to a destination slice -// It also updates the internal state used for copy operations -func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destination []*WrappedTransaction, batchSize int, bandwidth uint64) batchSelectionJournal { - // We can't read from multiple goroutines at the same time - // And we can't mutate the sender's list while reading it - listForSender.mutex.Lock() - defer listForSender.mutex.Unlock() - - journal := batchSelectionJournal{} - - // Reset the internal state used for copy operations - if isFirstBatch { - hasInitialGap := listForSender.verifyInitialGapOnSelectionStart() - - listForSender.copyBatchIndex = listForSender.items.Front() - listForSender.copyPreviousNonce = 0 - listForSender.copyDetectedGap = hasInitialGap - - journal.isFirstBatch = true - journal.hasInitialGap = hasInitialGap - } - - element := listForSender.copyBatchIndex - availableSpace := len(destination) - detectedGap := listForSender.copyDetectedGap - previousNonce := listForSender.copyPreviousNonce - - // If a nonce gap is detected, no transaction is returned in this read. - // There is an exception though: if this is the first read operation for the sender in the current selection process and the sender is in the grace period, - // then one transaction will be returned. But subsequent reads for this sender will return nothing. 
- if detectedGap { - if isFirstBatch && listForSender.isInGracePeriod() { - journal.isGracePeriod = true - batchSize = 1 - } else { - batchSize = 0 - } - } +// getTxs returns the transactions of the sender +func (listForSender *txListForSender) getTxs() []*WrappedTransaction { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() - copiedBandwidth := uint64(0) - lastTxGasLimit := uint64(0) - copied := 0 - for ; ; copied, copiedBandwidth = copied+1, copiedBandwidth+lastTxGasLimit { - if element == nil || copied == batchSize || copied == availableSpace || copiedBandwidth >= bandwidth { - break - } + result := make([]*WrappedTransaction, 0, listForSender.countTx()) + for element := listForSender.items.Front(); element != nil; element = element.Next() { value := element.Value.(*WrappedTransaction) - txNonce := value.Tx.GetNonce() - lastTxGasLimit = value.Tx.GetGasLimit() - - if previousNonce > 0 && txNonce > previousNonce+1 { - listForSender.copyDetectedGap = true - journal.hasMiddleGap = true - break - } - - destination[copied] = value - element = element.Next() - previousNonce = txNonce + result = append(result, value) } - listForSender.copyBatchIndex = element - listForSender.copyPreviousNonce = previousNonce - journal.copied = copied - return journal + return result } -// getTxHashes returns the hashes of transactions in the list -func (listForSender *txListForSender) getTxHashes() [][]byte { +// getTxsReversed returns the transactions of the sender, in reverse nonce order +func (listForSender *txListForSender) getTxsReversed() []*WrappedTransaction { listForSender.mutex.RLock() defer listForSender.mutex.RUnlock() - result := make([][]byte, 0, listForSender.countTx()) + result := make([]*WrappedTransaction, 0, listForSender.countTx()) - for element := listForSender.items.Front(); element != nil; element = element.Next() { + for element := listForSender.items.Back(); element != nil; element = element.Prev() { value := 
element.Value.(*WrappedTransaction) - result = append(result, value.TxHash) + result = append(result, value) } return result @@ -308,105 +194,49 @@ func (listForSender *txListForSender) countTxWithLock() uint64 { return uint64(listForSender.items.Len()) } -func approximatelyCountTxInLists(lists []*txListForSender) uint64 { - count := uint64(0) - - for _, listForSender := range lists { - count += listForSender.countTxWithLock() - } - - return count -} - -// notifyAccountNonce does not update the "numFailedSelections" counter, -// since the notification comes at a time when we cannot actually detect whether the initial gap still exists or it was resolved. -func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) { - listForSender.accountNonce.Set(nonce) - _ = listForSender.accountNonceKnown.SetReturningPrevious() -} +// removeTransactionsWithLowerOrEqualNonceReturnHashes removes transactions with nonces lower or equal to the given nonce +func (listForSender *txListForSender) removeTransactionsWithLowerOrEqualNonceReturnHashes(targetNonce uint64) [][]byte { + evictedTxHashes := make([][]byte, 0) -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) verifyInitialGapOnSelectionStart() bool { - hasInitialGap := listForSender.hasInitialGap() + // We don't allow concurrent goroutines to mutate a given sender's list + listForSender.mutex.Lock() + defer listForSender.mutex.Unlock() - if hasInitialGap { - listForSender.numFailedSelections.Increment() + for element := listForSender.items.Front(); element != nil; { + tx := element.Value.(*WrappedTransaction) + txNonce := tx.Tx.GetNonce() - if listForSender.isGracePeriodExceeded() { - _ = listForSender.sweepable.SetReturningPrevious() + if txNonce > targetNonce { + break } - } else { - listForSender.numFailedSelections.Reset() - } - - return hasInitialGap -} -// hasInitialGap should only be called at tx selection time, since only then we can detect 
initial gaps with certainty -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) hasInitialGap() bool { - accountNonceKnown := listForSender.accountNonceKnown.IsSet() - if !accountNonceKnown { - return false - } - - firstTx := listForSender.getLowestNonceTx() - if firstTx == nil { - return false - } - - firstTxNonce := firstTx.Tx.GetNonce() - accountNonce := listForSender.accountNonce.Get() - hasGap := firstTxNonce > accountNonce - return hasGap -} + nextElement := element.Next() + _ = listForSender.items.Remove(element) + listForSender.onRemovedListElement(element) + element = nextElement -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) getLowestNonceTx() *WrappedTransaction { - front := listForSender.items.Front() - if front == nil { - return nil + // Keep track of removed transactions + evictedTxHashes = append(evictedTxHashes, tx.TxHash) } - value := front.Value.(*WrappedTransaction) - return value -} - -// isInGracePeriod returns whether the sender is grace period due to a number of failed selections -func (listForSender *txListForSender) isInGracePeriod() bool { - numFailedSelections := listForSender.numFailedSelections.Get() - return numFailedSelections >= senderGracePeriodLowerBound && numFailedSelections <= senderGracePeriodUpperBound -} - -func (listForSender *txListForSender) isGracePeriodExceeded() bool { - numFailedSelections := listForSender.numFailedSelections.Get() - return numFailedSelections > senderGracePeriodUpperBound -} - -func (listForSender *txListForSender) getLastComputedScore() uint32 { - return listForSender.lastComputedScore.Get() -} - -func (listForSender *txListForSender) setLastComputedScore(score uint32) { - listForSender.lastComputedScore.Set(score) + return evictedTxHashes } -// GetKey returns the key -func (listForSender *txListForSender) GetKey() string { - return listForSender.sender -} +func 
(listForSender *txListForSender) removeTransactionsWithHigherOrEqualNonce(givenNonce uint64) { + listForSender.mutex.Lock() + defer listForSender.mutex.Unlock() -// GetScoreChunk returns the score chunk the sender is currently in -func (listForSender *txListForSender) GetScoreChunk() *maps.MapChunk { - listForSender.scoreChunkMutex.RLock() - defer listForSender.scoreChunkMutex.RUnlock() + for element := listForSender.items.Back(); element != nil; { + tx := element.Value.(*WrappedTransaction) + txNonce := tx.Tx.GetNonce() - return listForSender.scoreChunk -} + if txNonce < givenNonce { + break + } -// SetScoreChunk returns the score chunk the sender is currently in -func (listForSender *txListForSender) SetScoreChunk(scoreChunk *maps.MapChunk) { - listForSender.scoreChunkMutex.Lock() - listForSender.scoreChunk = scoreChunk - listForSender.scoreChunkMutex.Unlock() + prevElement := element.Prev() + _ = listForSender.items.Remove(element) + listForSender.onRemovedListElement(element) + element = prevElement + } } diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 523d6bd5..da4bbfad 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -2,49 +2,45 @@ package txcache import ( "math" + "sync" "testing" - "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) func TestListForSender_AddTx_Sorts(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - list.AddTx(createTx([]byte("a"), ".", 1), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("c"), ".", 3), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("d"), ".", 4), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("b"), ".", 2), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("a"), ".", 1)) + list.AddTx(createTx([]byte("c"), ".", 3)) + list.AddTx(createTx([]byte("d"), ".", 
4)) + list.AddTx(createTx([]byte("b"), ".", 2)) require.Equal(t, []string{"a", "b", "c", "d"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 42, 100), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 42, 99), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("d"), ".", 2, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("a"), ".", 1)) + list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(1.2 * oneBillion)) + list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(1.1 * oneBillion)) + list.AddTx(createTx([]byte("d"), ".", 2)) + list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(1.3 * oneBillion)) require.Equal(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 42, 100), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 42, 100), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("d"), ".", 3, 128, 42, 98), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("f"), ".", 2, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("g"), ".", 3, 128, 42, 99), txGasHandler, txFeeHelper) + 
list.AddTx(createTx([]byte("a"), ".", 1).withGasPrice(oneBillion)) + list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(3 * oneBillion)) + list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(3 * oneBillion)) + list.AddTx(createTx([]byte("d"), ".", 3).withGasPrice(2 * oneBillion)) + list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(3.5 * oneBillion)) + list.AddTx(createTx([]byte("f"), ".", 2).withGasPrice(oneBillion)) + list.AddTx(createTx([]byte("g"), ".", 3).withGasPrice(2.5 * oneBillion)) // In case of same-nonce, same-price transactions, the newer one has priority require.Equal(t, []string{"a", "f", "e", "b", "c", "g", "d"}, list.getTxHashesAsStrings()) @@ -52,392 +48,150 @@ func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1), txGasHandler, txFeeHelper) + added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1)) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler, txFeeHelper) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3), txGasHandler, txFeeHelper) + added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3)) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler, txFeeHelper) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) require.False(t, added) } func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing.T) { list := newListToTest(math.MaxUint32, 3) - txGasHandler, txFeeHelper := dummyParams() - list.AddTx(createTx([]byte("tx1"), ".", 1), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("tx5"), ".", 5), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("tx4"), ".", 4), txGasHandler, txFeeHelper) - 
list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("tx1"), ".", 1)) + list.AddTx(createTx([]byte("tx5"), ".", 5)) + list.AddTx(createTx([]byte("tx4"), ".", 4)) + list.AddTx(createTx([]byte("tx2"), ".", 2)) require.Equal(t, []string{"tx1", "tx2", "tx4"}, list.getTxHashesAsStrings()) - _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3), txGasHandler, txFeeHelper) + _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3)) require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) - // Gives priority to higher gas - though undesirably to some extent, "tx3" is evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx2++"), ".", 2, 128, 42, 42), txGasHandler, txFeeHelper) + // Gives priority to higher gas - though undesirable to some extent, "tx3" is evicted + _, evicted = list.AddTx(createTx([]byte("tx2++"), ".", 2).withGasPrice(1.5 * oneBillion)) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3"}, hashesAsStrings(evicted)) - // Though Undesirably to some extent, "tx3++"" is added, then evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 128, 42, 42), txGasHandler, txFeeHelper) + // Though undesirable to some extent, "tx3++"" is added, then evicted + _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withGasPrice(1.5 * oneBillion)) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3++"}, hashesAsStrings(evicted)) } func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { list := newListToTest(1024, math.MaxUint32) - txGasHandler, txFeeHelper := dummyParams() - list.AddTx(createTxWithParams([]byte("tx1"), ".", 1, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("tx2"), ".", 2, 512, 42, 42), txGasHandler, txFeeHelper) - 
list.AddTx(createTxWithParams([]byte("tx3"), ".", 3, 256, 42, 42), txGasHandler, txFeeHelper) - _, evicted := list.AddTx(createTxWithParams([]byte("tx5"), ".", 4, 256, 42, 42), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("tx1"), ".", 1).withSize(128).withGasLimit(50000)) + list.AddTx(createTx([]byte("tx2"), ".", 2).withSize(512).withGasLimit(1500000)) + list.AddTx(createTx([]byte("tx3"), ".", 3).withSize(256).withGasLimit(1500000)) + _, evicted := list.AddTx(createTx([]byte("tx5"), ".", 4).withSize(256).withGasLimit(1500000)) require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx5"}, hashesAsStrings(evicted)) - _, evicted = list.AddTx(createTxWithParams([]byte("tx5--"), ".", 4, 128, 42, 42), txGasHandler, txFeeHelper) + _, evicted = list.AddTx(createTx([]byte("tx5--"), ".", 4).withSize(128).withGasLimit(50000)) require.Equal(t, []string{"tx1", "tx2", "tx3", "tx5--"}, list.getTxHashesAsStrings()) require.Equal(t, []string{}, hashesAsStrings(evicted)) - _, evicted = list.AddTx(createTxWithParams([]byte("tx4"), ".", 4, 128, 42, 42), txGasHandler, txFeeHelper) + _, evicted = list.AddTx(createTx([]byte("tx4"), ".", 4).withSize(128).withGasLimit(50000)) require.Equal(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx5--"}, hashesAsStrings(evicted)) // Gives priority to higher gas - though undesirably to some extent, "tx4" is evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 256, 42, 100), txGasHandler, txFeeHelper) + _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withSize(256).withGasLimit(1500000).withGasPrice(1.5 * oneBillion)) require.Equal(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) } -func TestListForSender_findTx(t *testing.T) { +func TestListForSender_removeTransactionsWithLowerOrEqualNonceReturnHashes(t *testing.T) { list := 
newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - - txA := createTx([]byte("A"), ".", 41) - txANewer := createTx([]byte("ANewer"), ".", 41) - txB := createTx([]byte("B"), ".", 42) - txD := createTx([]byte("none"), ".", 43) - list.AddTx(txA, txGasHandler, txFeeHelper) - list.AddTx(txANewer, txGasHandler, txFeeHelper) - list.AddTx(txB, txGasHandler, txFeeHelper) - - elementWithA := list.findListElementWithTx(txA) - elementWithANewer := list.findListElementWithTx(txANewer) - elementWithB := list.findListElementWithTx(txB) - noElementWithD := list.findListElementWithTx(txD) - - require.NotNil(t, elementWithA) - require.NotNil(t, elementWithANewer) - require.NotNil(t, elementWithB) - - require.Equal(t, txA, elementWithA.Value.(*WrappedTransaction)) - require.Equal(t, txANewer, elementWithANewer.Value.(*WrappedTransaction)) - require.Equal(t, txB, elementWithB.Value.(*WrappedTransaction)) - require.Nil(t, noElementWithD) -} -func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - list.AddTx(createTx([]byte("A"), ".", 42), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("tx-42"), ".", 42)) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) + list.AddTx(createTx([]byte("tx-44"), ".", 44)) + list.AddTx(createTx([]byte("tx-45"), ".", 45)) - // Find one with a lower nonce, not added to cache - noElement := list.findListElementWithTx(createTx(nil, ".", 41)) - require.Nil(t, noElement) -} + require.Equal(t, 4, list.items.Len()) -func TestListForSender_RemoveTransaction(t *testing.T) { - list := newUnconstrainedListToTest() - tx := createTx([]byte("a"), ".", 1) - txGasHandler, txFeeHelper := dummyParams() + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(43) + require.Equal(t, 2, list.items.Len()) - list.AddTx(tx, txGasHandler, txFeeHelper) + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(44) require.Equal(t, 1, 
list.items.Len()) - list.RemoveTx(tx) - require.Equal(t, 0, list.items.Len()) -} - -func TestListForSender_RemoveTransaction_NoPanicWhenTxMissing(t *testing.T) { - list := newUnconstrainedListToTest() - tx := createTx([]byte(""), ".", 1) - - list.RemoveTx(tx) + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(99) require.Equal(t, 0, list.items.Len()) } -func TestListForSender_SelectBatchTo(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - - for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) - } - - destination := make([]*WrappedTransaction, 1000) - - // First batch - journal := list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 50, journal.copied) - require.NotNil(t, destination[49]) - require.Nil(t, destination[50]) - - // Second batch - journal = list.selectBatchTo(false, destination[50:], 50, math.MaxUint64) - require.Equal(t, 50, journal.copied) - require.NotNil(t, destination[99]) - - // No third batch - journal = list.selectBatchTo(false, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.copied) - - // Restart copy - journal = list.selectBatchTo(true, destination, 12345, math.MaxUint64) - require.Equal(t, 100, journal.copied) -} - -func TestListForSender_SelectBatchToWithLimitedGasBandwidth(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - - for index := 0; index < 40; index++ { - wtx := createTx([]byte{byte(index)}, ".", uint64(index)) - tx, _ := wtx.Tx.(*transaction.Transaction) - tx.GasLimit = 1000000 - list.AddTx(wtx, txGasHandler, txFeeHelper) - } - - destination := make([]*WrappedTransaction, 1000) - - // First batch - journal := list.selectBatchTo(true, destination, 50, 500000) - require.Equal(t, 1, journal.copied) - require.NotNil(t, destination[0]) - require.Nil(t, destination[1]) - - // Second batch - journal = 
list.selectBatchTo(false, destination[1:], 50, 20000000) - require.Equal(t, 20, journal.copied) - require.NotNil(t, destination[20]) - require.Nil(t, destination[21]) - - // third batch - journal = list.selectBatchTo(false, destination[21:], 20, math.MaxUint64) - require.Equal(t, 19, journal.copied) - - // Restart copy - journal = list.selectBatchTo(true, destination[41:], 12345, math.MaxUint64) - require.Equal(t, 40, journal.copied) -} - -func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - - for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) - } - - // When empty destination - destination := make([]*WrappedTransaction, 0) - journal := list.selectBatchTo(true, destination, 10, math.MaxUint64) - require.Equal(t, 0, journal.copied) - - // When small destination - destination = make([]*WrappedTransaction, 5) - journal = list.selectBatchTo(false, destination, 10, math.MaxUint64) - require.Equal(t, 5, journal.copied) -} - -func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - list.notifyAccountNonce(1) - - for index := 10; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) - } - - destination := make([]*WrappedTransaction, 1000) - - // First batch of selection, first failure - journal := list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.copied) - require.Nil(t, destination[0]) - require.Equal(t, int64(1), list.numFailedSelections.Get()) - - // Second batch of selection, don't count failure again - journal = list.selectBatchTo(false, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.copied) - require.Nil(t, destination[0]) - require.Equal(t, int64(1), 
list.numFailedSelections.Get()) - - // First batch of another selection, second failure, enters grace period - journal = list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 1, journal.copied) - require.NotNil(t, destination[0]) - require.Nil(t, destination[1]) - require.Equal(t, int64(2), list.numFailedSelections.Get()) -} - -func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - list.notifyAccountNonce(1) +func TestListForSender_getTxs(t *testing.T) { + t.Run("without transactions", func(t *testing.T) { + list := newUnconstrainedListToTest() - for index := 2; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) - } + require.Len(t, list.getTxs(), 0) + require.Len(t, list.getTxsReversed(), 0) + }) - destination := make([]*WrappedTransaction, 1000) + t.Run("with transactions", func(t *testing.T) { + list := newUnconstrainedListToTest() - // Try a number of selections with failure, reach close to grace period - for i := 1; i < senderGracePeriodLowerBound; i++ { - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.copied) - require.Equal(t, int64(i), list.numFailedSelections.Get()) - } + list.AddTx(createTx([]byte("tx-42"), ".", 42)) + require.Len(t, list.getTxs(), 1) + require.Len(t, list.getTxsReversed(), 1) - // Try selection again. 
Failure will move the sender to grace period and return 1 transaction - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 1, journal.copied) - require.Equal(t, int64(senderGracePeriodLowerBound), list.numFailedSelections.Get()) - require.False(t, list.sweepable.IsSet()) - - // Now resolve the gap - list.AddTx(createTx([]byte("resolving-tx"), ".", 1), txGasHandler, txFeeHelper) - // Selection will be successful - journal = list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 19, journal.copied) - require.Equal(t, int64(0), list.numFailedSelections.Get()) - require.False(t, list.sweepable.IsSet()) -} + list.AddTx(createTx([]byte("tx-44"), ".", 44)) + require.Len(t, list.getTxs(), 2) + require.Len(t, list.getTxsReversed(), 2) -func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - list.notifyAccountNonce(1) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) + require.Len(t, list.getTxs(), 3) + require.Len(t, list.getTxsReversed(), 3) - for index := 2; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) - } - - destination := make([]*WrappedTransaction, 1000) - - // Try a number of selections with failure, reach close to grace period - for i := 1; i < senderGracePeriodLowerBound; i++ { - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.copied) - require.Equal(t, int64(i), list.numFailedSelections.Get()) - } - - // Try a number of selections with failure, within the grace period - for i := senderGracePeriodLowerBound; i <= senderGracePeriodUpperBound; i++ { - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 1, journal.copied) - require.Equal(t, int64(i), list.numFailedSelections.Get()) - } - - // Grace 
period exceeded now - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.copied) - require.Equal(t, int64(senderGracePeriodUpperBound+1), list.numFailedSelections.Get()) - require.True(t, list.sweepable.IsSet()) -} - -func TestListForSender_NotifyAccountNonce(t *testing.T) { - list := newUnconstrainedListToTest() - - require.Equal(t, uint64(0), list.accountNonce.Get()) - require.False(t, list.accountNonceKnown.IsSet()) - - list.notifyAccountNonce(42) - - require.Equal(t, uint64(42), list.accountNonce.Get()) - require.True(t, list.accountNonceKnown.IsSet()) -} - -func TestListForSender_hasInitialGap(t *testing.T) { - list := newUnconstrainedListToTest() - list.notifyAccountNonce(42) - txGasHandler, txFeeHelper := dummyParams() - - // No transaction, no gap - require.False(t, list.hasInitialGap()) - // One gap - list.AddTx(createTx([]byte("tx-43"), ".", 43), txGasHandler, txFeeHelper) - require.True(t, list.hasInitialGap()) - // Resolve gap - list.AddTx(createTx([]byte("tx-42"), ".", 42), txGasHandler, txFeeHelper) - require.False(t, list.hasInitialGap()) -} - -func TestListForSender_getTxHashes(t *testing.T) { - list := newUnconstrainedListToTest() - require.Len(t, list.getTxHashes(), 0) - txGasHandler, txFeeHelper := dummyParams() - - list.AddTx(createTx([]byte("A"), ".", 1), txGasHandler, txFeeHelper) - require.Len(t, list.getTxHashes(), 1) - - list.AddTx(createTx([]byte("B"), ".", 2), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("C"), ".", 3), txGasHandler, txFeeHelper) - require.Len(t, list.getTxHashes(), 3) + require.Equal(t, []byte("tx-42"), list.getTxs()[0].TxHash) + require.Equal(t, []byte("tx-43"), list.getTxs()[1].TxHash) + require.Equal(t, []byte("tx-44"), list.getTxs()[2].TxHash) + require.Equal(t, []byte("tx-44"), list.getTxsReversed()[0].TxHash) + require.Equal(t, []byte("tx-43"), list.getTxsReversed()[1].TxHash) + require.Equal(t, []byte("tx-42"), list.getTxsReversed()[2].TxHash) + 
}) } func TestListForSender_DetectRaceConditions(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - go func() { - // These are called concurrently with addition: during eviction, during removal etc. - approximatelyCountTxInLists([]*txListForSender{list}) - list.IsEmpty() - }() + wg := sync.WaitGroup{} - go func() { - list.AddTx(createTx([]byte("test"), ".", 42), txGasHandler, txFeeHelper) - }() -} + doOperations := func() { + // These might be called concurrently: + _ = list.IsEmpty() + _ = list.getTxs() + _ = list.getTxsReversed() + _ = list.countTxWithLock() + _, _ = list.AddTx(createTx([]byte("test"), ".", 42)) -func dummyParamsWithGasPriceAndGasLimit(minGasPrice uint64, minGasLimit uint64) (TxGasHandler, feeHelper) { - minPrice := minGasPrice - divisor := uint64(100) - minPriceProcessing := minGasPrice / divisor - txFeeHelper := newFeeComputationHelper(minPrice, minGasLimit, minPriceProcessing) - txGasHandler := &txcachemocks.TxGasHandlerMock{ - MinimumGasMove: minGasLimit, - MinimumGasPrice: minPrice, - GasProcessingDivisor: divisor, + wg.Done() } - return txGasHandler, txFeeHelper -} -func dummyParamsWithGasPrice(minGasPrice uint64) (TxGasHandler, feeHelper) { - return dummyParamsWithGasPriceAndGasLimit(minGasPrice, 50000) -} + for i := 0; i < 100; i++ { + wg.Add(1) + go doOperations() + } -func dummyParams() (TxGasHandler, feeHelper) { - minPrice := uint64(1000000000) - minGasLimit := uint64(50000) - return dummyParamsWithGasPriceAndGasLimit(minPrice, minGasLimit) + wg.Wait() } func newUnconstrainedListToTest() *txListForSender { - return newTxListForSender(".", &senderConstraints{ - maxNumBytes: math.MaxUint32, - maxNumTxs: math.MaxUint32, - }, func(_ *txListForSender, _ senderScoreParams) {}) + return newListToTest(math.MaxUint32, math.MaxUint32) } func newListToTest(maxNumBytes uint32, maxNumTxs uint32) *txListForSender { - return newTxListForSender(".", &senderConstraints{ + senderConstraints := 
&senderConstraints{ maxNumBytes: maxNumBytes, maxNumTxs: maxNumTxs, - }, func(_ *txListForSender, _ senderScoreParams) {}) + } + + return newTxListForSender(".", senderConstraints) } diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index 281dd8ab..499c695e 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -2,71 +2,57 @@ package txcache import ( "bytes" + "math/big" "github.com/multiversx/mx-chain-core-go/data" ) -const processFeeFactor = float64(0.8) // 80% +// bunchOfTransactions is a slice of WrappedTransaction pointers +type bunchOfTransactions []*WrappedTransaction // WrappedTransaction contains a transaction, its hash and extra information type WrappedTransaction struct { - Tx data.TransactionHandler - TxHash []byte - SenderShardID uint32 - ReceiverShardID uint32 - Size int64 - TxFeeScoreNormalized uint64 + Tx data.TransactionHandler + TxHash []byte + SenderShardID uint32 + ReceiverShardID uint32 + Size int64 + + // These fields are only set within "precomputeFields". + // We don't need to protect them with a mutex, since "precomputeFields" is called only once for each transaction. + // Additional note: "WrappedTransaction" objects are created by the Node, in dataRetriever/txpool/shardedTxPool.go. 
+ Fee *big.Int + PricePerUnit uint64 + TransferredValue *big.Int } -func (wrappedTx *WrappedTransaction) sameAs(another *WrappedTransaction) bool { - return bytes.Equal(wrappedTx.TxHash, another.TxHash) -} - -// estimateTxGas returns an approximation for the necessary computation units (gas units) -func estimateTxGas(tx *WrappedTransaction) uint64 { - gasLimit := tx.Tx.GetGasLimit() - return gasLimit -} - -// estimateTxFeeScore returns a normalized approximation for the cost of a transaction -func estimateTxFeeScore(tx *WrappedTransaction, txGasHandler TxGasHandler, txFeeHelper feeHelper) uint64 { - moveGas, processGas := txGasHandler.SplitTxGasInCategories(tx.Tx) - - normalizedMoveGas := moveGas >> txFeeHelper.gasLimitShift() - normalizedProcessGas := processGas >> txFeeHelper.gasLimitShift() - - normalizedGasPriceMove := txGasHandler.GasPriceForMove(tx.Tx) >> txFeeHelper.gasPriceShift() - normalizedGasPriceProcess := normalizeGasPriceProcessing(tx, txGasHandler, txFeeHelper) - - normalizedFeeMove := normalizedMoveGas * normalizedGasPriceMove - normalizedFeeProcess := normalizedProcessGas * normalizedGasPriceProcess +// precomputeFields computes (and caches) the (average) price per gas unit. 
+func (wrappedTx *WrappedTransaction) precomputeFields(host MempoolHost) { + wrappedTx.Fee = host.ComputeTxFee(wrappedTx.Tx) - adjustmentFactor := computeProcessingGasPriceAdjustment(tx, txGasHandler, txFeeHelper) - - tx.TxFeeScoreNormalized = normalizedFeeMove + normalizedFeeProcess*adjustmentFactor - - return tx.TxFeeScoreNormalized -} + gasLimit := wrappedTx.Tx.GetGasLimit() + if gasLimit != 0 { + wrappedTx.PricePerUnit = wrappedTx.Fee.Uint64() / gasLimit + } -func normalizeGasPriceProcessing(tx *WrappedTransaction, txGasHandler TxGasHandler, txFeeHelper feeHelper) uint64 { - return txGasHandler.GasPriceForProcessing(tx.Tx) >> txFeeHelper.gasPriceShift() + wrappedTx.TransferredValue = host.GetTransferredValue(wrappedTx.Tx) } -func computeProcessingGasPriceAdjustment( - tx *WrappedTransaction, - txGasHandler TxGasHandler, - txFeeHelper feeHelper, -) uint64 { - minPriceFactor := txFeeHelper.minGasPriceFactor() - - if minPriceFactor <= 2 { - return 1 +// Equality is out of scope (not possible in our case). +func (wrappedTx *WrappedTransaction) isTransactionMoreValuableForNetwork(otherTransaction *WrappedTransaction) bool { + // First, compare by PPU (higher PPU is better). + if wrappedTx.PricePerUnit != otherTransaction.PricePerUnit { + return wrappedTx.PricePerUnit > otherTransaction.PricePerUnit } - actualPriceFactor := float64(1) - if txGasHandler.MinGasPriceForProcessing() != 0 { - actualPriceFactor = float64(txGasHandler.GasPriceForProcessing(tx.Tx)) / float64(txGasHandler.MinGasPriceForProcessing()) + // If PPU is the same, compare by gas limit (higher gas limit is better, promoting less "execution fragmentation"). 
+ gasLimit := wrappedTx.Tx.GetGasLimit() + gasLimitOther := otherTransaction.Tx.GetGasLimit() + + if gasLimit != gasLimitOther { + return gasLimit > gasLimitOther } - return uint64(float64(txFeeHelper.minGasPriceFactor()) * processFeeFactor / actualPriceFactor) + // In the end, compare by transaction hash + return bytes.Compare(wrappedTx.TxHash, otherTransaction.TxHash) < 0 } diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index ed9a5758..398c97b2 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -1,75 +1,117 @@ package txcache import ( + "math/big" "testing" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) -func Test_estimateTxFeeScore(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPrice(100 * oneBillion) - A := createTxWithParams([]byte("a"), "a", 1, 200, 50000, 100*oneBillion) - B := createTxWithParams([]byte("b"), "b", 1, 200, 50000000, 100*oneBillion) - C := createTxWithParams([]byte("C"), "c", 1, 200, 1500000000, 100*oneBillion) - - scoreA := estimateTxFeeScore(A, txGasHandler, txFeeHelper) - scoreB := estimateTxFeeScore(B, txGasHandler, txFeeHelper) - scoreC := estimateTxFeeScore(C, txGasHandler, txFeeHelper) - require.Equal(t, uint64(8940), scoreA) - require.Equal(t, uint64(8940), A.TxFeeScoreNormalized) - require.Equal(t, uint64(6837580), scoreB) - require.Equal(t, uint64(6837580), B.TxFeeScoreNormalized) - require.Equal(t, uint64(205079820), scoreC) - require.Equal(t, uint64(205079820), C.TxFeeScoreNormalized) -} +func TestWrappedTransaction_precomputeFields(t *testing.T) { + t.Run("only move balance gas limit", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() -func Test_normalizeGasPriceProcessing(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPriceAndDivisor(100*oneBillion, 100) - A := createTxWithParams([]byte("A"), 
"a", 1, 200, 1500000000, 100*oneBillion) - normalizedGasPriceProcess := normalizeGasPriceProcessing(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(7), normalizedGasPriceProcess) + tx := createTx([]byte("a"), "a", 1).withValue(oneQuintillionBig).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) + tx.precomputeFields(host) - txGasHandler, txFeeHelper = dummyParamsWithGasPriceAndDivisor(100*oneBillion, 50) - normalizedGasPriceProcess = normalizeGasPriceProcessing(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(14), normalizedGasPriceProcess) + require.Equal(t, "51500000000000", tx.Fee.String()) + require.Equal(t, oneBillion, int(tx.PricePerUnit)) + require.Equal(t, "1000000000000000000", tx.TransferredValue.String()) + }) - txGasHandler, txFeeHelper = dummyParamsWithGasPriceAndDivisor(100*oneBillion, 1) - normalizedGasPriceProcess = normalizeGasPriceProcessing(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(745), normalizedGasPriceProcess) + t.Run("move balance gas limit and execution gas limit (a)", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() - txGasHandler, txFeeHelper = dummyParamsWithGasPriceAndDivisor(100000, 100) - A = createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 100000) - normalizedGasPriceProcess = normalizeGasPriceProcessing(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(7), normalizedGasPriceProcess) -} + tx := createTx([]byte("b"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) + tx.precomputeFields(host) + + require.Equal(t, "51500010000000", tx.Fee.String()) + require.Equal(t, 999_980_777, int(tx.PricePerUnit)) + }) + + t.Run("move balance gas limit and execution gas limit (b)", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + tx := createTx([]byte("c"), "c", 1).withDataLength(1).withGasLimit(oneMilion).withGasPrice(oneBillion) + tx.precomputeFields(host) + + actualFee := 51500*oneBillion + (oneMilion-51500)*oneBillion/100 + 
require.Equal(t, "60985000000000", tx.Fee.String()) + require.Equal(t, 60_985_000_000_000, actualFee) + require.Equal(t, actualFee/oneMilion, int(tx.PricePerUnit)) + }) + + t.Run("with guardian", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + tx := createTx([]byte("a"), "a", 1).withValue(oneQuintillionBig) + tx.precomputeFields(host) + + require.Equal(t, "50000000000000", tx.Fee.String()) + require.Equal(t, oneBillion, int(tx.PricePerUnit)) + require.Equal(t, "1000000000000000000", tx.TransferredValue.String()) + }) + + t.Run("with nil transferred value", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + tx := createTx([]byte("a"), "a", 1) + tx.precomputeFields(host) + + require.Nil(t, tx.TransferredValue) + }) -func Test_computeProcessingGasPriceAdjustment(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPriceAndDivisor(100*oneBillion, 100) - A := createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 100*oneBillion) - adjustment := computeProcessingGasPriceAdjustment(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(80), adjustment) - - A = createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 150*oneBillion) - adjustment = computeProcessingGasPriceAdjustment(A, txGasHandler, txFeeHelper) - expectedAdjustment := float64(100) * processFeeFactor / float64(1.5) - require.Equal(t, uint64(expectedAdjustment), adjustment) - - A = createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 110*oneBillion) - adjustment = computeProcessingGasPriceAdjustment(A, txGasHandler, txFeeHelper) - expectedAdjustment = float64(100) * processFeeFactor / float64(1.1) - require.Equal(t, uint64(expectedAdjustment), adjustment) + t.Run("queries host", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + host.ComputeTxFeeCalled = func(_ data.TransactionWithFeeHandler) *big.Int { + return big.NewInt(42) + } + host.GetTransferredValueCalled = func(_ data.TransactionHandler) *big.Int { + return big.NewInt(43) + } 
+ + tx := createTx([]byte("a"), "a", 1).withGasLimit(50_000) + tx.precomputeFields(host) + + require.Equal(t, "42", tx.Fee.String()) + require.Equal(t, "43", tx.TransferredValue.String()) + }) } -func dummyParamsWithGasPriceAndDivisor(minGasPrice, processingPriceDivisor uint64) (TxGasHandler, feeHelper) { - minPrice := minGasPrice - minPriceProcessing := minGasPrice / processingPriceDivisor - minGasLimit := uint64(50000) - txFeeHelper := newFeeComputationHelper(minPrice, minGasLimit, minPriceProcessing) - txGasHandler := &txcachemocks.TxGasHandlerMock{ - MinimumGasMove: minGasLimit, - MinimumGasPrice: minPrice, - GasProcessingDivisor: processingPriceDivisor, - } - return txGasHandler, txFeeHelper +func TestWrappedTransaction_isTransactionMoreValuableForNetwork(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + t.Run("decide by price per unit", func(t *testing.T) { + a := createTx([]byte("a-1"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) + a.precomputeFields(host) + + b := createTx([]byte("b-1"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) + b.precomputeFields(host) + + require.True(t, a.isTransactionMoreValuableForNetwork(b)) + }) + + t.Run("decide by gas limit (set them up to have the same PPU)", func(t *testing.T) { + a := createTx([]byte("a-7"), "a", 7).withDataLength(30).withGasLimit(95_000).withGasPrice(oneBillion) + a.precomputeFields(host) + + b := createTx([]byte("b-7"), "b", 7).withDataLength(60).withGasLimit(140_000).withGasPrice(oneBillion) + b.precomputeFields(host) + + require.Equal(t, a.PricePerUnit, b.PricePerUnit) + require.True(t, b.isTransactionMoreValuableForNetwork(a)) + }) + + t.Run("decide by transaction hash (set them up to have the same PPU and gas limit)", func(t *testing.T) { + a := createTx([]byte("a-7"), "a", 7) + a.precomputeFields(host) + + b := createTx([]byte("b-7"), "b", 7) + b.precomputeFields(host) + + require.Equal(t, a.PricePerUnit, b.PricePerUnit) + 
require.True(t, a.isTransactionMoreValuableForNetwork(b)) + }) } diff --git a/types/accountState.go b/types/accountState.go new file mode 100644 index 00000000..13c3f326 --- /dev/null +++ b/types/accountState.go @@ -0,0 +1,9 @@ +package types + +import "math/big" + +// AccountState represents the state of an account, as seen by the mempool +type AccountState struct { + Nonce uint64 + Balance *big.Int +}