From 41fb6b84d619f8f7db340a54fafe0d445079491e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 1 Jul 2024 17:05:30 +0300 Subject: [PATCH 001/124] On NotifyAccountNonce, remove transactions with lower nonces. --- txcache/monitoring.go | 12 ++++++++++-- txcache/txCache.go | 7 ++++++- txcache/txListBySenderMap.go | 6 +++--- txcache/txListForSender.go | 30 +++++++++++++++++++++++++++++- 4 files changed, 48 insertions(+), 7 deletions(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 7d8ad284..17fd0368 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -12,10 +12,18 @@ import ( var log = logger.GetOrCreate("txcache") func (cache *TxCache) monitorEvictionWrtSenderLimit(sender []byte, evicted [][]byte) { - log.Trace("TxCache.AddTx() evict transactions wrt. limit by sender", "name", cache.name, "sender", sender, "num", len(evicted)) + log.Trace("TxCache.monitorEvictionWrtSenderLimit()", "name", cache.name, "sender", sender, "num", len(evicted)) for i := 0; i < core.MinInt(len(evicted), numEvictedTxsToDisplay); i++ { - log.Trace("TxCache.AddTx() evict transactions wrt. limit by sender", "name", cache.name, "sender", sender, "tx", evicted[i]) + log.Trace("TxCache.monitorEvictionWrtSenderLimit()", "name", cache.name, "sender", sender, "tx", evicted[i]) + } +} + +func (cache *TxCache) monitorEvictionWrtSenderNonce(sender []byte, senderNonce uint64, evicted [][]byte) { + log.Trace("TxCache.monitorEvictionWrtSenderNonce()", "name", cache.name, "sender", sender, "nonce", senderNonce, "num", len(evicted)) + + for i := 0; i < core.MinInt(len(evicted), numEvictedTxsToDisplay); i++ { + log.Trace("TxCache.monitorEvictionWrtSenderNonce()", "name", cache.name, "sender", sender, "nonce", senderNonce, "tx", evicted[i]) } } diff --git a/txcache/txCache.go b/txcache/txCache.go index d938b976..cfa31573 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -311,7 +311,12 @@ func (cache *TxCache) UnRegisterHandler(string) { // NotifyAccountNonce should be called by external components (such as interceptors and transactions processor) // in order to inform the cache about initial nonce gap phenomena func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { - cache.txListBySender.notifyAccountNonce(accountKey, nonce) + evicted := cache.txListBySender.notifyAccountNonce(accountKey, nonce) + + if len(evicted) > 0 { + cache.monitorEvictionWrtSenderNonce(accountKey, nonce, evicted) + cache.txByHash.RemoveTxsBulk(evicted) + } } // ImmunizeTxsAgainstEviction does nothing for this type of cache diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index ccda1ce0..beb9f2fd 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -133,14 +133,14 @@ func (txMap *txListBySenderMap) RemoveSendersBulk(senders []string) uint32 { return numRemoved } -func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint64) { +func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint64) [][]byte { sender := string(accountKey) listForSender, ok := txMap.getListForSender(sender) if !ok { - return + return nil } - listForSender.notifyAccountNonce(nonce) + return listForSender.notifyAccountNonce(nonce) } func (txMap *txListBySenderMap) getSnapshotAscending() []*txListForSender { diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index a12a91d1..99fcd819 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -320,9 +320,37 @@ func 
approximatelyCountTxInLists(lists []*txListForSender) uint64 { // notifyAccountNonce does not update the "numFailedSelections" counter, // since the notification comes at a time when we cannot actually detect whether the initial gap still exists or it was resolved. -func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) { +// Removes transactions with lower nonces and returns their hashes. +func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) [][]byte { + listForSender.mutex.Lock() + defer listForSender.mutex.Unlock() + listForSender.accountNonce.Set(nonce) _ = listForSender.accountNonceKnown.SetReturningPrevious() + + return listForSender.evictTransactionsWithLowerNonces(nonce) +} + +// This function should only be used in critical section (listForSender.mutex) +func (listForSender *txListForSender) evictTransactionsWithLowerNonces(accountNonce uint64) [][]byte { + evictedTxHashes := make([][]byte, 0) + + for element := listForSender.items.Front(); element != nil; element = element.Next() { + tx := element.Value.(*WrappedTransaction) + txNonce := tx.Tx.GetNonce() + + if txNonce >= accountNonce { + break + } + + listForSender.items.Remove(element) + listForSender.onRemovedListElement(element) + + // Keep track of removed transactions + evictedTxHashes = append(evictedTxHashes, tx.TxHash) + } + + return evictedTxHashes } // This function should only be used in critical section (listForSender.mutex) From dc2ff1c5b3664f7a8f9b82e3a89d81094949f382 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 3 Jul 2024 22:13:50 +0300 Subject: [PATCH 002/124] WrappedTransaction.computeFee() delegates to the Node's economicsData logic. --- txcache/wrappedTransaction.go | 69 ++++++----------------------- txcache/wrappedTransaction_test.go | 70 +++--------------------------- 2 files changed, 20 insertions(+), 119 deletions(-) diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index 281dd8ab..2c432a66 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -2,71 +2,30 @@ package txcache import ( "bytes" + "math/big" "github.com/multiversx/mx-chain-core-go/data" ) -const processFeeFactor = float64(0.8) // 80% - // WrappedTransaction contains a transaction, its hash and extra information type WrappedTransaction struct { - Tx data.TransactionHandler - TxHash []byte - SenderShardID uint32 - ReceiverShardID uint32 - Size int64 - TxFeeScoreNormalized uint64 + Tx data.TransactionHandler + TxHash []byte + SenderShardID uint32 + ReceiverShardID uint32 + Size int64 + TxFee float64 } func (wrappedTx *WrappedTransaction) sameAs(another *WrappedTransaction) bool { return bytes.Equal(wrappedTx.TxHash, another.TxHash) } -// estimateTxGas returns an approximation for the necessary computation units (gas units) -func estimateTxGas(tx *WrappedTransaction) uint64 { - gasLimit := tx.Tx.GetGasLimit() - return gasLimit -} - -// estimateTxFeeScore returns a normalized approximation for the cost of a transaction -func estimateTxFeeScore(tx *WrappedTransaction, txGasHandler TxGasHandler, txFeeHelper feeHelper) uint64 { - moveGas, processGas := txGasHandler.SplitTxGasInCategories(tx.Tx) - - normalizedMoveGas := moveGas >> txFeeHelper.gasLimitShift() - normalizedProcessGas := processGas >> txFeeHelper.gasLimitShift() - - normalizedGasPriceMove := txGasHandler.GasPriceForMove(tx.Tx) >> txFeeHelper.gasPriceShift() - normalizedGasPriceProcess := normalizeGasPriceProcessing(tx, txGasHandler, txFeeHelper) - - normalizedFeeMove := 
normalizedMoveGas * normalizedGasPriceMove - normalizedFeeProcess := normalizedProcessGas * normalizedGasPriceProcess - - adjustmentFactor := computeProcessingGasPriceAdjustment(tx, txGasHandler, txFeeHelper) - - tx.TxFeeScoreNormalized = normalizedFeeMove + normalizedFeeProcess*adjustmentFactor - - return tx.TxFeeScoreNormalized -} - -func normalizeGasPriceProcessing(tx *WrappedTransaction, txGasHandler TxGasHandler, txFeeHelper feeHelper) uint64 { - return txGasHandler.GasPriceForProcessing(tx.Tx) >> txFeeHelper.gasPriceShift() -} - -func computeProcessingGasPriceAdjustment( - tx *WrappedTransaction, - txGasHandler TxGasHandler, - txFeeHelper feeHelper, -) uint64 { - minPriceFactor := txFeeHelper.minGasPriceFactor() - - if minPriceFactor <= 2 { - return 1 - } - - actualPriceFactor := float64(1) - if txGasHandler.MinGasPriceForProcessing() != 0 { - actualPriceFactor = float64(txGasHandler.GasPriceForProcessing(tx.Tx)) / float64(txGasHandler.MinGasPriceForProcessing()) - } - - return uint64(float64(txFeeHelper.minGasPriceFactor()) * processFeeFactor / actualPriceFactor) +// computeFee computes the transaction fee. +// The returned fee is also held on the transaction object. +func (wrappedTx *WrappedTransaction) computeFee(txGasHandler TxGasHandler) float64 { + fee := txGasHandler.ComputeTxFee(wrappedTx.Tx) + feeAsFloat, _ := new(big.Float).SetInt(fee).Float64() + wrappedTx.TxFee = feeAsFloat + return feeAsFloat } diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index ed9a5758..56f212d9 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -7,69 +7,11 @@ import ( "github.com/stretchr/testify/require" ) -func Test_estimateTxFeeScore(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPrice(100 * oneBillion) - A := createTxWithParams([]byte("a"), "a", 1, 200, 50000, 100*oneBillion) - B := createTxWithParams([]byte("b"), "b", 1, 200, 50000000, 100*oneBillion) - C := createTxWithParams([]byte("C"), "c", 1, 200, 1500000000, 100*oneBillion) +func Test_computeTxFee(t *testing.T) { + txGasHandler := txcachemocks.NewTxGasHandlerMock() + tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) + txFee := tx.computeFee(txGasHandler) - scoreA := estimateTxFeeScore(A, txGasHandler, txFeeHelper) - scoreB := estimateTxFeeScore(B, txGasHandler, txFeeHelper) - scoreC := estimateTxFeeScore(C, txGasHandler, txFeeHelper) - require.Equal(t, uint64(8940), scoreA) - require.Equal(t, uint64(8940), A.TxFeeScoreNormalized) - require.Equal(t, uint64(6837580), scoreB) - require.Equal(t, uint64(6837580), B.TxFeeScoreNormalized) - require.Equal(t, uint64(205079820), scoreC) - require.Equal(t, uint64(205079820), C.TxFeeScoreNormalized) -} - -func Test_normalizeGasPriceProcessing(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPriceAndDivisor(100*oneBillion, 100) - A := createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 100*oneBillion) - normalizedGasPriceProcess := normalizeGasPriceProcessing(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(7), normalizedGasPriceProcess) - - txGasHandler, txFeeHelper = dummyParamsWithGasPriceAndDivisor(100*oneBillion, 50) - normalizedGasPriceProcess = normalizeGasPriceProcessing(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(14), normalizedGasPriceProcess) - - txGasHandler, txFeeHelper = dummyParamsWithGasPriceAndDivisor(100*oneBillion, 1) - normalizedGasPriceProcess = normalizeGasPriceProcessing(A, txGasHandler, 
txFeeHelper) - require.Equal(t, uint64(745), normalizedGasPriceProcess) - - txGasHandler, txFeeHelper = dummyParamsWithGasPriceAndDivisor(100000, 100) - A = createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 100000) - normalizedGasPriceProcess = normalizeGasPriceProcessing(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(7), normalizedGasPriceProcess) -} - -func Test_computeProcessingGasPriceAdjustment(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPriceAndDivisor(100*oneBillion, 100) - A := createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 100*oneBillion) - adjustment := computeProcessingGasPriceAdjustment(A, txGasHandler, txFeeHelper) - require.Equal(t, uint64(80), adjustment) - - A = createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 150*oneBillion) - adjustment = computeProcessingGasPriceAdjustment(A, txGasHandler, txFeeHelper) - expectedAdjustment := float64(100) * processFeeFactor / float64(1.5) - require.Equal(t, uint64(expectedAdjustment), adjustment) - - A = createTxWithParams([]byte("A"), "a", 1, 200, 1500000000, 110*oneBillion) - adjustment = computeProcessingGasPriceAdjustment(A, txGasHandler, txFeeHelper) - expectedAdjustment = float64(100) * processFeeFactor / float64(1.1) - require.Equal(t, uint64(expectedAdjustment), adjustment) -} - -func dummyParamsWithGasPriceAndDivisor(minGasPrice, processingPriceDivisor uint64) (TxGasHandler, feeHelper) { - minPrice := minGasPrice - minPriceProcessing := minGasPrice / processingPriceDivisor - minGasLimit := uint64(50000) - txFeeHelper := newFeeComputationHelper(minPrice, minGasLimit, minPriceProcessing) - txGasHandler := &txcachemocks.TxGasHandlerMock{ - MinimumGasMove: minGasLimit, - MinimumGasPrice: minPrice, - GasProcessingDivisor: processingPriceDivisor, - } - return txGasHandler, txFeeHelper + require.Equal(t, float64(51500000000000), txFee) + require.Equal(t, txFee, tx.TxFee) } From 1875775e5dc71f4a82c5ca542a813a11a87923ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 3 Jul 2024 22:14:18 +0300 Subject: [PATCH 003/124] Remove older feeComputationHelper. 
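As a quick cross-check of the new Test_computeTxFee above (a sketch for illustration, not part of the patch series), assuming the TxGasHandlerMock parameters introduced later in this series (minGasLimit = 50000, gasPerDataByte = 1500, gasPriceModifier = 0.01):

package main

import "fmt"

func main() {
	// The test's transaction: dataLength = 1, gasLimit = 51500, gasPrice = 1 billion.
	gasLimitForMovement := uint64(50000 + 1*1500)                // minGasLimit + dataLength*gasPerDataByte
	gasLimitForProcessing := uint64(51500) - gasLimitForMovement // 0: all gas is consumed by data movement
	gasPriceForProcessing := uint64(float64(1_000_000_000) * 0.01)

	fee := 1_000_000_000*gasLimitForMovement + gasPriceForProcessing*gasLimitForProcessing
	fmt.Println(fee) // 51500000000000, the value asserted by the test
}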
--- txcache/feeComputationHelper.go | 80 ---------------------------- txcache/feeComputationHelper_test.go | 73 ------------------------- 2 files changed, 153 deletions(-) delete mode 100644 txcache/feeComputationHelper.go delete mode 100644 txcache/feeComputationHelper_test.go diff --git a/txcache/feeComputationHelper.go b/txcache/feeComputationHelper.go deleted file mode 100644 index 66e365dc..00000000 --- a/txcache/feeComputationHelper.go +++ /dev/null @@ -1,80 +0,0 @@ -package txcache - -type feeHelper interface { - gasLimitShift() uint64 - gasPriceShift() uint64 - minPricePerUnit() uint64 - normalizedMinFee() uint64 - minGasPriceFactor() uint64 - IsInterfaceNil() bool -} - -type feeComputationHelper struct { - gasShiftingFactor uint64 - priceShiftingFactor uint64 - minFeeNormalized uint64 - minPPUNormalized uint64 - minPriceFactor uint64 -} - -const priceBinaryResolution = 10 -const gasBinaryResolution = 4 - -func newFeeComputationHelper(minPrice, minGasLimit, minPriceProcessing uint64) *feeComputationHelper { - feeComputeHelper := &feeComputationHelper{} - feeComputeHelper.initializeHelperParameters(minPrice, minGasLimit, minPriceProcessing) - return feeComputeHelper -} - -func (fch *feeComputationHelper) gasLimitShift() uint64 { - return fch.gasShiftingFactor -} - -func (fch *feeComputationHelper) gasPriceShift() uint64 { - return fch.priceShiftingFactor -} - -func (fch *feeComputationHelper) normalizedMinFee() uint64 { - return fch.minFeeNormalized -} - -func (fch *feeComputationHelper) minPricePerUnit() uint64 { - return fch.minPPUNormalized -} - -func (fch *feeComputationHelper) minGasPriceFactor() uint64 { - return fch.minPriceFactor -} - -func (fch *feeComputationHelper) initializeHelperParameters(minPrice, minGasLimit, minPriceProcessing uint64) { - fch.priceShiftingFactor = computeShiftMagnitude(minPrice, priceBinaryResolution) - x := minPriceProcessing >> fch.priceShiftingFactor - for x == 0 && fch.priceShiftingFactor > 0 { - fch.priceShiftingFactor-- - x = minPriceProcessing >> fch.priceShiftingFactor - } - - fch.gasShiftingFactor = computeShiftMagnitude(minGasLimit, gasBinaryResolution) - - fch.minPPUNormalized = minPriceProcessing >> fch.priceShiftingFactor - fch.minFeeNormalized = (minGasLimit >> fch.gasLimitShift()) * (minPrice >> fch.priceShiftingFactor) - fch.minPriceFactor = minPrice / minPriceProcessing -} - -// returns the maximum shift magnitude of the number in order to maintain the given binary resolution -func computeShiftMagnitude(x uint64, resolution uint8) uint64 { - m := uint64(0) - stopCondition := uint64(1) << resolution - shiftStep := uint64(1) - - for i := x; i > stopCondition; i >>= shiftStep { - m += shiftStep - } - - return m -} - -// IsInterfaceNil returns nil if the underlying object is nil -func (fch *feeComputationHelper) IsInterfaceNil() bool { - return fch == nil -} diff --git a/txcache/feeComputationHelper_test.go b/txcache/feeComputationHelper_test.go deleted file mode 100644 index 9a015ccf..00000000 --- a/txcache/feeComputationHelper_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package txcache - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func Test_initializeHelperParameters(t *testing.T) { - fch := &feeComputationHelper{ - gasShiftingFactor: 0, - priceShiftingFactor: 0, - minFeeNormalized: 0, - minPPUNormalized: 0, - minPriceFactor: 0, - } - - fch.initializeHelperParameters(1<<20, 1<<10, 1<<10) - require.Equal(t, uint64(10), fch.priceShiftingFactor) - require.Equal(t, uint64(6), fch.gasShiftingFactor) - require.Equal(t, 
uint64(1<<10), fch.minPriceFactor) - require.Equal(t, uint64((1<<4)*(1<<10)), fch.minFeeNormalized) - require.Equal(t, uint64(1), fch.minPPUNormalized) - - fch.initializeHelperParameters(1<<22, 1<<17, 1<<7) - require.Equal(t, uint64(7), fch.priceShiftingFactor) - require.Equal(t, uint64(13), fch.gasShiftingFactor) - require.Equal(t, uint64(1<<15), fch.minPriceFactor) - require.Equal(t, uint64((1<<4)*(1<<15)), fch.minFeeNormalized) - require.Equal(t, uint64(1), fch.minPPUNormalized) - - fch.initializeHelperParameters(1<<20, 1<<3, 1<<15) - require.Equal(t, uint64(10), fch.priceShiftingFactor) - require.Equal(t, uint64(0), fch.gasShiftingFactor) - require.Equal(t, uint64(1<<5), fch.minPriceFactor) - require.Equal(t, uint64((1<<3)*(1<<10)), fch.minFeeNormalized) - require.Equal(t, uint64(1<<5), fch.minPPUNormalized) -} - -func Test_newFeeComputationHelper(t *testing.T) { - fch := newFeeComputationHelper(1<<20, 1<<10, 1<<10) - require.Equal(t, uint64(10), fch.priceShiftingFactor) - require.Equal(t, uint64(6), fch.gasShiftingFactor) - require.Equal(t, uint64(1<<10), fch.minPriceFactor) - require.Equal(t, uint64((1<<4)*(1<<10)), fch.minFeeNormalized) - require.Equal(t, uint64(1), fch.minPPUNormalized) -} - -func Test_getters(t *testing.T) { - fch := newFeeComputationHelper(1<<20, 1<<10, 1<<10) - gasShift := fch.gasLimitShift() - gasPriceShift := fch.gasPriceShift() - minFeeNormalized := fch.normalizedMinFee() - minPPUNormalized := fch.minPricePerUnit() - minGasPriceFactor := fch.minGasPriceFactor() - - require.Equal(t, uint64(10), gasPriceShift) - require.Equal(t, uint64(6), gasShift) - require.Equal(t, uint64(1<<10), minGasPriceFactor) - require.Equal(t, uint64((1<<4)*(1<<10)), minFeeNormalized) - require.Equal(t, uint64(1), minPPUNormalized) -} - -func Test_computeShiftMagnitude(t *testing.T) { - shift := computeShiftMagnitude(1<<20, 10) - require.Equal(t, uint64(10), shift) - - shift = computeShiftMagnitude(1<<12, 10) - require.Equal(t, uint64(2), shift) - - shift = computeShiftMagnitude(1<<8, 10) - require.Equal(t, uint64(0), shift) -} From 5282c22e795eeee3eeb57e61d9bd4c206ef69263 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 3 Jul 2024 22:47:40 +0300 Subject: [PATCH 004/124] New score function (avg ppu, scaled, logistic, re-scaled). Refactoring. 
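The sketch below (not part of the patch series) illustrates the new scoring pipeline under the TxGasHandlerMock parameters used by the tests; numberOfScoreChunks is assumed to be 100, matching the "0-100" wording of the comment replaced below.

package main

import (
	"fmt"
	"math"
)

// Parameters of the TxGasHandlerMock used by the tests in this series.
const (
	minGasLimit      = uint64(50000)
	minGasPrice      = uint64(1_000_000_000)
	maxGasLimitPerTx = uint64(600_000_000)
	gasPerDataByte   = uint64(1500)
	gasPriceModifier = 0.01
)

// Assumed value; the replaced comment described the score as "an integer 0-100".
const numberOfScoreChunks = float64(100)

// computeFee mirrors TxGasHandlerMock.ComputeTxFee: fee for data movement plus fee for processing.
func computeFee(dataLength, gasLimit, gasPrice uint64) float64 {
	gasLimitForMovement := minGasLimit + dataLength*gasPerDataByte
	gasLimitForProcessing := gasLimit - gasLimitForMovement
	gasPriceForProcessing := float64(gasPrice) * gasPriceModifier
	return float64(gasPrice*gasLimitForMovement) + gasPriceForProcessing*float64(gasLimitForProcessing)
}

func main() {
	// Worst price-per-unit: a transaction using the maximum gas limit at the minimum gas price.
	worstPpu := computeFee(0, maxGasLimitPerTx, minGasPrice) / float64(maxGasLimitPerTx)
	fmt.Println(worstPpu) // 1.00825e+07, i.e. 10082500 (the value asserted in score_test.go below)

	// A plain transfer at the minimum gas price: avg ppu = fee / gasLimit = 1e9.
	avgPpu := computeFee(0, minGasLimit, minGasPrice) / float64(minGasLimit)

	// Scale by the worst ppu, squash through the logistic function, then re-scale to [0, numberOfScoreChunks].
	x := avgPpu / worstPpu
	score := (1/(1+math.Exp(-x)) - 0.5) * 2 * numberOfScoreChunks
	fmt.Println(score) // close to the maximum score, since x is roughly 99 here
}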
--- testscommon/txcachemocks/txGasHandlerMock.go | 70 +++++++++++------- txcache/interface.go | 9 ++- txcache/score.go | 75 ++++++++++---------- txcache/score_test.go | 13 ++-- txcache/txCache.go | 5 +- txcache/txCache_test.go | 24 ++++--- 6 files changed, 111 insertions(+), 85 deletions(-) diff --git a/testscommon/txcachemocks/txGasHandlerMock.go b/testscommon/txcachemocks/txGasHandlerMock.go index b26c8823..3ac2ae64 100644 --- a/testscommon/txcachemocks/txGasHandlerMock.go +++ b/testscommon/txcachemocks/txGasHandlerMock.go @@ -1,56 +1,76 @@ package txcachemocks import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" ) // TxGasHandler - type TxGasHandler interface { - SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 MinGasPrice() uint64 - MinGasLimit() uint64 - MinGasPriceForProcessing() uint64 + MaxGasLimitPerTx() uint64 + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int IsInterfaceNil() bool } // TxGasHandlerMock - type TxGasHandlerMock struct { - MinimumGasMove uint64 - MinimumGasPrice uint64 - GasProcessingDivisor uint64 + minGasLimit uint64 + minGasPrice uint64 + maxGasLimitPerTx uint64 + gasPerDataByte uint64 + gasPriceModifier float64 } -// SplitTxGasInCategories - -func (ghm *TxGasHandlerMock) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) { - moveGas := ghm.MinimumGasMove - return moveGas, tx.GetGasLimit() - moveGas +// NewTxGasHandlerMock - +func NewTxGasHandlerMock() *TxGasHandlerMock { + return &TxGasHandlerMock{ + minGasLimit: 50000, + minGasPrice: 1000000000, + maxGasLimitPerTx: 600000000, + gasPerDataByte: 1500, + gasPriceModifier: 0.01, + } } -// GasPriceForProcessing - -func (ghm *TxGasHandlerMock) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 { - return tx.GetGasPrice() / ghm.GasProcessingDivisor +// SetMinGasLimit - +func (ghm *TxGasHandlerMock) SetMinGasLimit(minGasLimit uint64) { + ghm.minGasLimit = minGasLimit } -// GasPriceForMove - -func (ghm *TxGasHandlerMock) GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 { - return tx.GetGasPrice() +// SetMinGasPrice - +func (ghm *TxGasHandlerMock) SetMinGasPrice(minGasPrice uint64) { + ghm.minGasPrice = minGasPrice } // MinGasPrice - func (ghm *TxGasHandlerMock) MinGasPrice() uint64 { - return ghm.MinimumGasPrice + return ghm.minGasPrice } -// MinGasLimit - -func (ghm *TxGasHandlerMock) MinGasLimit() uint64 { - return ghm.MinimumGasMove +// MaxGasLimitPerTx - +func (ghm *TxGasHandlerMock) MaxGasLimitPerTx() uint64 { + return ghm.maxGasLimitPerTx } -// MinGasPriceProcessing - -func (ghm *TxGasHandlerMock) MinGasPriceForProcessing() uint64 { - return ghm.MinimumGasPrice / ghm.GasProcessingDivisor +// ComputeTxFee - +func (ghm *TxGasHandlerMock) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int { + dataLength := uint64(len(tx.GetData())) + gasPriceForMovement := tx.GetGasPrice() + gasPriceForProcessing := uint64(float64(gasPriceForMovement) * ghm.gasPriceModifier) + + gasLimitForMovement := ghm.minGasLimit + dataLength*ghm.gasPerDataByte + gasLimitForProcessing, err := core.SafeSubUint64(tx.GetGasLimit(), gasLimitForMovement) + if err != nil { + panic(err) + } + + feeForMovement := core.SafeMul(gasPriceForMovement, gasLimitForMovement) + feeForProcessing := core.SafeMul(gasPriceForProcessing, gasLimitForProcessing) + fee := 
big.NewInt(0).Add(feeForMovement, feeForProcessing) + return fee } // IsInterfaceNil - diff --git a/txcache/interface.go b/txcache/interface.go index 73624759..e099cb1d 100644 --- a/txcache/interface.go +++ b/txcache/interface.go @@ -1,6 +1,8 @@ package txcache import ( + "math/big" + "github.com/multiversx/mx-chain-core-go/data" ) @@ -10,12 +12,9 @@ type scoreComputer interface { // TxGasHandler handles a transaction gas and gas cost type TxGasHandler interface { - SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 MinGasPrice() uint64 - MinGasLimit() uint64 - MinGasPriceForProcessing() uint64 + MaxGasLimitPerTx() uint64 + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int IsInterfaceNil() bool } diff --git a/txcache/score.go b/txcache/score.go index 06bde537..0cec584d 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -2,66 +2,69 @@ package txcache import ( "math" + + "github.com/multiversx/mx-chain-core-go/data/transaction" ) var _ scoreComputer = (*defaultScoreComputer)(nil) -// TODO (continued): The score formula should work even if minGasPrice = 0. type senderScoreParams struct { - count uint64 - // Fee score is normalized - feeScore uint64 - gas uint64 + avgPpuNumerator float64 + avgPpuDenominator uint64 + + accountNonce uint64 + accountNonceIsKnown bool + maxTransactionNonce uint64 + minTransactionNonce uint64 + + numOfTransactions uint64 + hasSpotlessSequenceOfNonces bool } type defaultScoreComputer struct { - txFeeHelper feeHelper - ppuDivider uint64 + worstPpu float64 } -func newDefaultScoreComputer(txFeeHelper feeHelper) *defaultScoreComputer { - ppuScoreDivider := txFeeHelper.minGasPriceFactor() - ppuScoreDivider = ppuScoreDivider * ppuScoreDivider * ppuScoreDivider +func newDefaultScoreComputer(txGasHandler TxGasHandler) *defaultScoreComputer { + worstPpu := computeWorstPpu(txGasHandler) return &defaultScoreComputer{ - txFeeHelper: txFeeHelper, - ppuDivider: ppuScoreDivider, + worstPpu: worstPpu, + } +} + +func computeWorstPpu(txGasHandler TxGasHandler) float64 { + minGasPrice := txGasHandler.MinGasPrice() + maxGasLimitPerTx := txGasHandler.MaxGasLimitPerTx() + worstPpuTx := &WrappedTransaction{ + Tx: &transaction.Transaction{ + GasLimit: maxGasLimitPerTx, + GasPrice: minGasPrice, + }, } + + return worstPpuTx.computeFee(txGasHandler) / float64(maxGasLimitPerTx) } -// computeScore computes the score of the sender, as an integer 0-100 +// computeScore computes the score of the sender, as an integer in [0, numberOfScoreChunks] func (computer *defaultScoreComputer) computeScore(scoreParams senderScoreParams) uint32 { rawScore := computer.computeRawScore(scoreParams) truncatedScore := uint32(rawScore) return truncatedScore } -// TODO (optimization): switch to integer operations (as opposed to float operations). 
func (computer *defaultScoreComputer) computeRawScore(params senderScoreParams) float64 { - allParamsDefined := params.feeScore > 0 && params.gas > 0 && params.count > 0 - if !allParamsDefined { + if !params.hasSpotlessSequenceOfNonces { return 0 } - ppuMin := computer.txFeeHelper.minPricePerUnit() - normalizedGas := params.gas >> computer.txFeeHelper.gasLimitShift() - if normalizedGas == 0 { - normalizedGas = 1 - } - ppuAvg := params.feeScore / normalizedGas - // (<< 3)^3 and >> 9 cancel each other; used to preserve a bit more resolution - ppuRatio := ppuAvg << 3 / ppuMin - ppuScore := ppuRatio * ppuRatio * ppuRatio >> 9 - ppuScoreAdjusted := float64(ppuScore) / float64(computer.ppuDivider) - - countPow2 := params.count * params.count - countScore := math.Log(float64(countPow2)+1) + 1 - - rawScore := ppuScoreAdjusted / countScore - // We apply the logistic function, - // and then subtract 0.5, since we only deal with positive scores, - // and then we multiply by 2, to have full [0..1] range. - asymptoticScore := (1/(1+math.Exp(-rawScore)) - 0.5) * 2 - score := asymptoticScore * float64(numberOfScoreChunks) + avgPpu := params.avgPpuNumerator / float64(params.avgPpuDenominator) + + // We use the worst possible price per unit for normalization. + avgPpuNormalized := avgPpu / computer.worstPpu + + // https://www.wolframalpha.com, with input "((1 / (1 + exp(-x)) - 1/2) * 2) * 100, where x is from 0 to 10" + avgPpuNormalizedSubunitary := (1/(1+math.Exp(-avgPpuNormalized)) - 0.5) * 2 + score := avgPpuNormalizedSubunitary * float64(numberOfScoreChunks) return score } diff --git a/txcache/score_test.go b/txcache/score_test.go index 51e438e1..82e26242 100644 --- a/txcache/score_test.go +++ b/txcache/score_test.go @@ -1,16 +1,19 @@ package txcache import ( - "strconv" "testing" - "github.com/stretchr/testify/assert" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) -func TestDefaultScoreComputer_computeRawScore(t *testing.T) { - _, txFeeHelper := dummyParamsWithGasPrice(oneBillion) - computer := newDefaultScoreComputer(txFeeHelper) +func TestNewDefaultScoreComputer(t *testing.T) { + gasHandler := txcachemocks.NewTxGasHandlerMock() + computer := newDefaultScoreComputer(gasHandler) + + require.NotNil(t, computer) + require.Equal(t, float64(10082500), computer.worstPpu) +} // 50k moveGas, 100Bil minPrice -> normalizedFee 8940 score := computer.computeRawScore(senderScoreParams{count: 1, feeScore: 18000, gas: 100000}) diff --git a/txcache/txCache.go b/txcache/txCache.go index cfa31573..6cc1c95c 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -47,12 +47,11 @@ func NewTxCache(config ConfigSourceMe, txGasHandler TxGasHandler) (*TxCache, err // Note: for simplicity, we use the same "numChunks" for both internal concurrent maps numChunks := config.NumChunks senderConstraintsObj := config.getSenderConstraints() - txFeeHelper := newFeeComputationHelper(txGasHandler.MinGasPrice(), txGasHandler.MinGasLimit(), txGasHandler.MinGasPriceForProcessing()) - scoreComputerObj := newDefaultScoreComputer(txFeeHelper) + scoreComputerObj := newDefaultScoreComputer(txGasHandler) txCache := &TxCache{ name: config.Name, - txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj, scoreComputerObj, txGasHandler, txFeeHelper), + txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj, scoreComputerObj, txGasHandler), txByHash: newTxByHashMap(numChunks), config: config, evictionJournal: evictionJournal{}, diff --git 
a/txcache/txCache_test.go b/txcache/txCache_test.go index 3a8b41c4..28fa131b 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-storage-go/common" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/multiversx/mx-chain-storage-go/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -35,7 +36,8 @@ func Test_NewTxCache(t *testing.T) { CountThreshold: math.MaxUint32, NumSendersToPreemptivelyEvict: 100, } - txGasHandler, _ := dummyParams() + + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) @@ -138,18 +140,18 @@ func Test_AddTx_AppliesSizeConstraintsPerSenderForNumTransactions(t *testing.T) func Test_AddTx_AppliesSizeConstraintsPerSenderForNumBytes(t *testing.T) { cache := newCacheToTest(1024, math.MaxUint32) - cache.AddTx(createTxWithParams([]byte("tx-alice-1"), "alice", 1, 128, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-alice-2"), "alice", 2, 512, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-alice-4"), "alice", 3, 256, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-1"), "bob", 1, 512, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 2, 513, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-alice-1"), "alice", 1, 128, 50000, 42)) + cache.AddTx(createTxWithParams([]byte("tx-alice-2"), "alice", 2, 512, 1500000, 42)) + cache.AddTx(createTxWithParams([]byte("tx-alice-4"), "alice", 3, 256, 1500000, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-1"), "bob", 1, 512, 1500000, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 2, 513, 1500000, 42)) require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) require.Equal(t, []string{"tx-bob-1"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) - cache.AddTx(createTxWithParams([]byte("tx-alice-3"), "alice", 3, 256, 42, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 3, 512, 42, 42)) + cache.AddTx(createTxWithParams([]byte("tx-alice-3"), "alice", 3, 256, 1500000, 42)) + cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 3, 512, 1500000, 42)) require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) @@ -406,7 +408,7 @@ func Test_Keys(t *testing.T) { } func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() config := ConfigSourceMe{ Name: "untitled", NumChunks: 16, @@ -627,7 +629,7 @@ func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *t } func newUnconstrainedCacheToTest() *TxCache { - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(ConfigSourceMe{ Name: "test", NumChunks: 16, @@ -642,7 +644,7 @@ func newUnconstrainedCacheToTest() *TxCache { } func newCacheToTest(numBytesPerSenderThreshold uint32, countPerSenderThreshold uint32) *TxCache { - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(ConfigSourceMe{ Name: "test", NumChunks: 16, From 
85046825f0ef07ab1344663ec115fb61e96c7251 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 3 Jul 2024 22:55:16 +0300 Subject: [PATCH 005/124] Integrate new score params in txListForSender. --- txcache/txListBySenderMap.go | 5 +- txcache/txListBySenderMap_test.go | 5 +- txcache/txListForSender.go | 68 +++++++++---- txcache/txListForSender_test.go | 162 ++++++++++++++---------------- 4 files changed, 133 insertions(+), 107 deletions(-) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index beb9f2fd..98d3375b 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -16,7 +16,6 @@ type txListBySenderMap struct { counter atomic.Counter scoreComputer scoreComputer txGasHandler TxGasHandler - txFeeHelper feeHelper mutex sync.Mutex } @@ -26,7 +25,6 @@ func newTxListBySenderMap( senderConstraints senderConstraints, scoreComputer scoreComputer, txGasHandler TxGasHandler, - txFeeHelper feeHelper, ) *txListBySenderMap { backingMap := maps.NewBucketSortedMap(nChunksHint, numberOfScoreChunks) @@ -35,7 +33,6 @@ func newTxListBySenderMap( senderConstraints: senderConstraints, scoreComputer: scoreComputer, txGasHandler: txGasHandler, - txFeeHelper: txFeeHelper, } } @@ -43,7 +40,7 @@ func newTxListBySenderMap( func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) (bool, [][]byte) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) - return listForSender.AddTx(tx, txMap.txGasHandler, txMap.txFeeHelper) + return listForSender.AddTx(tx, txMap.txGasHandler) } // getOrAddListForSender gets or lazily creates a list (using double-checked locking pattern) diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index d3393225..9c7ac142 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -6,6 +6,7 @@ import ( "sync" "testing" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) @@ -176,9 +177,9 @@ func createTxListBySenderMap(numSenders int) *txListBySenderMap { } func newSendersMapToTest() *txListBySenderMap { - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() return newTxListBySenderMap(4, senderConstraints{ maxNumBytes: math.MaxUint32, maxNumTxs: math.MaxUint32, - }, &disabledScoreComputer{}, txGasHandler, txFeeHelper) + }, &disabledScoreComputer{}, txGasHandler) } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 99fcd819..d1353991 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -26,10 +26,13 @@ type txListForSender struct { scoreChunk *maps.MapChunk accountNonce atomic.Uint64 totalBytes atomic.Counter - totalGas atomic.Counter - totalFeeScore atomic.Counter numFailedSelections atomic.Counter - onScoreChange scoreChangeCallback + + avgPpuNumerator float64 + avgPpuDenominator uint64 + noncesTracker *noncesTracker + + onScoreChange scoreChangeCallback scoreChunkMutex sync.RWMutex mutex sync.RWMutex @@ -43,13 +46,14 @@ func newTxListForSender(sender string, constraints *senderConstraints, onScoreCh items: list.New(), sender: sender, constraints: constraints, + noncesTracker: newNoncesTracker(), onScoreChange: onScoreChange, } } // AddTx adds a transaction in sender's list // This is a "sorted" insert -func (listForSender *txListForSender) AddTx(tx *WrappedTransaction, gasHandler TxGasHandler, txFeeHelper feeHelper) (bool, [][]byte) { +func (listForSender *txListForSender) 
AddTx(tx *WrappedTransaction, gasHandler TxGasHandler) (bool, [][]byte) { // We don't allow concurrent interceptor goroutines to mutate a given sender's list listForSender.mutex.Lock() defer listForSender.mutex.Unlock() @@ -65,7 +69,7 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction, gasHandler T listForSender.items.InsertAfter(tx, insertionPlace) } - listForSender.onAddedTransaction(tx, gasHandler, txFeeHelper) + listForSender.onAddedTransaction(tx, gasHandler) evicted := listForSender.applySizeConstraints() listForSender.triggerScoreChange() return true, evicted @@ -101,10 +105,14 @@ func (listForSender *txListForSender) isCapacityExceeded() bool { return tooManyBytes || tooManyTxs } -func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction, gasHandler TxGasHandler, txFeeHelper feeHelper) { +func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction, gasHandler TxGasHandler) { + nonce := tx.Tx.GetNonce() + gasLimit := tx.Tx.GetGasLimit() + listForSender.totalBytes.Add(tx.Size) - listForSender.totalGas.Add(int64(estimateTxGas(tx))) - listForSender.totalFeeScore.Add(int64(estimateTxFeeScore(tx, gasHandler, txFeeHelper))) + listForSender.avgPpuNumerator += tx.computeFee(gasHandler) + listForSender.avgPpuDenominator += gasLimit + listForSender.noncesTracker.addNonce(nonce) } func (listForSender *txListForSender) triggerScoreChange() { @@ -114,11 +122,34 @@ func (listForSender *txListForSender) triggerScoreChange() { // This function should only be used in critical section (listForSender.mutex) func (listForSender *txListForSender) getScoreParams() senderScoreParams { - fee := listForSender.totalFeeScore.GetUint64() - gas := listForSender.totalGas.GetUint64() - count := listForSender.countTx() + numTxs := listForSender.countTx() + minTransactionNonce := uint64(0) + maxTransactionNonce := uint64(0) + + firstTx := listForSender.items.Front() + lastTx := listForSender.items.Back() + + if firstTx != nil { + minTransactionNonce = firstTx.Value.(*WrappedTransaction).Tx.GetNonce() + } - return senderScoreParams{count: count, feeScore: fee, gas: gas} + if lastTx != nil { + maxTransactionNonce = lastTx.Value.(*WrappedTransaction).Tx.GetNonce() + } + + hasSpotlessSequenceOfNonces := listForSender.noncesTracker.isSpotlessSequence(minTransactionNonce, numTxs) + + return senderScoreParams{ + avgPpuNumerator: listForSender.avgPpuNumerator, + avgPpuDenominator: listForSender.avgPpuDenominator, + numOfTransactions: numTxs, + hasSpotlessSequenceOfNonces: hasSpotlessSequenceOfNonces, + + accountNonce: listForSender.accountNonce.Get(), + accountNonceIsKnown: listForSender.accountNonceKnown.IsSet(), + minTransactionNonce: minTransactionNonce, + maxTransactionNonce: maxTransactionNonce, + } } // This function should only be used in critical section (listForSender.mutex) @@ -181,11 +212,14 @@ func (listForSender *txListForSender) RemoveTx(tx *WrappedTransaction) bool { } func (listForSender *txListForSender) onRemovedListElement(element *list.Element) { - value := element.Value.(*WrappedTransaction) - - listForSender.totalBytes.Subtract(value.Size) - listForSender.totalGas.Subtract(int64(estimateTxGas(value))) - listForSender.totalFeeScore.Subtract(int64(value.TxFeeScoreNormalized)) + tx := element.Value.(*WrappedTransaction) + nonce := tx.Tx.GetNonce() + gasLimit := tx.Tx.GetGasLimit() + + listForSender.totalBytes.Subtract(tx.Size) + listForSender.avgPpuNumerator -= tx.TxFee + listForSender.avgPpuDenominator -= gasLimit + 
listForSender.noncesTracker.removeNonce(nonce) } // This function should only be used in critical section (listForSender.mutex) diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 523d6bd5..3f5d77d7 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -11,40 +11,40 @@ import ( func TestListForSender_AddTx_Sorts(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("a"), ".", 1), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("c"), ".", 3), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("d"), ".", 4), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("b"), ".", 2), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("a"), ".", 1), txGasHandler) + list.AddTx(createTx([]byte("c"), ".", 3), txGasHandler) + list.AddTx(createTx([]byte("d"), ".", 4), txGasHandler) + list.AddTx(createTx([]byte("b"), ".", 2), txGasHandler) require.Equal(t, []string{"a", "b", "c", "d"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 42, 100), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 42, 99), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("d"), ".", 2, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101), txGasHandler, txFeeHelper) + list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 50000, 42), txGasHandler) + list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 50000, 100), txGasHandler) + list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 50000, 99), txGasHandler) + list.AddTx(createTxWithParams([]byte("d"), ".", 2, 128, 50000, 42), txGasHandler) + list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 50000, 101), txGasHandler) require.Equal(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 42, 100), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 42, 100), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("d"), ".", 3, 128, 42, 98), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 42, 101), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("f"), ".", 2, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("g"), ".", 3, 128, 42, 99), txGasHandler, txFeeHelper) + list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 50000, 42), txGasHandler) + list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 50000, 100), txGasHandler) + list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 50000, 100), txGasHandler) + list.AddTx(createTxWithParams([]byte("d"), ".", 3, 128, 50000, 98), txGasHandler) + 
list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 50000, 101), txGasHandler) + list.AddTx(createTxWithParams([]byte("f"), ".", 2, 128, 50000, 42), txGasHandler) + list.AddTx(createTxWithParams([]byte("g"), ".", 3, 128, 50000, 99), txGasHandler) // In case of same-nonce, same-price transactions, the newer one has priority require.Equal(t, []string{"a", "f", "e", "b", "c", "g", "d"}, list.getTxHashesAsStrings()) @@ -52,79 +52,79 @@ func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() - added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1), txGasHandler, txFeeHelper) + added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1), txGasHandler) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler, txFeeHelper) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3), txGasHandler, txFeeHelper) + added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3), txGasHandler) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler, txFeeHelper) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler) require.False(t, added) } func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing.T) { list := newListToTest(math.MaxUint32, 3) - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("tx1"), ".", 1), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("tx5"), ".", 5), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("tx4"), ".", 4), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("tx1"), ".", 1), txGasHandler) + list.AddTx(createTx([]byte("tx5"), ".", 5), txGasHandler) + list.AddTx(createTx([]byte("tx4"), ".", 4), txGasHandler) + list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx4"}, list.getTxHashesAsStrings()) - _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3), txGasHandler, txFeeHelper) + _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) // Gives priority to higher gas - though undesirably to some extent, "tx3" is evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx2++"), ".", 2, 128, 42, 42), txGasHandler, txFeeHelper) + _, evicted = list.AddTx(createTxWithParams([]byte("tx2++"), ".", 2, 128, 50000, 42), txGasHandler) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3"}, hashesAsStrings(evicted)) // Though Undesirably to some extent, "tx3++"" is added, then evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 128, 42, 42), txGasHandler, txFeeHelper) + _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 128, 50000, 42), txGasHandler) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3++"}, hashesAsStrings(evicted)) } func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { list := newListToTest(1024, math.MaxUint32) - 
txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTxWithParams([]byte("tx1"), ".", 1, 128, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("tx2"), ".", 2, 512, 42, 42), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("tx3"), ".", 3, 256, 42, 42), txGasHandler, txFeeHelper) - _, evicted := list.AddTx(createTxWithParams([]byte("tx5"), ".", 4, 256, 42, 42), txGasHandler, txFeeHelper) + list.AddTx(createTxWithParams([]byte("tx1"), ".", 1, 128, 50000, 42), txGasHandler) + list.AddTx(createTxWithParams([]byte("tx2"), ".", 2, 512, 1500000, 42), txGasHandler) + list.AddTx(createTxWithParams([]byte("tx3"), ".", 3, 256, 1500000, 42), txGasHandler) + _, evicted := list.AddTx(createTxWithParams([]byte("tx5"), ".", 4, 256, 1500000, 42), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx5"}, hashesAsStrings(evicted)) - _, evicted = list.AddTx(createTxWithParams([]byte("tx5--"), ".", 4, 128, 42, 42), txGasHandler, txFeeHelper) + _, evicted = list.AddTx(createTxWithParams([]byte("tx5--"), ".", 4, 128, 50000, 42), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx3", "tx5--"}, list.getTxHashesAsStrings()) require.Equal(t, []string{}, hashesAsStrings(evicted)) - _, evicted = list.AddTx(createTxWithParams([]byte("tx4"), ".", 4, 128, 42, 42), txGasHandler, txFeeHelper) + _, evicted = list.AddTx(createTxWithParams([]byte("tx4"), ".", 4, 128, 50000, 42), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx5--"}, hashesAsStrings(evicted)) // Gives priority to higher gas - though undesirably to some extent, "tx4" is evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 256, 42, 100), txGasHandler, txFeeHelper) + _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 256, 1500000, 100), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) } func TestListForSender_findTx(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() txA := createTx([]byte("A"), ".", 41) txANewer := createTx([]byte("ANewer"), ".", 41) txB := createTx([]byte("B"), ".", 42) txD := createTx([]byte("none"), ".", 43) - list.AddTx(txA, txGasHandler, txFeeHelper) - list.AddTx(txANewer, txGasHandler, txFeeHelper) - list.AddTx(txB, txGasHandler, txFeeHelper) + list.AddTx(txA, txGasHandler) + list.AddTx(txANewer, txGasHandler) + list.AddTx(txB, txGasHandler) elementWithA := list.findListElementWithTx(txA) elementWithANewer := list.findListElementWithTx(txANewer) @@ -143,8 +143,8 @@ func TestListForSender_findTx(t *testing.T) { func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() - list.AddTx(createTx([]byte("A"), ".", 42), txGasHandler, txFeeHelper) + txGasHandler := txcachemocks.NewTxGasHandlerMock() + list.AddTx(createTx([]byte("A"), ".", 42), txGasHandler) // Find one with a lower nonce, not added to cache noElement := list.findListElementWithTx(createTx(nil, ".", 41)) @@ -154,9 +154,9 @@ func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { func TestListForSender_RemoveTransaction(t *testing.T) { list := 
newUnconstrainedListToTest() tx := createTx([]byte("a"), ".", 1) - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(tx, txGasHandler, txFeeHelper) + list.AddTx(tx, txGasHandler) require.Equal(t, 1, list.items.Len()) list.RemoveTx(tx) @@ -173,10 +173,10 @@ func TestListForSender_RemoveTransaction_NoPanicWhenTxMissing(t *testing.T) { func TestListForSender_SelectBatchTo(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) } destination := make([]*WrappedTransaction, 1000) @@ -203,13 +203,13 @@ func TestListForSender_SelectBatchTo(t *testing.T) { func TestListForSender_SelectBatchToWithLimitedGasBandwidth(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() for index := 0; index < 40; index++ { wtx := createTx([]byte{byte(index)}, ".", uint64(index)) tx, _ := wtx.Tx.(*transaction.Transaction) tx.GasLimit = 1000000 - list.AddTx(wtx, txGasHandler, txFeeHelper) + list.AddTx(wtx, txGasHandler) } destination := make([]*WrappedTransaction, 1000) @@ -237,10 +237,10 @@ func TestListForSender_SelectBatchToWithLimitedGasBandwidth(t *testing.T) { func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) } // When empty destination @@ -256,11 +256,11 @@ func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() list.notifyAccountNonce(1) for index := 10; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) } destination := make([]*WrappedTransaction, 1000) @@ -287,11 +287,11 @@ func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() list.notifyAccountNonce(1) for index := 2; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) } destination := make([]*WrappedTransaction, 1000) @@ -310,7 +310,7 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) require.False(t, list.sweepable.IsSet()) // Now resolve the gap - list.AddTx(createTx([]byte("resolving-tx"), ".", 1), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("resolving-tx"), ".", 1), txGasHandler) // Selection will be successful journal = list.selectBatchTo(true, destination, math.MaxInt32, 
math.MaxUint64) require.Equal(t, 19, journal.copied) @@ -320,11 +320,11 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() list.notifyAccountNonce(1) for index := 2; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) } destination := make([]*WrappedTransaction, 1000) @@ -365,34 +365,34 @@ func TestListForSender_NotifyAccountNonce(t *testing.T) { func TestListForSender_hasInitialGap(t *testing.T) { list := newUnconstrainedListToTest() list.notifyAccountNonce(42) - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() // No transaction, no gap require.False(t, list.hasInitialGap()) // One gap - list.AddTx(createTx([]byte("tx-43"), ".", 43), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("tx-43"), ".", 43), txGasHandler) require.True(t, list.hasInitialGap()) // Resolve gap - list.AddTx(createTx([]byte("tx-42"), ".", 42), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("tx-42"), ".", 42), txGasHandler) require.False(t, list.hasInitialGap()) } func TestListForSender_getTxHashes(t *testing.T) { list := newUnconstrainedListToTest() require.Len(t, list.getTxHashes(), 0) - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("A"), ".", 1), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("A"), ".", 1), txGasHandler) require.Len(t, list.getTxHashes(), 1) - list.AddTx(createTx([]byte("B"), ".", 2), txGasHandler, txFeeHelper) - list.AddTx(createTx([]byte("C"), ".", 3), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("B"), ".", 2), txGasHandler) + list.AddTx(createTx([]byte("C"), ".", 3), txGasHandler) require.Len(t, list.getTxHashes(), 3) } func TestListForSender_DetectRaceConditions(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler, txFeeHelper := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() go func() { // These are called concurrently with addition: during eviction, during removal etc. 
@@ -401,29 +401,23 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { }() go func() { - list.AddTx(createTx([]byte("test"), ".", 42), txGasHandler, txFeeHelper) + list.AddTx(createTx([]byte("test"), ".", 42), txGasHandler) }() } -func dummyParamsWithGasPriceAndGasLimit(minGasPrice uint64, minGasLimit uint64) (TxGasHandler, feeHelper) { - minPrice := minGasPrice - divisor := uint64(100) - minPriceProcessing := minGasPrice / divisor - txFeeHelper := newFeeComputationHelper(minPrice, minGasLimit, minPriceProcessing) - txGasHandler := &txcachemocks.TxGasHandlerMock{ - MinimumGasMove: minGasLimit, - MinimumGasPrice: minPrice, - GasProcessingDivisor: divisor, - } - return txGasHandler, txFeeHelper +func dummyParamsWithGasPriceAndGasLimit(minGasPrice uint64, minGasLimit uint64) TxGasHandler { + txGasHandler := txcachemocks.NewTxGasHandlerMock() + txGasHandler.SetMinGasLimit(minGasLimit) + txGasHandler.SetMinGasPrice(minGasPrice) + return txGasHandler } -func dummyParamsWithGasPrice(minGasPrice uint64) (TxGasHandler, feeHelper) { +func dummyParamsWithGasPrice(minGasPrice uint64) TxGasHandler { return dummyParamsWithGasPriceAndGasLimit(minGasPrice, 50000) } -func dummyParams() (TxGasHandler, feeHelper) { - minPrice := uint64(1000000000) +func dummyParams() TxGasHandler { + minPrice := uint64(oneBillion) minGasLimit := uint64(50000) return dummyParamsWithGasPriceAndGasLimit(minPrice, minGasLimit) } From 3d0408274bb3fccc590ffbc161551b6636d852dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 3 Jul 2024 22:56:32 +0300 Subject: [PATCH 006/124] Implement noncesTracker. Able to tell if a nonces sequence is "spotless" (no gaps, no duplicates). Sketch some tests. --- txcache/noncesTracker.go | 98 +++++++++++++++++++++++++++++++++++ txcache/noncesTracker_test.go | 63 ++++++++++++++++++++++ 2 files changed, 161 insertions(+) create mode 100644 txcache/noncesTracker.go create mode 100644 txcache/noncesTracker_test.go diff --git a/txcache/noncesTracker.go b/txcache/noncesTracker.go new file mode 100644 index 00000000..5c984bed --- /dev/null +++ b/txcache/noncesTracker.go @@ -0,0 +1,98 @@ +package txcache + +import ( + "math" +) + +var six = uint64(6) +var nonceModulus = uint64(math.MaxUint32) + +// noncesTracker is a helper struct to track nonces for a sender, +// so we can check if the sequence of nonces "is spotless" (has no gaps and no duplicates). +// +// Notes: +// +// (a) math.MaxUint32 * math.MaxUint32 < math.MaxUint64. +// (b) however, math.MaxUint32 * (2 * math.MaxUint32 + 1) > math.MaxUint64 +// (c) we use modular arithmetic, with modulus = nonceModulus (see above). +// (d) memory footprint: 4 * 8 bytes = 32 bytes. 
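The check above relies on the closed forms for the sum and the sum of squares of a consecutive range of integers, compared against the running sums kept by the tracker. A minimal sketch of the idea follows (not part of the patch; it assumes firstNonce > 0 and small values, so the modular arithmetic can be ignored):

package main

import "fmt"

// isSpotless checks a multiset of nonces against the consecutive range
// [firstNonce, firstNonce+count) by comparing its sum and its sum of squares
// with the closed-form values for that range, the same identities used by noncesTracker.
func isSpotless(nonces []uint64, firstNonce uint64, count uint64) bool {
	var sum, sumOfSquares uint64
	for _, nonce := range nonces {
		sum += nonce
		sumOfSquares += nonce * nonce
	}

	lastNonce := firstNonce + count - 1
	nonceBeforeFirst := firstNonce - 1

	expectedSum := (firstNonce + lastNonce) * count / 2
	expectedSumOfSquaresTimesSix := lastNonce*(lastNonce+1)*(2*lastNonce+1) -
		nonceBeforeFirst*(nonceBeforeFirst+1)*(2*nonceBeforeFirst+1)

	return sum == expectedSum && sumOfSquares*6 == expectedSumOfSquaresTimesSix
}

func main() {
	fmt.Println(isSpotless([]uint64{41, 42, 43}, 41, 3))     // true
	fmt.Println(isSpotless([]uint64{41, 43, 44}, 41, 3))     // false: gap at nonce 42
	fmt.Println(isSpotless([]uint64{41, 41, 44, 44}, 41, 4)) // false: same sum as 41..44, but the squares differ
}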
+type noncesTracker struct {
+	sumOfAddedNonces            uint64
+	sumOfRemovedNonces          uint64
+	sumOfSquaresOfAddedNonces   uint64
+	sumOfSquaresOfRemovedNonces uint64
+}
+
+func newNoncesTracker() *noncesTracker {
+	return &noncesTracker{}
+}
+
+func (tracker *noncesTracker) addNonce(nonce uint64) {
+	nonce = tracker.mod(nonce)
+	nonceSquared := tracker.mod(nonce * nonce)
+
+	tracker.sumOfAddedNonces = tracker.mod(tracker.sumOfAddedNonces + nonce)
+	tracker.sumOfSquaresOfAddedNonces = tracker.mod(tracker.sumOfSquaresOfAddedNonces + nonceSquared)
+}
+
+func (tracker *noncesTracker) removeNonce(nonce uint64) {
+	nonce = tracker.mod(nonce)
+	nonceSquared := tracker.mod(nonce * nonce)
+
+	tracker.sumOfRemovedNonces = tracker.mod(tracker.sumOfRemovedNonces + nonce)
+	tracker.sumOfSquaresOfRemovedNonces = tracker.mod(tracker.sumOfSquaresOfRemovedNonces + nonceSquared)
+}
+
+func (tracker *noncesTracker) computeExpectedSumOfNonces(firstNonce uint64, count uint64) uint64 {
+	firstNonce = tracker.mod(firstNonce)
+	lastNonce := firstNonce + count - 1
+	result := (firstNonce + lastNonce) * count / 2
+	return tracker.mod(result)
+}
+
+// Computes 6 * (sum of squares of nonces in [firstNonce, lastNonce]), that is: lastNonce * (lastNonce + 1) * (2 * lastNonce + 1) - nonceBeforeFirst * (nonceBeforeFirst + 1) * (2 * nonceBeforeFirst + 1), where nonceBeforeFirst = firstNonce - 1 (all modulo nonceModulus).
+func (tracker *noncesTracker) computeExpectedSumOfSquaresOfNoncesTimesSix(firstNonce uint64, count uint64) uint64 {
+	firstNonce = tracker.mod(firstNonce)
+	lastNonce := firstNonce + count - 1
+	nonceBeforeFirst := firstNonce - 1
+
+	firstTerm := lastNonce
+	firstTerm = tracker.mod(firstTerm * (lastNonce + 1))
+	// See note (b) above.
+	firstTerm = tracker.mod(firstTerm * tracker.mod(2*lastNonce+1))
+
+	secondTerm := nonceBeforeFirst
+	secondTerm = tracker.mod(secondTerm * (nonceBeforeFirst + 1))
+	// See note (b) above.
+ secondTerm = tracker.mod(secondTerm * tracker.mod(2*nonceBeforeFirst+1)) + + result := tracker.modStrict(int64(firstTerm) - int64(secondTerm)) + return uint64(result) +} + +func (tracker *noncesTracker) mod(value uint64) uint64 { + return value % nonceModulus +} + +// See: +// - https://stackoverflow.com/questions/43018206/modulo-of-negative-integers-in-go +func (tracker *noncesTracker) modStrict(value int64) uint64 { + return uint64((value%int64(nonceModulus) + int64(nonceModulus)) % int64(nonceModulus)) +} + +func (tracker *noncesTracker) isSpotlessSequence(firstNonce uint64, count uint64) bool { + sumOfNonces := tracker.modStrict(int64(tracker.sumOfAddedNonces) - int64(tracker.sumOfRemovedNonces)) + expectedSumOfNonces := tracker.computeExpectedSumOfNonces(firstNonce, count) + if sumOfNonces != expectedSumOfNonces { + return false + } + + sumOfSquaresOfNonces := tracker.modStrict(int64(tracker.sumOfSquaresOfAddedNonces) - int64(tracker.sumOfSquaresOfRemovedNonces)) + sumOfSquaresOfNoncesTimesSix := tracker.mod(sumOfSquaresOfNonces * six) + expectedSumOfSquaresOfNoncesTimesSix := tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(firstNonce, count) + if sumOfSquaresOfNoncesTimesSix != expectedSumOfSquaresOfNoncesTimesSix { + return false + } + + return true +} diff --git a/txcache/noncesTracker_test.go b/txcache/noncesTracker_test.go new file mode 100644 index 00000000..4c4bb5da --- /dev/null +++ b/txcache/noncesTracker_test.go @@ -0,0 +1,63 @@ +package txcache + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNoncesTracker_computeExpectedSumOfNonces(t *testing.T) { + tracker := newNoncesTracker() + + sum := tracker.computeExpectedSumOfNonces(0, 0) + require.Equal(t, uint64(0), sum) + + sum = tracker.computeExpectedSumOfNonces(0, 1) + require.Equal(t, uint64(0), sum) + + sum = tracker.computeExpectedSumOfNonces(0, 4) + require.Equal(t, uint64(6), sum) + + sum = tracker.computeExpectedSumOfNonces(1, 4) + require.Equal(t, uint64(10), sum) + + // https://www.wolframalpha.com/input?i=sum+of+consecutive+integers+between+100000+and+100041 + sum = tracker.computeExpectedSumOfNonces(100000, 42) + require.Equal(t, uint64(4200861), sum) + + // https://www.wolframalpha.com/input?i=sum+of+consecutive+integers+between+1000000000000+and+1000000065534 + sum = tracker.computeExpectedSumOfNonces(oneTrillion, 65535) + require.Equal(t, uint64(65535002147385345)%nonceModulus, sum) +} + +func TestNoncesTracker_computeExpectedSumOfSquaresOfNonces(t *testing.T) { + tracker := newNoncesTracker() + + sum := tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(0, 0) + require.Equal(t, uint64(0), sum) + + sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(0, 1) + require.Equal(t, uint64(0), sum) + + sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(0, 4) + require.Equal(t, uint64(14)*six, sum) + + sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(1, 4) + require.Equal(t, uint64(30)*six, sum) + + // https://www.wolframalpha.com/input?i=sum+of+consecutive+squares+between+100000+and+100041 + sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(100000, 42) + require.Equal(t, (uint64(420172223821)*six)%nonceModulus, sum) + + // Python: (sum([i * i for i in range(1000000000, 1000065535)]) * 6) % 4294967295 = 92732025 + sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(oneBillion, 65535) + require.Equal(t, uint64(92732025), sum) + + // Python: (sum([i * i for i in range(1000000000000, 1000000000042)]) * 6) % 4294967295 = 307941426 + sum = 
tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(oneTrillion, 42) + require.Equal(t, uint64(307941426), sum) + + // Python: (sum([i * i for i in range(1000000000000, 1000000065535)]) * 6) % 4294967295 = 445375860 + sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(oneTrillion, 65535) + require.Equal(t, uint64(445375860), sum) +} From ad6bc759d0bae14811749ea4474be21875d18e9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 4 Jul 2024 15:59:54 +0300 Subject: [PATCH 007/124] Adjust tests. --- testscommon/txcachemocks/txGasHandlerMock.go | 10 +++-- txcache/eviction_test.go | 41 ++++++++++---------- txcache/txListForSender_test.go | 17 -------- 3 files changed, 27 insertions(+), 41 deletions(-) diff --git a/testscommon/txcachemocks/txGasHandlerMock.go b/testscommon/txcachemocks/txGasHandlerMock.go index 3ac2ae64..e8c84ea7 100644 --- a/testscommon/txcachemocks/txGasHandlerMock.go +++ b/testscommon/txcachemocks/txGasHandlerMock.go @@ -35,14 +35,16 @@ func NewTxGasHandlerMock() *TxGasHandlerMock { } } -// SetMinGasLimit - -func (ghm *TxGasHandlerMock) SetMinGasLimit(minGasLimit uint64) { +// WithMinGasLimit - +func (ghm *TxGasHandlerMock) WithMinGasLimit(minGasLimit uint64) *TxGasHandlerMock { ghm.minGasLimit = minGasLimit + return ghm } -// SetMinGasPrice - -func (ghm *TxGasHandlerMock) SetMinGasPrice(minGasPrice uint64) { +// WithMinGasPrice - +func (ghm *TxGasHandlerMock) WithMinGasPrice(minGasPrice uint64) *TxGasHandlerMock { ghm.minGasPrice = minGasPrice + return ghm } // MinGasPrice - diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index f5555ec0..ffc25b1d 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -5,6 +5,7 @@ import ( "sync" "testing" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) @@ -19,7 +20,7 @@ func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, } - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) @@ -45,7 +46,7 @@ func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { } func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { - numBytesPerTx := uint32(1000) + numBytesPerTx := uint32(200) config := ConfigSourceMe{ Name: "untitled", @@ -56,7 +57,7 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, NumSendersToPreemptivelyEvict: 20, } - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) @@ -65,7 +66,7 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { // 200 senders, each with 1 transaction for index := 0; index < 200; index++ { sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTxWithParams([]byte{byte(index)}, sender, uint64(1), uint64(numBytesPerTx), 10000, 100*oneBillion)) + cache.AddTx(createTxWithParams([]byte{byte(index)}, sender, uint64(1), uint64(numBytesPerTx), 250000, 100*oneBillion)) } require.Equal(t, int64(200), cache.txListBySender.counter.Get()) @@ -91,14 +92,14 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { CountPerSenderThreshold: math.MaxUint32, NumSendersToPreemptivelyEvict: 2, } - txGasHandler, _ := dummyParamsWithGasPrice(100 * oneBillion) + txGasHandler := 
txcachemocks.NewTxGasHandlerMock().WithMinGasPrice(100 * oneBillion) cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 1000, 100000, 100*oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 1000, 100000, 100*oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 1000, 100000, 700*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 200, 1000000, 1*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 200, 1000000, 1*oneBillion)) + cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 200, 1000000, 3*oneBillion)) cache.doEviction() require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) @@ -123,7 +124,7 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { NumSendersToPreemptivelyEvict: 2, } - txGasHandler, _ := dummyParamsWithGasPrice(oneBillion) + txGasHandler := txcachemocks.NewTxGasHandlerMock().WithMinGasPrice(oneBillion) cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) require.NotNil(t, cache) @@ -146,16 +147,16 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { scoreChris := cache.getScoreOfSender("chris") scoreRichard := cache.getScoreOfSender("richard") - require.Equal(t, uint32(23), scoreAlice) - require.Equal(t, uint32(23), scoreBob) - require.Equal(t, uint32(7), scoreDave) + require.Equal(t, uint32(100), scoreAlice) + require.Equal(t, uint32(100), scoreBob) + require.Equal(t, uint32(100), scoreDave) require.Equal(t, uint32(100), scoreCarol) require.Equal(t, uint32(100), scoreEve) - require.Equal(t, uint32(33), scoreChris) - require.Equal(t, uint32(54), scoreRichard) + require.Equal(t, uint32(100), scoreChris) + require.Equal(t, uint32(100), scoreRichard) cache.doEviction() - require.Equal(t, uint32(4), cache.evictionJournal.passOneNumTxs) + require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) @@ -163,7 +164,7 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { _, ok := cache.GetByTxHash([]byte("hash-carol")) require.True(t, ok) require.Equal(t, uint64(5), cache.CountSenders()) - require.Equal(t, uint64(5), cache.CountTx()) + require.Equal(t, uint64(7), cache.CountTx()) } func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { @@ -176,7 +177,7 @@ func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { CountPerSenderThreshold: math.MaxUint32, } - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) require.NotNil(t, cache) @@ -199,7 +200,7 @@ func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) CountPerSenderThreshold: math.MaxUint32, } - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) require.NotNil(t, cache) @@ -224,7 +225,7 @@ func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { CountPerSenderThreshold: math.MaxUint32, } - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) require.NotNil(t, 
cache) @@ -258,7 +259,7 @@ func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { CountPerSenderThreshold: math.MaxUint32, } - txGasHandler, _ := dummyParams() + txGasHandler := txcachemocks.NewTxGasHandlerMock() numSenders := 25000 numTxsPerSender := 10 diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 3f5d77d7..862a44ca 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -405,23 +405,6 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { }() } -func dummyParamsWithGasPriceAndGasLimit(minGasPrice uint64, minGasLimit uint64) TxGasHandler { - txGasHandler := txcachemocks.NewTxGasHandlerMock() - txGasHandler.SetMinGasLimit(minGasLimit) - txGasHandler.SetMinGasPrice(minGasPrice) - return txGasHandler -} - -func dummyParamsWithGasPrice(minGasPrice uint64) TxGasHandler { - return dummyParamsWithGasPriceAndGasLimit(minGasPrice, 50000) -} - -func dummyParams() TxGasHandler { - minPrice := uint64(oneBillion) - minGasLimit := uint64(50000) - return dummyParamsWithGasPriceAndGasLimit(minPrice, minGasLimit) -} - func newUnconstrainedListToTest() *txListForSender { return newTxListForSender(".", &senderConstraints{ maxNumBytes: math.MaxUint32, From d3f9e63bd623eaaa36e0d200dae8fa388f0c3d86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 4 Jul 2024 16:00:35 +0300 Subject: [PATCH 008/124] Adjust log levels. --- txcache/monitoring.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 17fd0368..c8c65bab 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -12,7 +12,7 @@ import ( var log = logger.GetOrCreate("txcache") func (cache *TxCache) monitorEvictionWrtSenderLimit(sender []byte, evicted [][]byte) { - log.Trace("TxCache.monitorEvictionWrtSenderLimit()", "name", cache.name, "sender", sender, "num", len(evicted)) + log.Debug("TxCache.monitorEvictionWrtSenderLimit()", "name", cache.name, "sender", sender, "num", len(evicted)) for i := 0; i < core.MinInt(len(evicted), numEvictedTxsToDisplay); i++ { log.Trace("TxCache.monitorEvictionWrtSenderLimit()", "name", cache.name, "sender", sender, "tx", evicted[i]) From 3dd13fe63068b4caac03e8fb39d0eb38630d492a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 4 Jul 2024 20:04:09 +0300 Subject: [PATCH 009/124] Adjust some tests. 
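Tests now build transactions through small fluent helpers on WrappedTransaction (withSize,
withDataLength, withGasPrice, withGasLimit) instead of the multi-parameter constructors. A rough
usage sketch; the helpers and createTx are the ones introduced below, while the concrete values
are arbitrary, for illustration only:

    tx := createTx([]byte("hash-alice-1"), "alice", 1).withSize(256).withGasLimit(500000).withGasPrice(2 * oneBillion)
    cache.AddTx(tx)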
--- txcache/eviction_test.go | 34 +++++++++++++++---------------- txcache/testutils_test.go | 42 +++++++++++++++++++++++++++------------ txcache/txCache_test.go | 16 +++++++-------- 3 files changed, 54 insertions(+), 38 deletions(-) diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index ffc25b1d..67ec578f 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { +func TestTxCache_EvictSendersInLoop_BecauseOfCount(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 16, @@ -29,7 +29,7 @@ func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { // 200 senders, each with 1 transaction for index := 0; index < 200; index++ { sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTx([]byte{byte(index)}, sender, uint64(1))) + cache.AddTx(createTx([]byte{byte(index)}, sender, 1)) } require.Equal(t, int64(200), cache.txListBySender.counter.Get()) @@ -45,7 +45,7 @@ func TestEviction_EvictSendersWhileTooManyTxs(t *testing.T) { require.Equal(t, int64(100), cache.txByHash.counter.Get()) } -func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { +func TestTxCache_EvictSendersInLoop_BecauseOfSize(t *testing.T) { numBytesPerTx := uint32(200) config := ConfigSourceMe{ @@ -66,7 +66,7 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { // 200 senders, each with 1 transaction for index := 0; index < 200; index++ { sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTxWithParams([]byte{byte(index)}, sender, uint64(1), uint64(numBytesPerTx), 250000, 100*oneBillion)) + cache.AddTx(createTx([]byte{byte(index)}, sender, 1).withSize(uint64(numBytesPerTx)).withGasLimit(250000)) } require.Equal(t, int64(200), cache.txListBySender.counter.Get()) @@ -82,7 +82,7 @@ func TestEviction_EvictSendersWhileTooManyBytes(t *testing.T) { require.Equal(t, int64(100), cache.txByHash.counter.Get()) } -func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { +func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 16, @@ -92,28 +92,28 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfCount(t *testing.T) { CountPerSenderThreshold: math.MaxUint32, NumSendersToPreemptivelyEvict: 2, } - txGasHandler := txcachemocks.NewTxGasHandlerMock().WithMinGasPrice(100 * oneBillion) + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 200, 1000000, 1*oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 200, 1000000, 1*oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 200, 1000000, 3*oneBillion)) + cache.AddTx(createTx([]byte("hash-alice"), "alice", 1).withGasPrice(1 * oneBillion)) + cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withGasPrice(1 * oneBillion)) + cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withGasPrice(3 * oneBillion)) cache.doEviction() require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) - // Alice and Bob evicted. Carol still there. + // Alice and Bob evicted. Carol still there (better score). 
_, ok := cache.GetByTxHash([]byte("hash-carol")) require.True(t, ok) require.Equal(t, uint64(1), cache.CountSenders()) require.Equal(t, uint64(1), cache.CountTx()) } -func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { +func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 16, @@ -124,7 +124,7 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { NumSendersToPreemptivelyEvict: 2, } - txGasHandler := txcachemocks.NewTxGasHandlerMock().WithMinGasPrice(oneBillion) + txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) require.NotNil(t, cache) @@ -167,7 +167,7 @@ func TestEviction_DoEvictionDoneInPassTwo_BecauseOfSize(t *testing.T) { require.Equal(t, uint64(7), cache.CountTx()) } -func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { +func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 1, @@ -190,7 +190,7 @@ func TestEviction_doEvictionDoesNothingWhenAlreadyInProgress(t *testing.T) { require.False(t, cache.evictionJournal.evictionPerformed) } -func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) { +func TestTxCache_EvictSendersInLoop_CodeCoverageForLoopBreak_WhenSmallBatch(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 1, @@ -215,7 +215,7 @@ func TestEviction_evictSendersInLoop_CoverLoopBreak_WhenSmallBatch(t *testing.T) require.Equal(t, uint32(1), nSenders) } -func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { +func TestTxCache_EvictSendersWhile_ShouldContinueBreak(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 1, @@ -247,7 +247,7 @@ func TestEviction_evictSendersWhile_ShouldContinueBreak(t *testing.T) { // This seems to be the most reasonable "bad-enough" (not worst) scenario to benchmark: // 25000 senders with 10 transactions each, with default "NumSendersToPreemptivelyEvict". // ~1 second on average laptop. 
-func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { +func TestTxCache_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 16, @@ -274,7 +274,7 @@ func Test_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { require.GreaterOrEqual(t, uint32(cache.CountTx()), config.CountThreshold-config.NumSendersToPreemptivelyEvict*uint32(numTxsPerSender)) } -func Test_EvictSendersAndTheirTxs_Concurrently(t *testing.T) { +func TestTxCache_EvictSendersAndTheirTxs_Concurrently(t *testing.T) { cache := newUnconstrainedCacheToTest() var wg sync.WaitGroup diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 5911f434..ece11d85 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -94,22 +94,11 @@ func addManyTransactionsWithUniformDistribution(cache *TxCache, nSenders int, nT } func createTx(hash []byte, sender string, nonce uint64) *WrappedTransaction { - tx := &transaction.Transaction{ - SndAddr: []byte(sender), - Nonce: nonce, - } - - return &WrappedTransaction{ - Tx: tx, - TxHash: hash, - Size: int64(estimatedSizeOfBoundedTxFields), - } -} -func createTxWithGasLimit(hash []byte, sender string, nonce uint64, gasLimit uint64) *WrappedTransaction { tx := &transaction.Transaction{ SndAddr: []byte(sender), Nonce: nonce, - GasLimit: gasLimit, + GasLimit: 50000, + GasPrice: oneBillion, } return &WrappedTransaction{ @@ -140,6 +129,33 @@ func createTxWithParams(hash []byte, sender string, nonce uint64, size uint64, g } } +func (wrappedTx *WrappedTransaction) withSize(size uint64) *WrappedTransaction { + dataLength := size - estimatedSizeOfBoundedTxFields + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Data = make([]byte, dataLength) + wrappedTx.Size = int64(size) + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withDataLength(dataLength int) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Data = make([]byte, dataLength) + wrappedTx.Size = int64(dataLength) + int64(estimatedSizeOfBoundedTxFields) + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withGasPrice(gasPrice uint64) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.GasPrice = gasPrice + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withGasLimit(gasLimit uint64) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.GasLimit = gasLimit + return wrappedTx +} + func createFakeSenderAddress(senderTag int) []byte { bytes := make([]byte, 32) binary.LittleEndian.PutUint64(bytes, uint64(senderTag)) diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 28fa131b..132714f2 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -318,14 +318,14 @@ func Test_SelectTransactions_Dummy(t *testing.T) { func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { cache := newUnconstrainedCacheToTest() - cache.AddTx(createTxWithGasLimit([]byte("hash-alice-4"), "alice", 4, 100000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-alice-3"), "alice", 3, 100000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-alice-2"), "alice", 2, 500000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-alice-1"), "alice", 1, 200000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-bob-7"), "bob", 7, 100000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-bob-6"), "bob", 6, 50000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-bob-5"), "bob", 5, 50000)) - cache.AddTx(createTxWithGasLimit([]byte("hash-carol-1"), 
"carol", 1, 50000)) + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasLimit(200000)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) sorted := cache.SelectTransactionsWithBandwidth(5, 2, 200000) numSelected := 1 + 1 + 3 // 1 alice, 1 carol, 3 bob From 587416712c61a459fe2e3e581630fe09b2c423c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 5 Jul 2024 13:05:05 +0300 Subject: [PATCH 010/124] Fix score, fix tests. --- txcache/eviction_test.go | 33 ++++++++++---------------- txcache/score.go | 4 +++- txcache/testutils_test.go | 22 +---------------- txcache/txCache_test.go | 18 +++++++------- txcache/txListForSender_test.go | 42 ++++++++++++++++----------------- 5 files changed, 46 insertions(+), 73 deletions(-) diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index 67ec578f..7b74a185 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -129,31 +129,20 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTxWithParams([]byte("hash-alice"), "alice", uint64(1), 128, 100000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-bob"), "bob", uint64(1), 128, 100000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-dave1"), "dave", uint64(3), 128, 40000000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-dave2"), "dave", uint64(1), 128, 50000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-dave3"), "dave", uint64(2), 128, 50000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-chris"), "chris", uint64(1), 128, 50000, oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-richard"), "richard", uint64(1), 128, 50000, uint64(1.2*oneBillion))) - cache.AddTx(createTxWithParams([]byte("hash-carol"), "carol", uint64(1), 128, 100000, 7*oneBillion)) - cache.AddTx(createTxWithParams([]byte("hash-eve"), "eve", uint64(1), 128, 50000, 4*oneBillion)) + cache.AddTx(createTx([]byte("hash-alice"), "alice", 1).withSize(256).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withSize(256).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withSize(256).withGasLimit(500000).withGasPrice(1.5 * oneBillion)) + cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withSize(256).withGasLimit(500000).withGasPrice(3 * oneBillion)) scoreAlice := cache.getScoreOfSender("alice") scoreBob := cache.getScoreOfSender("bob") - scoreDave := cache.getScoreOfSender("dave") scoreCarol := cache.getScoreOfSender("carol") scoreEve := cache.getScoreOfSender("eve") - scoreChris := cache.getScoreOfSender("chris") - scoreRichard := cache.getScoreOfSender("richard") - require.Equal(t, uint32(100), scoreAlice) - require.Equal(t, uint32(100), scoreBob) - require.Equal(t, uint32(100), scoreDave) - require.Equal(t, uint32(100), scoreCarol) - require.Equal(t, uint32(100), scoreEve) - require.Equal(t, uint32(100), scoreChris) - require.Equal(t, uint32(100), scoreRichard) + require.Equal(t, uint32(95), 
scoreAlice) + require.Equal(t, uint32(95), scoreBob) + require.Equal(t, uint32(97), scoreCarol) + require.Equal(t, uint32(98), scoreEve) cache.doEviction() require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) @@ -163,8 +152,10 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { // Alice and Bob evicted (lower score). Carol and Eve still there. _, ok := cache.GetByTxHash([]byte("hash-carol")) require.True(t, ok) - require.Equal(t, uint64(5), cache.CountSenders()) - require.Equal(t, uint64(7), cache.CountTx()) + _, ok = cache.GetByTxHash([]byte("hash-eve")) + require.True(t, ok) + require.Equal(t, uint64(2), cache.CountSenders()) + require.Equal(t, uint64(2), cache.CountTx()) } func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { diff --git a/txcache/score.go b/txcache/score.go index 0cec584d..f7c884ac 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -62,9 +62,11 @@ func (computer *defaultScoreComputer) computeRawScore(params senderScoreParams) // We use the worst possible price per unit for normalization. avgPpuNormalized := avgPpu / computer.worstPpu + avgPpuNormalizedLog := math.Log(avgPpuNormalized) // https://www.wolframalpha.com, with input "((1 / (1 + exp(-x)) - 1/2) * 2) * 100, where x is from 0 to 10" - avgPpuNormalizedSubunitary := (1/(1+math.Exp(-avgPpuNormalized)) - 0.5) * 2 + avgPpuNormalizedSubunitary := (1.0/(1+math.Exp(-avgPpuNormalizedLog)) - 0.5) * 2 score := avgPpuNormalizedSubunitary * float64(numberOfScoreChunks) + return score } diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index ece11d85..6aab9c7c 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -12,6 +12,7 @@ import ( const oneMilion = 1000000 const oneBillion = oneMilion * 1000 +const oneTrillion = oneBillion * 1000 const delta = 0.00000001 const estimatedSizeOfBoundedTxFields = uint64(128) @@ -108,27 +109,6 @@ func createTx(hash []byte, sender string, nonce uint64) *WrappedTransaction { } } -func createTxWithParams(hash []byte, sender string, nonce uint64, size uint64, gasLimit uint64, gasPrice uint64) *WrappedTransaction { - dataLength := int(size) - int(estimatedSizeOfBoundedTxFields) - if dataLength < 0 { - panic("createTxWithData(): invalid length for dummy tx") - } - - tx := &transaction.Transaction{ - SndAddr: []byte(sender), - Nonce: nonce, - Data: make([]byte, dataLength), - GasLimit: gasLimit, - GasPrice: gasPrice, - } - - return &WrappedTransaction{ - Tx: tx, - TxHash: hash, - Size: int64(size), - } -} - func (wrappedTx *WrappedTransaction) withSize(size uint64) *WrappedTransaction { dataLength := size - estimatedSizeOfBoundedTxFields tx := wrappedTx.Tx.(*transaction.Transaction) diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 132714f2..ffe6a372 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -140,18 +140,18 @@ func Test_AddTx_AppliesSizeConstraintsPerSenderForNumTransactions(t *testing.T) func Test_AddTx_AppliesSizeConstraintsPerSenderForNumBytes(t *testing.T) { cache := newCacheToTest(1024, math.MaxUint32) - cache.AddTx(createTxWithParams([]byte("tx-alice-1"), "alice", 1, 128, 50000, 42)) - cache.AddTx(createTxWithParams([]byte("tx-alice-2"), "alice", 2, 512, 1500000, 42)) - cache.AddTx(createTxWithParams([]byte("tx-alice-4"), "alice", 3, 256, 1500000, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-1"), "bob", 1, 512, 1500000, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 2, 513, 1500000, 42)) + cache.AddTx(createTx([]byte("tx-alice-1"), 
"alice", 1).withSize(128).withGasLimit(50000)) + cache.AddTx(createTx([]byte("tx-alice-2"), "alice", 2).withSize(512).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-alice-4"), "alice", 3).withSize(256).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-1"), "bob", 1).withSize(512).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 2).withSize(513).withGasLimit(1500000)) require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) require.Equal(t, []string{"tx-bob-1"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) - cache.AddTx(createTxWithParams([]byte("tx-alice-3"), "alice", 3, 256, 1500000, 42)) - cache.AddTx(createTxWithParams([]byte("tx-bob-2"), "bob", 3, 512, 1500000, 42)) + cache.AddTx(createTx([]byte("tx-alice-3"), "alice", 3).withSize(256).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 3).withSize(512).withGasLimit(1500000)) require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) require.True(t, cache.areInternalMapsConsistent()) @@ -481,8 +481,8 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { cache := newUnconstrainedCacheToTest() // Alice will quickly move between two score buckets (chunks) - cheapTransaction := createTxWithParams([]byte("alice-x-o"), "alice", 0, 128, 50000, 100*oneBillion) - expensiveTransaction := createTxWithParams([]byte("alice-x-1"), "alice", 1, 128, 50000, 300*oneBillion) + cheapTransaction := createTx([]byte("alice-x-o"), "alice", 0).withDataLength(1).withGasLimit(300000000).withGasPrice(oneBillion) + expensiveTransaction := createTx([]byte("alice-x-1"), "alice", 1).withDataLength(42).withGasLimit(50000000).withGasPrice(10 * oneBillion) cache.AddTx(cheapTransaction) cache.AddTx(expensiveTransaction) diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 862a44ca..3a211cd8 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -25,11 +25,11 @@ func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { list := newUnconstrainedListToTest() txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 50000, 42), txGasHandler) - list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 50000, 100), txGasHandler) - list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 50000, 99), txGasHandler) - list.AddTx(createTxWithParams([]byte("d"), ".", 2, 128, 50000, 42), txGasHandler) - list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 50000, 101), txGasHandler) + list.AddTx(createTx([]byte("a"), ".", 1), txGasHandler) + list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(1.2*oneBillion), txGasHandler) + list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(1.1*oneBillion), txGasHandler) + list.AddTx(createTx([]byte("d"), ".", 2), txGasHandler) + list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(1.3*oneBillion), txGasHandler) require.Equal(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) } @@ -38,13 +38,13 @@ func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) list := newUnconstrainedListToTest() txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 128, 50000, 42), txGasHandler) - list.AddTx(createTxWithParams([]byte("b"), ".", 3, 128, 50000, 100), 
txGasHandler) - list.AddTx(createTxWithParams([]byte("c"), ".", 3, 128, 50000, 100), txGasHandler) - list.AddTx(createTxWithParams([]byte("d"), ".", 3, 128, 50000, 98), txGasHandler) - list.AddTx(createTxWithParams([]byte("e"), ".", 3, 128, 50000, 101), txGasHandler) - list.AddTx(createTxWithParams([]byte("f"), ".", 2, 128, 50000, 42), txGasHandler) - list.AddTx(createTxWithParams([]byte("g"), ".", 3, 128, 50000, 99), txGasHandler) + list.AddTx(createTx([]byte("a"), ".", 1).withGasPrice(oneBillion), txGasHandler) + list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(3*oneBillion), txGasHandler) + list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(3*oneBillion), txGasHandler) + list.AddTx(createTx([]byte("d"), ".", 3).withGasPrice(2*oneBillion), txGasHandler) + list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(3.5*oneBillion), txGasHandler) + list.AddTx(createTx([]byte("f"), ".", 2).withGasPrice(oneBillion), txGasHandler) + list.AddTx(createTx([]byte("g"), ".", 3).withGasPrice(2.5*oneBillion), txGasHandler) // In case of same-nonce, same-price transactions, the newer one has priority require.Equal(t, []string{"a", "f", "e", "b", "c", "g", "d"}, list.getTxHashesAsStrings()) @@ -79,12 +79,12 @@ func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) // Gives priority to higher gas - though undesirably to some extent, "tx3" is evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx2++"), ".", 2, 128, 50000, 42), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx2++"), ".", 2).withGasPrice(1.5*oneBillion), txGasHandler) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3"}, hashesAsStrings(evicted)) // Though Undesirably to some extent, "tx3++"" is added, then evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 128, 50000, 42), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withGasPrice(1.5*oneBillion), txGasHandler) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3++"}, hashesAsStrings(evicted)) } @@ -93,23 +93,23 @@ func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { list := newListToTest(1024, math.MaxUint32) txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTxWithParams([]byte("tx1"), ".", 1, 128, 50000, 42), txGasHandler) - list.AddTx(createTxWithParams([]byte("tx2"), ".", 2, 512, 1500000, 42), txGasHandler) - list.AddTx(createTxWithParams([]byte("tx3"), ".", 3, 256, 1500000, 42), txGasHandler) - _, evicted := list.AddTx(createTxWithParams([]byte("tx5"), ".", 4, 256, 1500000, 42), txGasHandler) + list.AddTx(createTx([]byte("tx1"), ".", 1).withSize(128).withGasLimit(50000), txGasHandler) + list.AddTx(createTx([]byte("tx2"), ".", 2).withSize(512).withGasLimit(1500000), txGasHandler) + list.AddTx(createTx([]byte("tx3"), ".", 3).withSize(256).withGasLimit(1500000), txGasHandler) + _, evicted := list.AddTx(createTx([]byte("tx5"), ".", 4).withSize(256).withGasLimit(1500000), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx5"}, hashesAsStrings(evicted)) - _, evicted = list.AddTx(createTxWithParams([]byte("tx5--"), ".", 4, 128, 50000, 42), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx5--"), ".", 4).withSize(128).withGasLimit(50000), txGasHandler) require.Equal(t, []string{"tx1", "tx2", 
"tx3", "tx5--"}, list.getTxHashesAsStrings()) require.Equal(t, []string{}, hashesAsStrings(evicted)) - _, evicted = list.AddTx(createTxWithParams([]byte("tx4"), ".", 4, 128, 50000, 42), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx4"), ".", 4).withSize(128).withGasLimit(50000), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx5--"}, hashesAsStrings(evicted)) // Gives priority to higher gas - though undesirably to some extent, "tx4" is evicted - _, evicted = list.AddTx(createTxWithParams([]byte("tx3++"), ".", 3, 256, 1500000, 100), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withSize(256).withGasLimit(1500000).withGasPrice(1.5*oneBillion), txGasHandler) require.Equal(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) } From ccf6cf349b6b88cebf5b6e84efda283662e13157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 5 Jul 2024 13:08:21 +0300 Subject: [PATCH 011/124] Simplification of sender score params. --- txcache/score.go | 11 ++--------- txcache/txListForSender.go | 13 ------------- 2 files changed, 2 insertions(+), 22 deletions(-) diff --git a/txcache/score.go b/txcache/score.go index f7c884ac..7e6f2f65 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -9,15 +9,8 @@ import ( var _ scoreComputer = (*defaultScoreComputer)(nil) type senderScoreParams struct { - avgPpuNumerator float64 - avgPpuDenominator uint64 - - accountNonce uint64 - accountNonceIsKnown bool - maxTransactionNonce uint64 - minTransactionNonce uint64 - - numOfTransactions uint64 + avgPpuNumerator float64 + avgPpuDenominator uint64 hasSpotlessSequenceOfNonces bool } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index d1353991..323532ed 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -124,31 +124,18 @@ func (listForSender *txListForSender) triggerScoreChange() { func (listForSender *txListForSender) getScoreParams() senderScoreParams { numTxs := listForSender.countTx() minTransactionNonce := uint64(0) - maxTransactionNonce := uint64(0) - firstTx := listForSender.items.Front() - lastTx := listForSender.items.Back() if firstTx != nil { minTransactionNonce = firstTx.Value.(*WrappedTransaction).Tx.GetNonce() } - if lastTx != nil { - maxTransactionNonce = lastTx.Value.(*WrappedTransaction).Tx.GetNonce() - } - hasSpotlessSequenceOfNonces := listForSender.noncesTracker.isSpotlessSequence(minTransactionNonce, numTxs) return senderScoreParams{ avgPpuNumerator: listForSender.avgPpuNumerator, avgPpuDenominator: listForSender.avgPpuDenominator, - numOfTransactions: numTxs, hasSpotlessSequenceOfNonces: hasSpotlessSequenceOfNonces, - - accountNonce: listForSender.accountNonce.Get(), - accountNonceIsKnown: listForSender.accountNonceKnown.IsSet(), - minTransactionNonce: minTransactionNonce, - maxTransactionNonce: maxTransactionNonce, } } From 7095682d97492ad9574add467c9b6809f3e77dcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 5 Jul 2024 19:03:08 +0300 Subject: [PATCH 012/124] Fix score, add tests. 
--- txcache/constants.go | 4 ++ txcache/score.go | 31 +++++++++----- txcache/score_test.go | 81 ++++++++++++++++++++++++++++++------ txcache/txListBySenderMap.go | 2 - 4 files changed, 92 insertions(+), 26 deletions(-) diff --git a/txcache/constants.go b/txcache/constants.go index a76fb3d3..70b4c470 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -7,3 +7,7 @@ const senderGracePeriodLowerBound = 2 const senderGracePeriodUpperBound = 2 const numEvictedTxsToDisplay = 3 + +const excellentGasPriceFactor = 5 + +const numberOfScoreChunks = uint32(100) diff --git a/txcache/score.go b/txcache/score.go index 7e6f2f65..ebc614ed 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -15,34 +15,46 @@ type senderScoreParams struct { } type defaultScoreComputer struct { - worstPpu float64 + worstPpu float64 + scoreScalingFactor float64 } func newDefaultScoreComputer(txGasHandler TxGasHandler) *defaultScoreComputer { worstPpu := computeWorstPpu(txGasHandler) + excellentPpu := float64(txGasHandler.MinGasPrice()) * excellentGasPriceFactor + excellentPpuNormalized := excellentPpu / worstPpu + excellentPpuNormalizedLog := math.Log(excellentPpuNormalized) + scoreScalingFactor := float64(numberOfScoreChunks) / excellentPpuNormalizedLog return &defaultScoreComputer{ - worstPpu: worstPpu, + worstPpu: worstPpu, + scoreScalingFactor: scoreScalingFactor, } } func computeWorstPpu(txGasHandler TxGasHandler) float64 { - minGasPrice := txGasHandler.MinGasPrice() - maxGasLimitPerTx := txGasHandler.MaxGasLimitPerTx() + gasLimit := txGasHandler.MaxGasLimitPerTx() + gasPrice := txGasHandler.MinGasPrice() + worstPpuTx := &WrappedTransaction{ Tx: &transaction.Transaction{ - GasLimit: maxGasLimitPerTx, - GasPrice: minGasPrice, + GasLimit: gasLimit, + GasPrice: gasPrice, }, } - return worstPpuTx.computeFee(txGasHandler) / float64(maxGasLimitPerTx) + return worstPpuTx.computeFee(txGasHandler) / float64(gasLimit) } // computeScore computes the score of the sender, as an integer in [0, numberOfScoreChunks] func (computer *defaultScoreComputer) computeScore(scoreParams senderScoreParams) uint32 { rawScore := computer.computeRawScore(scoreParams) truncatedScore := uint32(rawScore) + + if truncatedScore > numberOfScoreChunks { + return numberOfScoreChunks + } + return truncatedScore } @@ -57,9 +69,6 @@ func (computer *defaultScoreComputer) computeRawScore(params senderScoreParams) avgPpuNormalized := avgPpu / computer.worstPpu avgPpuNormalizedLog := math.Log(avgPpuNormalized) - // https://www.wolframalpha.com, with input "((1 / (1 + exp(-x)) - 1/2) * 2) * 100, where x is from 0 to 10" - avgPpuNormalizedSubunitary := (1.0/(1+math.Exp(-avgPpuNormalizedLog)) - 0.5) * 2 - score := avgPpuNormalizedSubunitary * float64(numberOfScoreChunks) - + score := avgPpuNormalizedLog * computer.scoreScalingFactor return score } diff --git a/txcache/score_test.go b/txcache/score_test.go index 82e26242..93189d7e 100644 --- a/txcache/score_test.go +++ b/txcache/score_test.go @@ -3,6 +3,7 @@ package txcache import ( "testing" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) @@ -15,24 +16,78 @@ func TestNewDefaultScoreComputer(t *testing.T) { require.Equal(t, float64(10082500), computer.worstPpu) } - // 50k moveGas, 100Bil minPrice -> normalizedFee 8940 - score := computer.computeRawScore(senderScoreParams{count: 1, feeScore: 18000, gas: 100000}) - assert.InDelta(t, float64(16.8753739025), score, delta) +func 
TestDefaultScoreComputer_computeScore(t *testing.T) { + // Simple transfers: + require.Equal(t, 74, computeScoreOfTransaction(0, 50000, oneBillion)) + require.Equal(t, 80, computeScoreOfTransaction(0, 50000, 1.5*oneBillion)) + require.Equal(t, 85, computeScoreOfTransaction(0, 50000, 2*oneBillion)) + require.Equal(t, 100, computeScoreOfTransaction(0, 50000, 5*oneBillion)) + require.Equal(t, 100, computeScoreOfTransaction(0, 50000, 10*oneBillion)) + + // Simple transfers, with some data (same scores as above): + require.Equal(t, 74, computeScoreOfTransaction(100, 50000+1500*100, oneBillion)) + require.Equal(t, 80, computeScoreOfTransaction(100, 50000+1500*100, 1.5*oneBillion)) + require.Equal(t, 85, computeScoreOfTransaction(100, 50000+1500*100, 2*oneBillion)) + require.Equal(t, 100, computeScoreOfTransaction(100, 50000+1500*100, 5*oneBillion)) + require.Equal(t, 100, computeScoreOfTransaction(100, 50000+1500*100, 10*oneBillion)) + + // Smart contract calls: + require.Equal(t, 28, computeScoreOfTransaction(1, 1000000, oneBillion)) + require.Equal(t, 40, computeScoreOfTransaction(42, 1000000, oneBillion)) + // Even though the gas price is high, it does not compensate the network's contract execution subsidies (thus, score is not excellent). + require.Equal(t, 46, computeScoreOfTransaction(42, 1000000, 1.5*oneBillion)) + require.Equal(t, 51, computeScoreOfTransaction(42, 1000000, 2*oneBillion)) + require.Equal(t, 66, computeScoreOfTransaction(42, 1000000, 5*oneBillion)) + require.Equal(t, 77, computeScoreOfTransaction(42, 1000000, 10*oneBillion)) + require.Equal(t, 88, computeScoreOfTransaction(42, 1000000, 20*oneBillion)) + require.Equal(t, 94, computeScoreOfTransaction(42, 1000000, 30*oneBillion)) + require.Equal(t, 99, computeScoreOfTransaction(42, 1000000, 40*oneBillion)) + require.Equal(t, 100, computeScoreOfTransaction(42, 1000000, 50*oneBillion)) + + // Smart contract calls with extremely large gas limit: + require.Equal(t, 0, computeScoreOfTransaction(3, 150000000, oneBillion)) + require.Equal(t, 0, computeScoreOfTransaction(3, 300000000, oneBillion)) + require.Equal(t, 6, computeScoreOfTransaction(3, 150000000, 1.5*oneBillion)) + require.Equal(t, 11, computeScoreOfTransaction(3, 150000000, 2*oneBillion)) + require.Equal(t, 26, computeScoreOfTransaction(3, 150000000, 5*oneBillion)) + require.Equal(t, 37, computeScoreOfTransaction(3, 150000000, 10*oneBillion)) + require.Equal(t, 48, computeScoreOfTransaction(3, 150000000, 20*oneBillion)) + require.Equal(t, 55, computeScoreOfTransaction(3, 150000000, 30*oneBillion)) + // With a very high gas price, the transaction reaches the score of a simple transfer: + require.Equal(t, 74, computeScoreOfTransaction(3, 150000000, 100*oneBillion)) + + // Smart contract calls with max gas limit: + require.Equal(t, 0, computeScoreOfTransaction(3, 600000000, oneBillion)) + require.Equal(t, 37, computeScoreOfTransaction(3, 600000000, 10*oneBillion)) + require.Equal(t, 63, computeScoreOfTransaction(3, 600000000, 50*oneBillion)) + // With a very high gas price, the transaction reaches the score of a simple transfer: + require.Equal(t, 74, computeScoreOfTransaction(3, 600000000, 100*oneBillion)) + require.Equal(t, 85, computeScoreOfTransaction(3, 600000000, 200*oneBillion)) +} - score = computer.computeRawScore(senderScoreParams{count: 1, feeScore: 1500000, gas: 10000000}) - assert.InDelta(t, float64(9.3096887100), score, delta) +// Generally speaking, the score is computed for a sender, not for a single transaction. 
+// However, for the sake of testing, we consider a sender with a single transaction. +func computeScoreOfTransaction(dataLength int, gasLimit uint64, gasPrice uint64) int { + gasHandler := txcachemocks.NewTxGasHandlerMock() + computer := newDefaultScoreComputer(gasHandler) - score = computer.computeRawScore(senderScoreParams{count: 1, feeScore: 5000000, gas: 30000000}) - assert.InDelta(t, float64(12.7657690638), score, delta) + tx := &WrappedTransaction{ + Tx: &transaction.Transaction{ + Data: make([]byte, dataLength), + GasLimit: gasLimit, + GasPrice: gasPrice, + }, + } - score = computer.computeRawScore(senderScoreParams{count: 2, feeScore: 36000, gas: 200000}) - assert.InDelta(t, float64(11.0106052638), score, delta) + txFee := tx.computeFee(gasHandler) - score = computer.computeRawScore(senderScoreParams{count: 1000, feeScore: 18000000, gas: 100000000}) - assert.InDelta(t, float64(1.8520698299), score, delta) + scoreParams := senderScoreParams{ + avgPpuNumerator: txFee, + avgPpuDenominator: gasLimit, + hasSpotlessSequenceOfNonces: true, + } - score = computer.computeRawScore(senderScoreParams{count: 10000, feeScore: 180000000, gas: 1000000000}) - assert.InDelta(t, float64(1.4129614707), score, delta) + return int(computer.computeScore(scoreParams)) } func BenchmarkScoreComputer_computeRawScore(b *testing.B) { diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 98d3375b..e6e2d933 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -7,8 +7,6 @@ import ( "github.com/multiversx/mx-chain-storage-go/txcache/maps" ) -const numberOfScoreChunks = uint32(100) - // txListBySenderMap is a map-like structure for holding and accessing transactions by sender type txListBySenderMap struct { backingMap *maps.BucketSortedMap From 21e13548aeaa8bc7c6ab06a0a1673007567ea119 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 5 Jul 2024 19:15:56 +0300 Subject: [PATCH 013/124] Optimizations. 
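Scoring now relies on the identity log(avgPpu / worstPpu) = log(avgPpu) - log(worstPpu): the
constructor precomputes log(worstPpu) once (worstPpuLog), so the per-sender computation replaces a
division followed by a logarithm with a single logarithm and a subtraction.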
--- txcache/score.go | 12 ++++++++---- txcache/score_test.go | 11 ++++++++--- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/txcache/score.go b/txcache/score.go index ebc614ed..ae24fd66 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -15,19 +15,20 @@ type senderScoreParams struct { } type defaultScoreComputer struct { - worstPpu float64 + worstPpuLog float64 scoreScalingFactor float64 } func newDefaultScoreComputer(txGasHandler TxGasHandler) *defaultScoreComputer { worstPpu := computeWorstPpu(txGasHandler) + worstPpuLog := math.Log(worstPpu) excellentPpu := float64(txGasHandler.MinGasPrice()) * excellentGasPriceFactor excellentPpuNormalized := excellentPpu / worstPpu excellentPpuNormalizedLog := math.Log(excellentPpuNormalized) scoreScalingFactor := float64(numberOfScoreChunks) / excellentPpuNormalizedLog return &defaultScoreComputer{ - worstPpu: worstPpu, + worstPpuLog: worstPpuLog, scoreScalingFactor: scoreScalingFactor, } } @@ -58,6 +59,9 @@ func (computer *defaultScoreComputer) computeScore(scoreParams senderScoreParams return truncatedScore } +// computeRawScore computes the score of a sender, as follows: +// score = log(sender's average price per unit / worst price per unit) * scoreScalingFactor, +// where scoreScalingFactor = highest score / log(excellent price per unit / worst price per unit) func (computer *defaultScoreComputer) computeRawScore(params senderScoreParams) float64 { if !params.hasSpotlessSequenceOfNonces { return 0 @@ -66,8 +70,8 @@ func (computer *defaultScoreComputer) computeRawScore(params senderScoreParams) avgPpu := params.avgPpuNumerator / float64(params.avgPpuDenominator) // We use the worst possible price per unit for normalization. - avgPpuNormalized := avgPpu / computer.worstPpu - avgPpuNormalizedLog := math.Log(avgPpuNormalized) + // The expression below is same as log(avgPpu / worstPpu), but we precompute "worstPpuLog" in the constructor. 
+ avgPpuNormalizedLog := math.Log(avgPpu) - computer.worstPpuLog score := avgPpuNormalizedLog * computer.scoreScalingFactor return score diff --git a/txcache/score_test.go b/txcache/score_test.go index 93189d7e..589b22af 100644 --- a/txcache/score_test.go +++ b/txcache/score_test.go @@ -13,7 +13,12 @@ func TestNewDefaultScoreComputer(t *testing.T) { computer := newDefaultScoreComputer(gasHandler) require.NotNil(t, computer) - require.Equal(t, float64(10082500), computer.worstPpu) + require.Equal(t, float64(16.12631180572966), computer.worstPpuLog) +} + +func TestComputeWorstPpu(t *testing.T) { + gasHandler := txcachemocks.NewTxGasHandlerMock() + require.Equal(t, float64(10082500), computeWorstPpu(gasHandler)) } func TestDefaultScoreComputer_computeScore(t *testing.T) { @@ -21,14 +26,14 @@ func TestDefaultScoreComputer_computeScore(t *testing.T) { require.Equal(t, 74, computeScoreOfTransaction(0, 50000, oneBillion)) require.Equal(t, 80, computeScoreOfTransaction(0, 50000, 1.5*oneBillion)) require.Equal(t, 85, computeScoreOfTransaction(0, 50000, 2*oneBillion)) - require.Equal(t, 100, computeScoreOfTransaction(0, 50000, 5*oneBillion)) + require.Equal(t, 99, computeScoreOfTransaction(0, 50000, 5*oneBillion)) require.Equal(t, 100, computeScoreOfTransaction(0, 50000, 10*oneBillion)) // Simple transfers, with some data (same scores as above): require.Equal(t, 74, computeScoreOfTransaction(100, 50000+1500*100, oneBillion)) require.Equal(t, 80, computeScoreOfTransaction(100, 50000+1500*100, 1.5*oneBillion)) require.Equal(t, 85, computeScoreOfTransaction(100, 50000+1500*100, 2*oneBillion)) - require.Equal(t, 100, computeScoreOfTransaction(100, 50000+1500*100, 5*oneBillion)) + require.Equal(t, 99, computeScoreOfTransaction(100, 50000+1500*100, 5*oneBillion)) require.Equal(t, 100, computeScoreOfTransaction(100, 50000+1500*100, 10*oneBillion)) // Smart contract calls: From 5e2d116ceda4e8d4914e00671456bb8e5e877d1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 5 Jul 2024 19:19:53 +0300 Subject: [PATCH 014/124] Optimize "findInsertionPlace". --- txcache/txListForSender.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 323532ed..27c8b94d 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -149,12 +149,12 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran currentTxNonce := currentTx.Tx.GetNonce() currentTxGasPrice := currentTx.Tx.GetGasPrice() - if incomingTx.sameAs(currentTx) { - // The incoming transaction will be discarded - return nil, common.ErrItemAlreadyInCache - } - if currentTxNonce == incomingNonce { + if incomingTx.sameAs(currentTx) { + // The incoming transaction will be discarded + return nil, common.ErrItemAlreadyInCache + } + if currentTxGasPrice > incomingGasPrice { // The incoming transaction will be placed right after the existing one, which has same nonce but higher price. // If the nonces are the same, but the incoming gas price is higher or equal, the search loop continues. From c795b7a012b1f0347b22faeb36dab8e338344677 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 5 Jul 2024 19:20:02 +0300 Subject: [PATCH 015/124] Fix tests. 
--- txcache/eviction_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index 7b74a185..ce2df9bc 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -139,10 +139,10 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { scoreCarol := cache.getScoreOfSender("carol") scoreEve := cache.getScoreOfSender("eve") - require.Equal(t, uint32(95), scoreAlice) - require.Equal(t, uint32(95), scoreBob) - require.Equal(t, uint32(97), scoreCarol) - require.Equal(t, uint32(98), scoreEve) + require.Equal(t, uint32(62), scoreAlice) + require.Equal(t, uint32(62), scoreBob) + require.Equal(t, uint32(69), scoreCarol) + require.Equal(t, uint32(80), scoreEve) cache.doEviction() require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) From 11ebe839d2b4112e24ef17aad1671e77c0db989d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 8 Jul 2024 14:14:23 +0300 Subject: [PATCH 016/124] Additional tests. --- txcache/noncesTracker_test.go | 113 ++++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/txcache/noncesTracker_test.go b/txcache/noncesTracker_test.go index 4c4bb5da..dca2d6e7 100644 --- a/txcache/noncesTracker_test.go +++ b/txcache/noncesTracker_test.go @@ -61,3 +61,116 @@ func TestNoncesTracker_computeExpectedSumOfSquaresOfNonces(t *testing.T) { sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(oneTrillion, 65535) require.Equal(t, uint64(445375860), sum) } + +func TestNoncesTracker_isSpotlessSequence(t *testing.T) { + t.Run("empty sequence", func(t *testing.T) { + tracker := newNoncesTracker() + + // A little bit of ambiguity (a sequence holding the nonce zero only behaves like an empty sequence): + require.True(t, tracker.isSpotlessSequence(0, 0)) + require.True(t, tracker.isSpotlessSequence(0, 1)) + + require.False(t, tracker.isSpotlessSequence(0, 2)) + require.False(t, tracker.isSpotlessSequence(7, 3)) + }) + + t.Run("1-item sequence", func(t *testing.T) { + tracker := newNoncesTracker() + tracker.addNonce(0) + + // A little bit of ambiguity (a sequence holding the nonce zero only behaves like an empty sequence): + require.True(t, tracker.isSpotlessSequence(0, 1)) + require.True(t, tracker.isSpotlessSequence(0, 0)) + + require.False(t, tracker.isSpotlessSequence(0, 2)) + require.False(t, tracker.isSpotlessSequence(7, 3)) + + tracker.removeNonce(0) + tracker.addNonce(5) + require.True(t, tracker.isSpotlessSequence(5, 1)) + require.False(t, tracker.isSpotlessSequence(5, 2)) + require.False(t, tracker.isSpotlessSequence(7, 1)) + require.False(t, tracker.isSpotlessSequence(7, 2)) + + tracker.removeNonce(5) + tracker.addNonce(42) + require.True(t, tracker.isSpotlessSequence(42, 1)) + require.False(t, tracker.isSpotlessSequence(42, 2)) + require.False(t, tracker.isSpotlessSequence(7, 1)) + require.False(t, tracker.isSpotlessSequence(7, 2)) + }) + + t.Run("with spotless addition and removal", func(t *testing.T) { + t.Parallel() + + tracker := newNoncesTracker() + numTotalTxsSender := uint64(100) + firstNonce := uint64(oneBillion) + lastNonce := firstNonce + numTotalTxsSender - 1 + numCurrentTxs := uint64(0) + + // We add nonces in increasing order: + for nonce := firstNonce; nonce < firstNonce+numTotalTxsSender; nonce++ { + tracker.addNonce(nonce) + numCurrentTxs++ + + isSpotless := tracker.isSpotlessSequence(firstNonce, numCurrentTxs) + if !isSpotless { + require.Fail(t, "nonce sequence is not spotless (after add)", "nonce: %d", nonce) + } + 
} + + // We remove nonces in decreasing order: + for nonce := lastNonce; nonce >= firstNonce; nonce-- { + tracker.removeNonce(nonce) + numCurrentTxs-- + + isSpotless := tracker.isSpotlessSequence(firstNonce, numCurrentTxs) + if !isSpotless { + require.Fail(t, "nonce sequence is not spotless (after remove)", "nonce: %d", nonce) + } + } + }) + + t.Run("with initial gap", func(t *testing.T) { + tracker := newNoncesTracker() + + tracker.addNonce(5) + tracker.addNonce(6) + tracker.addNonce(7) + + require.False(t, tracker.isSpotlessSequence(2, 3)) + }) + + t.Run("with initial duplicate", func(t *testing.T) { + tracker := newNoncesTracker() + + tracker.addNonce(5) + tracker.addNonce(5) + tracker.addNonce(6) + + require.False(t, tracker.isSpotlessSequence(2, 3)) + }) + + t.Run("with middle gap", func(t *testing.T) { + tracker := newNoncesTracker() + + tracker.addNonce(5) + tracker.addNonce(6) + tracker.addNonce(8) + + require.False(t, tracker.isSpotlessSequence(5, 3)) + }) + + t.Run("with middle duplicate", func(t *testing.T) { + tracker := newNoncesTracker() + + tracker.addNonce(5) + tracker.addNonce(6) + tracker.addNonce(6) + tracker.addNonce(8) + + require.False(t, tracker.isSpotlessSequence(5, 4)) + require.False(t, tracker.isSpotlessSequence(5, 3)) + }) +} From 5fb4b11ef52021d12077cd0fbb33c730b946f029 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 8 Jul 2024 14:15:23 +0300 Subject: [PATCH 017/124] Monitoring cleanup. --- txcache/monitoring.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index c8c65bab..21c45275 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -110,7 +110,7 @@ func (cache *TxCache) monitorSweepingEnd(numTxs uint32, numSenders uint32, stopW func (cache *TxCache) displaySendersHistogram() { backingMap := cache.txListBySender.backingMap - log.Debug("TxCache.sendersHistogram:", "chunks", backingMap.ChunksCounts(), "scoreChunks", backingMap.ScoreChunksCounts()) + log.Debug("TxCache.sendersHistogram:", "scoreChunks", backingMap.ScoreChunksCounts()) } // evictionJournal keeps a short journal about the eviction process From b7dd5039f78edc9e764399d61c9bc50cc147206f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 8 Jul 2024 17:15:00 +0300 Subject: [PATCH 018/124] Compute score on tx addition / removal. Sort senders only on selection & eviction. 
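In short: each sender's score is now recomputed eagerly, on every transaction addition and removal, so selection and eviction no longer need a bucket-sorted map; they only have to group the senders by their already-computed score when a snapshot is requested. Below is a minimal, standalone sketch of that bucketing idea, with hypothetical, simplified types ("sender", "groupByScore", "snapshotAscending" are illustrative names, not the actual cache structures); only "numberOfScoreChunks" mirrors the real constant, and the score range shown is an assumption of the sketch:

    package main

    import "fmt"

    const numberOfScoreChunks = 100

    type sender struct {
        key   string
        score int // assumed precomputed, kept in [0, numberOfScoreChunks)
    }

    // groupByScore buckets senders by their precomputed score (a counting-sort style pass).
    func groupByScore(senders []*sender) [][]*sender {
        groups := make([][]*sender, numberOfScoreChunks)
        for _, s := range senders {
            groups[s.score] = append(groups[s.score], s)
        }
        return groups
    }

    // snapshotAscending concatenates the buckets, lowest score first.
    func snapshotAscending(senders []*sender) []*sender {
        groups := groupByScore(senders)
        snapshot := make([]*sender, 0, len(senders))
        for _, group := range groups {
            snapshot = append(snapshot, group...)
        }
        return snapshot
    }

    func main() {
        senders := []*sender{{"alice", 74}, {"bob", 5}, {"carol", 74}, {"eve", 99}}
        for _, s := range snapshotAscending(senders) {
            fmt.Println(s.key, s.score)
        }
        // Prints: bob 5, alice 74, carol 74, eve 99 (ascending by score; insertion order within a bucket).
    }

Grouping is linear in the number of senders; the trade-off is giving up the continuously-sorted structure, which is acceptable because sorted views are only needed at selection and eviction time.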
--- txcache/benchmarks.sh | 2 - txcache/maps/bucketSortedMap.go | 342 ---------------------- txcache/maps/bucketSortedMapItem.go | 8 - txcache/maps/bucketSortedMap_test.go | 421 --------------------------- txcache/monitoring.go | 17 +- txcache/score_test.go | 33 ++- txcache/txCache.go | 2 +- txcache/txListBySenderMap.go | 50 ++-- txcache/txListForSender.go | 50 +--- txcache/txListForSender_test.go | 14 +- 10 files changed, 82 insertions(+), 857 deletions(-) delete mode 100644 txcache/benchmarks.sh delete mode 100644 txcache/maps/bucketSortedMap.go delete mode 100644 txcache/maps/bucketSortedMapItem.go delete mode 100644 txcache/maps/bucketSortedMap_test.go diff --git a/txcache/benchmarks.sh b/txcache/benchmarks.sh deleted file mode 100644 index a3f9fa36..00000000 --- a/txcache/benchmarks.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -go test -bench="BenchmarkSendersMap_GetSnapshotAscending$" -benchtime=1x diff --git a/txcache/maps/bucketSortedMap.go b/txcache/maps/bucketSortedMap.go deleted file mode 100644 index 90a94162..00000000 --- a/txcache/maps/bucketSortedMap.go +++ /dev/null @@ -1,342 +0,0 @@ -package maps - -import ( - "sync" -) - -// BucketSortedMap is -type BucketSortedMap struct { - mutex sync.RWMutex - nChunks uint32 - nScoreChunks uint32 - maxScore uint32 - chunks []*MapChunk - scoreChunks []*MapChunk -} - -// MapChunk is -type MapChunk struct { - items map[string]BucketSortedMapItem - mutex sync.RWMutex -} - -// NewBucketSortedMap creates a new map. -func NewBucketSortedMap(nChunks uint32, nScoreChunks uint32) *BucketSortedMap { - if nChunks == 0 { - nChunks = 1 - } - if nScoreChunks == 0 { - nScoreChunks = 1 - } - - sortedMap := BucketSortedMap{ - nChunks: nChunks, - nScoreChunks: nScoreChunks, - maxScore: nScoreChunks - 1, - } - - sortedMap.initializeChunks() - - return &sortedMap -} - -func (sortedMap *BucketSortedMap) initializeChunks() { - // Assignment is not an atomic operation, so we have to wrap this in a critical section - sortedMap.mutex.Lock() - defer sortedMap.mutex.Unlock() - - sortedMap.chunks = make([]*MapChunk, sortedMap.nChunks) - sortedMap.scoreChunks = make([]*MapChunk, sortedMap.nScoreChunks) - - for i := uint32(0); i < sortedMap.nChunks; i++ { - sortedMap.chunks[i] = &MapChunk{ - items: make(map[string]BucketSortedMapItem), - } - } - - for i := uint32(0); i < sortedMap.nScoreChunks; i++ { - sortedMap.scoreChunks[i] = &MapChunk{ - items: make(map[string]BucketSortedMapItem), - } - } -} - -// Set puts the item in the map -// This doesn't add the item to the score chunks (not necessary) -func (sortedMap *BucketSortedMap) Set(item BucketSortedMapItem) { - chunk := sortedMap.getChunk(item.GetKey()) - chunk.setItem(item) -} - -// NotifyScoreChange moves or adds the item to the corresponding score chunk -func (sortedMap *BucketSortedMap) NotifyScoreChange(item BucketSortedMapItem, newScore uint32) { - if newScore > sortedMap.maxScore { - newScore = sortedMap.maxScore - } - - newScoreChunk := sortedMap.getScoreChunks()[newScore] - if newScoreChunk != item.GetScoreChunk() { - removeFromScoreChunk(item) - newScoreChunk.setItem(item) - item.SetScoreChunk(newScoreChunk) - } -} - -func removeFromScoreChunk(item BucketSortedMapItem) { - currentScoreChunk := item.GetScoreChunk() - if currentScoreChunk != nil { - currentScoreChunk.removeItem(item) - } -} - -// Get retrieves an element from map under given key. 
-func (sortedMap *BucketSortedMap) Get(key string) (BucketSortedMapItem, bool) { - chunk := sortedMap.getChunk(key) - chunk.mutex.RLock() - val, ok := chunk.items[key] - chunk.mutex.RUnlock() - return val, ok -} - -// Has looks up an item under specified key -func (sortedMap *BucketSortedMap) Has(key string) bool { - chunk := sortedMap.getChunk(key) - chunk.mutex.RLock() - _, ok := chunk.items[key] - chunk.mutex.RUnlock() - return ok -} - -// Remove removes an element from the map -func (sortedMap *BucketSortedMap) Remove(key string) (interface{}, bool) { - chunk := sortedMap.getChunk(key) - item := chunk.removeItemByKey(key) - if item != nil { - removeFromScoreChunk(item) - } - - return item, item != nil -} - -// getChunk returns the chunk holding the given key. -func (sortedMap *BucketSortedMap) getChunk(key string) *MapChunk { - sortedMap.mutex.RLock() - defer sortedMap.mutex.RUnlock() - return sortedMap.chunks[fnv32Hash(key)%sortedMap.nChunks] -} - -// fnv32Hash implements https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function for 32 bits -func fnv32Hash(key string) uint32 { - hash := uint32(2166136261) - const prime32 = uint32(16777619) - for i := 0; i < len(key); i++ { - hash *= prime32 - hash ^= uint32(key[i]) - } - return hash -} - -// Clear clears the map -func (sortedMap *BucketSortedMap) Clear() { - // There is no need to explicitly remove each item for each chunk - // The garbage collector will remove the data from memory - sortedMap.initializeChunks() -} - -// Count returns the number of elements within the map -func (sortedMap *BucketSortedMap) Count() uint32 { - count := uint32(0) - for _, chunk := range sortedMap.getChunks() { - count += chunk.countItems() - } - return count -} - -// CountSorted returns the number of sorted elements within the map -func (sortedMap *BucketSortedMap) CountSorted() uint32 { - count := uint32(0) - for _, chunk := range sortedMap.getScoreChunks() { - count += chunk.countItems() - } - return count -} - -// ChunksCounts returns the number of elements by chunk -func (sortedMap *BucketSortedMap) ChunksCounts() []uint32 { - counts := make([]uint32, sortedMap.nChunks) - for i, chunk := range sortedMap.getChunks() { - counts[i] = chunk.countItems() - } - return counts -} - -// ScoreChunksCounts returns the number of elements by chunk -func (sortedMap *BucketSortedMap) ScoreChunksCounts() []uint32 { - counts := make([]uint32, sortedMap.nScoreChunks) - for i, chunk := range sortedMap.getScoreChunks() { - counts[i] = chunk.countItems() - } - return counts -} - -// SortedMapIterCb is an iterator callback -type SortedMapIterCb func(key string, value BucketSortedMapItem) - -// GetSnapshotAscending gets a snapshot of the items -func (sortedMap *BucketSortedMap) GetSnapshotAscending() []BucketSortedMapItem { - return sortedMap.getSortedSnapshot(sortedMap.fillSnapshotAscending) -} - -// GetSnapshotDescending gets a snapshot of the items -func (sortedMap *BucketSortedMap) GetSnapshotDescending() []BucketSortedMapItem { - return sortedMap.getSortedSnapshot(sortedMap.fillSnapshotDescending) -} - -// This applies a read lock on all chunks, so that they aren't mutated during snapshot -func (sortedMap *BucketSortedMap) getSortedSnapshot(fillSnapshot func(scoreChunks []*MapChunk, snapshot []BucketSortedMapItem)) []BucketSortedMapItem { - counter := uint32(0) - scoreChunks := sortedMap.getScoreChunks() - - for _, chunk := range scoreChunks { - chunk.mutex.RLock() - counter += uint32(len(chunk.items)) - } - - snapshot := make([]BucketSortedMapItem, counter) - 
fillSnapshot(scoreChunks, snapshot) - - for _, chunk := range scoreChunks { - chunk.mutex.RUnlock() - } - - return snapshot -} - -// This function should only be called under already read-locked score chunks -func (sortedMap *BucketSortedMap) fillSnapshotAscending(scoreChunks []*MapChunk, snapshot []BucketSortedMapItem) { - i := 0 - for _, chunk := range scoreChunks { - for _, item := range chunk.items { - snapshot[i] = item - i++ - } - } -} - -// This function should only be called under already read-locked score chunks -func (sortedMap *BucketSortedMap) fillSnapshotDescending(scoreChunks []*MapChunk, snapshot []BucketSortedMapItem) { - i := 0 - for chunkIndex := len(scoreChunks) - 1; chunkIndex >= 0; chunkIndex-- { - chunk := scoreChunks[chunkIndex] - for _, item := range chunk.items { - snapshot[i] = item - i++ - } - } -} - -// IterCbSortedAscending iterates over the sorted elements in the map -func (sortedMap *BucketSortedMap) IterCbSortedAscending(callback SortedMapIterCb) { - for _, chunk := range sortedMap.getScoreChunks() { - chunk.forEachItem(callback) - } -} - -// IterCbSortedDescending iterates over the sorted elements in the map -func (sortedMap *BucketSortedMap) IterCbSortedDescending(callback SortedMapIterCb) { - chunks := sortedMap.getScoreChunks() - for i := len(chunks) - 1; i >= 0; i-- { - chunk := chunks[i] - chunk.forEachItem(callback) - } -} - -// Keys returns all keys as []string -func (sortedMap *BucketSortedMap) Keys() []string { - count := sortedMap.Count() - // count is not exact anymore, since we are in a different lock than the one aquired by Count() (but is a good approximation) - keys := make([]string, 0, count) - - for _, chunk := range sortedMap.getChunks() { - keys = chunk.appendKeys(keys) - } - - return keys -} - -// KeysSorted returns all keys of the sorted items as []string -func (sortedMap *BucketSortedMap) KeysSorted() []string { - count := sortedMap.CountSorted() - // count is not exact anymore, since we are in a different lock than the one aquired by CountSorted() (but is a good approximation) - keys := make([]string, 0, count) - - for _, chunk := range sortedMap.getScoreChunks() { - keys = chunk.appendKeys(keys) - } - - return keys -} - -func (sortedMap *BucketSortedMap) getChunks() []*MapChunk { - sortedMap.mutex.RLock() - defer sortedMap.mutex.RUnlock() - return sortedMap.chunks -} - -func (sortedMap *BucketSortedMap) getScoreChunks() []*MapChunk { - sortedMap.mutex.RLock() - defer sortedMap.mutex.RUnlock() - return sortedMap.scoreChunks -} - -func (chunk *MapChunk) removeItem(item BucketSortedMapItem) { - chunk.mutex.Lock() - defer chunk.mutex.Unlock() - - key := item.GetKey() - delete(chunk.items, key) -} - -func (chunk *MapChunk) removeItemByKey(key string) BucketSortedMapItem { - chunk.mutex.Lock() - defer chunk.mutex.Unlock() - - item := chunk.items[key] - delete(chunk.items, key) - return item -} - -func (chunk *MapChunk) setItem(item BucketSortedMapItem) { - chunk.mutex.Lock() - defer chunk.mutex.Unlock() - - key := item.GetKey() - chunk.items[key] = item -} - -func (chunk *MapChunk) countItems() uint32 { - chunk.mutex.RLock() - defer chunk.mutex.RUnlock() - - return uint32(len(chunk.items)) -} - -func (chunk *MapChunk) forEachItem(callback SortedMapIterCb) { - chunk.mutex.RLock() - defer chunk.mutex.RUnlock() - - for key, value := range chunk.items { - callback(key, value) - } -} - -func (chunk *MapChunk) appendKeys(keysAccumulator []string) []string { - chunk.mutex.RLock() - defer chunk.mutex.RUnlock() - - for key := range chunk.items { - 
keysAccumulator = append(keysAccumulator, key) - } - - return keysAccumulator -} diff --git a/txcache/maps/bucketSortedMapItem.go b/txcache/maps/bucketSortedMapItem.go deleted file mode 100644 index 4ba55181..00000000 --- a/txcache/maps/bucketSortedMapItem.go +++ /dev/null @@ -1,8 +0,0 @@ -package maps - -// BucketSortedMapItem defines an item of the bucket sorted map -type BucketSortedMapItem interface { - GetKey() string - GetScoreChunk() *MapChunk - SetScoreChunk(*MapChunk) -} diff --git a/txcache/maps/bucketSortedMap_test.go b/txcache/maps/bucketSortedMap_test.go deleted file mode 100644 index 2bb4a7ee..00000000 --- a/txcache/maps/bucketSortedMap_test.go +++ /dev/null @@ -1,421 +0,0 @@ -package maps - -import ( - "fmt" - "sync" - "testing" - - "github.com/multiversx/mx-chain-core-go/core/atomic" - "github.com/stretchr/testify/require" -) - -type dummyItem struct { - score atomic.Uint32 - key string - chunk *MapChunk - chunkMutex sync.RWMutex - mutex sync.Mutex -} - -func newDummyItem(key string) *dummyItem { - return &dummyItem{ - key: key, - } -} - -func newScoredDummyItem(key string, score uint32) *dummyItem { - item := &dummyItem{ - key: key, - } - item.score.Set(score) - return item -} - -func (item *dummyItem) GetKey() string { - return item.key -} - -func (item *dummyItem) GetScoreChunk() *MapChunk { - item.chunkMutex.RLock() - defer item.chunkMutex.RUnlock() - - return item.chunk -} - -func (item *dummyItem) SetScoreChunk(chunk *MapChunk) { - item.chunkMutex.Lock() - defer item.chunkMutex.Unlock() - - item.chunk = chunk -} - -func (item *dummyItem) simulateMutationThatChangesScore(myMap *BucketSortedMap) { - item.mutex.Lock() - myMap.NotifyScoreChange(item, item.score.Get()) - item.mutex.Unlock() -} - -func simulateMutationThatChangesScore(myMap *BucketSortedMap, key string) { - item, ok := myMap.Get(key) - if !ok { - return - } - - itemAsDummy := item.(*dummyItem) - itemAsDummy.simulateMutationThatChangesScore(myMap) -} - -func TestNewBucketSortedMap(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - require.Equal(t, uint32(4), myMap.nChunks) - require.Equal(t, 4, len(myMap.chunks)) - require.Equal(t, uint32(100), myMap.nScoreChunks) - require.Equal(t, 100, len(myMap.scoreChunks)) - - // 1 is minimum number of chunks - myMap = NewBucketSortedMap(0, 0) - require.Equal(t, uint32(1), myMap.nChunks) - require.Equal(t, uint32(1), myMap.nScoreChunks) -} - -func TestBucketSortedMap_Count(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newScoredDummyItem("a", 0)) - myMap.Set(newScoredDummyItem("b", 1)) - myMap.Set(newScoredDummyItem("c", 2)) - myMap.Set(newScoredDummyItem("d", 3)) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - simulateMutationThatChangesScore(myMap, "d") - - require.Equal(t, uint32(4), myMap.Count()) - require.Equal(t, uint32(4), myMap.CountSorted()) - - counts := myMap.ChunksCounts() - require.Equal(t, uint32(1), counts[0]) - require.Equal(t, uint32(1), counts[1]) - require.Equal(t, uint32(1), counts[2]) - require.Equal(t, uint32(1), counts[3]) - - counts = myMap.ScoreChunksCounts() - require.Equal(t, uint32(1), counts[0]) - require.Equal(t, uint32(1), counts[1]) - require.Equal(t, uint32(1), counts[2]) - require.Equal(t, uint32(1), counts[3]) -} - -func TestBucketSortedMap_Keys(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newDummyItem("a")) - myMap.Set(newDummyItem("b")) - myMap.Set(newDummyItem("c")) - - 
simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - require.Equal(t, 3, len(myMap.Keys())) - require.Equal(t, 3, len(myMap.KeysSorted())) -} - -func TestBucketSortedMap_KeysSorted(t *testing.T) { - myMap := NewBucketSortedMap(1, 4) - - myMap.Set(newScoredDummyItem("d", 3)) - myMap.Set(newScoredDummyItem("a", 0)) - myMap.Set(newScoredDummyItem("c", 2)) - myMap.Set(newScoredDummyItem("b", 1)) - myMap.Set(newScoredDummyItem("f", 5)) - myMap.Set(newScoredDummyItem("e", 4)) - - simulateMutationThatChangesScore(myMap, "d") - simulateMutationThatChangesScore(myMap, "e") - simulateMutationThatChangesScore(myMap, "f") - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - keys := myMap.KeysSorted() - require.Equal(t, "a", keys[0]) - require.Equal(t, "b", keys[1]) - require.Equal(t, "c", keys[2]) - - counts := myMap.ScoreChunksCounts() - require.Equal(t, uint32(1), counts[0]) - require.Equal(t, uint32(1), counts[1]) - require.Equal(t, uint32(1), counts[2]) - require.Equal(t, uint32(3), counts[3]) -} - -func TestBucketSortedMap_ItemMovesOnNotifyScoreChange(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - - a := newScoredDummyItem("a", 1) - b := newScoredDummyItem("b", 42) - myMap.Set(a) - myMap.Set(b) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - - require.Equal(t, myMap.scoreChunks[1], a.GetScoreChunk()) - require.Equal(t, myMap.scoreChunks[42], b.GetScoreChunk()) - - a.score.Set(2) - b.score.Set(43) - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - - require.Equal(t, myMap.scoreChunks[2], a.GetScoreChunk()) - require.Equal(t, myMap.scoreChunks[43], b.GetScoreChunk()) -} - -func TestBucketSortedMap_Has(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newDummyItem("a")) - myMap.Set(newDummyItem("b")) - - require.True(t, myMap.Has("a")) - require.True(t, myMap.Has("b")) - require.False(t, myMap.Has("c")) -} - -func TestBucketSortedMap_Remove(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newDummyItem("a")) - myMap.Set(newDummyItem("b")) - - _, ok := myMap.Remove("b") - require.True(t, ok) - _, ok = myMap.Remove("x") - require.False(t, ok) - - require.True(t, myMap.Has("a")) - require.False(t, myMap.Has("b")) -} - -func TestBucketSortedMap_Clear(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - myMap.Set(newDummyItem("a")) - myMap.Set(newDummyItem("b")) - - myMap.Clear() - - require.Equal(t, uint32(0), myMap.Count()) - require.Equal(t, uint32(0), myMap.CountSorted()) -} - -func TestBucketSortedMap_IterCb(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - - myMap.Set(newScoredDummyItem("a", 15)) - myMap.Set(newScoredDummyItem("b", 101)) - myMap.Set(newScoredDummyItem("c", 3)) - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - sorted := []string{"c", "a", "b"} - - i := 0 - myMap.IterCbSortedAscending(func(key string, value BucketSortedMapItem) { - require.Equal(t, sorted[i], key) - i++ - }) - - require.Equal(t, 3, i) - - i = len(sorted) - 1 - myMap.IterCbSortedDescending(func(key string, value BucketSortedMapItem) { - require.Equal(t, sorted[i], key) - i-- - }) - - require.Equal(t, 0, i+1) -} - -func TestBucketSortedMap_GetSnapshotAscending(t *testing.T) { - myMap := 
NewBucketSortedMap(4, 100) - - snapshot := myMap.GetSnapshotAscending() - require.Equal(t, []BucketSortedMapItem{}, snapshot) - - a := newScoredDummyItem("a", 15) - b := newScoredDummyItem("b", 101) - c := newScoredDummyItem("c", 3) - - myMap.Set(a) - myMap.Set(b) - myMap.Set(c) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - snapshot = myMap.GetSnapshotAscending() - require.Equal(t, []BucketSortedMapItem{c, a, b}, snapshot) -} - -func TestBucketSortedMap_GetSnapshotDescending(t *testing.T) { - myMap := NewBucketSortedMap(4, 100) - - snapshot := myMap.GetSnapshotDescending() - require.Equal(t, []BucketSortedMapItem{}, snapshot) - - a := newScoredDummyItem("a", 15) - b := newScoredDummyItem("b", 101) - c := newScoredDummyItem("c", 3) - - myMap.Set(a) - myMap.Set(b) - myMap.Set(c) - - simulateMutationThatChangesScore(myMap, "a") - simulateMutationThatChangesScore(myMap, "b") - simulateMutationThatChangesScore(myMap, "c") - - snapshot = myMap.GetSnapshotDescending() - require.Equal(t, []BucketSortedMapItem{b, a, c}, snapshot) -} - -func TestBucketSortedMap_AddManyItems(t *testing.T) { - numGoroutines := 42 - numItemsPerGoroutine := 1000 - numScoreChunks := 100 - numItemsInScoreChunkPerGoroutine := numItemsPerGoroutine / numScoreChunks - numItemsInScoreChunk := numItemsInScoreChunkPerGoroutine * numGoroutines - - myMap := NewBucketSortedMap(16, uint32(numScoreChunks)) - - var waitGroup sync.WaitGroup - waitGroup.Add(numGoroutines) - - for i := 0; i < numGoroutines; i++ { - go func(i int) { - for j := 0; j < numItemsPerGoroutine; j++ { - key := fmt.Sprintf("%d_%d", i, j) - item := newScoredDummyItem(key, uint32(j%numScoreChunks)) - myMap.Set(item) - simulateMutationThatChangesScore(myMap, key) - } - - waitGroup.Done() - }(i) - } - - waitGroup.Wait() - - require.Equal(t, uint32(numGoroutines*numItemsPerGoroutine), myMap.CountSorted()) - - counts := myMap.ScoreChunksCounts() - for i := 0; i < numScoreChunks; i++ { - require.Equal(t, uint32(numItemsInScoreChunk), counts[i]) - } -} - -func TestBucketSortedMap_ClearConcurrentWithRead(t *testing.T) { - numChunks := uint32(4) - numScoreChunks := uint32(4) - myMap := NewBucketSortedMap(numChunks, numScoreChunks) - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - - for j := 0; j < 1000; j++ { - myMap.Clear() - } - }() - - go func() { - defer wg.Done() - - for j := 0; j < 1000; j++ { - require.Equal(t, uint32(0), myMap.Count()) - require.Equal(t, uint32(0), myMap.CountSorted()) - require.Len(t, myMap.ChunksCounts(), int(numChunks)) - require.Len(t, myMap.ScoreChunksCounts(), int(numScoreChunks)) - require.Len(t, myMap.Keys(), 0) - require.Len(t, myMap.KeysSorted(), 0) - require.Equal(t, false, myMap.Has("foobar")) - item, ok := myMap.Get("foobar") - require.Nil(t, item) - require.False(t, ok) - require.Len(t, myMap.GetSnapshotAscending(), 0) - myMap.IterCbSortedAscending(func(key string, item BucketSortedMapItem) { - }) - myMap.IterCbSortedDescending(func(key string, item BucketSortedMapItem) { - }) - } - }() - - wg.Wait() -} - -func TestBucketSortedMap_ClearConcurrentWithWrite(t *testing.T) { - myMap := NewBucketSortedMap(4, 4) - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - for j := 0; j < 10000; j++ { - myMap.Clear() - } - - wg.Done() - }() - - go func() { - for j := 0; j < 10000; j++ { - myMap.Set(newDummyItem("foobar")) - _, _ = myMap.Remove("foobar") - myMap.NotifyScoreChange(newDummyItem("foobar"), 42) - 
simulateMutationThatChangesScore(myMap, "foobar") - } - - wg.Done() - }() - - wg.Wait() -} - -func TestBucketSortedMap_NoForgottenItemsOnConcurrentScoreChanges(t *testing.T) { - // This test helped us to find a memory leak occuring on concurrent score changes (concurrent movements across buckets) - - for i := 0; i < 1000; i++ { - myMap := NewBucketSortedMap(16, 16) - a := newScoredDummyItem("a", 0) - myMap.Set(a) - simulateMutationThatChangesScore(myMap, "a") - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - a.score.Set(1) - simulateMutationThatChangesScore(myMap, "a") - wg.Done() - }() - - go func() { - a.score.Set(2) - simulateMutationThatChangesScore(myMap, "a") - wg.Done() - }() - - wg.Wait() - - require.Equal(t, uint32(1), myMap.CountSorted()) - require.Equal(t, uint32(1), myMap.Count()) - - _, _ = myMap.Remove("a") - - require.Equal(t, uint32(0), myMap.CountSorted()) - require.Equal(t, uint32(0), myMap.Count()) - } -} diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 21c45275..8dbd0dc0 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -29,7 +29,6 @@ func (cache *TxCache) monitorEvictionWrtSenderNonce(sender []byte, senderNonce u func (cache *TxCache) monitorEvictionStart() *core.StopWatch { log.Debug("TxCache: eviction started", "name", cache.name, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) - cache.displaySendersHistogram() sw := core.NewStopWatch() sw.Start("eviction") return sw @@ -40,12 +39,10 @@ func (cache *TxCache) monitorEvictionEnd(stopWatch *core.StopWatch) { duration := stopWatch.GetMeasurement("eviction") log.Debug("TxCache: eviction ended", "name", cache.name, "duration", duration, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) cache.evictionJournal.display() - cache.displaySendersHistogram() } func (cache *TxCache) monitorSelectionStart() *core.StopWatch { log.Debug("TxCache: selection started", "name", cache.name, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) - cache.displaySendersHistogram() sw := core.NewStopWatch() sw.Start("selection") return sw @@ -142,25 +139,23 @@ func (cache *TxCache) diagnoseShallowly() { numTxsEstimate := int(cache.CountTx()) numTxsInChunks := cache.txByHash.backingMap.Count() txsKeys := cache.txByHash.backingMap.Keys() - numSendersEstimate := uint32(cache.CountSenders()) + numSendersEstimate := int(cache.CountSenders()) numSendersInChunks := cache.txListBySender.backingMap.Count() - numSendersInScoreChunks := cache.txListBySender.backingMap.CountSorted() sendersKeys := cache.txListBySender.backingMap.Keys() - sendersKeysSorted := cache.txListBySender.backingMap.KeysSorted() sendersSnapshot := cache.txListBySender.getSnapshotAscending() sw.Stop("diagnose") duration := sw.GetMeasurement("diagnose") - fine := numSendersEstimate == numSendersInChunks && numSendersEstimate == numSendersInScoreChunks - fine = fine && (len(sendersKeys) == len(sendersKeysSorted) && len(sendersKeys) == len(sendersSnapshot)) + fine := numSendersEstimate == numSendersInChunks + fine = fine && len(sendersKeys) == len(sendersSnapshot) fine = fine && (int(numSendersEstimate) == len(sendersKeys)) fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) log.Debug("TxCache.diagnoseShallowly()", "name", cache.name, "duration", duration, "fine", fine) log.Debug("TxCache.Size:", "current", sizeInBytes, "max", cache.config.NumBytesThreshold) - log.Debug("TxCache.NumSenders:", "estimate", 
numSendersEstimate, "inChunks", numSendersInChunks, "inScoreChunks", numSendersInScoreChunks) - log.Debug("TxCache.NumSenders (continued):", "keys", len(sendersKeys), "keysSorted", len(sendersKeysSorted), "snapshot", len(sendersSnapshot)) + log.Debug("TxCache.NumSenders:", "estimate", numSendersEstimate, "inChunks", numSendersInChunks) + log.Debug("TxCache.NumSenders (continued):", "keys", len(sendersKeys), "snapshot", len(sendersSnapshot)) log.Debug("TxCache.NumTxs:", "estimate", numTxsEstimate, "inChunks", numTxsInChunks, "keys", len(txsKeys)) } @@ -238,7 +233,7 @@ func (cache *TxCache) displaySendersSummary() { accountNonce := sender.accountNonce.Get() accountNonceKnown := sender.accountNonceKnown.IsSet() numFailedSelections := sender.numFailedSelections.Get() - score := sender.getLastComputedScore() + score := sender.getScore() numTxs := sender.countTxWithLock() lowestTxNonce := -1 diff --git a/txcache/score_test.go b/txcache/score_test.go index 589b22af..e335bb88 100644 --- a/txcache/score_test.go +++ b/txcache/score_test.go @@ -96,20 +96,35 @@ func computeScoreOfTransaction(dataLength int, gasLimit uint64, gasPrice uint64) } func BenchmarkScoreComputer_computeRawScore(b *testing.B) { - _, txFeeHelper := dummyParams() - computer := newDefaultScoreComputer(txFeeHelper) + gasHandler := txcachemocks.NewTxGasHandlerMock() + computer := newDefaultScoreComputer(gasHandler) + + tx := &WrappedTransaction{ + Tx: &transaction.Transaction{ + Data: make([]byte, 42), + GasLimit: 50000000, + GasPrice: 1000000000, + }, + } for i := 0; i < b.N; i++ { - for j := uint64(0); j < 10000000; j++ { - computer.computeRawScore(senderScoreParams{count: j, feeScore: uint64(float64(8000) * float64(j)), gas: 100000 * j}) + txFee := tx.computeFee(gasHandler) + + for j := uint64(0); j < 1000000; j++ { + computer.computeScore(senderScoreParams{ + avgPpuNumerator: txFee, + avgPpuDenominator: tx.Tx.GetGasLimit(), + hasSpotlessSequenceOfNonces: true, + }) } } -} -func TestDefaultScoreComputer_computeRawScoreOfTxListForSender(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPrice(oneBillion) - computer := newDefaultScoreComputer(txFeeHelper) - list := newUnconstrainedListToTest() + // Results: + // + // (a) 10 millisecond(s) to compute the score 1 million times. 
+ // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // BenchmarkScoreComputer_computeRawScore-8 124 9812711 ns/op 295 B/op 12 allocs/op +} list.AddTx(createTxWithParams([]byte("a"), ".", 1, 1000, 50000, oneBillion), txGasHandler, txFeeHelper) list.AddTx(createTxWithParams([]byte("b"), ".", 1, 500, 100000, oneBillion), txGasHandler, txFeeHelper) diff --git a/txcache/txCache.go b/txcache/txCache.go index 6cc1c95c..e4419260 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -123,7 +123,7 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender copiedInThisPass := 0 for _, txList := range snapshotOfSenders { - batchSizeWithScoreCoefficient := batchSizePerSender * int(txList.getLastComputedScore()+1) + batchSizeWithScoreCoefficient := batchSizePerSender * int(txList.getScore()+1) // Reset happens on first pass only isFirstBatch := pass == 0 journal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSizeWithScoreCoefficient, bandwidthPerSender) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index e6e2d933..f0934acf 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -9,7 +9,7 @@ import ( // txListBySenderMap is a map-like structure for holding and accessing transactions by sender type txListBySenderMap struct { - backingMap *maps.BucketSortedMap + backingMap *maps.ConcurrentMap senderConstraints senderConstraints counter atomic.Counter scoreComputer scoreComputer @@ -24,7 +24,7 @@ func newTxListBySenderMap( scoreComputer scoreComputer, txGasHandler TxGasHandler, ) *txListBySenderMap { - backingMap := maps.NewBucketSortedMap(nChunksHint, numberOfScoreChunks) + backingMap := maps.NewConcurrentMap(nChunksHint) return &txListBySenderMap{ backingMap: backingMap, @@ -70,21 +70,14 @@ func (txMap *txListBySenderMap) getListForSender(sender string) (*txListForSende } func (txMap *txListBySenderMap) addSender(sender string) *txListForSender { - listForSender := newTxListForSender(sender, &txMap.senderConstraints, txMap.notifyScoreChange) + listForSender := newTxListForSender(sender, &txMap.senderConstraints, txMap.scoreComputer) - txMap.backingMap.Set(listForSender) + txMap.backingMap.Set(sender, listForSender) txMap.counter.Increment() return listForSender } -// This function should only be called in a critical section managed by a "txListForSender" -func (txMap *txListBySenderMap) notifyScoreChange(txList *txListForSender, scoreParams senderScoreParams) { - score := txMap.scoreComputer.computeScore(scoreParams) - txList.setLastComputedScore(score) - txMap.backingMap.NotifyScoreChange(txList, score) -} - // removeTx removes a transaction from the map func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { sender := string(tx.Tx.GetSndAddr()) @@ -139,27 +132,46 @@ func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint } func (txMap *txListBySenderMap) getSnapshotAscending() []*txListForSender { - itemsSnapshot := txMap.backingMap.GetSnapshotAscending() - listsSnapshot := make([]*txListForSender, len(itemsSnapshot)) + scoreGroups := txMap.getSendersGroupedByScore() + listsSnapshot := make([]*txListForSender, 0, txMap.counter.Get()) - for i, item := range itemsSnapshot { - listsSnapshot[i] = item.(*txListForSender) + for i := 0; i < len(scoreGroups); i++ { + listsSnapshot = append(listsSnapshot, scoreGroups[i]...) 
} return listsSnapshot } func (txMap *txListBySenderMap) getSnapshotDescending() []*txListForSender { - itemsSnapshot := txMap.backingMap.GetSnapshotDescending() - listsSnapshot := make([]*txListForSender, len(itemsSnapshot)) + scoreGroups := txMap.getSendersGroupedByScore() + listsSnapshot := make([]*txListForSender, 0, txMap.counter.Get()) - for i, item := range itemsSnapshot { - listsSnapshot[i] = item.(*txListForSender) + for i := len(scoreGroups) - 1; i >= 0; i-- { + listsSnapshot = append(listsSnapshot, scoreGroups[i]...) } return listsSnapshot } +func (txMap *txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender { + groups := make([][]*txListForSender, numberOfScoreChunks) + // Hint for pre-allocating slice for each group (imprecise, but reasonable). + groupSizeHint := txMap.counter.Get() / int64(numberOfScoreChunks) / 2 + + txMap.backingMap.IterCb(func(key string, item interface{}) { + listForSender := item.(*txListForSender) + score := listForSender.getScore() + + if groups[score] == nil { + groups[score] = make([]*txListForSender, 0, groupSizeHint) + } + + groups[score] = append(groups[score], listForSender) + }) + + return groups +} + func (txMap *txListBySenderMap) clear() { txMap.backingMap.Clear() txMap.counter.Set(0) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 27c8b94d..b33c6d28 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -7,15 +7,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-storage-go/common" - "github.com/multiversx/mx-chain-storage-go/txcache/maps" ) -var _ maps.BucketSortedMapItem = (*txListForSender)(nil) - // txListForSender represents a sorted list of transactions of a particular sender type txListForSender struct { copyDetectedGap bool - lastComputedScore atomic.Uint32 + score atomic.Uint32 accountNonceKnown atomic.Flag sweepable atomic.Flag copyPreviousNonce uint64 @@ -23,7 +20,6 @@ type txListForSender struct { items *list.List copyBatchIndex *list.Element constraints *senderConstraints - scoreChunk *maps.MapChunk accountNonce atomic.Uint64 totalBytes atomic.Counter numFailedSelections atomic.Counter @@ -31,23 +27,19 @@ type txListForSender struct { avgPpuNumerator float64 avgPpuDenominator uint64 noncesTracker *noncesTracker + scoreComputer scoreComputer - onScoreChange scoreChangeCallback - - scoreChunkMutex sync.RWMutex - mutex sync.RWMutex + mutex sync.RWMutex } -type scoreChangeCallback func(value *txListForSender, scoreParams senderScoreParams) - // newTxListForSender creates a new (sorted) list of transactions -func newTxListForSender(sender string, constraints *senderConstraints, onScoreChange scoreChangeCallback) *txListForSender { +func newTxListForSender(sender string, constraints *senderConstraints, scoreComputer scoreComputer) *txListForSender { return &txListForSender{ items: list.New(), sender: sender, constraints: constraints, noncesTracker: newNoncesTracker(), - onScoreChange: onScoreChange, + scoreComputer: scoreComputer, } } @@ -71,7 +63,7 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction, gasHandler T listForSender.onAddedTransaction(tx, gasHandler) evicted := listForSender.applySizeConstraints() - listForSender.triggerScoreChange() + listForSender.recomputeScore() return true, evicted } @@ -115,9 +107,10 @@ func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction, listForSender.noncesTracker.addNonce(nonce) } -func (listForSender *txListForSender) triggerScoreChange() { 
+func (listForSender *txListForSender) recomputeScore() { scoreParams := listForSender.getScoreParams() - listForSender.onScoreChange(listForSender, scoreParams) + score := listForSender.scoreComputer.computeScore(scoreParams) + listForSender.score.Set(score) } // This function should only be used in critical section (listForSender.mutex) @@ -192,7 +185,7 @@ func (listForSender *txListForSender) RemoveTx(tx *WrappedTransaction) bool { if isFound { listForSender.items.Remove(marker) listForSender.onRemovedListElement(marker) - listForSender.triggerScoreChange() + listForSender.recomputeScore() } return isFound @@ -432,30 +425,11 @@ func (listForSender *txListForSender) isGracePeriodExceeded() bool { return numFailedSelections > senderGracePeriodUpperBound } -func (listForSender *txListForSender) getLastComputedScore() uint32 { - return listForSender.lastComputedScore.Get() -} - -func (listForSender *txListForSender) setLastComputedScore(score uint32) { - listForSender.lastComputedScore.Set(score) +func (listForSender *txListForSender) getScore() uint32 { + return listForSender.score.Get() } // GetKey returns the key func (listForSender *txListForSender) GetKey() string { return listForSender.sender } - -// GetScoreChunk returns the score chunk the sender is currently in -func (listForSender *txListForSender) GetScoreChunk() *maps.MapChunk { - listForSender.scoreChunkMutex.RLock() - defer listForSender.scoreChunkMutex.RUnlock() - - return listForSender.scoreChunk -} - -// SetScoreChunk returns the score chunk the sender is currently in -func (listForSender *txListForSender) SetScoreChunk(scoreChunk *maps.MapChunk) { - listForSender.scoreChunkMutex.Lock() - listForSender.scoreChunk = scoreChunk - listForSender.scoreChunkMutex.Unlock() -} diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 3a211cd8..66539bbc 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -406,15 +406,17 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { } func newUnconstrainedListToTest() *txListForSender { - return newTxListForSender(".", &senderConstraints{ - maxNumBytes: math.MaxUint32, - maxNumTxs: math.MaxUint32, - }, func(_ *txListForSender, _ senderScoreParams) {}) + return newListToTest(math.MaxUint32, math.MaxUint32) } func newListToTest(maxNumBytes uint32, maxNumTxs uint32) *txListForSender { - return newTxListForSender(".", &senderConstraints{ + senderConstraints := &senderConstraints{ maxNumBytes: maxNumBytes, maxNumTxs: maxNumTxs, - }, func(_ *txListForSender, _ senderScoreParams) {}) + } + + txGasHandler := txcachemocks.NewTxGasHandlerMock() + scoreComputer := newDefaultScoreComputer(txGasHandler) + + return newTxListForSender(".", senderConstraints, scoreComputer) } From 6f61e51c12db87bc2e917421b7b0c43da86e1272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 8 Jul 2024 18:23:19 +0300 Subject: [PATCH 019/124] Fix tests, benchmarks. 
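For reference, the reworked timing test and the renamed benchmark can be run in isolation roughly as follows (the package path is an assumption based on the repository layout; run from the module root and adjust if needed):

    go test ./txcache -run TestBenchmarkSendersMap_GetSnapshotAscending -v
    go test ./txcache -run XXX -bench BenchmarkScoreComputer_computeScore -benchtime 1x

The timings quoted in the in-code comments are, of course, machine-dependent.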
--- txcache/score_test.go | 116 ++---------------------------- txcache/testutils_test.go | 12 ---- txcache/txListBySenderMap_test.go | 35 ++++----- 3 files changed, 23 insertions(+), 140 deletions(-) diff --git a/txcache/score_test.go b/txcache/score_test.go index e335bb88..fbf1b2ba 100644 --- a/txcache/score_test.go +++ b/txcache/score_test.go @@ -95,7 +95,7 @@ func computeScoreOfTransaction(dataLength int, gasLimit uint64, gasPrice uint64) return int(computer.computeScore(scoreParams)) } -func BenchmarkScoreComputer_computeRawScore(b *testing.B) { +func BenchmarkScoreComputer_computeScore(b *testing.B) { gasHandler := txcachemocks.NewTxGasHandlerMock() computer := newDefaultScoreComputer(gasHandler) @@ -121,115 +121,7 @@ func BenchmarkScoreComputer_computeRawScore(b *testing.B) { // Results: // - // (a) 10 millisecond(s) to compute the score 1 million times. - // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz - // BenchmarkScoreComputer_computeRawScore-8 124 9812711 ns/op 295 B/op 12 allocs/op -} - - list.AddTx(createTxWithParams([]byte("a"), ".", 1, 1000, 50000, oneBillion), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("b"), ".", 1, 500, 100000, oneBillion), txGasHandler, txFeeHelper) - list.AddTx(createTxWithParams([]byte("c"), ".", 1, 500, 100000, oneBillion), txGasHandler, txFeeHelper) - - require.Equal(t, uint64(3), list.countTx()) - require.Equal(t, int64(2000), list.totalBytes.Get()) - require.Equal(t, int64(250000), list.totalGas.Get()) - require.Equal(t, int64(51588), list.totalFeeScore.Get()) - - scoreParams := list.getScoreParams() - rawScore := computer.computeRawScore(scoreParams) - require.InDelta(t, float64(12.4595615805), rawScore, delta) -} - -func TestDefaultScoreComputer_scoreFluctuatesDeterministicallyWhileTxListForSenderMutates(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPrice(oneBillion) - computer := newDefaultScoreComputer(txFeeHelper) - list := newUnconstrainedListToTest() - - A := createTxWithParams([]byte("A"), ".", 1, 1000, 200000, oneBillion) - B := createTxWithParams([]byte("b"), ".", 1, 500, 100000, oneBillion) - C := createTxWithParams([]byte("c"), ".", 1, 500, 100000, oneBillion) - D := createTxWithParams([]byte("d"), ".", 1, 128, 50000, oneBillion) - - scoreNone := int(computer.computeScore(list.getScoreParams())) - list.AddTx(A, txGasHandler, txFeeHelper) - scoreA := int(computer.computeScore(list.getScoreParams())) - list.AddTx(B, txGasHandler, txFeeHelper) - scoreAB := int(computer.computeScore(list.getScoreParams())) - list.AddTx(C, txGasHandler, txFeeHelper) - scoreABC := int(computer.computeScore(list.getScoreParams())) - list.AddTx(D, txGasHandler, txFeeHelper) - scoreABCD := int(computer.computeScore(list.getScoreParams())) - - require.Equal(t, 0, scoreNone) - require.Equal(t, 18, scoreA) - require.Equal(t, 12, scoreAB) - require.Equal(t, 10, scoreABC) - require.Equal(t, 9, scoreABCD) - - list.RemoveTx(D) - scoreABC = int(computer.computeScore(list.getScoreParams())) - list.RemoveTx(C) - scoreAB = int(computer.computeScore(list.getScoreParams())) - list.RemoveTx(B) - scoreA = int(computer.computeScore(list.getScoreParams())) - list.RemoveTx(A) - scoreNone = int(computer.computeScore(list.getScoreParams())) - - require.Equal(t, 0, scoreNone) - require.Equal(t, 18, scoreA) - require.Equal(t, 12, scoreAB) - require.Equal(t, 10, scoreABC) -} - -func TestDefaultScoreComputer_DifferentSenders(t *testing.T) { - txGasHandler, txFeeHelper := dummyParamsWithGasPrice(oneBillion) - computer := 
newDefaultScoreComputer(txFeeHelper) - - A := createTxWithParams([]byte("a"), "a", 1, 128, 50000, oneBillion) // min value normal tx - B := createTxWithParams([]byte("b"), "b", 1, 128, 50000, uint64(1.5*oneBillion)) // 50% higher value normal tx - C := createTxWithParams([]byte("c"), "c", 1, 128, 10000000, oneBillion) // min value SC call - D := createTxWithParams([]byte("d"), "d", 1, 128, 10000000, uint64(1.5*oneBillion)) // 50% higher value SC call - - listA := newUnconstrainedListToTest() - listA.AddTx(A, txGasHandler, txFeeHelper) - scoreA := int(computer.computeScore(listA.getScoreParams())) - - listB := newUnconstrainedListToTest() - listB.AddTx(B, txGasHandler, txFeeHelper) - scoreB := int(computer.computeScore(listB.getScoreParams())) - - listC := newUnconstrainedListToTest() - listC.AddTx(C, txGasHandler, txFeeHelper) - scoreC := int(computer.computeScore(listC.getScoreParams())) - - listD := newUnconstrainedListToTest() - listD.AddTx(D, txGasHandler, txFeeHelper) - scoreD := int(computer.computeScore(listD.getScoreParams())) - - require.Equal(t, 33, scoreA) - require.Equal(t, 82, scoreB) - require.Equal(t, 15, scoreC) - require.Equal(t, 16, scoreD) - - // adding same type of transactions for each sender decreases the score - for i := 2; i < 1000; i++ { - A = createTxWithParams([]byte("a"+strconv.Itoa(i)), "a", uint64(i), 128, 50000, oneBillion) // min value normal tx - listA.AddTx(A, txGasHandler, txFeeHelper) - B = createTxWithParams([]byte("b"+strconv.Itoa(i)), "b", uint64(i), 128, 50000, uint64(1.5*oneBillion)) // 50% higher value normal tx - listB.AddTx(B, txGasHandler, txFeeHelper) - C = createTxWithParams([]byte("c"+strconv.Itoa(i)), "c", uint64(i), 128, 10000000, oneBillion) // min value SC call - listC.AddTx(C, txGasHandler, txFeeHelper) - D = createTxWithParams([]byte("d"+strconv.Itoa(i)), "d", uint64(i), 128, 10000000, uint64(1.5*oneBillion)) // 50% higher value SC call - listD.AddTx(D, txGasHandler, txFeeHelper) - } - - scoreA = int(computer.computeScore(listA.getScoreParams())) - scoreB = int(computer.computeScore(listB.getScoreParams())) - scoreC = int(computer.computeScore(listC.getScoreParams())) - scoreD = int(computer.computeScore(listD.getScoreParams())) - - require.Equal(t, 3, scoreA) - require.Equal(t, 12, scoreB) - require.Equal(t, 1, scoreC) - require.Equal(t, 1, scoreD) + // (a) 10 ms to compute the score 1 million times: + // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // BenchmarkScoreComputer_computeRawScore-8 124 9812711 ns/op 295 B/op 12 allocs/op } diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 6aab9c7c..1ff1cc44 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -3,10 +3,8 @@ package txcache import ( "encoding/binary" "sync" - "testing" "time" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" ) @@ -151,16 +149,6 @@ func createFakeTxHash(fakeSenderAddress []byte, nonce int) []byte { return bytes } -func measureWithStopWatch(b *testing.B, function func()) { - sw := core.NewStopWatch() - sw.Start("time") - function() - sw.Stop("time") - - duration := sw.GetMeasurementsMap()["time"] - b.ReportMetric(duration, "time@stopWatch") -} - // waitTimeout waits for the waitgroup for the specified max timeout. // Returns true if waiting timed out. 
// Reference: https://stackoverflow.com/a/32843750/1475331 diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index 9c7ac142..72ad56b0 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -6,6 +6,7 @@ import ( "sync" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) @@ -114,26 +115,28 @@ func TestSendersMap_notifyAccountNonce(t *testing.T) { require.True(t, alice.accountNonceKnown.IsSet()) } -func BenchmarkSendersMap_GetSnapshotAscending(b *testing.B) { - if b.N > 10 { - fmt.Println("impractical benchmark: b.N too high") - return - } +func TestBenchmarkSendersMap_GetSnapshotAscending(t *testing.T) { + numSendersValues := []int{50000, 100000, 300000} - numSenders := 250000 - maps := make([]*txListBySenderMap, b.N) - for i := 0; i < b.N; i++ { - maps[i] = createTxListBySenderMap(numSenders) - } + for _, numSenders := range numSendersValues { + myMap := createTxListBySenderMap(numSenders) - b.ResetTimer() + sw := core.NewStopWatch() + sw.Start("time") + snapshot := myMap.getSnapshotAscending() + sw.Stop("time") - for i := 0; i < b.N; i++ { - measureWithStopWatch(b, func() { - snapshot := maps[i].getSnapshotAscending() - require.Len(b, snapshot, numSenders) - }) + require.Len(t, snapshot, numSenders) + fmt.Printf("took %v to sort %d senders\n", sw.GetMeasurementsMap()["time"], numSenders) } + + // Results: + // + // (a) 22 ms to sort 300k senders: + // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // took 0.004527414 to sort 50000 senders + // took 0.00745592 to sort 100000 senders + // took 0.022954026 to sort 300000 senders } func TestSendersMap_GetSnapshots_NoPanic_IfAlsoConcurrentMutation(t *testing.T) { From c1697a5b38672b75387e836a1e928dec97058ef3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 8 Jul 2024 18:36:52 +0300 Subject: [PATCH 020/124] Optimization at insertion time. --- txcache/txListForSender.go | 13 +++++++------ txcache/wrappedTransaction.go | 5 ----- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index b33c6d28..36f777e5 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -143,11 +143,6 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran currentTxGasPrice := currentTx.Tx.GetGasPrice() if currentTxNonce == incomingNonce { - if incomingTx.sameAs(currentTx) { - // The incoming transaction will be discarded - return nil, common.ErrItemAlreadyInCache - } - if currentTxGasPrice > incomingGasPrice { // The incoming transaction will be placed right after the existing one, which has same nonce but higher price. // If the nonces are the same, but the incoming gas price is higher or equal, the search loop continues. @@ -157,7 +152,13 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran // The incoming transaction will be placed right after the existing one, which has same nonce and the same price. 
// (but different hash, because of some other fields like receiver, value or data) // This will order out the transactions having the same nonce and gas price - if bytes.Compare(currentTx.TxHash, incomingTx.TxHash) < 0 { + + comparison := bytes.Compare(currentTx.TxHash, incomingTx.TxHash) + if comparison == 0 { + // The incoming transaction will be discarded + return nil, common.ErrItemAlreadyInCache + } + if comparison < 0 { return element, nil } } diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index 2c432a66..11b60c7a 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -1,7 +1,6 @@ package txcache import ( - "bytes" "math/big" "github.com/multiversx/mx-chain-core-go/data" @@ -17,10 +16,6 @@ type WrappedTransaction struct { TxFee float64 } -func (wrappedTx *WrappedTransaction) sameAs(another *WrappedTransaction) bool { - return bytes.Equal(wrappedTx.TxHash, another.TxHash) -} - // computeFee computes the transaction fee. // The returned fee is also held on the transaction object. func (wrappedTx *WrappedTransaction) computeFee(txGasHandler TxGasHandler) float64 { From a2bc648cbb7aba88398067ac46c1d32632599493 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 8 Jul 2024 19:15:36 +0300 Subject: [PATCH 021/124] Optimization at tx removal. --- txcache/txListForSender.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 36f777e5..dae5567a 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -210,13 +210,17 @@ func (listForSender *txListForSender) findListElementWithTx(txToFind *WrappedTra for element := listForSender.items.Front(); element != nil; element = element.Next() { value := element.Value.(*WrappedTransaction) + nonce := value.Tx.GetNonce() - if bytes.Equal(value.TxHash, txToFindHash) { - return element + // Optimization: first, compare nonces, then hashes. + if nonce == txToFindNonce { + if bytes.Equal(value.TxHash, txToFindHash) { + return element + } } // Optimization: stop search at this point, since the list is sorted by nonce - if value.Tx.GetNonce() > txToFindNonce { + if nonce > txToFindNonce { break } } From 377369b43788016cf747e3be5735d8c1084a1fb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 8 Jul 2024 22:19:11 +0300 Subject: [PATCH 022/124] Bit of refactoring, more tests. 
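For context on how the (now plain int) score is used downstream, recall the selection loop in txCache.go: the per-sender batch size scales linearly with the score,

    batchSizeWithScoreCoefficient = batchSizePerSender * (score + 1)

so, with an illustrative batchSizePerSender of 100, a sender scored 74 may contribute up to 100 * (74 + 1) = 7500 transactions in a selection pass, while a sender scored 0 contributes at most 100.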
--- testscommon/txcachemocks/txGasHandlerMock.go | 6 +- txcache/constants.go | 2 +- txcache/eviction_test.go | 12 ++-- txcache/interface.go | 2 +- txcache/score.go | 4 +- txcache/testutils_test.go | 4 +- txcache/txCache.go | 2 +- txcache/txListForSender.go | 6 +- txcache/txListForSender_test.go | 67 ++++++++++++++++++++ 9 files changed, 86 insertions(+), 19 deletions(-) diff --git a/testscommon/txcachemocks/txGasHandlerMock.go b/testscommon/txcachemocks/txGasHandlerMock.go index e8c84ea7..b4318139 100644 --- a/testscommon/txcachemocks/txGasHandlerMock.go +++ b/testscommon/txcachemocks/txGasHandlerMock.go @@ -64,11 +64,11 @@ func (ghm *TxGasHandlerMock) ComputeTxFee(tx data.TransactionWithFeeHandler) *bi gasPriceForProcessing := uint64(float64(gasPriceForMovement) * ghm.gasPriceModifier) gasLimitForMovement := ghm.minGasLimit + dataLength*ghm.gasPerDataByte - gasLimitForProcessing, err := core.SafeSubUint64(tx.GetGasLimit(), gasLimitForMovement) - if err != nil { - panic(err) + if tx.GetGasLimit() < gasLimitForMovement { + panic("tx.GetGasLimit() < gasLimitForMovement") } + gasLimitForProcessing := tx.GetGasLimit() - gasLimitForMovement feeForMovement := core.SafeMul(gasPriceForMovement, gasLimitForMovement) feeForProcessing := core.SafeMul(gasPriceForProcessing, gasLimitForProcessing) fee := big.NewInt(0).Add(feeForMovement, feeForProcessing) diff --git a/txcache/constants.go b/txcache/constants.go index 70b4c470..3a95f02d 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -10,4 +10,4 @@ const numEvictedTxsToDisplay = 3 const excellentGasPriceFactor = 5 -const numberOfScoreChunks = uint32(100) +const numberOfScoreChunks = 100 diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index ce2df9bc..e9df616e 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -139,10 +139,10 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { scoreCarol := cache.getScoreOfSender("carol") scoreEve := cache.getScoreOfSender("eve") - require.Equal(t, uint32(62), scoreAlice) - require.Equal(t, uint32(62), scoreBob) - require.Equal(t, uint32(69), scoreCarol) - require.Equal(t, uint32(80), scoreEve) + require.Equal(t, 62, scoreAlice) + require.Equal(t, 62, scoreBob) + require.Equal(t, 69, scoreCarol) + require.Equal(t, 80, scoreEve) cache.doEviction() require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) @@ -221,8 +221,8 @@ func TestTxCache_EvictSendersWhile_ShouldContinueBreak(t *testing.T) { require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) - cache.AddTx(createTx([]byte("hash-bob"), "bob", uint64(1))) + cache.AddTx(createTx([]byte("hash-alice"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-bob"), "bob", 1)) cache.makeSnapshotOfSenders() diff --git a/txcache/interface.go b/txcache/interface.go index e099cb1d..6037b087 100644 --- a/txcache/interface.go +++ b/txcache/interface.go @@ -7,7 +7,7 @@ import ( ) type scoreComputer interface { - computeScore(scoreParams senderScoreParams) uint32 + computeScore(scoreParams senderScoreParams) int } // TxGasHandler handles a transaction gas and gas cost diff --git a/txcache/score.go b/txcache/score.go index ae24fd66..cc32f6ca 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -48,9 +48,9 @@ func computeWorstPpu(txGasHandler TxGasHandler) float64 { } // computeScore computes the score of the sender, as an integer in [0, numberOfScoreChunks] -func (computer *defaultScoreComputer) computeScore(scoreParams senderScoreParams) uint32 { +func (computer 
*defaultScoreComputer) computeScore(scoreParams senderScoreParams) int { rawScore := computer.computeRawScore(scoreParams) - truncatedScore := uint32(rawScore) + truncatedScore := int(rawScore) if truncatedScore > numberOfScoreChunks { return numberOfScoreChunks diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 1ff1cc44..83892ae7 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -36,7 +36,7 @@ func (txMap *txListBySenderMap) testGetListForSender(sender string) *txListForSe return list } -func (cache *TxCache) getScoreOfSender(sender string) uint32 { +func (cache *TxCache) getScoreOfSender(sender string) int { list := cache.getListForSender(sender) scoreParams := list.getScoreParams() computer := cache.txListBySender.scoreComputer @@ -171,6 +171,6 @@ var _ scoreComputer = (*disabledScoreComputer)(nil) type disabledScoreComputer struct { } -func (computer *disabledScoreComputer) computeScore(_ senderScoreParams) uint32 { +func (computer *disabledScoreComputer) computeScore(_ senderScoreParams) int { return 0 } diff --git a/txcache/txCache.go b/txcache/txCache.go index e4419260..b8d34a27 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -123,7 +123,7 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender copiedInThisPass := 0 for _, txList := range snapshotOfSenders { - batchSizeWithScoreCoefficient := batchSizePerSender * int(txList.getScore()+1) + batchSizeWithScoreCoefficient := batchSizePerSender * (txList.getScore() + 1) // Reset happens on first pass only isFirstBatch := pass == 0 journal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSizeWithScoreCoefficient, bandwidthPerSender) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index dae5567a..341b054f 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -110,7 +110,7 @@ func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction, func (listForSender *txListForSender) recomputeScore() { scoreParams := listForSender.getScoreParams() score := listForSender.scoreComputer.computeScore(scoreParams) - listForSender.score.Set(score) + listForSender.score.Set(uint32(score)) } // This function should only be used in critical section (listForSender.mutex) @@ -430,8 +430,8 @@ func (listForSender *txListForSender) isGracePeriodExceeded() bool { return numFailedSelections > senderGracePeriodUpperBound } -func (listForSender *txListForSender) getScore() uint32 { - return listForSender.score.Get() +func (listForSender *txListForSender) getScore() int { + return int(listForSender.score.Get()) } // GetKey returns the key diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 66539bbc..ef87d657 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -405,6 +405,73 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { }() } +func TestListForSender_transactionAddAndRemove_updateScore(t *testing.T) { + txGasHandler := txcachemocks.NewTxGasHandlerMock() + alice := newUnconstrainedListToTest() + bob := newUnconstrainedListToTest() + + a := createTx([]byte("a"), ".", 1) + b := createTx([]byte("b"), ".", 1) + c := createTx([]byte("c"), ".", 2).withDataLength(42).withGasLimit(50000 + 1500*42) + d := createTx([]byte("d"), ".", 2).withDataLength(84).withGasLimit(50000 + 1500*84) + e := createTx([]byte("e"), ".", 3).withDataLength(1).withGasLimit(50000000).withGasPrice(oneBillion) + f := createTx([]byte("f"), ".", 
3).withDataLength(1).withGasLimit(150000000).withGasPrice(oneBillion) + g := createTx([]byte("g"), ".", 4).withDataLength(7).withGasLimit(5000000).withGasPrice(oneBillion) + h := createTx([]byte("h"), ".", 4).withDataLength(7).withGasLimit(5000000).withGasPrice(oneBillion) + i := createTx([]byte("i"), ".", 5).withDataLength(42).withGasLimit(5000000).withGasPrice(2 * oneBillion) + j := createTx([]byte("j"), ".", 5).withDataLength(42).withGasLimit(5000000).withGasPrice(3 * oneBillion) + k := createTx([]byte("k"), ".", 5).withDataLength(42).withGasLimit(5000000).withGasPrice(2 * oneBillion) + l := createTx([]byte("l"), ".", 8) + + alice.AddTx(a, txGasHandler) + bob.AddTx(b, txGasHandler) + + require.Equal(t, 74, alice.getScore()) + require.Equal(t, 74, bob.getScore()) + + alice.AddTx(c, txGasHandler) + bob.AddTx(d, txGasHandler) + + require.Equal(t, 74, alice.getScore()) + require.Equal(t, 74, bob.getScore()) + + alice.AddTx(e, txGasHandler) + bob.AddTx(f, txGasHandler) + + require.Equal(t, 5, alice.getScore()) + require.Equal(t, 2, bob.getScore()) + + alice.AddTx(g, txGasHandler) + bob.AddTx(h, txGasHandler) + + require.Equal(t, 6, alice.getScore()) + require.Equal(t, 3, bob.getScore()) + + alice.AddTx(i, txGasHandler) + bob.AddTx(j, txGasHandler) + + require.Equal(t, 10, alice.getScore()) + require.Equal(t, 6, bob.getScore()) + + // Bob adds a transaction with duplicated nonce + bob.AddTx(k, txGasHandler) + + require.Equal(t, 10, alice.getScore()) + require.Equal(t, 0, bob.getScore()) + + require.True(t, alice.RemoveTx(a)) + require.True(t, alice.RemoveTx(c)) + + require.Equal(t, 7, alice.getScore()) + require.Equal(t, 0, bob.getScore()) + + // Alice comes with a nonce gap + alice.AddTx(l, txGasHandler) + + require.Equal(t, 0, alice.getScore()) + require.Equal(t, 0, bob.getScore()) +} + func newUnconstrainedListToTest() *txListForSender { return newListToTest(math.MaxUint32, math.MaxUint32) } From 610342c602180f9f82983311fef2b2e89b1b013d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 06:54:12 +0300 Subject: [PATCH 023/124] Monitor senders histogram. 
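Note on this patch: the histogram is simply a count of how many senders fall into each score group returned by getSendersGroupedByScore(). A minimal, runnable sketch of that counting step (plain ints stand in for *txListForSender purely for illustration):

    package main

    import "fmt"

    func main() {
        // Stand-in for the per-score groups produced by getSendersGroupedByScore();
        // each group holds placeholder sender IDs here instead of *txListForSender.
        scoreGroups := [][]int{
            {},        // score 0: no senders
            {1, 2, 3}, // score 1: three senders
            {4},       // score 2: one sender
        }

        // Count how many senders fall into each score group
        // (the gist of monitorSendersScoreHistogram()).
        histogram := make([]int, len(scoreGroups))
        for i := 0; i < len(scoreGroups); i++ {
            histogram[i] = len(scoreGroups[i])
        }

        fmt.Println("histogram:", histogram) // histogram: [0 3 1]
    }
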
--- testscommon/txcachemocks/txGasHandlerMock.go | 6 ++++++ txcache/monitoring.go | 17 ++++++++++------- txcache/txListBySenderMap.go | 2 ++ 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/testscommon/txcachemocks/txGasHandlerMock.go b/testscommon/txcachemocks/txGasHandlerMock.go index b4318139..ba0e849a 100644 --- a/testscommon/txcachemocks/txGasHandlerMock.go +++ b/testscommon/txcachemocks/txGasHandlerMock.go @@ -47,6 +47,12 @@ func (ghm *TxGasHandlerMock) WithMinGasPrice(minGasPrice uint64) *TxGasHandlerMo return ghm } +// WithGasPriceModifier - +func (ghm *TxGasHandlerMock) WithGasPriceModifier(gasPriceModifier float64) *TxGasHandlerMock { + ghm.gasPriceModifier = gasPriceModifier + return ghm +} + // MinGasPrice - func (ghm *TxGasHandlerMock) MinGasPrice() uint64 { return ghm.minGasPrice diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 8dbd0dc0..2075d8ca 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -102,12 +102,6 @@ func (cache *TxCache) monitorSweepingEnd(numTxs uint32, numSenders uint32, stopW stopWatch.Stop("sweeping") duration := stopWatch.GetMeasurement("sweeping") log.Debug("TxCache: swept senders:", "name", cache.name, "duration", duration, "txs", numTxs, "senders", numSenders) - cache.displaySendersHistogram() -} - -func (cache *TxCache) displaySendersHistogram() { - backingMap := cache.txListBySender.backingMap - log.Debug("TxCache.sendersHistogram:", "scoreChunks", backingMap.ScoreChunksCounts()) } // evictionJournal keeps a short journal about the eviction process @@ -171,7 +165,6 @@ func (cache *TxCache) diagnoseDeeply() { log.Debug("TxCache.diagnoseDeeply()", "name", cache.name, "duration", duration) journal.display() - cache.displaySendersHistogram() } type internalConsistencyJournal struct { @@ -248,3 +241,13 @@ func (cache *TxCache) displaySendersSummary() { summary := builder.String() log.Debug("TxCache.displaySendersSummary()", "name", cache.name, "summary\n", summary) } + +func monitorSendersScoreHistogram(scoreGroups [][]*txListForSender) { + histogram := make([]int, numberOfScoreChunks) + + for i := 0; i < len(scoreGroups); i++ { + histogram[i] = len(scoreGroups[i]) + } + + log.Debug("TxCache.monitorSendersScoreHistogram():", "histogram", histogram) +} diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index f0934acf..764e5a93 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -169,6 +169,8 @@ func (txMap *txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender groups[score] = append(groups[score], listForSender) }) + monitorSendersScoreHistogram(groups) + return groups } From ce5c4339178ffcd29291cd1dd0fd42eef0d19281 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 08:36:08 +0300 Subject: [PATCH 024/124] Additional logging. 
--- txcache/txCache.go | 8 ++++++-- txcache/txListBySenderMap.go | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/txcache/txCache.go b/txcache/txCache.go index b8d34a27..b86ef244 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -68,6 +68,8 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { return false, false } + log.Trace("TxCache.AddTx()", "name", cache.name, "tx", tx.TxHash) + if cache.config.EvictionEnabled { cache.doEviction() } @@ -82,7 +84,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { // - B won't add to "txByHash" (duplicate) // - B adds to "txListBySender" // - A won't add to "txListBySender" (duplicate) - log.Trace("TxCache.AddTx(): slight inconsistency detected:", "name", cache.name, "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) + log.Debug("TxCache.AddTx(): slight inconsistency detected:", "name", cache.name, "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) } if len(evicted) > 0 { @@ -168,6 +170,8 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { cache.mutTxOperation.Lock() defer cache.mutTxOperation.Unlock() + log.Trace("TxCache.RemoveTxByHash()", "name", cache.name, "tx", txHash) + tx, foundInByHash := cache.txByHash.removeTx(string(txHash)) if !foundInByHash { return false @@ -183,7 +187,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { // - B reaches "cache.txByHash.RemoveTxsBulk()" // - B reaches "cache.txListBySender.RemoveSendersBulk()" // - A reaches "cache.txListBySender.removeTx()", but sender does not exist anymore - log.Trace("TxCache.RemoveTxByHash(): slight inconsistency detected: !foundInBySender", "name", cache.name, "tx", txHash) + log.Debug("TxCache.RemoveTxByHash(): slight inconsistency detected: !foundInBySender", "name", cache.name, "tx", txHash) } return true diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 764e5a93..acd6182b 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -86,7 +86,7 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { if !ok { // This happens when a sender whose transactions were selected for processing is removed from cache in the meantime. // When it comes to remove one if its transactions due to processing (commited / finalized block), they don't exist in cache anymore. - log.Trace("txListBySenderMap.removeTx() detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) + log.Debug("txListBySenderMap.removeTx() detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) return false } From 4d835de3b0d2d9c86324191db17241704bf8b9fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 09:01:19 +0300 Subject: [PATCH 025/124] Batchsize, bandwidth computation. 
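Note on this patch: both helpers scale a per-sender allowance by the sender's score, and a score of 0 collapses the allowance to a token value of 1. A self-contained sketch of the same arithmetic, with illustrative base values that are not taken from any real configuration:

    package main

    import "fmt"

    const numberOfScoreChunks = 100

    // Mirrors computeSenderBatchSize() from this patch: score 0 gets a token batch of 1,
    // otherwise the base batch size is multiplied by the score.
    func computeSenderBatchSize(score int, baseBatchSize int) int {
        if score == 0 {
            return 1
        }
        return baseBatchSize * score
    }

    // Mirrors computeSenderBandwidth() from this patch: score 0 gets a token bandwidth of 1,
    // otherwise the base bandwidth is scaled by score / numberOfScoreChunks.
    func computeSenderBandwidth(score int, baseBandwidth uint64) uint64 {
        if score == 0 {
            return 1
        }
        return uint64(float64(baseBandwidth) * float64(score) / float64(numberOfScoreChunks))
    }

    func main() {
        // Illustrative base values only.
        baseBatchSize := 100
        baseBandwidth := uint64(120_000_000)

        for _, score := range []int{0, 1, 62, 100} {
            fmt.Println(score,
                computeSenderBatchSize(score, baseBatchSize),
                computeSenderBandwidth(score, baseBandwidth))
        }
        // 0 1 1
        // 1 100 1200000
        // 62 6200 74400000
        // 100 10000 120000000
    }
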
--- txcache/txCache.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/txcache/txCache.go b/txcache/txCache.go index b86ef244..d8ba7aab 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -125,10 +125,13 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender copiedInThisPass := 0 for _, txList := range snapshotOfSenders { - batchSizeWithScoreCoefficient := batchSizePerSender * (txList.getScore() + 1) + score := txList.getScore() + batchSize := cache.computeSenderBatchSize(score, batchSizePerSender) + bandwidth := cache.computeSenderBandwidth(score, bandwidthPerSender) + // Reset happens on first pass only isFirstBatch := pass == 0 - journal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSizeWithScoreCoefficient, bandwidthPerSender) + journal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSize, bandwidth) cache.monitorBatchSelectionEnd(journal) if isFirstBatch { @@ -160,6 +163,22 @@ func (cache *TxCache) getSendersEligibleForSelection() []*txListForSender { return cache.txListBySender.getSnapshotDescending() } +func (cache *TxCache) computeSenderBatchSize(score int, baseBatchSize int) int { + if score == 0 { + return 1 + } + + return baseBatchSize * score +} + +func (cache *TxCache) computeSenderBandwidth(score int, baseBandwidth uint64) uint64 { + if score == 0 { + return 1 + } + + return uint64(float64(baseBandwidth) * float64(score) / float64(numberOfScoreChunks)) +} + func (cache *TxCache) doAfterSelection() { cache.sweepSweepable() cache.Diagnose(false) From aec42e770acb8cfaa1fcd4aaa8011ca77db8f603 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 10:15:51 +0300 Subject: [PATCH 026/124] Additional monitoring. 
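Note on this patch: with the dedicated "txcache-selection" logger set to trace, the sorted senders and the selected transactions are dumped as compact JSON records. A small, self-contained sketch of the transaction record shape as defined in printing.go (all field values below are invented):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Same shape as printedTransaction in printing.go; the values used below are invented.
    type printedTransaction struct {
        Hash     string `json:"hash"`
        Nonce    uint64 `json:"nonce"`
        Receiver string `json:"receiver"`
        Sender   string `json:"sender"`
        GasPrice uint64 `json:"gasPrice"`
        GasLimit uint64 `json:"gasLimit"`
    }

    func main() {
        record := printedTransaction{
            Hash:     "aabbcc",
            Nonce:    7,
            Receiver: "0102",
            Sender:   "0304",
            GasPrice: 1_000_000_000,
            GasLimit: 50_000,
        }

        // One record, marshalled compactly; the cache emits a stream of such objects
        // under the "txcache-selection" logger at trace level.
        line, _ := json.Marshal(record)
        fmt.Println(string(line))
        // {"hash":"aabbcc","nonce":7,"receiver":"0102","sender":"0304","gasPrice":1000000000,"gasLimit":50000}
    }
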
--- txcache/monitoring.go | 13 +++++++- txcache/printing.go | 73 +++++++++++++++++++++++++++++++++++++++++++ txcache/txCache.go | 6 ++-- 3 files changed, 88 insertions(+), 4 deletions(-) create mode 100644 txcache/printing.go diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 2075d8ca..03b9b8dd 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -10,6 +10,7 @@ import ( ) var log = logger.GetOrCreate("txcache") +var logSelection = logger.GetOrCreate("txcache-selection") func (cache *TxCache) monitorEvictionWrtSenderLimit(sender []byte, evicted [][]byte) { log.Debug("TxCache.monitorEvictionWrtSenderLimit()", "name", cache.name, "sender", sender, "num", len(evicted)) @@ -48,7 +49,7 @@ func (cache *TxCache) monitorSelectionStart() *core.StopWatch { return sw } -func (cache *TxCache) monitorSelectionEnd(selection []*WrappedTransaction, stopWatch *core.StopWatch) { +func (cache *TxCache) monitorSelectionEnd(sortedSenders []*txListForSender, selection []*WrappedTransaction, stopWatch *core.StopWatch) { stopWatch.Stop("selection") duration := stopWatch.GetMeasurement("selection") numSendersSelected := cache.numSendersSelected.Reset() @@ -63,6 +64,16 @@ func (cache *TxCache) monitorSelectionEnd(selection []*WrappedTransaction, stopW "numSendersWithMiddleGap", numSendersWithMiddleGap, "numSendersInGracePeriod", numSendersInGracePeriod, ) + + if logSelection.GetLevel() != logger.LogTrace { + return + } + + logSelection.Trace("Sorted senders (as newline-separated JSON):") + logSelection.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) + + logSelection.Trace("Selected transactions (as newline-separated JSON):") + logSelection.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) } type batchSelectionJournal struct { diff --git a/txcache/printing.go b/txcache/printing.go new file mode 100644 index 00000000..f2c5a62c --- /dev/null +++ b/txcache/printing.go @@ -0,0 +1,73 @@ +package txcache + +import ( + "encoding/hex" + "encoding/json" + "strings" +) + +type printedTransaction struct { + Hash string `json:"hash"` + Nonce uint64 `json:"nonce"` + Receiver string `json:"receiver"` + Sender string `json:"sender"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` +} + +type printedSender struct { + Address string `json:"address"` + Score int `json:"score"` + Nonce uint64 `json:"nonce"` + IsNonceKnown bool `json:"isNonceKnown"` +} + +func marshalSendersToNewlineDelimitedJson(senders []*txListForSender) string { + builder := strings.Builder{} + builder.WriteString("\n") + + for _, txListForSender := range senders { + printedSender := convertTxListForSenderToPrintedSender(txListForSender) + printedSenderJson, _ := json.Marshal(printedSender) + builder.WriteString(string(printedSenderJson)) + } + + builder.WriteString("\n") + return builder.String() +} + +func marshalTransactionsToNewlineDelimitedJson(transactions []*WrappedTransaction) string { + builder := strings.Builder{} + builder.WriteString("\n") + + for _, wrappedTx := range transactions { + printedTx := convertWrappedTransactionToPrintedTransaction(wrappedTx) + printedTxJson, _ := json.Marshal(printedTx) + builder.WriteString(string(printedTxJson)) + } + + builder.WriteString("\n") + return builder.String() +} + +func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction) *printedTransaction { + transaction := wrappedTx.Tx + + return &printedTransaction{ + Hash: hex.EncodeToString(wrappedTx.TxHash), + Nonce: transaction.GetNonce(), + Receiver: 
hex.EncodeToString(transaction.GetRcvAddr()), + Sender: hex.EncodeToString(transaction.GetSndAddr()), + GasPrice: transaction.GetGasPrice(), + GasLimit: transaction.GetGasLimit(), + } +} + +func convertTxListForSenderToPrintedSender(txListForSender *txListForSender) *printedSender { + return &printedSender{ + Address: hex.EncodeToString([]byte(txListForSender.sender)), + Score: txListForSender.getScore(), + Nonce: txListForSender.accountNonce.Get(), + IsNonceKnown: txListForSender.accountNonceKnown.IsSet(), + } +} diff --git a/txcache/txCache.go b/txcache/txCache.go index d8ba7aab..9acc07b6 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -119,12 +119,12 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender resultFillIndex := 0 resultIsFull := false - snapshotOfSenders := cache.getSendersEligibleForSelection() + senders := cache.getSendersEligibleForSelection() for pass := 0; !resultIsFull; pass++ { copiedInThisPass := 0 - for _, txList := range snapshotOfSenders { + for _, txList := range senders { score := txList.getScore() batchSize := cache.computeSenderBatchSize(score, batchSizePerSender) bandwidth := cache.computeSenderBandwidth(score, bandwidthPerSender) @@ -155,7 +155,7 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender } result = result[:resultFillIndex] - cache.monitorSelectionEnd(result, stopWatch) + cache.monitorSelectionEnd(senders, result, stopWatch) return result } From 14f871996c2de61dcdc87e4091fb1653f47f09d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 11:00:44 +0300 Subject: [PATCH 027/124] Adjust printing. --- txcache/monitoring.go | 12 ++++++++---- txcache/printing.go | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 03b9b8dd..e5155024 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -69,11 +69,15 @@ func (cache *TxCache) monitorSelectionEnd(sortedSenders []*txListForSender, sele return } - logSelection.Trace("Sorted senders (as newline-separated JSON):") - logSelection.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) + if len(sortedSenders) > 0 { + logSelection.Trace("Sorted senders (as newline-separated JSON):") + logSelection.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) + } - logSelection.Trace("Selected transactions (as newline-separated JSON):") - logSelection.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) + if len(selection) == 0 { + logSelection.Trace("Selected transactions (as newline-separated JSON):") + logSelection.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) + } } type batchSelectionJournal struct { diff --git a/txcache/printing.go b/txcache/printing.go index f2c5a62c..17f45d28 100644 --- a/txcache/printing.go +++ b/txcache/printing.go @@ -9,10 +9,10 @@ import ( type printedTransaction struct { Hash string `json:"hash"` Nonce uint64 `json:"nonce"` - Receiver string `json:"receiver"` - Sender string `json:"sender"` GasPrice uint64 `json:"gasPrice"` GasLimit uint64 `json:"gasLimit"` + Sender string `json:"sender"` + Receiver string `json:"receiver"` } type printedSender struct { From 3557e7c04b592e21d886a47d60946d065796c82d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 14:11:15 +0300 Subject: [PATCH 028/124] computeSelectionSenderConstraints() based on score. 
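Note on this patch: the two previous helpers are folded into a single function that scales both the batch size and the bandwidth by score / numberOfScoreChunks, still degrading to (1, 1) for score 0. A standalone sketch of the arithmetic, reusing the base values from the test added in this patch:

    package main

    import "fmt"

    const numberOfScoreChunks = 100

    // Mirrors computeSelectionSenderConstraints() from this patch.
    func computeSelectionSenderConstraints(score int, baseBatchSize int, baseBandwidth uint64) (int, uint64) {
        if score == 0 {
            // Senders with score 0 only get a token allowance.
            return 1, 1
        }

        scoreDivision := float64(score) / float64(numberOfScoreChunks)
        batchSize := int(float64(baseBatchSize) * scoreDivision)
        bandwidth := uint64(float64(baseBandwidth) * scoreDivision)
        return batchSize, bandwidth
    }

    func main() {
        for _, score := range []int{0, 1, 74, 100} {
            batchSize, bandwidth := computeSelectionSenderConstraints(score, 100, 120_000_000)
            fmt.Println(score, batchSize, bandwidth)
        }
        // 0 1 1
        // 1 1 1200000
        // 74 74 88800000
        // 100 100 120000000
    }
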
--- txcache/txCache.go | 19 +++++++------------ txcache/txCache_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 12 deletions(-) diff --git a/txcache/txCache.go b/txcache/txCache.go index 9acc07b6..303ad04d 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -126,8 +126,7 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender for _, txList := range senders { score := txList.getScore() - batchSize := cache.computeSenderBatchSize(score, batchSizePerSender) - bandwidth := cache.computeSenderBandwidth(score, bandwidthPerSender) + batchSize, bandwidth := cache.computeSelectionSenderConstraints(score, batchSizePerSender, bandwidthPerSender) // Reset happens on first pass only isFirstBatch := pass == 0 @@ -163,20 +162,16 @@ func (cache *TxCache) getSendersEligibleForSelection() []*txListForSender { return cache.txListBySender.getSnapshotDescending() } -func (cache *TxCache) computeSenderBatchSize(score int, baseBatchSize int) int { +func (cache *TxCache) computeSelectionSenderConstraints(score int, baseBatchSize int, baseBandwidth uint64) (int, uint64) { if score == 0 { - return 1 + return 1, 1 } - return baseBatchSize * score -} - -func (cache *TxCache) computeSenderBandwidth(score int, baseBandwidth uint64) uint64 { - if score == 0 { - return 1 - } + scoreDivision := float64(score) / float64(numberOfScoreChunks) + batchSize := int(float64(baseBatchSize) * scoreDivision) + bandwidth := uint64(float64(baseBandwidth) * scoreDivision) - return uint64(float64(baseBandwidth) * float64(score) / float64(numberOfScoreChunks)) + return batchSize, bandwidth } func (cache *TxCache) doAfterSelection() { diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index ffe6a372..26134e9c 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -628,6 +628,36 @@ func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *t cache.Clear() } +func TestTxCache_computeSelectionSenderConstraints(t *testing.T) { + cache := newUnconstrainedCacheToTest() + baseBatchSize := 100 + baseBandwidth := uint64(120000000) + + batchSize, bandwidth := cache.computeSelectionSenderConstraints(100, baseBatchSize, baseBandwidth) + require.Equal(t, 100, batchSize) + require.Equal(t, 120000000, int(bandwidth)) + + batchSize, bandwidth = cache.computeSelectionSenderConstraints(99, baseBatchSize, baseBandwidth) + require.Equal(t, 99, batchSize) + require.Equal(t, 118800000, int(bandwidth)) + + batchSize, bandwidth = cache.computeSelectionSenderConstraints(74, baseBatchSize, baseBandwidth) + require.Equal(t, 74, batchSize) + require.Equal(t, 88800000, int(bandwidth)) + + batchSize, bandwidth = cache.computeSelectionSenderConstraints(74, baseBatchSize, baseBandwidth) + require.Equal(t, 74, batchSize) + require.Equal(t, 88800000, int(bandwidth)) + + batchSize, bandwidth = cache.computeSelectionSenderConstraints(1, baseBatchSize, baseBandwidth) + require.Equal(t, 1, batchSize) + require.Equal(t, 1200000, int(bandwidth)) + + batchSize, bandwidth = cache.computeSelectionSenderConstraints(0, baseBatchSize, baseBandwidth) + require.Equal(t, 1, batchSize) + require.Equal(t, 1, int(bandwidth)) +} + func newUnconstrainedCacheToTest() *TxCache { txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(ConfigSourceMe{ From b1b22d27d903405ac1e42f87cd4ff536ea754f25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 15:10:22 +0300 Subject: [PATCH 029/124] Fix condition. 
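Note on this patch: the condition guarding the trace dump of selected transactions was inverted in the earlier printing patch (it fired only for empty selections). For reference, a tiny sketch of the intended guards, with simplified stand-ins for the real trace-level logger calls:

    package main

    import "fmt"

    // Simplified stand-ins for the real logger calls, for illustration only.
    func traceSenders(sortedSenders []string) { fmt.Println("senders:", sortedSenders) }
    func traceSelection(selection []string)   { fmt.Println("selection:", selection) }

    func main() {
        sortedSenders := []string{"alice", "bob"}
        selection := []string{"tx-1", "tx-2"}

        // Senders are dumped only when there is at least one of them ...
        if len(sortedSenders) > 0 {
            traceSenders(sortedSenders)
        }

        // ... and selected transactions only when at least one was selected
        // (the earlier printing patch mistakenly used "== 0" here).
        if len(selection) > 0 {
            traceSelection(selection)
        }
    }
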
--- txcache/monitoring.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index e5155024..56be0462 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -74,7 +74,7 @@ func (cache *TxCache) monitorSelectionEnd(sortedSenders []*txListForSender, sele logSelection.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) } - if len(selection) == 0 { + if len(selection) > 0 { logSelection.Trace("Selected transactions (as newline-separated JSON):") logSelection.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) } From acdfd8d2d25ce640bf2106339f78b3333824de17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 18:47:41 +0300 Subject: [PATCH 030/124] Rename constant, fix array length. --- txcache/constants.go | 2 +- txcache/monitoring.go | 2 +- txcache/score.go | 9 ++++++--- txcache/txCache.go | 2 +- txcache/txListBySenderMap.go | 4 ++-- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/txcache/constants.go b/txcache/constants.go index 3a95f02d..ee6509c3 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -10,4 +10,4 @@ const numEvictedTxsToDisplay = 3 const excellentGasPriceFactor = 5 -const numberOfScoreChunks = 100 +const maxSenderScore = 100 diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 56be0462..37a3da08 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -258,7 +258,7 @@ func (cache *TxCache) displaySendersSummary() { } func monitorSendersScoreHistogram(scoreGroups [][]*txListForSender) { - histogram := make([]int, numberOfScoreChunks) + histogram := make([]int, len(scoreGroups)) for i := 0; i < len(scoreGroups); i++ { histogram[i] = len(scoreGroups[i]) diff --git a/txcache/score.go b/txcache/score.go index cc32f6ca..ddf5fcad 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -25,7 +25,7 @@ func newDefaultScoreComputer(txGasHandler TxGasHandler) *defaultScoreComputer { excellentPpu := float64(txGasHandler.MinGasPrice()) * excellentGasPriceFactor excellentPpuNormalized := excellentPpu / worstPpu excellentPpuNormalizedLog := math.Log(excellentPpuNormalized) - scoreScalingFactor := float64(numberOfScoreChunks) / excellentPpuNormalizedLog + scoreScalingFactor := float64(maxSenderScore) / excellentPpuNormalizedLog return &defaultScoreComputer{ worstPpuLog: worstPpuLog, @@ -52,8 +52,11 @@ func (computer *defaultScoreComputer) computeScore(scoreParams senderScoreParams rawScore := computer.computeRawScore(scoreParams) truncatedScore := int(rawScore) - if truncatedScore > numberOfScoreChunks { - return numberOfScoreChunks + if truncatedScore < 0 { + return 0 + } + if truncatedScore > maxSenderScore { + return maxSenderScore } return truncatedScore diff --git a/txcache/txCache.go b/txcache/txCache.go index 303ad04d..bc778a04 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -167,7 +167,7 @@ func (cache *TxCache) computeSelectionSenderConstraints(score int, baseBatchSize return 1, 1 } - scoreDivision := float64(score) / float64(numberOfScoreChunks) + scoreDivision := float64(score) / float64(maxSenderScore) batchSize := int(float64(baseBatchSize) * scoreDivision) bandwidth := uint64(float64(baseBandwidth) * scoreDivision) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index acd6182b..30be499f 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -154,9 +154,9 @@ func (txMap *txListBySenderMap) getSnapshotDescending() []*txListForSender { } func (txMap 
*txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender { - groups := make([][]*txListForSender, numberOfScoreChunks) + groups := make([][]*txListForSender, maxSenderScore+1) // Hint for pre-allocating slice for each group (imprecise, but reasonable). - groupSizeHint := txMap.counter.Get() / int64(numberOfScoreChunks) / 2 + groupSizeHint := txMap.counter.Get() / int64(maxSenderScore) / 2 txMap.backingMap.IterCb(func(key string, item interface{}) { listForSender := item.(*txListForSender) From f996c680b3a4503d7be07f61b11e2c2c62902d7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 10 Jul 2024 20:14:29 +0300 Subject: [PATCH 031/124] Renaming. --- txcache/monitoring.go | 4 ++-- txcache/txCache.go | 10 ++++----- txcache/txListForSender.go | 2 +- txcache/txListForSender_test.go | 38 ++++++++++++++++----------------- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 37a3da08..79ee3e7e 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -81,7 +81,7 @@ func (cache *TxCache) monitorSelectionEnd(sortedSenders []*txListForSender, sele } type batchSelectionJournal struct { - copied int + selectedNum int isFirstBatch bool hasInitialGap bool hasMiddleGap bool @@ -102,7 +102,7 @@ func (cache *TxCache) monitorBatchSelectionEnd(journal batchSelectionJournal) { if journal.isGracePeriod { cache.numSendersInGracePeriod.Increment() - } else if journal.copied > 0 { + } else if journal.selectedNum > 0 { cache.numSendersSelected.Increment() } } diff --git a/txcache/txCache.go b/txcache/txCache.go index bc778a04..c5139dda 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -122,7 +122,7 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender senders := cache.getSendersEligibleForSelection() for pass := 0; !resultIsFull; pass++ { - copiedInThisPass := 0 + numSelectedInThisPass := 0 for _, txList := range senders { score := txList.getScore() @@ -137,18 +137,18 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender cache.collectSweepable(txList) } - resultFillIndex += journal.copied - copiedInThisPass += journal.copied + resultFillIndex += journal.selectedNum + numSelectedInThisPass += journal.selectedNum resultIsFull = resultFillIndex == numRequested if resultIsFull { break } } - nothingCopiedThisPass := copiedInThisPass == 0 + nothingSelectedInThisPass := numSelectedInThisPass == 0 // No more passes needed - if nothingCopiedThisPass { + if nothingSelectedInThisPass { break } } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 341b054f..3a04da9d 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -297,7 +297,7 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati listForSender.copyBatchIndex = element listForSender.copyPreviousNonce = previousNonce - journal.copied = copied + journal.selectedNum = copied return journal } diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index ef87d657..b9a4963d 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -183,22 +183,22 @@ func TestListForSender_SelectBatchTo(t *testing.T) { // First batch journal := list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 50, journal.copied) + require.Equal(t, 50, journal.selectedNum) require.NotNil(t, destination[49]) require.Nil(t, destination[50]) // Second batch journal = list.selectBatchTo(false, 
destination[50:], 50, math.MaxUint64) - require.Equal(t, 50, journal.copied) + require.Equal(t, 50, journal.selectedNum) require.NotNil(t, destination[99]) // No third batch journal = list.selectBatchTo(false, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.copied) + require.Equal(t, 0, journal.selectedNum) // Restart copy journal = list.selectBatchTo(true, destination, 12345, math.MaxUint64) - require.Equal(t, 100, journal.copied) + require.Equal(t, 100, journal.selectedNum) } func TestListForSender_SelectBatchToWithLimitedGasBandwidth(t *testing.T) { @@ -216,23 +216,23 @@ func TestListForSender_SelectBatchToWithLimitedGasBandwidth(t *testing.T) { // First batch journal := list.selectBatchTo(true, destination, 50, 500000) - require.Equal(t, 1, journal.copied) + require.Equal(t, 1, journal.selectedNum) require.NotNil(t, destination[0]) require.Nil(t, destination[1]) // Second batch journal = list.selectBatchTo(false, destination[1:], 50, 20000000) - require.Equal(t, 20, journal.copied) + require.Equal(t, 20, journal.selectedNum) require.NotNil(t, destination[20]) require.Nil(t, destination[21]) // third batch journal = list.selectBatchTo(false, destination[21:], 20, math.MaxUint64) - require.Equal(t, 19, journal.copied) + require.Equal(t, 19, journal.selectedNum) // Restart copy journal = list.selectBatchTo(true, destination[41:], 12345, math.MaxUint64) - require.Equal(t, 40, journal.copied) + require.Equal(t, 40, journal.selectedNum) } func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { @@ -246,12 +246,12 @@ func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { // When empty destination destination := make([]*WrappedTransaction, 0) journal := list.selectBatchTo(true, destination, 10, math.MaxUint64) - require.Equal(t, 0, journal.copied) + require.Equal(t, 0, journal.selectedNum) // When small destination destination = make([]*WrappedTransaction, 5) journal = list.selectBatchTo(false, destination, 10, math.MaxUint64) - require.Equal(t, 5, journal.copied) + require.Equal(t, 5, journal.selectedNum) } func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { @@ -267,19 +267,19 @@ func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { // First batch of selection, first failure journal := list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.copied) + require.Equal(t, 0, journal.selectedNum) require.Nil(t, destination[0]) require.Equal(t, int64(1), list.numFailedSelections.Get()) // Second batch of selection, don't count failure again journal = list.selectBatchTo(false, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.copied) + require.Equal(t, 0, journal.selectedNum) require.Nil(t, destination[0]) require.Equal(t, int64(1), list.numFailedSelections.Get()) // First batch of another selection, second failure, enters grace period journal = list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 1, journal.copied) + require.Equal(t, 1, journal.selectedNum) require.NotNil(t, destination[0]) require.Nil(t, destination[1]) require.Equal(t, int64(2), list.numFailedSelections.Get()) @@ -299,13 +299,13 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) // Try a number of selections with failure, reach close to grace period for i := 1; i < senderGracePeriodLowerBound; i++ { journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.copied) + require.Equal(t, 0, 
journal.selectedNum) require.Equal(t, int64(i), list.numFailedSelections.Get()) } // Try selection again. Failure will move the sender to grace period and return 1 transaction journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 1, journal.copied) + require.Equal(t, 1, journal.selectedNum) require.Equal(t, int64(senderGracePeriodLowerBound), list.numFailedSelections.Get()) require.False(t, list.sweepable.IsSet()) @@ -313,7 +313,7 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) list.AddTx(createTx([]byte("resolving-tx"), ".", 1), txGasHandler) // Selection will be successful journal = list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 19, journal.copied) + require.Equal(t, 19, journal.selectedNum) require.Equal(t, int64(0), list.numFailedSelections.Get()) require.False(t, list.sweepable.IsSet()) } @@ -332,20 +332,20 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing. // Try a number of selections with failure, reach close to grace period for i := 1; i < senderGracePeriodLowerBound; i++ { journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.copied) + require.Equal(t, 0, journal.selectedNum) require.Equal(t, int64(i), list.numFailedSelections.Get()) } // Try a number of selections with failure, within the grace period for i := senderGracePeriodLowerBound; i <= senderGracePeriodUpperBound; i++ { journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 1, journal.copied) + require.Equal(t, 1, journal.selectedNum) require.Equal(t, int64(i), list.numFailedSelections.Get()) } // Grace period exceeded now journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.copied) + require.Equal(t, 0, journal.selectedNum) require.Equal(t, int64(senderGracePeriodUpperBound+1), list.numFailedSelections.Get()) require.True(t, list.sweepable.IsSet()) } From 46c0d40d7e71283c6c4c53d276d6124325e56296 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 11 Jul 2024 09:00:10 +0300 Subject: [PATCH 032/124] Some todos, notes. --- txcache/txListBySenderMap.go | 2 ++ txcache/txListForSender.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 30be499f..949048f7 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -99,6 +99,7 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { return isFound } +// Important: this doesn't remove the transactions from txCache.txByHash. That's done by the caller. func (txMap *txListBySenderMap) removeSender(sender string) bool { _, removed := txMap.backingMap.Remove(sender) if removed { @@ -166,6 +167,7 @@ func (txMap *txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender groups[score] = make([]*txListForSender, 0, groupSizeHint) } + // TODO (next PR) randomize / shuffle. 
groups[score] = append(groups[score], listForSender) }) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 3a04da9d..8afce3fb 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -62,6 +62,8 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction, gasHandler T } listForSender.onAddedTransaction(tx, gasHandler) + + // TODO: Check how does the sender get removed if empty afterwards (maybe the answer is: "it never gets empty after applySizeConstraints()"). evicted := listForSender.applySizeConstraints() listForSender.recomputeScore() return true, evicted From 73b5848fd6b6c63178d41187b707defd507481c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 11 Jul 2024 09:00:51 +0300 Subject: [PATCH 033/124] notifyAccountNonce: remove sender if empty after txs removal. --- txcache/txListBySenderMap.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 949048f7..0a964b06 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -129,7 +129,12 @@ func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint return nil } - return listForSender.notifyAccountNonce(nonce) + evictedTxHashes := listForSender.notifyAccountNonce(nonce) + if listForSender.IsEmpty() { + txMap.removeSender(sender) + } + + return evictedTxHashes } func (txMap *txListBySenderMap) getSnapshotAscending() []*txListForSender { From 9c234afea467239a3ec74a348f2fa1be5eead44d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 11 Jul 2024 17:16:41 +0300 Subject: [PATCH 034/124] No more logic on "sweep sweepable senders". Old transactions are removed by Node's transactions cleaner. Senders with any gaps have score 0 - they are the last to select, first to evict. 
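Note on this patch: a sender whose nonce sequence has any gap is scored 0, which already makes it the last to be selected and the first to be evicted, so the separate sweeping pass is no longer needed. The sketch below only illustrates the gap idea behind the "spotless sequence" check; it is not the pool's actual noncesTracker code:

    package main

    import "fmt"

    // hasGap reports whether a sorted list of nonces is not a perfectly consecutive
    // sequence. Illustration only.
    func hasGap(sortedNonces []uint64) bool {
        for i := 1; i < len(sortedNonces); i++ {
            if sortedNonces[i] != sortedNonces[i-1]+1 {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(hasGap([]uint64{7, 8, 9}))  // false: spotless sequence, sender keeps its score
        fmt.Println(hasGap([]uint64{7, 9, 10})) // true: gap at nonce 8, sender is scored 0
        fmt.Println(hasGap([]uint64{7, 7, 8}))  // true: a duplicated nonce also breaks the sequence
    }
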
--- txcache/constants.go | 2 - txcache/eviction.go | 2 - txcache/monitoring.go | 20 +----- txcache/sweeping.go | 29 -------- txcache/sweeping_test.go | 118 -------------------------------- txcache/testutils_test.go | 10 --- txcache/txCache.go | 9 --- txcache/txListForSender.go | 6 -- txcache/txListForSender_test.go | 3 - 9 files changed, 1 insertion(+), 198 deletions(-) delete mode 100644 txcache/sweeping.go delete mode 100644 txcache/sweeping_test.go diff --git a/txcache/constants.go b/txcache/constants.go index ee6509c3..c3a21fc1 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -1,7 +1,5 @@ package txcache -const estimatedNumOfSweepableSendersPerSelection = 100 - const senderGracePeriodLowerBound = 2 const senderGracePeriodUpperBound = 2 diff --git a/txcache/eviction.go b/txcache/eviction.go index 985a1986..5ac3f952 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -67,7 +67,6 @@ func (cache *TxCache) areThereTooManyTxs() bool { return tooManyTxs } -// This is called concurrently by two goroutines: the eviction one and the sweeping one func (cache *TxCache) doEvictItems(txsToEvict [][]byte, sendersToEvict []string) (countTxs uint32, countSenders uint32) { countTxs = cache.txByHash.RemoveTxsBulk(txsToEvict) countSenders = cache.txListBySender.RemoveSendersBulk(sendersToEvict) @@ -114,7 +113,6 @@ func (cache *TxCache) evictSendersWhile(shouldContinue func() bool) (step uint32 return } -// This is called concurrently by two goroutines: the eviction one and the sweeping one func (cache *TxCache) evictSendersAndTheirTxs(listsToEvict []*txListForSender) (uint32, uint32) { sendersToEvict := make([]string, 0, len(listsToEvict)) txsToEvict := make([][]byte, 0, approximatelyCountTxInLists(listsToEvict)) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 79ee3e7e..06af31f5 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -55,14 +55,11 @@ func (cache *TxCache) monitorSelectionEnd(sortedSenders []*txListForSender, sele numSendersSelected := cache.numSendersSelected.Reset() numSendersWithInitialGap := cache.numSendersWithInitialGap.Reset() numSendersWithMiddleGap := cache.numSendersWithMiddleGap.Reset() - numSendersInGracePeriod := cache.numSendersInGracePeriod.Reset() - log.Debug("TxCache: selection ended", "name", cache.name, "duration", duration, "numTxSelected", len(selection), "numSendersSelected", numSendersSelected, "numSendersWithInitialGap", numSendersWithInitialGap, "numSendersWithMiddleGap", numSendersWithMiddleGap, - "numSendersInGracePeriod", numSendersInGracePeriod, ) if logSelection.GetLevel() != logger.LogTrace { @@ -85,7 +82,6 @@ type batchSelectionJournal struct { isFirstBatch bool hasInitialGap bool hasMiddleGap bool - isGracePeriod bool } func (cache *TxCache) monitorBatchSelectionEnd(journal batchSelectionJournal) { @@ -100,25 +96,11 @@ func (cache *TxCache) monitorBatchSelectionEnd(journal batchSelectionJournal) { cache.numSendersWithMiddleGap.Increment() } - if journal.isGracePeriod { - cache.numSendersInGracePeriod.Increment() - } else if journal.selectedNum > 0 { + if journal.selectedNum > 0 { cache.numSendersSelected.Increment() } } -func (cache *TxCache) monitorSweepingStart() *core.StopWatch { - sw := core.NewStopWatch() - sw.Start("sweeping") - return sw -} - -func (cache *TxCache) monitorSweepingEnd(numTxs uint32, numSenders uint32, stopWatch *core.StopWatch) { - stopWatch.Stop("sweeping") - duration := stopWatch.GetMeasurement("sweeping") - log.Debug("TxCache: swept senders:", "name", cache.name, "duration", 
duration, "txs", numTxs, "senders", numSenders) -} - // evictionJournal keeps a short journal about the eviction process // This is useful for debugging and reasoning about the eviction type evictionJournal struct { diff --git a/txcache/sweeping.go b/txcache/sweeping.go deleted file mode 100644 index 92255309..00000000 --- a/txcache/sweeping.go +++ /dev/null @@ -1,29 +0,0 @@ -package txcache - -func (cache *TxCache) initSweepable() { - cache.sweepingListOfSenders = make([]*txListForSender, 0, estimatedNumOfSweepableSendersPerSelection) -} - -func (cache *TxCache) collectSweepable(list *txListForSender) { - if !list.sweepable.IsSet() { - return - } - - cache.sweepingMutex.Lock() - cache.sweepingListOfSenders = append(cache.sweepingListOfSenders, list) - cache.sweepingMutex.Unlock() -} - -func (cache *TxCache) sweepSweepable() { - cache.sweepingMutex.Lock() - defer cache.sweepingMutex.Unlock() - - if len(cache.sweepingListOfSenders) == 0 { - return - } - - stopWatch := cache.monitorSweepingStart() - numTxs, numSenders := cache.evictSendersAndTheirTxs(cache.sweepingListOfSenders) - cache.initSweepable() - cache.monitorSweepingEnd(numTxs, numSenders, stopWatch) -} diff --git a/txcache/sweeping_test.go b/txcache/sweeping_test.go deleted file mode 100644 index a700f7a8..00000000 --- a/txcache/sweeping_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package txcache - -import ( - "math" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSweeping_CollectSweepable(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) - cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) - cache.AddTx(createTx([]byte("carol-42"), "carol", 42)) - - // Senders have no initial gaps - selection := cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - - // Alice and Bob have initial gaps, Carol doesn't - cache.NotifyAccountNonce([]byte("alice"), 10) - cache.NotifyAccountNonce([]byte("bob"), 20) - - // 1st fail - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 1, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 1, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 1, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) - - // 2nd fail, grace period, one grace transaction for Alice and Bob - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 2, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 2, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) - - // 3nd fail, collect Alice and Bob as sweepables - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 1, len(selection)) - require.Equal(t, 2, len(cache.sweepingListOfSenders)) - require.True(t, cache.isSenderSweepable("alice")) - require.True(t, cache.isSenderSweepable("bob")) - require.Equal(t, 3, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 3, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) -} - -func TestSweeping_WhenSendersEscapeCollection(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - 
cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) - cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) - cache.AddTx(createTx([]byte("carol-42"), "carol", 42)) - - // Senders have no initial gaps - selection := cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - - // Alice and Bob have initial gaps, Carol doesn't - cache.NotifyAccountNonce([]byte("alice"), 10) - cache.NotifyAccountNonce([]byte("bob"), 20) - - // 1st fail - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 1, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 1, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 1, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) - - // 2nd fail, grace period, one grace transaction for Alice and Bob - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 2, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 2, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) - - // 3rd attempt, but with gaps resolved - // Alice and Bob escape and won't be collected as sweepables - cache.NotifyAccountNonce([]byte("alice"), 42) - cache.NotifyAccountNonce([]byte("bob"), 42) - - selection = cache.doSelectTransactions(1000, 1000, math.MaxUint64) - require.Equal(t, 3, len(selection)) - require.Equal(t, 0, len(cache.sweepingListOfSenders)) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("alice")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("bob")) - require.Equal(t, 0, cache.getNumFailedSelectionsOfSender("carol")) -} - -func TestSweeping_SweepSweepable(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("alice-42"), "alice", 42)) - cache.AddTx(createTx([]byte("bob-42"), "bob", 42)) - cache.AddTx(createTx([]byte("carol-42"), "carol", 42)) - - // Fake "Alice" and "Bob" as sweepable - cache.sweepingListOfSenders = []*txListForSender{ - cache.getListForSender("alice"), - cache.getListForSender("bob"), - } - - require.Equal(t, uint64(3), cache.CountTx()) - require.Equal(t, uint64(3), cache.CountSenders()) - - cache.sweepSweepable() - - require.Equal(t, uint64(1), cache.CountTx()) - require.Equal(t, uint64(1), cache.CountSenders()) -} diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 83892ae7..6a6a2239 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -47,16 +47,6 @@ func (cache *TxCache) getNumFailedSelectionsOfSender(sender string) int { return int(cache.getListForSender(sender).numFailedSelections.Get()) } -func (cache *TxCache) isSenderSweepable(sender string) bool { - for _, item := range cache.sweepingListOfSenders { - if item.sender == sender { - return true - } - } - - return false -} - func (listForSender *txListForSender) getTxHashesAsStrings() []string { hashes := listForSender.getTxHashes() return hashesAsStrings(hashes) diff --git a/txcache/txCache.go b/txcache/txCache.go index c5139dda..c5c87bed 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -25,9 +25,6 @@ type TxCache struct { numSendersSelected atomic.Counter numSendersWithInitialGap atomic.Counter numSendersWithMiddleGap atomic.Counter - numSendersInGracePeriod 
atomic.Counter - sweepingMutex sync.Mutex - sweepingListOfSenders []*txListForSender mutTxOperation sync.Mutex } @@ -57,7 +54,6 @@ func NewTxCache(config ConfigSourceMe, txGasHandler TxGasHandler) (*TxCache, err evictionJournal: evictionJournal{}, } - txCache.initSweepable() return txCache, nil } @@ -133,10 +129,6 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender journal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSize, bandwidth) cache.monitorBatchSelectionEnd(journal) - if isFirstBatch { - cache.collectSweepable(txList) - } - resultFillIndex += journal.selectedNum numSelectedInThisPass += journal.selectedNum resultIsFull = resultFillIndex == numRequested @@ -175,7 +167,6 @@ func (cache *TxCache) computeSelectionSenderConstraints(score int, baseBatchSize } func (cache *TxCache) doAfterSelection() { - cache.sweepSweepable() cache.Diagnose(false) } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 8afce3fb..217c58b1 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -14,7 +14,6 @@ type txListForSender struct { copyDetectedGap bool score atomic.Uint32 accountNonceKnown atomic.Flag - sweepable atomic.Flag copyPreviousNonce uint64 sender string items *list.List @@ -267,7 +266,6 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati // then one transaction will be returned. But subsequent reads for this sender will return nothing. if detectedGap { if isFirstBatch && listForSender.isInGracePeriod() { - journal.isGracePeriod = true batchSize = 1 } else { batchSize = 0 @@ -380,10 +378,6 @@ func (listForSender *txListForSender) verifyInitialGapOnSelectionStart() bool { if hasInitialGap { listForSender.numFailedSelections.Increment() - - if listForSender.isGracePeriodExceeded() { - _ = listForSender.sweepable.SetReturningPrevious() - } } else { listForSender.numFailedSelections.Reset() } diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index b9a4963d..052054a1 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -307,7 +307,6 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) require.Equal(t, 1, journal.selectedNum) require.Equal(t, int64(senderGracePeriodLowerBound), list.numFailedSelections.Get()) - require.False(t, list.sweepable.IsSet()) // Now resolve the gap list.AddTx(createTx([]byte("resolving-tx"), ".", 1), txGasHandler) @@ -315,7 +314,6 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) journal = list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) require.Equal(t, 19, journal.selectedNum) require.Equal(t, int64(0), list.numFailedSelections.Get()) - require.False(t, list.sweepable.IsSet()) } func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing.T) { @@ -347,7 +345,6 @@ func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing. 
journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) require.Equal(t, 0, journal.selectedNum) require.Equal(t, int64(senderGracePeriodUpperBound+1), list.numFailedSelections.Get()) - require.True(t, list.sweepable.IsSet()) } func TestListForSender_NotifyAccountNonce(t *testing.T) { From c5cb6fbd0d11f1a78aa28506d1b182b12b1be12a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 11 Jul 2024 17:18:17 +0300 Subject: [PATCH 035/124] Remove concept of "grace period" for senders with initial gaps. No transactions of a sender with initial gap are selected anymore. If this is an issue, sender has to re-broadcast - pool will get the notification regarding account nonce. --- txcache/constants.go | 4 -- txcache/txListForSender.go | 19 +-------- txcache/txListForSender_test.go | 69 --------------------------------- 3 files changed, 1 insertion(+), 91 deletions(-) diff --git a/txcache/constants.go b/txcache/constants.go index c3a21fc1..6a6c7944 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -1,9 +1,5 @@ package txcache -const senderGracePeriodLowerBound = 2 - -const senderGracePeriodUpperBound = 2 - const numEvictedTxsToDisplay = 3 const excellentGasPriceFactor = 5 diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 217c58b1..839aa441 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -262,14 +262,8 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati previousNonce := listForSender.copyPreviousNonce // If a nonce gap is detected, no transaction is returned in this read. - // There is an exception though: if this is the first read operation for the sender in the current selection process and the sender is in the grace period, - // then one transaction will be returned. But subsequent reads for this sender will return nothing. 
if detectedGap { - if isFirstBatch && listForSender.isInGracePeriod() { - batchSize = 1 - } else { - batchSize = 0 - } + batchSize = 0 } copiedBandwidth := uint64(0) @@ -415,17 +409,6 @@ func (listForSender *txListForSender) getLowestNonceTx() *WrappedTransaction { return value } -// isInGracePeriod returns whether the sender is grace period due to a number of failed selections -func (listForSender *txListForSender) isInGracePeriod() bool { - numFailedSelections := listForSender.numFailedSelections.Get() - return numFailedSelections >= senderGracePeriodLowerBound && numFailedSelections <= senderGracePeriodUpperBound -} - -func (listForSender *txListForSender) isGracePeriodExceeded() bool { - numFailedSelections := listForSender.numFailedSelections.Get() - return numFailedSelections > senderGracePeriodUpperBound -} - func (listForSender *txListForSender) getScore() int { return int(listForSender.score.Get()) } diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 052054a1..5c2de6a3 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -276,75 +276,6 @@ func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { require.Equal(t, 0, journal.selectedNum) require.Nil(t, destination[0]) require.Equal(t, int64(1), list.numFailedSelections.Get()) - - // First batch of another selection, second failure, enters grace period - journal = list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 1, journal.selectedNum) - require.NotNil(t, destination[0]) - require.Nil(t, destination[1]) - require.Equal(t, int64(2), list.numFailedSelections.Get()) -} - -func TestListForSender_SelectBatchTo_WhenGracePeriodWithGapResolve(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.notifyAccountNonce(1) - - for index := 2; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) - } - - destination := make([]*WrappedTransaction, 1000) - - // Try a number of selections with failure, reach close to grace period - for i := 1; i < senderGracePeriodLowerBound; i++ { - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.selectedNum) - require.Equal(t, int64(i), list.numFailedSelections.Get()) - } - - // Try selection again. 
Failure will move the sender to grace period and return 1 transaction - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 1, journal.selectedNum) - require.Equal(t, int64(senderGracePeriodLowerBound), list.numFailedSelections.Get()) - - // Now resolve the gap - list.AddTx(createTx([]byte("resolving-tx"), ".", 1), txGasHandler) - // Selection will be successful - journal = list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 19, journal.selectedNum) - require.Equal(t, int64(0), list.numFailedSelections.Get()) -} - -func TestListForSender_SelectBatchTo_WhenGracePeriodWithNoGapResolve(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.notifyAccountNonce(1) - - for index := 2; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) - } - - destination := make([]*WrappedTransaction, 1000) - - // Try a number of selections with failure, reach close to grace period - for i := 1; i < senderGracePeriodLowerBound; i++ { - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.selectedNum) - require.Equal(t, int64(i), list.numFailedSelections.Get()) - } - - // Try a number of selections with failure, within the grace period - for i := senderGracePeriodLowerBound; i <= senderGracePeriodUpperBound; i++ { - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 1, journal.selectedNum) - require.Equal(t, int64(i), list.numFailedSelections.Get()) - } - - // Grace period exceeded now - journal := list.selectBatchTo(true, destination, math.MaxInt32, math.MaxUint64) - require.Equal(t, 0, journal.selectedNum) - require.Equal(t, int64(senderGracePeriodUpperBound+1), list.numFailedSelections.Get()) } func TestListForSender_NotifyAccountNonce(t *testing.T) { From 3a15954b786c0b766f21aa678160a7a22c9c0479 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 11 Jul 2024 17:31:06 +0300 Subject: [PATCH 036/124] Remove the counter numFailedSelections. 
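Note on this patch: with the grace period gone, the failed-selections counter no longer influences behavior; an initial gap is simply detected at selection time and the sender then yields nothing for that pass. The following is a hedged illustration of what an initial gap means (not the cache's actual hasInitialGap() implementation):

    package main

    import "fmt"

    // hasInitialGapIllustration shows the idea behind detecting an initial nonce gap:
    // the account nonce is known and the lowest queued transaction nonce sits above it.
    // Illustration only.
    func hasInitialGapIllustration(accountNonceKnown bool, accountNonce uint64, lowestTxNonce uint64) bool {
        return accountNonceKnown && lowestTxNonce > accountNonce
    }

    func main() {
        // Account is at nonce 5, first queued transaction has nonce 7: nonces 5 and 6 are missing.
        fmt.Println(hasInitialGapIllustration(true, 5, 7)) // true

        // Account is at nonce 5, first queued transaction has nonce 5: no gap.
        fmt.Println(hasInitialGapIllustration(true, 5, 5)) // false
    }
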
--- txcache/monitoring.go | 5 ++--- txcache/testutils_test.go | 4 ---- txcache/txListForSender.go | 38 ++++++++++----------------------- txcache/txListForSender_test.go | 6 ++---- 4 files changed, 15 insertions(+), 38 deletions(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 06af31f5..9d8b75fd 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -216,13 +216,12 @@ func (cache *TxCache) displaySendersSummary() { } var builder strings.Builder - builder.WriteString("\n[#index (score)] address [nonce known / nonce vs lowestTxNonce] txs = numTxs, !numFailedSelections\n") + builder.WriteString("\n[#index (score)] address [nonce known / nonce vs lowestTxNonce] txs = numTxs\n") for i, sender := range senders { address := hex.EncodeToString([]byte(sender.sender)) accountNonce := sender.accountNonce.Get() accountNonceKnown := sender.accountNonceKnown.IsSet() - numFailedSelections := sender.numFailedSelections.Get() score := sender.getScore() numTxs := sender.countTxWithLock() @@ -232,7 +231,7 @@ func (cache *TxCache) displaySendersSummary() { lowestTxNonce = int(lowestTx.Tx.GetNonce()) } - _, _ = fmt.Fprintf(&builder, "[#%d (%d)] %s [%t / %d vs %d] txs = %d, !%d\n", i, score, address, accountNonceKnown, accountNonce, lowestTxNonce, numTxs, numFailedSelections) + _, _ = fmt.Fprintf(&builder, "[#%d (%d)] %s [%t / %d vs %d] txs = %d\n", i, score, address, accountNonceKnown, accountNonce, lowestTxNonce, numTxs) } summary := builder.String() diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 6a6a2239..27dd2825 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -43,10 +43,6 @@ func (cache *TxCache) getScoreOfSender(sender string) int { return computer.computeScore(scoreParams) } -func (cache *TxCache) getNumFailedSelectionsOfSender(sender string) int { - return int(cache.getListForSender(sender).numFailedSelections.Get()) -} - func (listForSender *txListForSender) getTxHashesAsStrings() []string { hashes := listForSender.getTxHashes() return hashesAsStrings(hashes) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 839aa441..da3835c0 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -11,17 +11,16 @@ import ( // txListForSender represents a sorted list of transactions of a particular sender type txListForSender struct { - copyDetectedGap bool - score atomic.Uint32 - accountNonceKnown atomic.Flag - copyPreviousNonce uint64 - sender string - items *list.List - copyBatchIndex *list.Element - constraints *senderConstraints - accountNonce atomic.Uint64 - totalBytes atomic.Counter - numFailedSelections atomic.Counter + copyDetectedGap bool + score atomic.Uint32 + accountNonceKnown atomic.Flag + copyPreviousNonce uint64 + sender string + items *list.List + copyBatchIndex *list.Element + constraints *senderConstraints + accountNonce atomic.Uint64 + totalBytes atomic.Counter avgPpuNumerator float64 avgPpuDenominator uint64 @@ -246,7 +245,7 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati // Reset the internal state used for copy operations if isFirstBatch { - hasInitialGap := listForSender.verifyInitialGapOnSelectionStart() + hasInitialGap := listForSender.hasInitialGap() listForSender.copyBatchIndex = listForSender.items.Front() listForSender.copyPreviousNonce = 0 @@ -331,8 +330,6 @@ func approximatelyCountTxInLists(lists []*txListForSender) uint64 { return count } -// notifyAccountNonce does not update the "numFailedSelections" counter, -// since the 
notification comes at a time when we cannot actually detect whether the initial gap still exists or it was resolved. // Removes transactions with lower nonces and returns their hashes. func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) [][]byte { listForSender.mutex.Lock() @@ -366,19 +363,6 @@ func (listForSender *txListForSender) evictTransactionsWithLowerNonces(accountNo return evictedTxHashes } -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) verifyInitialGapOnSelectionStart() bool { - hasInitialGap := listForSender.hasInitialGap() - - if hasInitialGap { - listForSender.numFailedSelections.Increment() - } else { - listForSender.numFailedSelections.Reset() - } - - return hasInitialGap -} - // hasInitialGap should only be called at tx selection time, since only then we can detect initial gaps with certainty // This function should only be used in critical section (listForSender.mutex) func (listForSender *txListForSender) hasInitialGap() bool { diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 5c2de6a3..7039111b 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -265,17 +265,15 @@ func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { destination := make([]*WrappedTransaction, 1000) - // First batch of selection, first failure + // First batch of selection (failed to select) journal := list.selectBatchTo(true, destination, 50, math.MaxUint64) require.Equal(t, 0, journal.selectedNum) require.Nil(t, destination[0]) - require.Equal(t, int64(1), list.numFailedSelections.Get()) - // Second batch of selection, don't count failure again + // Second batch of selection (the same, failed to select) journal = list.selectBatchTo(false, destination, 50, math.MaxUint64) require.Equal(t, 0, journal.selectedNum) require.Nil(t, destination[0]) - require.Equal(t, int64(1), list.numFailedSelections.Get()) } func TestListForSender_NotifyAccountNonce(t *testing.T) { From 1f4033a232e8950cd8e3ccfc15fd3362a9632ba5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 11 Jul 2024 17:31:21 +0300 Subject: [PATCH 037/124] Minor refactoring. --- txcache/txListForSender.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index da3835c0..11c670b0 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -117,10 +117,10 @@ func (listForSender *txListForSender) recomputeScore() { func (listForSender *txListForSender) getScoreParams() senderScoreParams { numTxs := listForSender.countTx() minTransactionNonce := uint64(0) - firstTx := listForSender.items.Front() + firstTx := listForSender.getLowestNonceTx() if firstTx != nil { - minTransactionNonce = firstTx.Value.(*WrappedTransaction).Tx.GetNonce() + minTransactionNonce = firstTx.Tx.GetNonce() } hasSpotlessSequenceOfNonces := listForSender.noncesTracker.isSpotlessSequence(minTransactionNonce, numTxs) From ad8c55be17367fd65f3677bff8951c58a1c370f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 11 Jul 2024 17:53:20 +0300 Subject: [PATCH 038/124] A little bit of refactoring. 
--- txcache/txCache.go | 12 +++++------- txcache/txListForSender.go | 10 +++++----- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/txcache/txCache.go b/txcache/txCache.go index c5c87bed..490d75f4 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -124,13 +124,12 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender score := txList.getScore() batchSize, bandwidth := cache.computeSelectionSenderConstraints(score, batchSizePerSender, bandwidthPerSender) - // Reset happens on first pass only isFirstBatch := pass == 0 - journal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSize, bandwidth) - cache.monitorBatchSelectionEnd(journal) + batchSelectionJournal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSize, bandwidth) + cache.monitorBatchSelectionEnd(batchSelectionJournal) - resultFillIndex += journal.selectedNum - numSelectedInThisPass += journal.selectedNum + resultFillIndex += batchSelectionJournal.selectedNum + numSelectedInThisPass += batchSelectionJournal.selectedNum resultIsFull = resultFillIndex == numRequested if resultIsFull { break @@ -138,9 +137,8 @@ func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender } nothingSelectedInThisPass := numSelectedInThisPass == 0 - - // No more passes needed if nothingSelectedInThisPass { + // No more passes needed break } } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 11c670b0..080d697c 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -243,16 +243,16 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati journal := batchSelectionJournal{} - // Reset the internal state used for copy operations if isFirstBatch { hasInitialGap := listForSender.hasInitialGap() + journal.isFirstBatch = true + journal.hasInitialGap = hasInitialGap + + // Reset the internal state used for copy operations listForSender.copyBatchIndex = listForSender.items.Front() listForSender.copyPreviousNonce = 0 listForSender.copyDetectedGap = hasInitialGap - - journal.isFirstBatch = true - journal.hasInitialGap = hasInitialGap } element := listForSender.copyBatchIndex @@ -262,7 +262,7 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati // If a nonce gap is detected, no transaction is returned in this read. if detectedGap { - batchSize = 0 + return journal } copiedBandwidth := uint64(0) From ba403bdabbd07aee7269edd200332c2293140f63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 15 Jul 2024 15:14:48 +0300 Subject: [PATCH 039/124] Add total gas constraints when selecting transactions. 
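For reference, a caller-side sketch of the reworked selection API introduced below; the numeric limits are illustrative assumptions, not values prescribed by this patch:

    // Hypothetical caller: request at most 30000 transactions totalling ~10 billion gas,
    // with per-sender batches of up to 100 transactions / 120 million gas (scaled by score).
    func selectForNextMiniblock(cache *TxCache) []*WrappedTransaction {
    	return cache.SelectTransactions(30000, 10_000_000_000, 100, 120_000_000)
    }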
--- txcache/monitoring.go | 1 + txcache/txCache.go | 52 +++++++++++++----------- txcache/txCache_test.go | 10 ++--- txcache/txListForSender.go | 81 +++++++++++++++++++++++--------------- 4 files changed, 84 insertions(+), 60 deletions(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 9d8b75fd..b28faec6 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -79,6 +79,7 @@ func (cache *TxCache) monitorSelectionEnd(sortedSenders []*txListForSender, sele type batchSelectionJournal struct { selectedNum int + selectedGas uint64 isFirstBatch bool hasInitialGap bool hasMiddleGap bool diff --git a/txcache/txCache.go b/txcache/txCache.go index 490d75f4..3593685a 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -99,51 +99,57 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { return tx, ok } -// SelectTransactionsWithBandwidth selects a reasonably fair list of transactions to be included in the next miniblock -// It returns at most "numRequested" transactions -// Each sender gets the chance to give at least bandwidthPerSender gas worth of transactions, unless "numRequested" limit is reached before iterating over all senders -func (cache *TxCache) SelectTransactionsWithBandwidth(numRequested int, batchSizePerSender int, bandwidthPerSender uint64) []*WrappedTransaction { - result := cache.doSelectTransactions(numRequested, batchSizePerSender, bandwidthPerSender) +// SelectTransactions selects a reasonably fair list of transactions to be included in the next miniblock +// It returns at most "numRequested" transactions, with total gas ~ "gasRequested". +// +// Selection is performed in more passes. +// In each pass, each sender is allowed to contribute a batch of transactions, +// with a number of transactions and total gas proportional to the sender's score. 
+func (cache *TxCache) SelectTransactions(numRequested int, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) []*WrappedTransaction { + result := cache.doSelectTransactions(numRequested, gasRequested, baseNumPerSenderBatch, baseGasPerSenderBatch) go cache.doAfterSelection() return result } -func (cache *TxCache) doSelectTransactions(numRequested int, batchSizePerSender int, bandwidthPerSender uint64) []*WrappedTransaction { +func (cache *TxCache) doSelectTransactions(numRequested int, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) []*WrappedTransaction { stopWatch := cache.monitorSelectionStart() + senders := cache.getSendersEligibleForSelection() result := make([]*WrappedTransaction, numRequested) - resultFillIndex := 0 - resultIsFull := false - senders := cache.getSendersEligibleForSelection() + shouldContinueSelection := true + selectedGas := uint64(0) + selectedNum := 0 - for pass := 0; !resultIsFull; pass++ { - numSelectedInThisPass := 0 + for pass := 0; shouldContinueSelection; pass++ { + selectedNumInThisPass := 0 for _, txList := range senders { score := txList.getScore() - batchSize, bandwidth := cache.computeSelectionSenderConstraints(score, batchSizePerSender, bandwidthPerSender) + numPerBatch, gasPerBatch := cache.computeSelectionSenderConstraints(score, baseNumPerSenderBatch, baseGasPerSenderBatch) isFirstBatch := pass == 0 - batchSelectionJournal := txList.selectBatchTo(isFirstBatch, result[resultFillIndex:], batchSize, bandwidth) + batchSelectionJournal := txList.selectBatchTo(isFirstBatch, result[selectedNum:], numPerBatch, gasPerBatch) + selectedGas += batchSelectionJournal.selectedGas + selectedNum += batchSelectionJournal.selectedNum + selectedNumInThisPass += batchSelectionJournal.selectedNum + cache.monitorBatchSelectionEnd(batchSelectionJournal) - resultFillIndex += batchSelectionJournal.selectedNum - numSelectedInThisPass += batchSelectionJournal.selectedNum - resultIsFull = resultFillIndex == numRequested - if resultIsFull { + shouldContinueSelection := selectedNum < numRequested && selectedGas < gasRequested + if !shouldContinueSelection { break } } - nothingSelectedInThisPass := numSelectedInThisPass == 0 + nothingSelectedInThisPass := selectedNumInThisPass == 0 if nothingSelectedInThisPass { // No more passes needed break } } - result = result[:resultFillIndex] + result = result[:selectedNum] cache.monitorSelectionEnd(senders, result, stopWatch) return result } @@ -152,16 +158,16 @@ func (cache *TxCache) getSendersEligibleForSelection() []*txListForSender { return cache.txListBySender.getSnapshotDescending() } -func (cache *TxCache) computeSelectionSenderConstraints(score int, baseBatchSize int, baseBandwidth uint64) (int, uint64) { +func (cache *TxCache) computeSelectionSenderConstraints(score int, baseNumPerBatch int, baseGasPerBatch uint64) (int, uint64) { if score == 0 { return 1, 1 } scoreDivision := float64(score) / float64(maxSenderScore) - batchSize := int(float64(baseBatchSize) * scoreDivision) - bandwidth := uint64(float64(baseBandwidth) * scoreDivision) + numPerBatch := int(float64(baseNumPerBatch) * scoreDivision) + gasPerBatch := uint64(float64(baseGasPerBatch) * scoreDivision) - return batchSize, bandwidth + return numPerBatch, gasPerBatch } func (cache *TxCache) doAfterSelection() { diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 26134e9c..ae9a8f6f 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -312,7 +312,7 @@ func Test_SelectTransactions_Dummy(t 
*testing.T) { cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) - sorted := cache.SelectTransactionsWithBandwidth(10, 2, math.MaxUint64) + sorted := cache.SelectTransactions(10, math.MaxUint64, 2, math.MaxUint64) require.Len(t, sorted, 8) } @@ -327,7 +327,7 @@ func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) - sorted := cache.SelectTransactionsWithBandwidth(5, 2, 200000) + sorted := cache.SelectTransactions(5, math.MaxUint64, 2, 200000) numSelected := 1 + 1 + 3 // 1 alice, 1 carol, 3 bob require.Len(t, sorted, numSelected) @@ -350,7 +350,7 @@ func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { numSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol - sorted := cache.SelectTransactionsWithBandwidth(10, 2, math.MaxUint64) + sorted := cache.SelectTransactions(10, math.MaxUint64, 2, math.MaxUint64) require.Len(t, sorted, numSelected) } @@ -375,7 +375,7 @@ func Test_SelectTransactions(t *testing.T) { require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) - sorted := cache.SelectTransactionsWithBandwidth(nRequestedTransactions, 2, math.MaxUint64) + sorted := cache.SelectTransactions(nRequestedTransactions, math.MaxUint64, 2, math.MaxUint64) require.Len(t, sorted, core.MinInt(nRequestedTransactions, nTotalTransactions)) @@ -493,7 +493,7 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { go func() { for i := 0; i < 100; i++ { fmt.Println("Selection", i) - cache.SelectTransactionsWithBandwidth(100, 100, math.MaxUint64) + cache.SelectTransactions(100, math.MaxUint64, 100, math.MaxUint64) } wg.Done() diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 080d697c..c83f554e 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -11,17 +11,18 @@ import ( // txListForSender represents a sorted list of transactions of a particular sender type txListForSender struct { - copyDetectedGap bool - score atomic.Uint32 - accountNonceKnown atomic.Flag - copyPreviousNonce uint64 sender string - items *list.List - copyBatchIndex *list.Element - constraints *senderConstraints accountNonce atomic.Uint64 + accountNonceKnown atomic.Flag + items *list.List totalBytes atomic.Counter + constraints *senderConstraints + + selectionPointer *list.Element + selectionPreviousNonce uint64 + selectionDetectedGap bool + score atomic.Uint32 avgPpuNumerator float64 avgPpuDenominator uint64 noncesTracker *noncesTracker @@ -235,7 +236,7 @@ func (listForSender *txListForSender) IsEmpty() bool { // selectBatchTo copies a batch (usually small) of transactions of a limited gas bandwidth and limited number of transactions to a destination slice // It also updates the internal state used for copy operations -func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destination []*WrappedTransaction, batchSize int, bandwidth uint64) batchSelectionJournal { +func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destination []*WrappedTransaction, numRequested int, gasRequested uint64) batchSelectionJournal { // We can't read from multiple goroutines at the same time // And we can't mutate the sender's list while reading it listForSender.mutex.Lock() @@ -250,47 +251,63 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati journal.hasInitialGap = hasInitialGap // Reset the internal state 
used for copy operations - listForSender.copyBatchIndex = listForSender.items.Front() - listForSender.copyPreviousNonce = 0 - listForSender.copyDetectedGap = hasInitialGap + listForSender.selectionPointer = listForSender.items.Front() + listForSender.selectionPreviousNonce = 0 + listForSender.selectionDetectedGap = hasInitialGap } - element := listForSender.copyBatchIndex - availableSpace := len(destination) - detectedGap := listForSender.copyDetectedGap - previousNonce := listForSender.copyPreviousNonce + pointer := listForSender.selectionPointer + detectedGap := listForSender.selectionDetectedGap + previousNonce := listForSender.selectionPreviousNonce // If a nonce gap is detected, no transaction is returned in this read. if detectedGap { return journal } - copiedBandwidth := uint64(0) - lastTxGasLimit := uint64(0) - copied := 0 - for ; ; copied, copiedBandwidth = copied+1, copiedBandwidth+lastTxGasLimit { - if element == nil || copied == batchSize || copied == availableSpace || copiedBandwidth >= bandwidth { + selectedGas := uint64(0) + selectedNum := 0 + + for { + if pointer == nil { break } - value := element.Value.(*WrappedTransaction) - txNonce := value.Tx.GetNonce() - lastTxGasLimit = value.Tx.GetGasLimit() + // End because of count + if selectedNum == numRequested || selectedNum == len(destination) { + break + } + + // End because of gas limit + if selectedGas >= gasRequested { + break + } - if previousNonce > 0 && txNonce > previousNonce+1 { - listForSender.copyDetectedGap = true - journal.hasMiddleGap = true + value := pointer.Value.(*WrappedTransaction) + nonce := value.Tx.GetNonce() + gasLimit := value.Tx.GetGasLimit() + + if previousNonce > 0 && nonce > previousNonce+1 { + detectedGap = true break } - destination[copied] = value - element = element.Next() - previousNonce = txNonce + destination[selectedNum] = value + pointer = pointer.Next() + previousNonce = nonce + + selectedNum += 1 + selectedGas += gasLimit } - listForSender.copyBatchIndex = element - listForSender.copyPreviousNonce = previousNonce - journal.selectedNum = copied + listForSender.selectionPointer = pointer + listForSender.selectionPreviousNonce = previousNonce + listForSender.selectionDetectedGap = detectedGap + + journal.selectedNum = selectedNum + journal.selectedGas = selectedGas + journal.hasMiddleGap = detectedGap + return journal } From e8e808d919b9026edf4653ff7eae424f8651287b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 15 Jul 2024 16:22:34 +0300 Subject: [PATCH 040/124] Bit of refactoring. --- txcache/txListForSender.go | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index c83f554e..644f0d03 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -251,17 +251,13 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati journal.hasInitialGap = hasInitialGap // Reset the internal state used for copy operations - listForSender.selectionPointer = listForSender.items.Front() listForSender.selectionPreviousNonce = 0 + listForSender.selectionPointer = listForSender.items.Front() listForSender.selectionDetectedGap = hasInitialGap } - pointer := listForSender.selectionPointer - detectedGap := listForSender.selectionDetectedGap - previousNonce := listForSender.selectionPreviousNonce - // If a nonce gap is detected, no transaction is returned in this read. 
- if detectedGap { + if listForSender.selectionDetectedGap { return journal } @@ -269,7 +265,7 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati selectedNum := 0 for { - if pointer == nil { + if listForSender.selectionPointer == nil { break } @@ -283,30 +279,28 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati break } - value := pointer.Value.(*WrappedTransaction) - nonce := value.Tx.GetNonce() - gasLimit := value.Tx.GetGasLimit() + tx := listForSender.selectionPointer.Value.(*WrappedTransaction) + nonce := tx.Tx.GetNonce() + gasLimit := tx.Tx.GetGasLimit() - if previousNonce > 0 && nonce > previousNonce+1 { - detectedGap = true + isMiddleGap := listForSender.selectionPreviousNonce > 0 && nonce > listForSender.selectionPreviousNonce+1 + if isMiddleGap { + listForSender.selectionDetectedGap = true break } - destination[selectedNum] = value - pointer = pointer.Next() - previousNonce = nonce + destination[selectedNum] = tx + + listForSender.selectionPreviousNonce = nonce + listForSender.selectionPointer = listForSender.selectionPointer.Next() selectedNum += 1 selectedGas += gasLimit } - listForSender.selectionPointer = pointer - listForSender.selectionPreviousNonce = previousNonce - listForSender.selectionDetectedGap = detectedGap - journal.selectedNum = selectedNum journal.selectedGas = selectedGas - journal.hasMiddleGap = detectedGap + journal.hasMiddleGap = listForSender.selectionDetectedGap return journal } From 7a2e122e2d53f471cc5bff2d792ff9891b446706 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 15 Jul 2024 20:22:12 +0300 Subject: [PATCH 041/124] Shuffle the items of a group (senders with the same score). Add some benchmarks. --- txcache/monitoring.go | 4 +- txcache/txListBySenderMap.go | 13 +++- txcache/txListBySenderMap_test.go | 111 ++++++++++++++++++++---------- 3 files changed, 89 insertions(+), 39 deletions(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index b28faec6..aa0905c6 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -242,8 +242,8 @@ func (cache *TxCache) displaySendersSummary() { func monitorSendersScoreHistogram(scoreGroups [][]*txListForSender) { histogram := make([]int, len(scoreGroups)) - for i := 0; i < len(scoreGroups); i++ { - histogram[i] = len(scoreGroups[i]) + for i, group := range scoreGroups { + histogram[i] = len(group) } log.Debug("TxCache.monitorSendersScoreHistogram():", "histogram", histogram) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 0a964b06..85910858 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -1,6 +1,7 @@ package txcache import ( + "math/rand" "sync" "github.com/multiversx/mx-chain-core-go/core/atomic" @@ -172,10 +173,20 @@ func (txMap *txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender groups[score] = make([]*txListForSender, 0, groupSizeHint) } - // TODO (next PR) randomize / shuffle. groups[score] = append(groups[score], listForSender) }) + // Shuffle the items of a group (senders with the same score). 
+ for _, group := range groups { + if group == nil { + continue + } + + rand.Shuffle(len(group), func(j, k int) { + group[j], group[k] = group[k], group[j] + }) + } + monitorSendersScoreHistogram(groups) return groups diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index 72ad56b0..a591e6ee 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -14,9 +14,9 @@ import ( func TestSendersMap_AddTx_IncrementsCounter(t *testing.T) { myMap := newSendersMapToTest() - myMap.addTx(createTx([]byte("a"), "alice", uint64(1))) - myMap.addTx(createTx([]byte("aa"), "alice", uint64(2))) - myMap.addTx(createTx([]byte("b"), "bob", uint64(1))) + myMap.addTx(createTx([]byte("a"), "alice", 1)) + myMap.addTx(createTx([]byte("aa"), "alice", 2)) + myMap.addTx(createTx([]byte("b"), "bob", 1)) // There are 2 senders require.Equal(t, int64(2), myMap.counter.Get()) @@ -25,9 +25,9 @@ func TestSendersMap_AddTx_IncrementsCounter(t *testing.T) { func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T) { myMap := newSendersMapToTest() - txAlice1 := createTx([]byte("a1"), "alice", uint64(1)) - txAlice2 := createTx([]byte("a2"), "alice", uint64(2)) - txBob := createTx([]byte("b"), "bob", uint64(1)) + txAlice1 := createTx([]byte("a1"), "alice", 1) + txAlice2 := createTx([]byte("a2"), "alice", 2) + txBob := createTx([]byte("b"), "bob", 1) myMap.addTx(txAlice1) myMap.addTx(txAlice2) @@ -53,7 +53,7 @@ func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T func TestSendersMap_RemoveSender(t *testing.T) { myMap := newSendersMapToTest() - myMap.addTx(createTx([]byte("a"), "alice", uint64(1))) + myMap.addTx(createTx([]byte("a"), "alice", 1)) require.Equal(t, int64(1), myMap.counter.Get()) // Bob is unknown @@ -105,7 +105,7 @@ func TestSendersMap_notifyAccountNonce(t *testing.T) { // Discarded notification, since sender not added yet myMap.notifyAccountNonce([]byte("alice"), 42) - myMap.addTx(createTx([]byte("tx-42"), "alice", uint64(42))) + myMap.addTx(createTx([]byte("tx-42"), "alice", 42)) alice, _ := myMap.getListForSender("alice") require.Equal(t, uint64(0), alice.accountNonce.Get()) require.False(t, alice.accountNonceKnown.IsSet()) @@ -118,25 +118,75 @@ func TestSendersMap_notifyAccountNonce(t *testing.T) { func TestBenchmarkSendersMap_GetSnapshotAscending(t *testing.T) { numSendersValues := []int{50000, 100000, 300000} - for _, numSenders := range numSendersValues { - myMap := createTxListBySenderMap(numSenders) + t.Run("scores with uniform distribution", func(t *testing.T) { + fmt.Println(t.Name()) - sw := core.NewStopWatch() - sw.Start("time") - snapshot := myMap.getSnapshotAscending() - sw.Stop("time") + for _, numSenders := range numSendersValues { + myMap := newSendersMapToTest() - require.Len(t, snapshot, numSenders) - fmt.Printf("took %v to sort %d senders\n", sw.GetMeasurementsMap()["time"], numSenders) - } + // Many senders, each with a single transaction + for i := 0; i < numSenders; i++ { + sender := fmt.Sprintf("sender-%d", i) + hash := []byte(fmt.Sprintf("transaction-%d", i)) + myMap.addTx(createTx(hash, sender, 1)) + + // Artificially set a score to each sender: + txList, _ := myMap.getListForSender(sender) + txList.score.Set(uint32(i % (maxSenderScore + 1))) + } + + sw := core.NewStopWatch() + sw.Start("time") + snapshot := myMap.getSnapshotAscending() + sw.Stop("time") + + require.Len(t, snapshot, numSenders) + fmt.Printf("took %v to sort %d senders\n", sw.GetMeasurementsMap()["time"], 
numSenders) + } + + // Results: + // + // (a) Summary: 0.02s to sort 300k senders: + // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // took 0.003156466 to sort 50000 senders + // took 0.007549091 to sort 100000 senders + // took 0.022103215 to sort 300000 senders + }) + + t.Run("scores with skewed distribution", func(t *testing.T) { + fmt.Println(t.Name()) + + for _, numSenders := range numSendersValues { + myMap := newSendersMapToTest() + + // Many senders, each with a single transaction + for i := 0; i < numSenders; i++ { + sender := fmt.Sprintf("sender-%d", i) + hash := []byte(fmt.Sprintf("transaction-%d", i)) + myMap.addTx(createTx(hash, sender, 1)) + + // Artificially set a score to each sender: + txList, _ := myMap.getListForSender(sender) + txList.score.Set(uint32(i % 3)) + } - // Results: - // - // (a) 22 ms to sort 300k senders: - // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz - // took 0.004527414 to sort 50000 senders - // took 0.00745592 to sort 100000 senders - // took 0.022954026 to sort 300000 senders + sw := core.NewStopWatch() + sw.Start("time") + snapshot := myMap.getSnapshotAscending() + sw.Stop("time") + + require.Len(t, snapshot, numSenders) + fmt.Printf("took %v to sort %d senders\n", sw.GetMeasurementsMap()["time"], numSenders) + } + + // Results: + // + // (a) Summary: 0.02s to sort 300k senders: + // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // took 0.00423772 to sort 50000 senders + // took 0.00683838 to sort 100000 senders + // took 0.025094983 to sort 300000 senders + }) } func TestSendersMap_GetSnapshots_NoPanic_IfAlsoConcurrentMutation(t *testing.T) { @@ -168,21 +218,10 @@ func TestSendersMap_GetSnapshots_NoPanic_IfAlsoConcurrentMutation(t *testing.T) wg.Wait() } -func createTxListBySenderMap(numSenders int) *txListBySenderMap { - myMap := newSendersMapToTest() - for i := 0; i < numSenders; i++ { - sender := fmt.Sprintf("Sender-%d", i) - hash := createFakeTxHash([]byte(sender), 1) - myMap.addTx(createTx(hash, sender, uint64(1))) - } - - return myMap -} - func newSendersMapToTest() *txListBySenderMap { txGasHandler := txcachemocks.NewTxGasHandlerMock() return newTxListBySenderMap(4, senderConstraints{ maxNumBytes: math.MaxUint32, maxNumTxs: math.MaxUint32, - }, &disabledScoreComputer{}, txGasHandler) + }, newDefaultScoreComputer(txGasHandler), txGasHandler) } From 071417718b1c72a96d7b28a58983540bb8338ff7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 15 Jul 2024 22:51:01 +0300 Subject: [PATCH 042/124] Some refactoring. 
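As a reference for the per-pass constraint computation touched below, a worked example of computeSelectionSenderConstraints(), given that maxSenderScore is 100 (the base limits used here are illustrative):

    // A sender with score 74 and base limits of 100 transactions / 120_000_000 gas per batch
    // is allowed 74 transactions and 88_800_000 gas per batch; a score of 0 yields the minimum (1, 1).
    func illustrateSenderConstraints(cache *TxCache) {
    	numPerBatch, gasPerBatch := cache.computeSelectionSenderConstraints(74, 100, 120_000_000)
    	_ = numPerBatch // 74
    	_ = gasPerBatch // 88_800_000
    }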
---
 txcache/disabledCache.go        |  2 +-
 txcache/testutils_test.go       | 10 ----------
 txcache/txCache.go              |  4 ++++
 txcache/txListBySenderMap.go    | 12 +++++++-----
 txcache/txListForSender.go      |  6 +++---
 txcache/txListForSender_test.go |  2 +-
 6 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/txcache/disabledCache.go b/txcache/disabledCache.go
index 5a5473e5..45eb8b93 100644
--- a/txcache/disabledCache.go
+++ b/txcache/disabledCache.go
@@ -26,7 +26,7 @@ func (cache *DisabledCache) GetByTxHash(_ []byte) (*WrappedTransaction, bool) {
 }
 
 // SelectTransactionsWithBandwidth returns an empty slice
-func (cache *DisabledCache) SelectTransactionsWithBandwidth(_ int, _ int, _ uint64) []*WrappedTransaction {
+func (cache *DisabledCache) SelectTransactions(_ int, _ uint64, _ int, _ uint64) []*WrappedTransaction {
 	return make([]*WrappedTransaction, 0)
 }
 
diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go
index 27dd2825..d563aac1 100644
--- a/txcache/testutils_test.go
+++ b/txcache/testutils_test.go
@@ -11,7 +11,6 @@ import (
 const oneMilion = 1000000
 const oneBillion = oneMilion * 1000
 const oneTrillion = oneBillion * 1000
-const delta = 0.00000001
 const estimatedSizeOfBoundedTxFields = uint64(128)
 
 func (cache *TxCache) areInternalMapsConsistent() bool {
@@ -151,12 +150,3 @@ func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
 		return true // timed out
 	}
 }
-
-var _ scoreComputer = (*disabledScoreComputer)(nil)
-
-type disabledScoreComputer struct {
-}
-
-func (computer *disabledScoreComputer) computeScore(_ senderScoreParams) int {
-	return 0
-}
diff --git a/txcache/txCache.go b/txcache/txCache.go
index 3593685a..6649b408 100644
--- a/txcache/txCache.go
+++ b/txcache/txCache.go
@@ -126,6 +126,10 @@ func (cache *TxCache) doSelectTransactions(numRequested int, gasRequested uint64
 
 		for _, txList := range senders {
 			score := txList.getScore()
+
+			// Slightly suboptimal: we recompute the constraints for each pass,
+			// even though they are constant with respect to a sender, in the scope of a selection.
+			// However, this is not a performance bottleneck.
 			numPerBatch, gasPerBatch := cache.computeSelectionSenderConstraints(score, baseNumPerSenderBatch, baseGasPerSenderBatch)
 
 			isFirstBatch := pass == 0
diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go
index 85910858..6e71a1a2 100644
--- a/txcache/txListBySenderMap.go
+++ b/txcache/txListBySenderMap.go
@@ -176,7 +176,13 @@ func (txMap *txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender
 		groups[score] = append(groups[score], listForSender)
 	})
 
-	// Shuffle the items of a group (senders with the same score).
+ txMap.shuffleSendersWithinScoreGroups(groups) + monitorSendersScoreHistogram(groups) + + return groups +} + +func (txMap *txListBySenderMap) shuffleSendersWithinScoreGroups(groups [][]*txListForSender) { for _, group := range groups { if group == nil { continue @@ -186,10 +192,6 @@ func (txMap *txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender group[j], group[k] = group[k], group[j] }) } - - monitorSendersScoreHistogram(groups) - - return groups } func (txMap *txListBySenderMap) clear() { diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 644f0d03..2b9a8f91 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -236,7 +236,7 @@ func (listForSender *txListForSender) IsEmpty() bool { // selectBatchTo copies a batch (usually small) of transactions of a limited gas bandwidth and limited number of transactions to a destination slice // It also updates the internal state used for copy operations -func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destination []*WrappedTransaction, numRequested int, gasRequested uint64) batchSelectionJournal { +func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destination []*WrappedTransaction, numPerBatch int, gasPerBatch uint64) batchSelectionJournal { // We can't read from multiple goroutines at the same time // And we can't mutate the sender's list while reading it listForSender.mutex.Lock() @@ -270,12 +270,12 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati } // End because of count - if selectedNum == numRequested || selectedNum == len(destination) { + if selectedNum == numPerBatch || selectedNum == len(destination) { break } // End because of gas limit - if selectedGas >= gasRequested { + if selectedGas >= gasPerBatch { break } diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 7039111b..d25837a8 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -201,7 +201,7 @@ func TestListForSender_SelectBatchTo(t *testing.T) { require.Equal(t, 100, journal.selectedNum) } -func TestListForSender_SelectBatchToWithLimitedGasBandwidth(t *testing.T) { +func TestListForSender_SelectBatchToWithLimitedGasPerBatch(t *testing.T) { list := newUnconstrainedListToTest() txGasHandler := txcachemocks.NewTxGasHandlerMock() From 61d37d40f22fc22f9d5dd86ed5957c6ab4356427 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 15 Jul 2024 22:54:14 +0300 Subject: [PATCH 043/124] Fix linter, fix tests. 
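On the isSpotlessSequence() simplification below: the tracker compares 6 × (sum of squares of the tracked nonces) against a closed form for a gapless run of nonces. For reference, the identity itself; the only assumption here is that this is the quantity computeExpectedSumOfSquaresOfNoncesTimesSix() produces:

    // For a gapless, duplicate-free run of `count` nonces starting at `firstNonce` (count >= 1),
    // six times the sum of their squares is:
    //   6*n*a^2 + 6*a*n*(n-1) + n*(n-1)*(2*n-1), where a = firstNonce, n = count.
    func sixTimesSumOfSquaresOfRun(firstNonce uint64, count uint64) uint64 {
    	a, n := firstNonce, count
    	return 6*n*a*a + 6*a*n*(n-1) + n*(n-1)*(2*n-1)
    }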
--- txcache/disabledCache_test.go | 2 +- txcache/noncesTracker.go | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/txcache/disabledCache_test.go b/txcache/disabledCache_test.go index a19e947a..656b5528 100644 --- a/txcache/disabledCache_test.go +++ b/txcache/disabledCache_test.go @@ -18,7 +18,7 @@ func TestDisabledCache_DoesNothing(t *testing.T) { require.Nil(t, tx) require.False(t, ok) - selection := cache.SelectTransactionsWithBandwidth(42, 42, math.MaxUint64) + selection := cache.SelectTransactions(42, math.MaxUint64, 42, math.MaxUint64) require.Equal(t, 0, len(selection)) removed := cache.RemoveTxByHash([]byte{}) diff --git a/txcache/noncesTracker.go b/txcache/noncesTracker.go index 5c984bed..6a7fd56b 100644 --- a/txcache/noncesTracker.go +++ b/txcache/noncesTracker.go @@ -90,9 +90,5 @@ func (tracker *noncesTracker) isSpotlessSequence(firstNonce uint64, count uint64 sumOfSquaresOfNonces := tracker.modStrict(int64(tracker.sumOfSquaresOfAddedNonces) - int64(tracker.sumOfSquaresOfRemovedNonces)) sumOfSquaresOfNoncesTimesSix := tracker.mod(sumOfSquaresOfNonces * six) expectedSumOfSquaresOfNoncesTimesSix := tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(firstNonce, count) - if sumOfSquaresOfNoncesTimesSix != expectedSumOfSquaresOfNoncesTimesSix { - return false - } - - return true + return sumOfSquaresOfNoncesTimesSix == expectedSumOfSquaresOfNoncesTimesSix } From e50fbe540cf118a3a25bb56be03ada873a94ed1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 16 Jul 2024 19:08:26 +0300 Subject: [PATCH 044/124] Monitoring, refactoring, optimizations. Fix score computation. --- txcache/constants.go | 2 - txcache/monitoring.go | 175 ++++++++++---------------------- txcache/printing.go | 17 +++- txcache/score.go | 3 +- txcache/score_test.go | 25 ++++- txcache/testutils_test.go | 23 ++++- txcache/txByHashMap.go | 2 + txcache/txCache.go | 13 ++- txcache/txListForSender.go | 1 + txcache/txListForSender_test.go | 3 + 10 files changed, 128 insertions(+), 136 deletions(-) diff --git a/txcache/constants.go b/txcache/constants.go index 6a6c7944..e25fc2b6 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -1,7 +1,5 @@ package txcache -const numEvictedTxsToDisplay = 3 - const excellentGasPriceFactor = 5 const maxSenderScore = 100 diff --git a/txcache/monitoring.go b/txcache/monitoring.go index aa0905c6..2321c13e 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -1,7 +1,6 @@ package txcache import ( - "encoding/hex" "fmt" "strings" @@ -9,27 +8,21 @@ import ( logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("txcache") -var logSelection = logger.GetOrCreate("txcache-selection") +var log = logger.GetOrCreate("txcache/main") +var logAdd = logger.GetOrCreate("txcache/add") +var logRemove = logger.GetOrCreate("txcache/remove") +var logSelect = logger.GetOrCreate("txcache/select") func (cache *TxCache) monitorEvictionWrtSenderLimit(sender []byte, evicted [][]byte) { - log.Debug("TxCache.monitorEvictionWrtSenderLimit()", "name", cache.name, "sender", sender, "num", len(evicted)) - - for i := 0; i < core.MinInt(len(evicted), numEvictedTxsToDisplay); i++ { - log.Trace("TxCache.monitorEvictionWrtSenderLimit()", "name", cache.name, "sender", sender, "tx", evicted[i]) - } + logRemove.Debug("monitorEvictionWrtSenderLimit()", "sender", sender, "num", len(evicted)) } func (cache *TxCache) monitorEvictionWrtSenderNonce(sender []byte, senderNonce uint64, evicted [][]byte) { - 
log.Trace("TxCache.monitorEvictionWrtSenderNonce()", "name", cache.name, "sender", sender, "nonce", senderNonce, "num", len(evicted)) - - for i := 0; i < core.MinInt(len(evicted), numEvictedTxsToDisplay); i++ { - log.Trace("TxCache.monitorEvictionWrtSenderNonce()", "name", cache.name, "sender", sender, "nonce", senderNonce, "tx", evicted[i]) - } + logRemove.Trace("monitorEvictionWrtSenderNonce()", "sender", sender, "nonce", senderNonce, "num", len(evicted)) } func (cache *TxCache) monitorEvictionStart() *core.StopWatch { - log.Debug("TxCache: eviction started", "name", cache.name, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) + logRemove.Debug("monitorEvictionStart()", "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) sw := core.NewStopWatch() sw.Start("eviction") return sw @@ -38,42 +31,46 @@ func (cache *TxCache) monitorEvictionStart() *core.StopWatch { func (cache *TxCache) monitorEvictionEnd(stopWatch *core.StopWatch) { stopWatch.Stop("eviction") duration := stopWatch.GetMeasurement("eviction") - log.Debug("TxCache: eviction ended", "name", cache.name, "duration", duration, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) + logRemove.Debug("monitorEvictionEnd()", "duration", duration, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) cache.evictionJournal.display() } func (cache *TxCache) monitorSelectionStart() *core.StopWatch { - log.Debug("TxCache: selection started", "name", cache.name, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) + logSelect.Debug("monitorSelectionStart()", "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) sw := core.NewStopWatch() sw.Start("selection") return sw } -func (cache *TxCache) monitorSelectionEnd(sortedSenders []*txListForSender, selection []*WrappedTransaction, stopWatch *core.StopWatch) { +func (cache *TxCache) monitorSelectionEnd(stopWatch *core.StopWatch, selection []*WrappedTransaction) { stopWatch.Stop("selection") duration := stopWatch.GetMeasurement("selection") + numSendersSelected := cache.numSendersSelected.Reset() numSendersWithInitialGap := cache.numSendersWithInitialGap.Reset() numSendersWithMiddleGap := cache.numSendersWithMiddleGap.Reset() - log.Debug("TxCache: selection ended", "name", cache.name, "duration", duration, + + logSelect.Debug("monitorSelectionEnd()", "duration", duration, "numTxSelected", len(selection), "numSendersSelected", numSendersSelected, "numSendersWithInitialGap", numSendersWithInitialGap, "numSendersWithMiddleGap", numSendersWithMiddleGap, ) +} - if logSelection.GetLevel() != logger.LogTrace { +func displaySelectionOutcome(sortedSenders []*txListForSender, selection []*WrappedTransaction) { + if logSelect.GetLevel() > logger.LogTrace { return } if len(sortedSenders) > 0 { - logSelection.Trace("Sorted senders (as newline-separated JSON):") - logSelection.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) + logSelect.Trace("Sorted senders (as newline-separated JSON):") + logSelect.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) } if len(selection) > 0 { - logSelection.Trace("Selected transactions (as newline-separated JSON):") - logSelection.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) + logSelect.Trace("Selected transactions (as newline-separated JSON):") + logSelect.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) } } @@ -112,18 +109,11 @@ 
type evictionJournal struct { } func (journal *evictionJournal) display() { - log.Debug("Eviction.pass1:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders, "steps", journal.passOneNumSteps) + logRemove.Debug("Eviction.pass1:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders, "steps", journal.passOneNumSteps) } // Diagnose checks the state of the cache for inconsistencies and displays a summary -func (cache *TxCache) Diagnose(deep bool) { - cache.diagnoseShallowly() - if deep { - cache.diagnoseDeeply() - } -} - -func (cache *TxCache) diagnoseShallowly() { +func (cache *TxCache) Diagnose(_ bool) { sw := core.NewStopWatch() sw.Start("diagnose") @@ -134,117 +124,60 @@ func (cache *TxCache) diagnoseShallowly() { numSendersEstimate := int(cache.CountSenders()) numSendersInChunks := cache.txListBySender.backingMap.Count() sendersKeys := cache.txListBySender.backingMap.Keys() - sendersSnapshot := cache.txListBySender.getSnapshotAscending() + senders := cache.txListBySender.getSnapshotAscending() + + cache.displaySendersSummary(senders) sw.Stop("diagnose") duration := sw.GetMeasurement("diagnose") fine := numSendersEstimate == numSendersInChunks - fine = fine && len(sendersKeys) == len(sendersSnapshot) + fine = fine && len(sendersKeys) == len(senders) fine = fine && (int(numSendersEstimate) == len(sendersKeys)) fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) - log.Debug("TxCache.diagnoseShallowly()", "name", cache.name, "duration", duration, "fine", fine) - log.Debug("TxCache.Size:", "current", sizeInBytes, "max", cache.config.NumBytesThreshold) - log.Debug("TxCache.NumSenders:", "estimate", numSendersEstimate, "inChunks", numSendersInChunks) - log.Debug("TxCache.NumSenders (continued):", "keys", len(sendersKeys), "snapshot", len(sendersSnapshot)) - log.Debug("TxCache.NumTxs:", "estimate", numTxsEstimate, "inChunks", numTxsInChunks, "keys", len(txsKeys)) -} - -func (cache *TxCache) diagnoseDeeply() { - sw := core.NewStopWatch() - sw.Start("diagnose") - - journal := cache.checkInternalConsistency() - cache.displaySendersSummary() - - sw.Stop("diagnose") - duration := sw.GetMeasurement("diagnose") - - log.Debug("TxCache.diagnoseDeeply()", "name", cache.name, "duration", duration) - journal.display() -} - -type internalConsistencyJournal struct { - numInMapByHash int - numInMapBySender int - numMissingInMapByHash int -} - -func (journal *internalConsistencyJournal) isFine() bool { - return (journal.numInMapByHash == journal.numInMapBySender) && (journal.numMissingInMapByHash == 0) -} - -func (journal *internalConsistencyJournal) display() { - log.Debug("TxCache.internalConsistencyJournal:", "fine", journal.isFine(), "numInMapByHash", journal.numInMapByHash, "numInMapBySender", journal.numInMapBySender, "numMissingInMapByHash", journal.numMissingInMapByHash) -} - -func (cache *TxCache) checkInternalConsistency() internalConsistencyJournal { - internalMapByHash := cache.txByHash - internalMapBySender := cache.txListBySender - - senders := internalMapBySender.getSnapshotAscending() - numInMapByHash := len(internalMapByHash.keys()) - numInMapBySender := 0 - numMissingInMapByHash := 0 - - for _, sender := range senders { - numInMapBySender += int(sender.countTx()) - - for _, hash := range sender.getTxHashes() { - _, ok := internalMapByHash.getTx(string(hash)) - if !ok { - numMissingInMapByHash++ - } - } - } - - return internalConsistencyJournal{ - numInMapByHash: numInMapByHash, - numInMapBySender: numInMapBySender, - 
numMissingInMapByHash: numMissingInMapByHash, - } + log.Debug("TxCache.Diagnose()", + "duration", duration, + "fine", fine, + "numTxsEstimate", numTxsEstimate, + "numTxsInChunks", numTxsInChunks, + "len(txsKeys)", len(txsKeys), + "sizeInBytes", sizeInBytes, + "numBytesThreshold", cache.config.NumBytesThreshold, + "numSendersEstimate", numSendersEstimate, + "numSendersInChunks", numSendersInChunks, + "len(sendersKeys)", len(sendersKeys), + "len(senders)", len(senders), + ) } -func (cache *TxCache) displaySendersSummary() { - if log.GetLevel() != logger.LogTrace { +func (cache *TxCache) displaySendersSummary(senders []*txListForSender) { + if log.GetLevel() > logger.LogTrace { return } - senders := cache.txListBySender.getSnapshotAscending() if len(senders) == 0 { return } - var builder strings.Builder - builder.WriteString("\n[#index (score)] address [nonce known / nonce vs lowestTxNonce] txs = numTxs\n") - - for i, sender := range senders { - address := hex.EncodeToString([]byte(sender.sender)) - accountNonce := sender.accountNonce.Get() - accountNonceKnown := sender.accountNonceKnown.IsSet() - score := sender.getScore() - numTxs := sender.countTxWithLock() - - lowestTxNonce := -1 - lowestTx := sender.getLowestNonceTx() - if lowestTx != nil { - lowestTxNonce = int(lowestTx.Tx.GetNonce()) - } - - _, _ = fmt.Fprintf(&builder, "[#%d (%d)] %s [%t / %d vs %d] txs = %d\n", i, score, address, accountNonceKnown, accountNonce, lowestTxNonce, numTxs) - } - - summary := builder.String() - log.Debug("TxCache.displaySendersSummary()", "name", cache.name, "summary\n", summary) + log.Trace("displaySendersSummary(), as newline-separated JSON:") + log.Trace(marshalSendersToNewlineDelimitedJson(senders)) } func monitorSendersScoreHistogram(scoreGroups [][]*txListForSender) { - histogram := make([]int, len(scoreGroups)) + if log.GetLevel() > logger.LogDebug { + return + } + + stringBuilder := strings.Builder{} for i, group := range scoreGroups { - histogram[i] = len(group) + if len(group) == 0 { + continue + } + + stringBuilder.WriteString(fmt.Sprintf("#%d: %d; ", i, len(group))) } - log.Debug("TxCache.monitorSendersScoreHistogram():", "histogram", histogram) + log.Debug("monitorSendersScoreHistogram()", "histogram", stringBuilder.String()) } diff --git a/txcache/printing.go b/txcache/printing.go index 17f45d28..de85bb5d 100644 --- a/txcache/printing.go +++ b/txcache/printing.go @@ -3,6 +3,7 @@ package txcache import ( "encoding/hex" "encoding/json" + "fmt" "strings" ) @@ -20,30 +21,41 @@ type printedSender struct { Score int `json:"score"` Nonce uint64 `json:"nonce"` IsNonceKnown bool `json:"isNonceKnown"` + NumTxs uint64 `json:"numTxs"` } +// marshalSendersToNewlineDelimitedJson converts a list of senders to a newline-delimited JSON string. +// Note: each line is indexed, to improve readability. The index is easily removable for if separate analysis is needed. func marshalSendersToNewlineDelimitedJson(senders []*txListForSender) string { builder := strings.Builder{} builder.WriteString("\n") - for _, txListForSender := range senders { + for i, txListForSender := range senders { printedSender := convertTxListForSenderToPrintedSender(txListForSender) printedSenderJson, _ := json.Marshal(printedSender) + + builder.WriteString(fmt.Sprintf("#%d: ", i)) builder.WriteString(string(printedSenderJson)) + builder.WriteString("\n") } builder.WriteString("\n") return builder.String() } +// marshalTransactionsToNewlineDelimitedJson converts a list of transactions to a newline-delimited JSON string. 
+// Note: each line is indexed, to improve readability. The index is easily removable for if separate analysis is needed. func marshalTransactionsToNewlineDelimitedJson(transactions []*WrappedTransaction) string { builder := strings.Builder{} builder.WriteString("\n") - for _, wrappedTx := range transactions { + for i, wrappedTx := range transactions { printedTx := convertWrappedTransactionToPrintedTransaction(wrappedTx) printedTxJson, _ := json.Marshal(printedTx) + + builder.WriteString(fmt.Sprintf("#%d: ", i)) builder.WriteString(string(printedTxJson)) + builder.WriteString("\n") } builder.WriteString("\n") @@ -69,5 +81,6 @@ func convertTxListForSenderToPrintedSender(txListForSender *txListForSender) *pr Score: txListForSender.getScore(), Nonce: txListForSender.accountNonce.Get(), IsNonceKnown: txListForSender.accountNonceKnown.IsSet(), + NumTxs: txListForSender.countTxWithLock(), } } diff --git a/txcache/score.go b/txcache/score.go index ddf5fcad..573eb142 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -11,6 +11,7 @@ var _ scoreComputer = (*defaultScoreComputer)(nil) type senderScoreParams struct { avgPpuNumerator float64 avgPpuDenominator uint64 + isAccountNonceKnown bool hasSpotlessSequenceOfNonces bool } @@ -66,7 +67,7 @@ func (computer *defaultScoreComputer) computeScore(scoreParams senderScoreParams // score = log(sender's average price per unit / worst price per unit) * scoreScalingFactor, // where scoreScalingFactor = highest score / log(excellent price per unit / worst price per unit) func (computer *defaultScoreComputer) computeRawScore(params senderScoreParams) float64 { - if !params.hasSpotlessSequenceOfNonces { + if params.isAccountNonceKnown && !params.hasSpotlessSequenceOfNonces { return 0 } diff --git a/txcache/score_test.go b/txcache/score_test.go index fbf1b2ba..4bd2488e 100644 --- a/txcache/score_test.go +++ b/txcache/score_test.go @@ -21,6 +21,25 @@ func TestComputeWorstPpu(t *testing.T) { require.Equal(t, float64(10082500), computeWorstPpu(gasHandler)) } +func TestDefaultScoreComputer_computeRawScore(t *testing.T) { + gasHandler := txcachemocks.NewTxGasHandlerMock() + computer := newDefaultScoreComputer(gasHandler) + + require.Equal(t, 74.06805875222626, computer.computeRawScore(senderScoreParams{ + avgPpuNumerator: 57500000000000, + avgPpuDenominator: 57500, + isAccountNonceKnown: false, + hasSpotlessSequenceOfNonces: true, + })) + + require.Equal(t, 135.40260746155397, computer.computeRawScore(senderScoreParams{ + avgPpuNumerator: 57500000000000 * 45, + avgPpuDenominator: 57500, + isAccountNonceKnown: false, + hasSpotlessSequenceOfNonces: true, + })) +} + func TestDefaultScoreComputer_computeScore(t *testing.T) { // Simple transfers: require.Equal(t, 74, computeScoreOfTransaction(0, 50000, oneBillion)) @@ -110,7 +129,7 @@ func BenchmarkScoreComputer_computeScore(b *testing.B) { for i := 0; i < b.N; i++ { txFee := tx.computeFee(gasHandler) - for j := uint64(0); j < 1000000; j++ { + for j := uint64(0); j < 1_000_000; j++ { computer.computeScore(senderScoreParams{ avgPpuNumerator: txFee, avgPpuDenominator: tx.Tx.GetGasLimit(), @@ -121,7 +140,7 @@ func BenchmarkScoreComputer_computeScore(b *testing.B) { // Results: // - // (a) 10 ms to compute the score 1 million times: + // (a) 12 ms to compute the score 1 million times: // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz - // BenchmarkScoreComputer_computeRawScore-8 124 9812711 ns/op 295 B/op 12 allocs/op + // BenchmarkScoreComputer_computeScore-8 100 11895452 ns/op 297 B/op 12 allocs/op } diff --git 
a/txcache/testutils_test.go b/txcache/testutils_test.go index d563aac1..a1d76f8c 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -14,8 +14,27 @@ const oneTrillion = oneBillion * 1000 const estimatedSizeOfBoundedTxFields = uint64(128) func (cache *TxCache) areInternalMapsConsistent() bool { - journal := cache.checkInternalConsistency() - return journal.isFine() + internalMapByHash := cache.txByHash + internalMapBySender := cache.txListBySender + + senders := internalMapBySender.getSnapshotAscending() + numInMapByHash := len(internalMapByHash.keys()) + numInMapBySender := 0 + numMissingInMapByHash := 0 + + for _, sender := range senders { + numInMapBySender += int(sender.countTx()) + + for _, hash := range sender.getTxHashes() { + _, ok := internalMapByHash.getTx(string(hash)) + if !ok { + numMissingInMapByHash++ + } + } + } + + isFine := (numInMapByHash == numInMapBySender) && (numMissingInMapByHash == 0) + return isFine } func (cache *TxCache) getHashesForSender(sender string) []string { diff --git a/txcache/txByHashMap.go b/txcache/txByHashMap.go index 82902797..5f2ceddd 100644 --- a/txcache/txByHashMap.go +++ b/txcache/txByHashMap.go @@ -74,6 +74,8 @@ func (txMap *txByHashMap) RemoveTxsBulk(txHashes [][]byte) uint32 { } } + logRemove.Trace("RemoveTxsBulk()", "len(txHashes)", len(txHashes), "numRemoved", numRemoved) + return numRemoved } diff --git a/txcache/txCache.go b/txcache/txCache.go index 6649b408..2043b81d 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -64,7 +64,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { return false, false } - log.Trace("TxCache.AddTx()", "name", cache.name, "tx", tx.TxHash) + logAdd.Trace("AddTx()", "tx", tx.TxHash) if cache.config.EvictionEnabled { cache.doEviction() @@ -80,7 +80,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { // - B won't add to "txByHash" (duplicate) // - B adds to "txListBySender" // - A won't add to "txListBySender" (duplicate) - log.Debug("TxCache.AddTx(): slight inconsistency detected:", "name", cache.name, "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) + logAdd.Debug("AddTx(): slight inconsistency detected:", "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) } if len(evicted) > 0 { @@ -154,7 +154,10 @@ func (cache *TxCache) doSelectTransactions(numRequested int, gasRequested uint64 } result = result[:selectedNum] - cache.monitorSelectionEnd(senders, result, stopWatch) + + cache.monitorSelectionEnd(stopWatch, result) + go displaySelectionOutcome(senders, result) + return result } @@ -183,7 +186,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { cache.mutTxOperation.Lock() defer cache.mutTxOperation.Unlock() - log.Trace("TxCache.RemoveTxByHash()", "name", cache.name, "tx", txHash) + logRemove.Trace("RemoveTxByHash()", "tx", txHash) tx, foundInByHash := cache.txByHash.removeTx(string(txHash)) if !foundInByHash { @@ -200,7 +203,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { // - B reaches "cache.txByHash.RemoveTxsBulk()" // - B reaches "cache.txListBySender.RemoveSendersBulk()" // - A reaches "cache.txListBySender.removeTx()", but sender does not exist anymore - log.Debug("TxCache.RemoveTxByHash(): slight inconsistency detected: !foundInBySender", "name", cache.name, "tx", txHash) + logRemove.Debug("RemoveTxByHash(): slight inconsistency detected: !foundInBySender", "tx", 
txHash) } return true diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 2b9a8f91..d0407232 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -129,6 +129,7 @@ func (listForSender *txListForSender) getScoreParams() senderScoreParams { return senderScoreParams{ avgPpuNumerator: listForSender.avgPpuNumerator, avgPpuDenominator: listForSender.avgPpuDenominator, + isAccountNonceKnown: listForSender.accountNonceKnown.IsSet(), hasSpotlessSequenceOfNonces: hasSpotlessSequenceOfNonces, } } diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index d25837a8..699f96b1 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -336,6 +336,9 @@ func TestListForSender_transactionAddAndRemove_updateScore(t *testing.T) { alice := newUnconstrainedListToTest() bob := newUnconstrainedListToTest() + alice.notifyAccountNonce(1) + bob.notifyAccountNonce(1) + a := createTx([]byte("a"), ".", 1) b := createTx([]byte("b"), ".", 1) c := createTx([]byte("c"), ".", 2).withDataLength(42).withGasLimit(50000 + 1500*42) From 4f7726dd5f2979c3b17bbb41cd3b2890cc83f468 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 17 Jul 2024 00:30:33 +0300 Subject: [PATCH 045/124] Optimization on notifyAccountNonce. --- txcache/txListForSender.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index d0407232..8a79ab39 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -344,6 +344,11 @@ func approximatelyCountTxInLists(lists []*txListForSender) uint64 { // Removes transactions with lower nonces and returns their hashes. func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) [][]byte { + // Optimization: if nonce is the same, do nothing. + if listForSender.accountNonce.Get() == nonce { + return nil + } + listForSender.mutex.Lock() defer listForSender.mutex.Unlock() From 1da6b62d6e321c8154fef80306a1ec4d966c0596 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 17 Jul 2024 09:44:14 +0300 Subject: [PATCH 046/124] Adjust some tests. 
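A note on why the adjusted expectations below hold (illustration only): each transaction in the reworked test carries a gas limit of oneMilion, and selectBatchTo() checks the accumulated gas before picking the next transaction, so:

    gasPerBatch = oneMilion - 1   -> 1 tx   (0 < 999_999; then 1_000_000 >= 999_999 ends the batch)
    gasPerBatch = oneMilion       -> 1 tx   (1_000_000 >= 1_000_000 after the first pick)
    gasPerBatch = oneMilion * 20  -> 20 txs (ends once 20 * 1_000_000 >= 20_000_000)
    final batch, no gas limit     -> 18 txs (only 40 - 22 transactions are left)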
---
 txcache/txListForSender_test.go | 44 +++++++++++++++------------------
 1 file changed, 20 insertions(+), 24 deletions(-)

diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go
index 699f96b1..ef21fc0b 100644
--- a/txcache/txListForSender_test.go
+++ b/txcache/txListForSender_test.go
@@ -181,24 +181,20 @@ func TestListForSender_SelectBatchTo(t *testing.T) {
 
 	destination := make([]*WrappedTransaction, 1000)
 
-	// First batch
+	// 1st batch
 	journal := list.selectBatchTo(true, destination, 50, math.MaxUint64)
 	require.Equal(t, 50, journal.selectedNum)
 	require.NotNil(t, destination[49])
 	require.Nil(t, destination[50])
 
-	// Second batch
+	// 2nd batch
 	journal = list.selectBatchTo(false, destination[50:], 50, math.MaxUint64)
 	require.Equal(t, 50, journal.selectedNum)
 	require.NotNil(t, destination[99])
 
-	// No third batch
+	// No 3rd batch
 	journal = list.selectBatchTo(false, destination, 50, math.MaxUint64)
 	require.Equal(t, 0, journal.selectedNum)
-
-	// Restart copy
-	journal = list.selectBatchTo(true, destination, 12345, math.MaxUint64)
-	require.Equal(t, 100, journal.selectedNum)
 }
 
 func TestListForSender_SelectBatchToWithLimitedGasPerBatch(t *testing.T) {
@@ -206,33 +202,33 @@ func TestListForSender_SelectBatchToWithLimitedGasPerBatch(t *testing.T) {
 	txGasHandler := txcachemocks.NewTxGasHandlerMock()
 
 	for index := 0; index < 40; index++ {
-		wtx := createTx([]byte{byte(index)}, ".", uint64(index))
-		tx, _ := wtx.Tx.(*transaction.Transaction)
-		tx.GasLimit = 1000000
-		list.AddTx(wtx, txGasHandler)
+		tx := createTx([]byte{byte(index)}, ".", uint64(index)).withGasLimit(oneMilion)
+		list.AddTx(tx, txGasHandler)
 	}
 
 	destination := make([]*WrappedTransaction, 1000)
 
-	// First batch
-	journal := list.selectBatchTo(true, destination, 50, 500000)
+	// 1st batch
+	journal := list.selectBatchTo(true, destination, 50, oneMilion-1)
 	require.Equal(t, 1, journal.selectedNum)
 	require.NotNil(t, destination[0])
 	require.Nil(t, destination[1])
 
-	// Second batch
-	journal = list.selectBatchTo(false, destination[1:], 50, 20000000)
-	require.Equal(t, 20, journal.selectedNum)
-	require.NotNil(t, destination[20])
-	require.Nil(t, destination[21])
+	// 2nd batch
+	journal = list.selectBatchTo(false, destination[1:], 50, oneMilion)
+	require.Equal(t, 1, journal.selectedNum)
+	require.NotNil(t, destination[1])
+	require.Nil(t, destination[2])
 
-	// third batch
-	journal = list.selectBatchTo(false, destination[21:], 20, math.MaxUint64)
-	require.Equal(t, 19, journal.selectedNum)
+	// 3rd batch
+	journal = list.selectBatchTo(false, destination[2:], 50, oneMilion*20)
+	require.Equal(t, 20, journal.selectedNum)
+	require.NotNil(t, destination[21])
+	require.Nil(t, destination[22])
 
-	// Restart copy
-	journal = list.selectBatchTo(true, destination[41:], 12345, math.MaxUint64)
-	require.Equal(t, 40, journal.selectedNum)
+	// 4th batch
+	journal = list.selectBatchTo(false, destination[22:], 20, math.MaxUint64)
+	require.Equal(t, 18, journal.selectedNum)
 }
 
 func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) {

From 89202915bdb1840ab6f3a8f2eba4ef22b690b2f7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?=
Date: Wed, 17 Jul 2024 10:37:34 +0300
Subject: [PATCH 047/124] Fix build.
--- txcache/txListForSender_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index ef21fc0b..06cc6d71 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -4,7 +4,6 @@ import ( "math" "testing" - "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) From 50fa20e6865f66fb1283d2044b4055962e557155 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 18 Jul 2024 20:37:32 +0300 Subject: [PATCH 048/124] Better diagnostics. --- txcache/constants.go | 3 +- txcache/diagnosis.go | 75 ++++++++++++++++++++++++++++++++++++++++++ txcache/loggers.go | 10 ++++++ txcache/monitoring.go | 57 -------------------------------- txcache/txByHashMap.go | 2 -- txcache/txCache.go | 10 ++++++ 6 files changed, 97 insertions(+), 60 deletions(-) create mode 100644 txcache/diagnosis.go create mode 100644 txcache/loggers.go diff --git a/txcache/constants.go b/txcache/constants.go index e25fc2b6..35e76c63 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -1,5 +1,6 @@ package txcache const excellentGasPriceFactor = 5 - const maxSenderScore = 100 +const diagnosisMaxSendersToDisplay = 1000 +const diagnosisMaxTransactionsToDisplay = 10000 diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go new file mode 100644 index 00000000..8cc6b928 --- /dev/null +++ b/txcache/diagnosis.go @@ -0,0 +1,75 @@ +package txcache + +import ( + "github.com/multiversx/mx-chain-core-go/core" + logger "github.com/multiversx/mx-chain-logger-go" +) + +// Diagnose checks the state of the cache for inconsistencies and displays a summary (senders and transactions). 
+func (cache *TxCache) Diagnose(_ bool) { + sw := core.NewStopWatch() + sw.Start("diagnose") + + sizeInBytes := cache.NumBytes() + numTxsEstimate := int(cache.CountTx()) + numTxsInChunks := cache.txByHash.backingMap.Count() + txsKeys := cache.txByHash.backingMap.Keys() + numSendersEstimate := int(cache.CountSenders()) + numSendersInChunks := cache.txListBySender.backingMap.Count() + sendersKeys := cache.txListBySender.backingMap.Keys() + + fine := numSendersEstimate == numSendersInChunks + fine = fine && (int(numSendersEstimate) == len(sendersKeys)) + fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) + + cache.displaySendersAsDiagnostics() + cache.displayTransactionsAsDiagnostics() + + sw.Stop("diagnose") + duration := sw.GetMeasurement("diagnose") + + log.Debug("TxCache.Diagnose()", + "duration", duration, + "fine", fine, + "numTxsEstimate", numTxsEstimate, + "numTxsInChunks", numTxsInChunks, + "len(txsKeys)", len(txsKeys), + "sizeInBytes", sizeInBytes, + "numBytesThreshold", cache.config.NumBytesThreshold, + "numSendersEstimate", numSendersEstimate, + "numSendersInChunks", numSendersInChunks, + "len(sendersKeys)", len(sendersKeys), + ) +} + +func (cache *TxCache) displaySendersAsDiagnostics() { + if log.GetLevel() > logger.LogTrace { + return + } + + senders := cache.txListBySender.getSnapshotAscending() + + if len(senders) == 0 { + return + } + + numToDisplay := core.MinInt(diagnosisMaxSendersToDisplay, len(senders)) + logDiagnoseSenders.Trace("Senders (as newline-separated JSON)", "numSenders", len(senders), "numToDisplay", numToDisplay) + logDiagnoseSenders.Trace(marshalSendersToNewlineDelimitedJson(senders[:numToDisplay])) +} + +func (cache *TxCache) displayTransactionsAsDiagnostics() { + if log.GetLevel() > logger.LogTrace { + return + } + + transactions := cache.getAllTransactions() + + if len(transactions) == 0 { + return + } + + numToDisplay := core.MinInt(diagnosisMaxTransactionsToDisplay, len(transactions)) + logDiagnoseTransactions.Trace("Transactions (as newline-separated JSON)", "numTransactions", len(transactions), "numToDisplay", numToDisplay) + logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJson(transactions[:numToDisplay])) +} diff --git a/txcache/loggers.go b/txcache/loggers.go new file mode 100644 index 00000000..262c2374 --- /dev/null +++ b/txcache/loggers.go @@ -0,0 +1,10 @@ +package txcache + +import logger "github.com/multiversx/mx-chain-logger-go" + +var log = logger.GetOrCreate("txcache/main") +var logAdd = logger.GetOrCreate("txcache/add") +var logRemove = logger.GetOrCreate("txcache/remove") +var logSelect = logger.GetOrCreate("txcache/select") +var logDiagnoseSenders = logger.GetOrCreate("txcache/diagnose/senders") +var logDiagnoseTransactions = logger.GetOrCreate("txcache/diagnose/transactions") diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 2321c13e..74f19ee9 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -8,11 +8,6 @@ import ( logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("txcache/main") -var logAdd = logger.GetOrCreate("txcache/add") -var logRemove = logger.GetOrCreate("txcache/remove") -var logSelect = logger.GetOrCreate("txcache/select") - func (cache *TxCache) monitorEvictionWrtSenderLimit(sender []byte, evicted [][]byte) { logRemove.Debug("monitorEvictionWrtSenderLimit()", "sender", sender, "num", len(evicted)) } @@ -112,58 +107,6 @@ func (journal *evictionJournal) display() { logRemove.Debug("Eviction.pass1:", "txs", 
journal.passOneNumTxs, "senders", journal.passOneNumSenders, "steps", journal.passOneNumSteps) } -// Diagnose checks the state of the cache for inconsistencies and displays a summary -func (cache *TxCache) Diagnose(_ bool) { - sw := core.NewStopWatch() - sw.Start("diagnose") - - sizeInBytes := cache.NumBytes() - numTxsEstimate := int(cache.CountTx()) - numTxsInChunks := cache.txByHash.backingMap.Count() - txsKeys := cache.txByHash.backingMap.Keys() - numSendersEstimate := int(cache.CountSenders()) - numSendersInChunks := cache.txListBySender.backingMap.Count() - sendersKeys := cache.txListBySender.backingMap.Keys() - senders := cache.txListBySender.getSnapshotAscending() - - cache.displaySendersSummary(senders) - - sw.Stop("diagnose") - duration := sw.GetMeasurement("diagnose") - - fine := numSendersEstimate == numSendersInChunks - fine = fine && len(sendersKeys) == len(senders) - fine = fine && (int(numSendersEstimate) == len(sendersKeys)) - fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) - - log.Debug("TxCache.Diagnose()", - "duration", duration, - "fine", fine, - "numTxsEstimate", numTxsEstimate, - "numTxsInChunks", numTxsInChunks, - "len(txsKeys)", len(txsKeys), - "sizeInBytes", sizeInBytes, - "numBytesThreshold", cache.config.NumBytesThreshold, - "numSendersEstimate", numSendersEstimate, - "numSendersInChunks", numSendersInChunks, - "len(sendersKeys)", len(sendersKeys), - "len(senders)", len(senders), - ) -} - -func (cache *TxCache) displaySendersSummary(senders []*txListForSender) { - if log.GetLevel() > logger.LogTrace { - return - } - - if len(senders) == 0 { - return - } - - log.Trace("displaySendersSummary(), as newline-separated JSON:") - log.Trace(marshalSendersToNewlineDelimitedJson(senders)) -} - func monitorSendersScoreHistogram(scoreGroups [][]*txListForSender) { if log.GetLevel() > logger.LogDebug { return diff --git a/txcache/txByHashMap.go b/txcache/txByHashMap.go index 5f2ceddd..82902797 100644 --- a/txcache/txByHashMap.go +++ b/txcache/txByHashMap.go @@ -74,8 +74,6 @@ func (txMap *txByHashMap) RemoveTxsBulk(txHashes [][]byte) uint32 { } } - logRemove.Trace("RemoveTxsBulk()", "len(txHashes)", len(txHashes), "numRemoved", numRemoved) - return numRemoved } diff --git a/txcache/txCache.go b/txcache/txCache.go index 2043b81d..835d2fe0 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -239,6 +239,16 @@ func (cache *TxCache) ForEachTransaction(function ForEachTransaction) { cache.txByHash.forEach(function) } +func (cache *TxCache) getAllTransactions() []*WrappedTransaction { + transactions := make([]*WrappedTransaction, 0, cache.Len()) + + cache.ForEachTransaction(func(_ []byte, tx *WrappedTransaction) { + transactions = append(transactions, tx) + }) + + return transactions +} + // GetTransactionsPoolForSender returns the list of transaction hashes for the sender func (cache *TxCache) GetTransactionsPoolForSender(sender string) []*WrappedTransaction { listForSender, ok := cache.txListBySender.getListForSender(sender) From 13a3a9682bf1202e39c33e6847fdfe30fdaf28d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 25 Jul 2024 15:46:07 +0300 Subject: [PATCH 049/124] Diagnose selection. 
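Selection is now also exercised for diagnostic purposes: diagnoseSelection()
funnels into the same doSelectTransactions() routine as SelectTransactions(),
but with dedicated constants (diagnosisSelection*) and a dedicated logger
("txcache/diagnose/selection"), and the outcome is displayed through
displaySelectionOutcome(). A rough usage sketch (illustrative only; the
numeric arguments are placeholders chosen by the caller):

    // production path: returns at most numRequested transactions, stopping once gasRequested is reached
    selected := cache.SelectTransactions(numRequested, gasRequested, baseNumPerSenderBatch, baseGasPerSenderBatch)

    // diagnosis path: runs a selection with generous fixed constants; the outcome is only logged
    cache.Diagnose(false)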
--- txcache/constants.go | 4 ++++ txcache/diagnosis.go | 53 ++++++++++++++++++++++++++++--------------- txcache/loggers.go | 1 + txcache/monitoring.go | 24 ++++++++++++-------- txcache/txCache.go | 35 ++++++++++++++++------------ 5 files changed, 74 insertions(+), 43 deletions(-) diff --git a/txcache/constants.go b/txcache/constants.go index 35e76c63..3fe20ae6 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -4,3 +4,7 @@ const excellentGasPriceFactor = 5 const maxSenderScore = 100 const diagnosisMaxSendersToDisplay = 1000 const diagnosisMaxTransactionsToDisplay = 10000 +const diagnosisSelectionNumRequested = 30_000 +const diagnosisSelectionGasRequested = 10_000_000_000 +const diagnosisSelectionBaseNumPerSenderBatch = 100 +const diagnosisSelectionBaseGasPerSenderBatch = 120000000 diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index 8cc6b928..e3c17dd1 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -5,10 +5,18 @@ import ( logger "github.com/multiversx/mx-chain-logger-go" ) -// Diagnose checks the state of the cache for inconsistencies and displays a summary (senders and transactions). +// Diagnose checks the state of the cache for inconsistencies and displays a summary, senders and transactions. func (cache *TxCache) Diagnose(_ bool) { - sw := core.NewStopWatch() - sw.Start("diagnose") + cache.diagnoseCounters() + cache.diagnoseSenders() + cache.diagnoseTransactions() + cache.diagnoseSelection() +} + +func (cache *TxCache) diagnoseCounters() { + if log.GetLevel() > logger.LogDebug { + return + } sizeInBytes := cache.NumBytes() numTxsEstimate := int(cache.CountTx()) @@ -22,14 +30,7 @@ func (cache *TxCache) Diagnose(_ bool) { fine = fine && (int(numSendersEstimate) == len(sendersKeys)) fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) - cache.displaySendersAsDiagnostics() - cache.displayTransactionsAsDiagnostics() - - sw.Stop("diagnose") - duration := sw.GetMeasurement("diagnose") - - log.Debug("TxCache.Diagnose()", - "duration", duration, + log.Debug("diagnoseCounters()", "fine", fine, "numTxsEstimate", numTxsEstimate, "numTxsInChunks", numTxsInChunks, @@ -42,24 +43,24 @@ func (cache *TxCache) Diagnose(_ bool) { ) } -func (cache *TxCache) displaySendersAsDiagnostics() { - if log.GetLevel() > logger.LogTrace { +func (cache *TxCache) diagnoseSenders() { + if logDiagnoseSenders.GetLevel() > logger.LogTrace { return } - senders := cache.txListBySender.getSnapshotAscending() + senders := cache.txListBySender.getSnapshotDescending() if len(senders) == 0 { return } numToDisplay := core.MinInt(diagnosisMaxSendersToDisplay, len(senders)) - logDiagnoseSenders.Trace("Senders (as newline-separated JSON)", "numSenders", len(senders), "numToDisplay", numToDisplay) + logDiagnoseSenders.Trace("diagnoseSenders()", "numSenders", len(senders), "numToDisplay", numToDisplay) logDiagnoseSenders.Trace(marshalSendersToNewlineDelimitedJson(senders[:numToDisplay])) } -func (cache *TxCache) displayTransactionsAsDiagnostics() { - if log.GetLevel() > logger.LogTrace { +func (cache *TxCache) diagnoseTransactions() { + if logDiagnoseTransactions.GetLevel() > logger.LogTrace { return } @@ -70,6 +71,22 @@ func (cache *TxCache) displayTransactionsAsDiagnostics() { } numToDisplay := core.MinInt(diagnosisMaxTransactionsToDisplay, len(transactions)) - logDiagnoseTransactions.Trace("Transactions (as newline-separated JSON)", "numTransactions", len(transactions), "numToDisplay", numToDisplay) + logDiagnoseTransactions.Trace("diagnoseTransactions()", 
"numTransactions", len(transactions), "numToDisplay", numToDisplay) logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJson(transactions[:numToDisplay])) } + +func (cache *TxCache) diagnoseSelection() { + if logDiagnoseSelection.GetLevel() > logger.LogDebug { + return + } + + senders, transactions := cache.doSelectTransactions( + logDiagnoseSelection, + diagnosisSelectionNumRequested, + diagnosisSelectionGasRequested, + diagnosisSelectionBaseNumPerSenderBatch, + diagnosisSelectionBaseGasPerSenderBatch, + ) + + displaySelectionOutcome(logDiagnoseSelection, senders, transactions) +} diff --git a/txcache/loggers.go b/txcache/loggers.go index 262c2374..af55e5b0 100644 --- a/txcache/loggers.go +++ b/txcache/loggers.go @@ -6,5 +6,6 @@ var log = logger.GetOrCreate("txcache/main") var logAdd = logger.GetOrCreate("txcache/add") var logRemove = logger.GetOrCreate("txcache/remove") var logSelect = logger.GetOrCreate("txcache/select") +var logDiagnoseSelection = logger.GetOrCreate("txcache/diagnose/selection") var logDiagnoseSenders = logger.GetOrCreate("txcache/diagnose/senders") var logDiagnoseTransactions = logger.GetOrCreate("txcache/diagnose/transactions") diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 74f19ee9..3a028cf1 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -30,14 +30,14 @@ func (cache *TxCache) monitorEvictionEnd(stopWatch *core.StopWatch) { cache.evictionJournal.display() } -func (cache *TxCache) monitorSelectionStart() *core.StopWatch { - logSelect.Debug("monitorSelectionStart()", "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) +func (cache *TxCache) monitorSelectionStart(contextualLogger logger.Logger) *core.StopWatch { + contextualLogger.Debug("monitorSelectionStart()", "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) sw := core.NewStopWatch() sw.Start("selection") return sw } -func (cache *TxCache) monitorSelectionEnd(stopWatch *core.StopWatch, selection []*WrappedTransaction) { +func (cache *TxCache) monitorSelectionEnd(contextualLog logger.Logger, stopWatch *core.StopWatch, selection []*WrappedTransaction) { stopWatch.Stop("selection") duration := stopWatch.GetMeasurement("selection") @@ -45,7 +45,7 @@ func (cache *TxCache) monitorSelectionEnd(stopWatch *core.StopWatch, selection [ numSendersWithInitialGap := cache.numSendersWithInitialGap.Reset() numSendersWithMiddleGap := cache.numSendersWithMiddleGap.Reset() - logSelect.Debug("monitorSelectionEnd()", "duration", duration, + contextualLog.Debug("monitorSelectionEnd()", "duration", duration, "numTxSelected", len(selection), "numSendersSelected", numSendersSelected, "numSendersWithInitialGap", numSendersWithInitialGap, @@ -53,19 +53,23 @@ func (cache *TxCache) monitorSelectionEnd(stopWatch *core.StopWatch, selection [ ) } -func displaySelectionOutcome(sortedSenders []*txListForSender, selection []*WrappedTransaction) { - if logSelect.GetLevel() > logger.LogTrace { +func displaySelectionOutcome(contextualLogger logger.Logger, sortedSenders []*txListForSender, selection []*WrappedTransaction) { + if contextualLogger.GetLevel() > logger.LogTrace { return } if len(sortedSenders) > 0 { - logSelect.Trace("Sorted senders (as newline-separated JSON):") - logSelect.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) + contextualLogger.Trace("Sorted senders (as newline-separated JSON):") + contextualLogger.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) + } else { + 
contextualLogger.Trace("Sorted senders: none") } if len(selection) > 0 { - logSelect.Trace("Selected transactions (as newline-separated JSON):") - logSelect.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) + contextualLogger.Trace("Selected transactions (as newline-separated JSON):") + contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) + } else { + contextualLogger.Trace("Selected transactions: none") } } diff --git a/txcache/txCache.go b/txcache/txCache.go index 835d2fe0..81879d49 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-storage-go/common" "github.com/multiversx/mx-chain-storage-go/monitoring" "github.com/multiversx/mx-chain-storage-go/types" @@ -106,16 +107,25 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { // In each pass, each sender is allowed to contribute a batch of transactions, // with a number of transactions and total gas proportional to the sender's score. func (cache *TxCache) SelectTransactions(numRequested int, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) []*WrappedTransaction { - result := cache.doSelectTransactions(numRequested, gasRequested, baseNumPerSenderBatch, baseGasPerSenderBatch) - go cache.doAfterSelection() - return result + senders, transactions := cache.doSelectTransactions( + logSelect, + numRequested, + gasRequested, + baseNumPerSenderBatch, + baseGasPerSenderBatch, + ) + + go cache.diagnoseCounters() + go displaySelectionOutcome(logSelect, senders, transactions) + + return transactions } -func (cache *TxCache) doSelectTransactions(numRequested int, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) []*WrappedTransaction { - stopWatch := cache.monitorSelectionStart() +func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, numRequested int, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) ([]*txListForSender, []*WrappedTransaction) { + stopWatch := cache.monitorSelectionStart(contextualLogger) senders := cache.getSendersEligibleForSelection() - result := make([]*WrappedTransaction, numRequested) + transactions := make([]*WrappedTransaction, numRequested) shouldContinueSelection := true selectedGas := uint64(0) @@ -133,7 +143,7 @@ func (cache *TxCache) doSelectTransactions(numRequested int, gasRequested uint64 numPerBatch, gasPerBatch := cache.computeSelectionSenderConstraints(score, baseNumPerSenderBatch, baseGasPerSenderBatch) isFirstBatch := pass == 0 - batchSelectionJournal := txList.selectBatchTo(isFirstBatch, result[selectedNum:], numPerBatch, gasPerBatch) + batchSelectionJournal := txList.selectBatchTo(isFirstBatch, transactions[selectedNum:], numPerBatch, gasPerBatch) selectedGas += batchSelectionJournal.selectedGas selectedNum += batchSelectionJournal.selectedNum selectedNumInThisPass += batchSelectionJournal.selectedNum @@ -153,12 +163,11 @@ func (cache *TxCache) doSelectTransactions(numRequested int, gasRequested uint64 } } - result = result[:selectedNum] + transactions = transactions[:selectedNum] - cache.monitorSelectionEnd(stopWatch, result) - go displaySelectionOutcome(senders, result) + cache.monitorSelectionEnd(contextualLogger, stopWatch, transactions) - return result + return senders, transactions } func (cache *TxCache) 
getSendersEligibleForSelection() []*txListForSender { @@ -177,10 +186,6 @@ func (cache *TxCache) computeSelectionSenderConstraints(score int, baseNumPerBat return numPerBatch, gasPerBatch } -func (cache *TxCache) doAfterSelection() { - cache.Diagnose(false) -} - // RemoveTxByHash removes tx by hash func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { cache.mutTxOperation.Lock() From ed6b63814de7f13618bc2e8247938ce8abb0548a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 25 Jul 2024 16:54:16 +0300 Subject: [PATCH 050/124] Simplifications, refactoring. --- txcache/eviction.go | 75 ++++++++++++++++++++++++---------------- txcache/eviction_test.go | 61 ++++++++++++++++---------------- txcache/monitoring.go | 35 ------------------- txcache/txCache.go | 37 +++++++++----------- 4 files changed, 93 insertions(+), 115 deletions(-) diff --git a/txcache/eviction.go b/txcache/eviction.go index 5ac3f952..6a4f4ca9 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -4,15 +4,23 @@ import ( "github.com/multiversx/mx-chain-core-go/core" ) +// evictionJournal keeps a short journal about the eviction process +// This is useful for debugging and reasoning about the eviction +type evictionJournal struct { + numTxs uint32 + numSenders uint32 + numSteps uint32 +} + // doEviction does cache eviction // We do not allow more evictions to start concurrently -func (cache *TxCache) doEviction() { +func (cache *TxCache) doEviction() *evictionJournal { if cache.isEvictionInProgress.IsSet() { - return + return nil } if !cache.isCapacityExceeded() { - return + return nil } cache.evictionMutex.Lock() @@ -22,27 +30,35 @@ func (cache *TxCache) doEviction() { defer cache.isEvictionInProgress.Reset() if !cache.isCapacityExceeded() { - return + return nil } - stopWatch := cache.monitorEvictionStart() - cache.makeSnapshotOfSenders() + logRemove.Debug("doEviction(): before eviction", + "num bytes", cache.NumBytes(), + "num txs", cache.CountTx(), + "num senders", cache.CountSenders(), + ) - journal := evictionJournal{} - journal.passOneNumSteps, journal.passOneNumTxs, journal.passOneNumSenders = cache.evictSendersInLoop() - journal.evictionPerformed = true - cache.evictionJournal = journal + stopWatch := core.NewStopWatch() + stopWatch.Start("eviction") - cache.monitorEvictionEnd(stopWatch) - cache.destroySnapshotOfSenders() -} + sendersSnapshot := cache.txListBySender.getSnapshotAscending() + evictionJournal := cache.evictSendersInLoop(sendersSnapshot) -func (cache *TxCache) makeSnapshotOfSenders() { - cache.evictionSnapshotOfSenders = cache.txListBySender.getSnapshotAscending() -} + stopWatch.Stop("eviction") -func (cache *TxCache) destroySnapshotOfSenders() { - cache.evictionSnapshotOfSenders = nil + logRemove.Debug( + "doEviction(): after eviction", + "num bytes", cache.NumBytes(), + "num now", cache.CountTx(), + "num senders", cache.CountSenders(), + "duration", stopWatch.GetMeasurement("eviction"), + "evicted txs", evictionJournal.numTxs, + "evicted senders", evictionJournal.numSenders, + "eviction steps", evictionJournal.numSteps, + ) + + return &evictionJournal } func (cache *TxCache) isCapacityExceeded() bool { @@ -73,31 +89,32 @@ func (cache *TxCache) doEvictItems(txsToEvict [][]byte, sendersToEvict []string) return } -func (cache *TxCache) evictSendersInLoop() (uint32, uint32, uint32) { - return cache.evictSendersWhile(cache.isCapacityExceeded) +func (cache *TxCache) evictSendersInLoop(sendersSnapshot []*txListForSender) evictionJournal { + return 
cache.evictSendersWhile(sendersSnapshot, cache.isCapacityExceeded) } // evictSendersWhileTooManyTxs removes transactions in a loop, as long as "shouldContinue" is true // One batch of senders is removed in each step -func (cache *TxCache) evictSendersWhile(shouldContinue func() bool) (step uint32, numTxs uint32, numSenders uint32) { +func (cache *TxCache) evictSendersWhile(sendersSnapshot []*txListForSender, shouldContinue func() bool) evictionJournal { if !shouldContinue() { - return + return evictionJournal{} } - snapshot := cache.evictionSnapshotOfSenders - snapshotLength := uint32(len(snapshot)) + snapshotLength := uint32(len(sendersSnapshot)) batchSize := cache.config.NumSendersToPreemptivelyEvict batchStart := uint32(0) - for step = 0; shouldContinue(); step++ { + journal := evictionJournal{} + + for ; shouldContinue(); journal.numSteps++ { batchEnd := batchStart + batchSize batchEndBounded := core.MinUint32(batchEnd, snapshotLength) - batch := snapshot[batchStart:batchEndBounded] + batch := sendersSnapshot[batchStart:batchEndBounded] numTxsEvictedInStep, numSendersEvictedInStep := cache.evictSendersAndTheirTxs(batch) - numTxs += numTxsEvictedInStep - numSenders += numSendersEvictedInStep + journal.numTxs += numTxsEvictedInStep + journal.numSenders += numSendersEvictedInStep batchStart += batchSize reachedEnd := batchStart >= snapshotLength @@ -110,7 +127,7 @@ func (cache *TxCache) evictSendersWhile(shouldContinue func() bool) (step uint32 } } - return + return journal } func (cache *TxCache) evictSendersAndTheirTxs(listsToEvict []*txListForSender) (uint32, uint32) { diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index e9df616e..65a6d870 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -35,12 +35,12 @@ func TestTxCache_EvictSendersInLoop_BecauseOfCount(t *testing.T) { require.Equal(t, int64(200), cache.txListBySender.counter.Get()) require.Equal(t, int64(200), cache.txByHash.counter.Get()) - cache.makeSnapshotOfSenders() - steps, nTxs, nSenders := cache.evictSendersInLoop() + sendersSnapshot := cache.txListBySender.getSnapshotAscending() + journal := cache.evictSendersInLoop(sendersSnapshot) - require.Equal(t, uint32(5), steps) - require.Equal(t, uint32(100), nTxs) - require.Equal(t, uint32(100), nSenders) + require.Equal(t, uint32(5), journal.numSteps) + require.Equal(t, uint32(100), journal.numTxs) + require.Equal(t, uint32(100), journal.numSenders) require.Equal(t, int64(100), cache.txListBySender.counter.Get()) require.Equal(t, int64(100), cache.txByHash.counter.Get()) } @@ -72,12 +72,12 @@ func TestTxCache_EvictSendersInLoop_BecauseOfSize(t *testing.T) { require.Equal(t, int64(200), cache.txListBySender.counter.Get()) require.Equal(t, int64(200), cache.txByHash.counter.Get()) - cache.makeSnapshotOfSenders() - steps, nTxs, nSenders := cache.evictSendersInLoop() + sendersSnapshot := cache.txListBySender.getSnapshotAscending() + journal := cache.evictSendersInLoop(sendersSnapshot) - require.Equal(t, uint32(5), steps) - require.Equal(t, uint32(100), nTxs) - require.Equal(t, uint32(100), nSenders) + require.Equal(t, uint32(5), journal.numSteps) + require.Equal(t, uint32(100), journal.numTxs) + require.Equal(t, uint32(100), journal.numSenders) require.Equal(t, int64(100), cache.txListBySender.counter.Get()) require.Equal(t, int64(100), cache.txByHash.counter.Get()) } @@ -101,10 +101,10 @@ func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withGasPrice(1 * oneBillion)) 
cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withGasPrice(3 * oneBillion)) - cache.doEviction() - require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) - require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) - require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) + journal := cache.doEviction() + require.Equal(t, uint32(2), journal.numTxs) + require.Equal(t, uint32(2), journal.numSenders) + require.Equal(t, uint32(1), journal.numSteps) // Alice and Bob evicted. Carol still there (better score). _, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -144,10 +144,10 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { require.Equal(t, 69, scoreCarol) require.Equal(t, 80, scoreEve) - cache.doEviction() - require.Equal(t, uint32(2), cache.evictionJournal.passOneNumTxs) - require.Equal(t, uint32(2), cache.evictionJournal.passOneNumSenders) - require.Equal(t, uint32(1), cache.evictionJournal.passOneNumSteps) + journal := cache.doEviction() + require.Equal(t, uint32(2), journal.numTxs) + require.Equal(t, uint32(2), journal.numSenders) + require.Equal(t, uint32(1), journal.numSteps) // Alice and Bob evicted (lower score). Carol and Eve still there. _, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -176,9 +176,8 @@ func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) _ = cache.isEvictionInProgress.SetReturningPrevious() - cache.doEviction() - - require.False(t, cache.evictionJournal.evictionPerformed) + journal := cache.doEviction() + require.Nil(t, journal) } func TestTxCache_EvictSendersInLoop_CodeCoverageForLoopBreak_WhenSmallBatch(t *testing.T) { @@ -198,12 +197,12 @@ func TestTxCache_EvictSendersInLoop_CodeCoverageForLoopBreak_WhenSmallBatch(t *t cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) - cache.makeSnapshotOfSenders() + sendersSnapshot := cache.txListBySender.getSnapshotAscending() + journal := cache.evictSendersInLoop(sendersSnapshot) - steps, nTxs, nSenders := cache.evictSendersInLoop() - require.Equal(t, uint32(0), steps) - require.Equal(t, uint32(1), nTxs) - require.Equal(t, uint32(1), nSenders) + require.Equal(t, uint32(0), journal.numSteps) + require.Equal(t, uint32(1), journal.numTxs) + require.Equal(t, uint32(1), journal.numSenders) } func TestTxCache_EvictSendersWhile_ShouldContinueBreak(t *testing.T) { @@ -224,15 +223,15 @@ func TestTxCache_EvictSendersWhile_ShouldContinueBreak(t *testing.T) { cache.AddTx(createTx([]byte("hash-alice"), "alice", 1)) cache.AddTx(createTx([]byte("hash-bob"), "bob", 1)) - cache.makeSnapshotOfSenders() + sendersSnapshot := cache.txListBySender.getSnapshotAscending() - steps, nTxs, nSenders := cache.evictSendersWhile(func() bool { + journal := cache.evictSendersWhile(sendersSnapshot, func() bool { return false }) - require.Equal(t, uint32(0), steps) - require.Equal(t, uint32(0), nTxs) - require.Equal(t, uint32(0), nSenders) + require.Equal(t, uint32(0), journal.numSteps) + require.Equal(t, uint32(0), journal.numTxs) + require.Equal(t, uint32(0), journal.numSenders) } // This seems to be the most reasonable "bad-enough" (not worst) scenario to benchmark: diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 3a028cf1..47dd70c6 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -8,28 +8,6 @@ import ( logger "github.com/multiversx/mx-chain-logger-go" ) -func (cache *TxCache) monitorEvictionWrtSenderLimit(sender []byte, evicted [][]byte) { - 
logRemove.Debug("monitorEvictionWrtSenderLimit()", "sender", sender, "num", len(evicted)) -} - -func (cache *TxCache) monitorEvictionWrtSenderNonce(sender []byte, senderNonce uint64, evicted [][]byte) { - logRemove.Trace("monitorEvictionWrtSenderNonce()", "sender", sender, "nonce", senderNonce, "num", len(evicted)) -} - -func (cache *TxCache) monitorEvictionStart() *core.StopWatch { - logRemove.Debug("monitorEvictionStart()", "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) - sw := core.NewStopWatch() - sw.Start("eviction") - return sw -} - -func (cache *TxCache) monitorEvictionEnd(stopWatch *core.StopWatch) { - stopWatch.Stop("eviction") - duration := stopWatch.GetMeasurement("eviction") - logRemove.Debug("monitorEvictionEnd()", "duration", duration, "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) - cache.evictionJournal.display() -} - func (cache *TxCache) monitorSelectionStart(contextualLogger logger.Logger) *core.StopWatch { contextualLogger.Debug("monitorSelectionStart()", "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) sw := core.NewStopWatch() @@ -98,19 +76,6 @@ func (cache *TxCache) monitorBatchSelectionEnd(journal batchSelectionJournal) { } } -// evictionJournal keeps a short journal about the eviction process -// This is useful for debugging and reasoning about the eviction -type evictionJournal struct { - evictionPerformed bool - passOneNumTxs uint32 - passOneNumSenders uint32 - passOneNumSteps uint32 -} - -func (journal *evictionJournal) display() { - logRemove.Debug("Eviction.pass1:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders, "steps", journal.passOneNumSteps) -} - func monitorSendersScoreHistogram(scoreGroups [][]*txListForSender) { if log.GetLevel() > logger.LogDebug { return diff --git a/txcache/txCache.go b/txcache/txCache.go index 81879d49..8021e4f1 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -15,18 +15,16 @@ var _ types.Cacher = (*TxCache)(nil) // TxCache represents a cache-like structure (it has a fixed capacity and implements an eviction mechanism) for holding transactions type TxCache struct { - name string - txListBySender *txListBySenderMap - txByHash *txByHashMap - config ConfigSourceMe - evictionMutex sync.Mutex - evictionJournal evictionJournal - evictionSnapshotOfSenders []*txListForSender - isEvictionInProgress atomic.Flag - numSendersSelected atomic.Counter - numSendersWithInitialGap atomic.Counter - numSendersWithMiddleGap atomic.Counter - mutTxOperation sync.Mutex + name string + txListBySender *txListBySenderMap + txByHash *txByHashMap + config ConfigSourceMe + evictionMutex sync.Mutex + isEvictionInProgress atomic.Flag + numSendersSelected atomic.Counter + numSendersWithInitialGap atomic.Counter + numSendersWithMiddleGap atomic.Counter + mutTxOperation sync.Mutex } // NewTxCache creates a new transaction cache @@ -48,11 +46,10 @@ func NewTxCache(config ConfigSourceMe, txGasHandler TxGasHandler) (*TxCache, err scoreComputerObj := newDefaultScoreComputer(txGasHandler) txCache := &TxCache{ - name: config.Name, - txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj, scoreComputerObj, txGasHandler), - txByHash: newTxByHashMap(numChunks), - config: config, - evictionJournal: evictionJournal{}, + name: config.Name, + txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj, scoreComputerObj, txGasHandler), + txByHash: newTxByHashMap(numChunks), + config: config, } return txCache, 
nil @@ -68,7 +65,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { logAdd.Trace("AddTx()", "tx", tx.TxHash) if cache.config.EvictionEnabled { - cache.doEviction() + _ = cache.doEviction() } cache.mutTxOperation.Lock() @@ -85,7 +82,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { } if len(evicted) > 0 { - cache.monitorEvictionWrtSenderLimit(tx.Tx.GetSndAddr(), evicted) + logRemove.Debug("AddTx() with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } @@ -348,7 +345,7 @@ func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { evicted := cache.txListBySender.notifyAccountNonce(accountKey, nonce) if len(evicted) > 0 { - cache.monitorEvictionWrtSenderNonce(accountKey, nonce, evicted) + logRemove.Trace("NotifyAccountNonce() with eviction", "sender", accountKey, "nonce", nonce, "num evicted txs", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } } From 9211efa02021eb4ea9d6b9726a02c2f523df5c70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 25 Jul 2024 21:15:04 +0300 Subject: [PATCH 051/124] Monitoring simplifications. --- txcache/monitoring.go | 79 +++++++----------------------------- txcache/printing.go | 22 +++++----- txcache/txCache.go | 36 +++++++++------- txcache/txListBySenderMap.go | 2 +- txcache/txListForSender.go | 20 +++++---- 5 files changed, 62 insertions(+), 97 deletions(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 47dd70c6..ae38b5c0 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -4,31 +4,25 @@ import ( "fmt" "strings" - "github.com/multiversx/mx-chain-core-go/core" logger "github.com/multiversx/mx-chain-logger-go" ) -func (cache *TxCache) monitorSelectionStart(contextualLogger logger.Logger) *core.StopWatch { - contextualLogger.Debug("monitorSelectionStart()", "numBytes", cache.NumBytes(), "txs", cache.CountTx(), "senders", cache.CountSenders()) - sw := core.NewStopWatch() - sw.Start("selection") - return sw -} +func displaySendersScoreHistogram(scoreGroups [][]*txListForSender) { + if log.GetLevel() > logger.LogDebug { + return + } -func (cache *TxCache) monitorSelectionEnd(contextualLog logger.Logger, stopWatch *core.StopWatch, selection []*WrappedTransaction) { - stopWatch.Stop("selection") - duration := stopWatch.GetMeasurement("selection") + stringBuilder := strings.Builder{} - numSendersSelected := cache.numSendersSelected.Reset() - numSendersWithInitialGap := cache.numSendersWithInitialGap.Reset() - numSendersWithMiddleGap := cache.numSendersWithMiddleGap.Reset() + for i, group := range scoreGroups { + if len(group) == 0 { + continue + } - contextualLog.Debug("monitorSelectionEnd()", "duration", duration, - "numTxSelected", len(selection), - "numSendersSelected", numSendersSelected, - "numSendersWithInitialGap", numSendersWithInitialGap, - "numSendersWithMiddleGap", numSendersWithMiddleGap, - ) + stringBuilder.WriteString(fmt.Sprintf("#%d: %d; ", i, len(group))) + } + + log.Debug("displaySendersScoreHistogram()", "histogram", stringBuilder.String()) } func displaySelectionOutcome(contextualLogger logger.Logger, sortedSenders []*txListForSender, selection []*WrappedTransaction) { @@ -37,59 +31,16 @@ func displaySelectionOutcome(contextualLogger logger.Logger, sortedSenders []*tx } if len(sortedSenders) > 0 { - contextualLogger.Trace("Sorted senders (as newline-separated JSON):") + contextualLogger.Trace("displaySelectionOutcome() - senders (as 
newline-separated JSON):") contextualLogger.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) } else { contextualLogger.Trace("Sorted senders: none") } if len(selection) > 0 { - contextualLogger.Trace("Selected transactions (as newline-separated JSON):") + contextualLogger.Trace("displaySelectionOutcome() - transactions (as newline-separated JSON):") contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) } else { contextualLogger.Trace("Selected transactions: none") } } - -type batchSelectionJournal struct { - selectedNum int - selectedGas uint64 - isFirstBatch bool - hasInitialGap bool - hasMiddleGap bool -} - -func (cache *TxCache) monitorBatchSelectionEnd(journal batchSelectionJournal) { - if !journal.isFirstBatch { - return - } - - if journal.hasInitialGap { - cache.numSendersWithInitialGap.Increment() - } else if journal.hasMiddleGap { - // Currently, we only count middle gaps on first batch (for simplicity) - cache.numSendersWithMiddleGap.Increment() - } - - if journal.selectedNum > 0 { - cache.numSendersSelected.Increment() - } -} - -func monitorSendersScoreHistogram(scoreGroups [][]*txListForSender) { - if log.GetLevel() > logger.LogDebug { - return - } - - stringBuilder := strings.Builder{} - - for i, group := range scoreGroups { - if len(group) == 0 { - continue - } - - stringBuilder.WriteString(fmt.Sprintf("#%d: %d; ", i, len(group))) - } - - log.Debug("monitorSendersScoreHistogram()", "histogram", stringBuilder.String()) -} diff --git a/txcache/printing.go b/txcache/printing.go index de85bb5d..bee5708a 100644 --- a/txcache/printing.go +++ b/txcache/printing.go @@ -17,11 +17,12 @@ type printedTransaction struct { } type printedSender struct { - Address string `json:"address"` - Score int `json:"score"` - Nonce uint64 `json:"nonce"` - IsNonceKnown bool `json:"isNonceKnown"` - NumTxs uint64 `json:"numTxs"` + Address string `json:"address"` + Score int `json:"score"` + Nonce uint64 `json:"nonce"` + IsNonceKnown bool `json:"isNonceKnown"` + HasInitialGap bool `json:"hasInitialGap"` + NumTxs uint64 `json:"numTxs"` } // marshalSendersToNewlineDelimitedJson converts a list of senders to a newline-delimited JSON string. 
@@ -77,10 +78,11 @@ func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction func convertTxListForSenderToPrintedSender(txListForSender *txListForSender) *printedSender { return &printedSender{ - Address: hex.EncodeToString([]byte(txListForSender.sender)), - Score: txListForSender.getScore(), - Nonce: txListForSender.accountNonce.Get(), - IsNonceKnown: txListForSender.accountNonceKnown.IsSet(), - NumTxs: txListForSender.countTxWithLock(), + Address: hex.EncodeToString([]byte(txListForSender.sender)), + Score: txListForSender.getScore(), + Nonce: txListForSender.accountNonce.Get(), + IsNonceKnown: txListForSender.accountNonceKnown.IsSet(), + HasInitialGap: txListForSender.hasInitialGapWithLock(), + NumTxs: txListForSender.countTxWithLock(), } } diff --git a/txcache/txCache.go b/txcache/txCache.go index 8021e4f1..8f73c7b3 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -3,6 +3,7 @@ package txcache import ( "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" logger "github.com/multiversx/mx-chain-logger-go" @@ -15,16 +16,13 @@ var _ types.Cacher = (*TxCache)(nil) // TxCache represents a cache-like structure (it has a fixed capacity and implements an eviction mechanism) for holding transactions type TxCache struct { - name string - txListBySender *txListBySenderMap - txByHash *txByHashMap - config ConfigSourceMe - evictionMutex sync.Mutex - isEvictionInProgress atomic.Flag - numSendersSelected atomic.Counter - numSendersWithInitialGap atomic.Counter - numSendersWithMiddleGap atomic.Counter - mutTxOperation sync.Mutex + name string + txListBySender *txListBySenderMap + txByHash *txByHashMap + config ConfigSourceMe + evictionMutex sync.Mutex + isEvictionInProgress atomic.Flag + mutTxOperation sync.Mutex } // NewTxCache creates a new transaction cache @@ -119,7 +117,15 @@ func (cache *TxCache) SelectTransactions(numRequested int, gasRequested uint64, } func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, numRequested int, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) ([]*txListForSender, []*WrappedTransaction) { - stopWatch := cache.monitorSelectionStart(contextualLogger) + stopWatch := core.NewStopWatch() + stopWatch.Start("selection") + + contextualLogger.Debug( + "doSelectTransactions(): begin", + "num bytes", cache.NumBytes(), + "num txs", cache.CountTx(), + "num senders", cache.CountSenders(), + ) senders := cache.getSendersEligibleForSelection() transactions := make([]*WrappedTransaction, numRequested) @@ -145,8 +151,6 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, numRe selectedNum += batchSelectionJournal.selectedNum selectedNumInThisPass += batchSelectionJournal.selectedNum - cache.monitorBatchSelectionEnd(batchSelectionJournal) - shouldContinueSelection := selectedNum < numRequested && selectedGas < gasRequested if !shouldContinueSelection { break @@ -162,7 +166,11 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, numRe transactions = transactions[:selectedNum] - cache.monitorSelectionEnd(contextualLogger, stopWatch, transactions) + contextualLogger.Debug( + "doSelectTransactions(): end", + "duration", stopWatch.GetMeasurement("selection"), + "num txs selected", selectedNum, + ) return senders, transactions } diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 6e71a1a2..c2c2c703 100644 --- 
a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -177,7 +177,7 @@ func (txMap *txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender }) txMap.shuffleSendersWithinScoreGroups(groups) - monitorSendersScoreHistogram(groups) + displaySendersScoreHistogram(groups) return groups } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 8a79ab39..d6546660 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -31,6 +31,11 @@ type txListForSender struct { mutex sync.RWMutex } +type batchSelectionJournal struct { + selectedNum int + selectedGas uint64 +} + // newTxListForSender creates a new (sorted) list of transactions func newTxListForSender(sender string, constraints *senderConstraints, scoreComputer scoreComputer) *txListForSender { return &txListForSender{ @@ -246,15 +251,10 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati journal := batchSelectionJournal{} if isFirstBatch { - hasInitialGap := listForSender.hasInitialGap() - - journal.isFirstBatch = true - journal.hasInitialGap = hasInitialGap - // Reset the internal state used for copy operations listForSender.selectionPreviousNonce = 0 listForSender.selectionPointer = listForSender.items.Front() - listForSender.selectionDetectedGap = hasInitialGap + listForSender.selectionDetectedGap = listForSender.hasInitialGap() } // If a nonce gap is detected, no transaction is returned in this read. @@ -301,7 +301,6 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati journal.selectedNum = selectedNum journal.selectedGas = selectedGas - journal.hasMiddleGap = listForSender.selectionDetectedGap return journal } @@ -380,7 +379,6 @@ func (listForSender *txListForSender) evictTransactionsWithLowerNonces(accountNo return evictedTxHashes } -// hasInitialGap should only be called at tx selection time, since only then we can detect initial gaps with certainty // This function should only be used in critical section (listForSender.mutex) func (listForSender *txListForSender) hasInitialGap() bool { accountNonceKnown := listForSender.accountNonceKnown.IsSet() @@ -399,6 +397,12 @@ func (listForSender *txListForSender) hasInitialGap() bool { return hasGap } +func (listForSender *txListForSender) hasInitialGapWithLock() bool { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + return listForSender.hasInitialGap() +} + // This function should only be used in critical section (listForSender.mutex) func (listForSender *txListForSender) getLowestNonceTx() *WrappedTransaction { front := listForSender.items.Front() From dede39cf7e581ecadc2053439d236a7550c06d72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 25 Jul 2024 21:18:22 +0300 Subject: [PATCH 052/124] Fix logs. 
--- txcache/monitoring.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/txcache/monitoring.go b/txcache/monitoring.go index ae38b5c0..3f245e98 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -34,13 +34,13 @@ func displaySelectionOutcome(contextualLogger logger.Logger, sortedSenders []*tx contextualLogger.Trace("displaySelectionOutcome() - senders (as newline-separated JSON):") contextualLogger.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) } else { - contextualLogger.Trace("Sorted senders: none") + contextualLogger.Trace("displaySelectionOutcome() - senders: none") } if len(selection) > 0 { contextualLogger.Trace("displaySelectionOutcome() - transactions (as newline-separated JSON):") contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) } else { - contextualLogger.Trace("Selected transactions: none") + contextualLogger.Trace("displaySelectionOutcome() - transactions: none") } } From 843bbd2d1266c252dada899c98f61d3ab9893b99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 26 Jul 2024 11:13:45 +0300 Subject: [PATCH 053/124] Extra logs, test, comments etc. --- txcache/printing.go | 28 ++++++++++++++++------------ txcache/score.go | 11 +++++++++++ txcache/score_test.go | 1 + txcache/txListBySenderMap.go | 2 ++ txcache/txListForSender.go | 1 + 5 files changed, 31 insertions(+), 12 deletions(-) diff --git a/txcache/printing.go b/txcache/printing.go index bee5708a..8dd44cda 100644 --- a/txcache/printing.go +++ b/txcache/printing.go @@ -8,12 +8,14 @@ import ( ) type printedTransaction struct { - Hash string `json:"hash"` - Nonce uint64 `json:"nonce"` - GasPrice uint64 `json:"gasPrice"` - GasLimit uint64 `json:"gasLimit"` - Sender string `json:"sender"` - Receiver string `json:"receiver"` + Hash string `json:"hash"` + Nonce uint64 `json:"nonce"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Sender string `json:"sender"` + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` + Fee float64 `json:"fee"` } type printedSender struct { @@ -67,12 +69,14 @@ func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction transaction := wrappedTx.Tx return &printedTransaction{ - Hash: hex.EncodeToString(wrappedTx.TxHash), - Nonce: transaction.GetNonce(), - Receiver: hex.EncodeToString(transaction.GetRcvAddr()), - Sender: hex.EncodeToString(transaction.GetSndAddr()), - GasPrice: transaction.GetGasPrice(), - GasLimit: transaction.GetGasLimit(), + Hash: hex.EncodeToString(wrappedTx.TxHash), + Nonce: transaction.GetNonce(), + Receiver: hex.EncodeToString(transaction.GetRcvAddr()), + Sender: hex.EncodeToString(transaction.GetSndAddr()), + GasPrice: transaction.GetGasPrice(), + GasLimit: transaction.GetGasLimit(), + DataLength: len(transaction.GetData()), + Fee: wrappedTx.TxFee, } } diff --git a/txcache/score.go b/txcache/score.go index 573eb142..ef7bae04 100644 --- a/txcache/score.go +++ b/txcache/score.go @@ -28,6 +28,17 @@ func newDefaultScoreComputer(txGasHandler TxGasHandler) *defaultScoreComputer { excellentPpuNormalizedLog := math.Log(excellentPpuNormalized) scoreScalingFactor := float64(maxSenderScore) / excellentPpuNormalizedLog + log.Debug("newDefaultScoreComputer()", + "maxGasLimitPerTx", txGasHandler.MaxGasLimitPerTx(), + "minGasPrice", txGasHandler.MinGasPrice(), + "worstPpu", worstPpu, + "worstPpuLog", worstPpuLog, + "excellentPpu", excellentPpu, + "excellentPpuNormalized", excellentPpuNormalized, + "excellentPpuNormalizedLog", 
excellentPpuNormalizedLog, + "scoreScalingFactor", scoreScalingFactor, + ) + return &defaultScoreComputer{ worstPpuLog: worstPpuLog, scoreScalingFactor: scoreScalingFactor, diff --git a/txcache/score_test.go b/txcache/score_test.go index 4bd2488e..e319f717 100644 --- a/txcache/score_test.go +++ b/txcache/score_test.go @@ -14,6 +14,7 @@ func TestNewDefaultScoreComputer(t *testing.T) { require.NotNil(t, computer) require.Equal(t, float64(16.12631180572966), computer.worstPpuLog) + require.Equal(t, float64(16.112421018189185), computer.scoreScalingFactor) } func TestComputeWorstPpu(t *testing.T) { diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index c2c2c703..0cee9cdb 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -102,6 +102,8 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { // Important: this doesn't remove the transactions from txCache.txByHash. That's done by the caller. func (txMap *txListBySenderMap) removeSender(sender string) bool { + logRemove.Trace("txListBySenderMap.removeSender()", "sender", sender) + _, removed := txMap.backingMap.Remove(sender) if removed { txMap.counter.Decrement() diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index d6546660..fced7d2c 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -113,6 +113,7 @@ func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction, listForSender.noncesTracker.addNonce(nonce) } +// This function should only be used in critical section (listForSender.mutex) func (listForSender *txListForSender) recomputeScore() { scoreParams := listForSender.getScoreParams() score := listForSender.scoreComputer.computeScore(scoreParams) From 9b8f619b77be557353be9c40387b624acc497528 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 30 Jul 2024 22:43:28 +0300 Subject: [PATCH 054/124] Refactoring, fix interface etc. --- txcache/crossTxCache.go | 4 ++++ txcache/eviction.go | 2 +- txcache/testutils_test.go | 4 ++-- txcache/txCache.go | 8 +------- txcache/txListForSender.go | 31 ++++++++++++++++++++++--------- txcache/txListForSender_test.go | 6 +++--- 6 files changed, 33 insertions(+), 22 deletions(-) diff --git a/txcache/crossTxCache.go b/txcache/crossTxCache.go index 0c21e0d9..ccd1aa05 100644 --- a/txcache/crossTxCache.go +++ b/txcache/crossTxCache.go @@ -115,6 +115,10 @@ func (cache *CrossTxCache) GetTransactionsPoolForSender(_ string) []*WrappedTran return make([]*WrappedTransaction, 0) } +// NotifyAccountNonce does nothing, only to respect the interface +func (cache *CrossTxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { +} + // IsInterfaceNil returns true if there is no value under the interface func (cache *CrossTxCache) IsInterfaceNil() bool { return cache == nil diff --git a/txcache/eviction.go b/txcache/eviction.go index 6a4f4ca9..c91bb493 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -136,7 +136,7 @@ func (cache *TxCache) evictSendersAndTheirTxs(listsToEvict []*txListForSender) ( for _, txList := range listsToEvict { sendersToEvict = append(sendersToEvict, txList.sender) - txsToEvict = append(txsToEvict, txList.getTxHashes()...) + txsToEvict = append(txsToEvict, txList.getTxsHashes()...) 
} return cache.doEvictItems(txsToEvict, sendersToEvict) diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index a1d76f8c..0bb3a16f 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -25,7 +25,7 @@ func (cache *TxCache) areInternalMapsConsistent() bool { for _, sender := range senders { numInMapBySender += int(sender.countTx()) - for _, hash := range sender.getTxHashes() { + for _, hash := range sender.getTxsHashes() { _, ok := internalMapByHash.getTx(string(hash)) if !ok { numMissingInMapByHash++ @@ -62,7 +62,7 @@ func (cache *TxCache) getScoreOfSender(sender string) int { } func (listForSender *txListForSender) getTxHashesAsStrings() []string { - hashes := listForSender.getTxHashes() + hashes := listForSender.getTxsHashes() return hashesAsStrings(hashes) } diff --git a/txcache/txCache.go b/txcache/txCache.go index 8f73c7b3..560f4f62 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -266,13 +266,7 @@ func (cache *TxCache) GetTransactionsPoolForSender(sender string) []*WrappedTran return nil } - wrappedTxs := make([]*WrappedTransaction, listForSender.items.Len()) - for element, i := listForSender.items.Front(), 0; element != nil; element, i = element.Next(), i+1 { - tx := element.Value.(*WrappedTransaction) - wrappedTxs[i] = tx - } - - return wrappedTxs + return listForSender.getTxs() } // Clear clears the cache diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index fced7d2c..eaded08f 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -249,8 +249,6 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati listForSender.mutex.Lock() defer listForSender.mutex.Unlock() - journal := batchSelectionJournal{} - if isFirstBatch { // Reset the internal state used for copy operations listForSender.selectionPreviousNonce = 0 @@ -260,7 +258,7 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati // If a nonce gap is detected, no transaction is returned in this read. 
if listForSender.selectionDetectedGap { - return journal + return batchSelectionJournal{} } selectedGas := uint64(0) @@ -300,14 +298,14 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati selectedGas += gasLimit } - journal.selectedNum = selectedNum - journal.selectedGas = selectedGas - - return journal + return batchSelectionJournal{ + selectedNum: selectedNum, + selectedGas: selectedGas, + } } -// getTxHashes returns the hashes of transactions in the list -func (listForSender *txListForSender) getTxHashes() [][]byte { +// getTxsHashes returns the hashes of transactions in the list +func (listForSender *txListForSender) getTxsHashes() [][]byte { listForSender.mutex.RLock() defer listForSender.mutex.RUnlock() @@ -321,6 +319,21 @@ func (listForSender *txListForSender) getTxHashes() [][]byte { return result } +// getTxs returns the transactions in the list +func (listForSender *txListForSender) getTxs() []*WrappedTransaction { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + + result := make([]*WrappedTransaction, 0, listForSender.countTx()) + + for element := listForSender.items.Front(); element != nil; element = element.Next() { + value := element.Value.(*WrappedTransaction) + result = append(result, value) + } + + return result +} + // This function should only be used in critical section (listForSender.mutex) func (listForSender *txListForSender) countTx() uint64 { return uint64(listForSender.items.Len()) diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 06cc6d71..1103b135 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -300,15 +300,15 @@ func TestListForSender_hasInitialGap(t *testing.T) { func TestListForSender_getTxHashes(t *testing.T) { list := newUnconstrainedListToTest() - require.Len(t, list.getTxHashes(), 0) + require.Len(t, list.getTxsHashes(), 0) txGasHandler := txcachemocks.NewTxGasHandlerMock() list.AddTx(createTx([]byte("A"), ".", 1), txGasHandler) - require.Len(t, list.getTxHashes(), 1) + require.Len(t, list.getTxsHashes(), 1) list.AddTx(createTx([]byte("B"), ".", 2), txGasHandler) list.AddTx(createTx([]byte("C"), ".", 3), txGasHandler) - require.Len(t, list.getTxHashes(), 3) + require.Len(t, list.getTxsHashes(), 3) } func TestListForSender_DetectRaceConditions(t *testing.T) { From 07437b3231b521dd609151a0dd92fc486b9c6e3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 6 Aug 2024 14:55:32 +0300 Subject: [PATCH 055/124] Fix diagnosis / duration. --- txcache/txCache.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/txcache/txCache.go b/txcache/txCache.go index 560f4f62..b613654f 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -166,6 +166,8 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, numRe transactions = transactions[:selectedNum] + stopWatch.Stop("selection") + contextualLogger.Debug( "doSelectTransactions(): end", "duration", stopWatch.GetMeasurement("selection"), From a03e9555d11afa6c1c9f8310f89a6b8aad78a12d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 6 Sep 2024 10:04:48 +0300 Subject: [PATCH 056/124] Additional logging. 
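Enrich the AddTx() trace with the transaction's nonce and sender, and give
RemoveTxByHash() a distinct message for each outcome. A rough sketch of the
cases the new messages correspond to (illustrative only, not part of the diff):

    removed := cache.RemoveTxByHash(txHash)
    // not found in txByHash          -> "RemoveTxByHash(), but !foundInByHash" (removed == false)
    // found, but sender already gone -> "RemoveTxByHash(), but !foundInBySender" (removed == true)
    // found and removed              -> "RemoveTxByHash()" (removed == true)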
--- txcache/txCache.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/txcache/txCache.go b/txcache/txCache.go index b613654f..8f2f671f 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -60,7 +60,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { return false, false } - logAdd.Trace("AddTx()", "tx", tx.TxHash) + logAdd.Trace("AddTx()", "tx", tx.TxHash, "nonce", tx.Tx.GetNonce(), "sender", tx.Tx.GetSndAddr()) if cache.config.EvictionEnabled { _ = cache.doEviction() @@ -198,10 +198,9 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { cache.mutTxOperation.Lock() defer cache.mutTxOperation.Unlock() - logRemove.Trace("RemoveTxByHash()", "tx", txHash) - tx, foundInByHash := cache.txByHash.removeTx(string(txHash)) if !foundInByHash { + logRemove.Trace("RemoveTxByHash(), but !foundInByHash", "tx", txHash) return false } @@ -215,9 +214,10 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { // - B reaches "cache.txByHash.RemoveTxsBulk()" // - B reaches "cache.txListBySender.RemoveSendersBulk()" // - A reaches "cache.txListBySender.removeTx()", but sender does not exist anymore - logRemove.Debug("RemoveTxByHash(): slight inconsistency detected: !foundInBySender", "tx", txHash) + logRemove.Debug("RemoveTxByHash(), but !foundInBySender", "tx", txHash) } + logRemove.Trace("RemoveTxByHash()", "tx", txHash) return true } From 91e0519b760d9fe8a768e861e17b4f88311c543b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 6 Sep 2024 10:29:15 +0300 Subject: [PATCH 057/124] Additional logs when gaps are detected (at selection time). --- txcache/txListForSender.go | 34 +++++++++++++++++++++++++-------- txcache/txListForSender_test.go | 11 ++++++++--- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index eaded08f..c29a2d70 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -253,7 +253,17 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati // Reset the internal state used for copy operations listForSender.selectionPreviousNonce = 0 listForSender.selectionPointer = listForSender.items.Front() - listForSender.selectionDetectedGap = listForSender.hasInitialGap() + + accountNonce, firstTxNonce, hasInitialGap := listForSender.hasInitialGap() + if hasInitialGap { + log.Trace("selectBatchTo(): initial gap detected", + "sender", listForSender.sender, + "accountNonce", accountNonce, + "firstTxNonce", firstTxNonce, + ) + } + + listForSender.selectionDetectedGap = hasInitialGap } // If a nonce gap is detected, no transaction is returned in this read. @@ -285,6 +295,12 @@ func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destinati isMiddleGap := listForSender.selectionPreviousNonce > 0 && nonce > listForSender.selectionPreviousNonce+1 if isMiddleGap { + log.Trace("selectBatchTo(): middle gap detected", + "sender", listForSender.sender, + "previousNonce", listForSender.selectionPreviousNonce, + "nonce", nonce, + ) + listForSender.selectionDetectedGap = true break } @@ -393,28 +409,30 @@ func (listForSender *txListForSender) evictTransactionsWithLowerNonces(accountNo return evictedTxHashes } -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) hasInitialGap() bool { +// This function should only be used in critical section (listForSender.mutex). 
+// When a gap is detected, the (known) account nonce and the first transaction nonce are also returned.
+func (listForSender *txListForSender) hasInitialGap() (uint64, uint64, bool) {
 	accountNonceKnown := listForSender.accountNonceKnown.IsSet()
 	if !accountNonceKnown {
-		return false
+		return 0, 0, false
 	}
 
 	firstTx := listForSender.getLowestNonceTx()
 	if firstTx == nil {
-		return false
+		return 0, 0, false
 	}
 
-	firstTxNonce := firstTx.Tx.GetNonce()
 	accountNonce := listForSender.accountNonce.Get()
+	firstTxNonce := firstTx.Tx.GetNonce()
 	hasGap := firstTxNonce > accountNonce
-	return hasGap
+	return accountNonce, firstTxNonce, hasGap
 }
 
 func (listForSender *txListForSender) hasInitialGapWithLock() bool {
 	listForSender.mutex.RLock()
 	defer listForSender.mutex.RUnlock()
-	return listForSender.hasInitialGap()
+	_, _, hasInitialGap := listForSender.hasInitialGap()
+	return hasInitialGap
 }
 
 // This function should only be used in critical section (listForSender.mutex)
diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go
index 1103b135..7e6b8b2c 100644
--- a/txcache/txListForSender_test.go
+++ b/txcache/txListForSender_test.go
@@ -289,13 +289,18 @@ func TestListForSender_hasInitialGap(t *testing.T) {
 	txGasHandler := txcachemocks.NewTxGasHandlerMock()
 
 	// No transaction, no gap
-	require.False(t, list.hasInitialGap())
+	_, _, hasInitialGap := list.hasInitialGap()
+	require.False(t, hasInitialGap)
+
 	// One gap
 	list.AddTx(createTx([]byte("tx-43"), ".", 43), txGasHandler)
-	require.True(t, list.hasInitialGap())
+	_, _, hasInitialGap = list.hasInitialGap()
+	require.True(t, hasInitialGap)
+
 	// Resolve gap
 	list.AddTx(createTx([]byte("tx-42"), ".", 42), txGasHandler)
-	require.False(t, list.hasInitialGap())
+	_, _, hasInitialGap = list.hasInitialGap()
+	require.False(t, hasInitialGap)
 }
 
 func TestListForSender_getTxHashes(t *testing.T) {

From 2235ce3f0d08a1616dca20f67af9341a4c3864ba Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?=
Date: Fri, 6 Sep 2024 10:38:34 +0300
Subject: [PATCH 058/124] Fix evictTransactionsWithLowerNonces.
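
The previous loop advanced with `element = element.Next()` after the element had
already been removed; `container/list` clears an element's links on Remove(), so
Next() then returns nil and the iteration stopped after the first evicted
transaction. The fix captures the next element before removing the current one.
A minimal, self-contained sketch of the pattern (illustrative only; `removeMatching`
and `shouldRemove` are placeholder names, not part of the mempool code):

```
package sketch

import "container/list"

// removeMatching removes all elements satisfying shouldRemove, in a single pass.
// The next element is captured before Remove(), which clears the element's links.
func removeMatching(l *list.List, shouldRemove func(*list.Element) bool) {
	for element := l.Front(); element != nil; {
		nextElement := element.Next()
		if shouldRemove(element) {
			l.Remove(element)
		}
		element = nextElement
	}
}
```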
--- txcache/txListForSender.go | 6 ++++-- txcache/txListForSender_test.go | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index c29a2d70..4ccb367b 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -391,7 +391,7 @@ func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) [][]byte func (listForSender *txListForSender) evictTransactionsWithLowerNonces(accountNonce uint64) [][]byte { evictedTxHashes := make([][]byte, 0) - for element := listForSender.items.Front(); element != nil; element = element.Next() { + for element := listForSender.items.Front(); element != nil; { tx := element.Value.(*WrappedTransaction) txNonce := tx.Tx.GetNonce() @@ -399,8 +399,10 @@ func (listForSender *txListForSender) evictTransactionsWithLowerNonces(accountNo break } - listForSender.items.Remove(element) + nextElement := element.Next() + _ = listForSender.items.Remove(element) listForSender.onRemovedListElement(element) + element = nextElement // Keep track of removed transactions evictedTxHashes = append(evictedTxHashes, tx.TxHash) diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 7e6b8b2c..3e5bfc81 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -283,6 +283,27 @@ func TestListForSender_NotifyAccountNonce(t *testing.T) { require.True(t, list.accountNonceKnown.IsSet()) } +func TestListForSender_evictTransactionsWithLowerNonces(t *testing.T) { + list := newUnconstrainedListToTest() + txGasHandler := txcachemocks.NewTxGasHandlerMock() + + list.AddTx(createTx([]byte("tx-42"), ".", 42), txGasHandler) + list.AddTx(createTx([]byte("tx-43"), ".", 43), txGasHandler) + list.AddTx(createTx([]byte("tx-44"), ".", 44), txGasHandler) + list.AddTx(createTx([]byte("tx-45"), ".", 45), txGasHandler) + + require.Equal(t, 4, list.items.Len()) + + list.evictTransactionsWithLowerNonces(43) + require.Equal(t, 3, list.items.Len()) + + list.evictTransactionsWithLowerNonces(44) + require.Equal(t, 2, list.items.Len()) + + list.evictTransactionsWithLowerNonces(99) + require.Equal(t, 0, list.items.Len()) +} + func TestListForSender_hasInitialGap(t *testing.T) { list := newUnconstrainedListToTest() list.notifyAccountNonce(42) From f79ad1b8d56f331c6031490246f671842725cd05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 2 Oct 2024 13:26:07 +0300 Subject: [PATCH 059/124] Sketch readme. --- txcache/README.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 txcache/README.md diff --git a/txcache/README.md b/txcache/README.md new file mode 100644 index 00000000..a96431da --- /dev/null +++ b/txcache/README.md @@ -0,0 +1,40 @@ +## Mempool + +### Glossary + +1. **maxSenderScore:** 100 (constant) +2. **selection pass:** a single iteration of the _selection loop_. In a single iteration, the algorithm goes through all the senders (appropriately sorted) and selects a batch of transactions from each sender. A _pass_ can stop early (see **Paragraph 3**). + +### Transactions selection + +### Paragraph 1 + +When a proposer asks the mempool for transactions, it provides the following parameters: + + - `numRequested`: the maximum number of transactions to be returned + - `gasRequested`: the maximum total gas limit of the transactions to be returned + - `baseNumPerSenderBatch`: a base value for the number of transactions to be returned per sender, per selection _pass_. 
This value is used to compute the actual number of transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**).
+ - `baseGasPerSenderBatch`: a base value for the total gas limit of the transactions to be returned per sender, per selection _pass_. This value is used to compute the actual total gas limit of the transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**).
+
+### Paragraph 2
+
+How is the size of a sender batch computed?
+
+1. If the score of the sender is **zero**, then the size of the sender batch is **1**, and the total gas limit of the sender batch is **1**.
+2. If the score of the sender is **non-zero**, then the size of the sender batch is computed as follows:
+  - `scoreDivision = score / maxSenderScore`
+  - `numPerBatch = baseNumPerSenderBatch * scoreDivision`
+  - `gasPerBatch = baseGasPerSenderBatch * scoreDivision`
+
+### Paragraph 3
+
+The mempool selects transactions as follows:
+ - before starting the selection loop, get a snapshot of the senders (sorted by score, descending)
+ - in the selection loop, do as many passes as needed to satisfy `numRequested` and `gasRequested` (see **Paragraph 1**).
+
+### Transactions addition
+
+### Transactions removal
+
+### Monitoring and diagnostics
+

From 4a30d0c148f64578ada77e49df210f9c3a34495c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?=
Date: Wed, 2 Oct 2024 15:50:58 +0300
Subject: [PATCH 060/124] Docs, work in progress.

---
 txcache/README.md | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/txcache/README.md b/txcache/README.md
index a96431da..651baf62 100644
--- a/txcache/README.md
+++ b/txcache/README.md
@@ -2,8 +2,17 @@
 
 ### Glossary
 
-1. **maxSenderScore:** 100 (constant)
-2. **selection pass:** a single iteration of the _selection loop_. In a single iteration, the algorithm goes through all the senders (appropriately sorted) and selects a batch of transactions from each sender. A _pass_ can stop early (see **Paragraph 3**).
+1. **selection session:** an ephemeral session during which the mempool selects transactions for a proposer. A session starts when a proposer asks the mempool for transactions and ends when the mempool returns the transactions. The most important part of a session is the _selection loop_.
+2. **selection pass:** a single iteration of the _selection loop_. In an iteration, the algorithm goes through all the senders (appropriately sorted) and selects a batch of transactions from each sender. A _pass_ can stop early (see **Paragraph 3**).
+3. **sender score:** a score assigned to a sender based on the sender's behavior. The score is used to determine the order in which senders are considered within a _selection pass_, plus the size and capacity of a _sender's transactions batch_. The score is a number in `[0, maxSenderScore]`.
+
+### Configuration
+
+1. **maxSenderScore:** `100`, the maximum score a sender can have. The minimum score is `0`.
+2. **numRequested:** `30_000`, the maximum number of transactions to be returned to a proposer (one _selection session_).
+3. **gasRequested:** `10_000_000_000`, the maximum total gas limit of the transactions to be returned to a proposer (one _selection session_).
+4. **baseNumPerSenderBatch:**: `100`, defines the maximum number of transactions to be selected from the transactions pool, for a sender with the maximum possible score, in a _single pass_. 
Senders with lower scores will have fewer transactions selected in a single pass. +5. **baseGasPerSenderBatch:**: `120_000_000`, defines the maximum gas for transactions to be selected from the transactions pool, for a sender with the maximum possible score, in a single pass. Senders with lower scores will have less gas selected in a single pass. ### Transactions selection @@ -14,7 +23,7 @@ When a proposer asks the mempool for transactions, it provides the following par - `numRequested`: the maximum number of transactions to be returned - `gasRequested`: the maximum total gas limit of the transactions to be returned - `baseNumPerSenderBatch`: a base value for the number of transactions to be returned per sender, per selection _pass_. This value is used to compute the actual number of transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**). - - `baseGasPerSenderBatch`: a base value for the total gas limit of the transactions to be returned per sender, per selection _pass_. This value is used to compute the actual total gas limit of the transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**). + - `baseGasPerSenderBatch`: a base value for the total gas limit of the transactions to be returned per sender, per selection _pass_. This value is used to compute the actual total gas limit of the transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**). Due to how the selection is performed, the theoretical maximum gas might be exceeded (a bit), as follows: `theoretical maximum = (baseGasPerSenderBatch - 1) + max(baseGasPerSenderBatch, max gas limit of a transaction)`. Think of a sender with maximum score, having two transactions, one with `gasLimit = baseGasPerSenderBatch - 1`, and the other with `gasLimit = max gas limit of a transaction`. ### Paragraph 2 @@ -30,7 +39,19 @@ How is the size of a sender batch computed? The mempool selects transactions as follows: - before starting the selection loop, get a snapshot of the senders (sorted by score, descending) - - in the selection loop, do as many passes as needed to satisfy `numRequested` and `gasRequested` (see **Paragraph 1**). + - in the selection loop, do as many _passes_ as needed to satisfy `numRequested` and `gasRequested` (see **Paragraph 1**). + - within a _pass_, go through all the senders (appropriately sorted) and select a batch of transactions from each sender. The size of the batch is computed as described in **Paragraph 2**. + - if either `numRequested` or `gasRequested` is satisfied, stop the _pass_ early. + +### Paragraph 4 + +Within a _selection pass_, a batch of transactions from a sender is selected as follows: + - if it's the first pass, then reset the internal state used for copy operations (in the scope of a sender). Furthermore, attempt to **detect an initial nonces gap** (if enough information is available, that is, if the current account nonce is known - see section **Account nonce notifications**). + - if a nonces gap is detected, return an empty batch. Subsequent passes of the selection loop (within the same selection session) will skip this sender. The sender will be re-considered in a future selection session. + - go through the list of transactions of the sender (sorted by nonce, ascending) and select the first `numPerBatch` transactions that fit within `gasPerBatch`. 
+  - in following passes (within the same selection session), the batch selection algorithm will continue from the last selected transaction of the sender (think of it as a cursor).
+
+### Account nonce notifications
 
 ### Transactions addition
 

From 8352a3f45fcb449979dab0848febb54a0aa5c774 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?=
Date: Wed, 2 Oct 2024 20:20:54 +0300
Subject: [PATCH 061/124] Docs, work in progress. Additional tests.

---
 txcache/README.md     | 28 ++++++++++++++++++++++++++++
 txcache/score_test.go | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

diff --git a/txcache/README.md b/txcache/README.md
index 651baf62..10fd08d4 100644
--- a/txcache/README.md
+++ b/txcache/README.md
@@ -35,6 +35,12 @@ How is the size of a sender batch computed?
   - `numPerBatch = baseNumPerSenderBatch * scoreDivision`
   - `gasPerBatch = baseGasPerSenderBatch * scoreDivision`
 
+Examples:
+ - for `score == 100`, we have `numPerBatch == 100` and `gasPerBatch == 120000000`
+ - for `score == 74`, we have `numPerBatch == 74` and `gasPerBatch == 88800000`
+ - for `score == 1`, we have `numPerBatch == 1` and `gasPerBatch == 1200000`
+ - for `score == 0`, we have `numPerBatch == 1` and `gasPerBatch == 1`
+
 ### Paragraph 3
 
 The mempool selects transactions as follows:
@@ -51,11 +57,33 @@ Within a _selection pass_, a batch of transactions from a sender is selected as
   - go through the list of transactions of the sender (sorted by nonce, ascending) and select the first `numPerBatch` transactions that fit within `gasPerBatch`.
   - in following passes (within the same selection session), the batch selection algorithm will continue from the last selected transaction of the sender (think of it as a cursor).
 
+### Score computation
+
+The score of a sender is computed based on the sender's transactions (as found in the mempool) and the account nonce (as learned through the _account nonce notifications_).
+
+The score is strongly correlated with the average price paid by the sender per unit of computation - we'll call this **avgPpu**, as a property of the sender.
+
+Additionally, we define two global properties: `worstPpu` and `excellentPpu`. A sender with an `avgPpu` of `excellentPpu + 1` gets the maximum score, while a sender with an `avgPpu` of `worstPpu` gets the minimum score. 
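+
+Between these two anchors, the score grows logarithmically with `avgPpu`. As a compact sketch of the mapping (pseudocode; `ln`, `floor` and `clamp` are plain math notation, and the expression follows the implementation in `score.go`, for a sender whose nonce sequence has no gaps and no duplicates):
+
+```
+score = clamp(floor(maxSenderScore * ln(avgPpu / worstPpu) / ln(excellentPpu / worstPpu)), 0, maxSenderScore)
+```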
+ +`worstPpu` is computed as the average price per unit of the "worst" possible transaction - minimum gas price, maximum gas limit, and minimum data size (thus abusing the Protocol gas price subvention): + +``` +worstPpu = (50000 * 1_000_000_000 + (600_000_000 - 50000) * (1_000_000_000 / 100)) / 600_000_000 + = 10082500 +``` + +`excellentPpu` is set to `minGasPrice` times a _chosen_ factor: + +``` +excellentPpu = 1_000_000_000 * 5 = 5_000_000_000 +``` ### Account nonce notifications ### Transactions addition ### Transactions removal +### Transactions eviction + ### Monitoring and diagnostics diff --git a/txcache/score_test.go b/txcache/score_test.go index e319f717..2e28a2df 100644 --- a/txcache/score_test.go +++ b/txcache/score_test.go @@ -42,6 +42,39 @@ func TestDefaultScoreComputer_computeRawScore(t *testing.T) { } func TestDefaultScoreComputer_computeScore(t *testing.T) { + gasHandler := txcachemocks.NewTxGasHandlerMock() + worstPpu := computeWorstPpu(gasHandler) + excellentPpu := float64(gasHandler.MinGasPrice()) * excellentGasPriceFactor + + require.Equal(t, 0, computeScoreGivenAvgPpu(worstPpu)) + require.Equal(t, 11, computeScoreGivenAvgPpu(worstPpu*2)) + require.Equal(t, 31, computeScoreGivenAvgPpu(worstPpu*7)) + require.Equal(t, 74, computeScoreGivenAvgPpu(worstPpu*100)) + require.Equal(t, 90, computeScoreGivenAvgPpu(worstPpu*270)) + require.Equal(t, 99, computeScoreGivenAvgPpu(worstPpu*495)) + require.Equal(t, 100, computeScoreGivenAvgPpu(worstPpu*500)) + + require.Equal(t, 55, computeScoreGivenAvgPpu(excellentPpu/16)) + require.Equal(t, 66, computeScoreGivenAvgPpu(excellentPpu/8)) + require.Equal(t, 77, computeScoreGivenAvgPpu(excellentPpu/4)) + require.Equal(t, 88, computeScoreGivenAvgPpu(excellentPpu/2)) + require.Equal(t, 99, computeScoreGivenAvgPpu(excellentPpu)) + require.Equal(t, 100, computeScoreGivenAvgPpu(excellentPpu+1)) +} + +func computeScoreGivenAvgPpu(avgPpu float64) int { + gasHandler := txcachemocks.NewTxGasHandlerMock() + computer := newDefaultScoreComputer(gasHandler) + + return computer.computeScore(senderScoreParams{ + avgPpuNumerator: avgPpu, + avgPpuDenominator: 1, + isAccountNonceKnown: true, + hasSpotlessSequenceOfNonces: true, + }) +} + +func TestDefaultScoreComputer_computeScore_consideringOneTransaction(t *testing.T) { // Simple transfers: require.Equal(t, 74, computeScoreOfTransaction(0, 50000, oneBillion)) require.Equal(t, 80, computeScoreOfTransaction(0, 50000, 1.5*oneBillion)) From 712f71a5ec058628d9d45b268262c849ca2a69a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 29 Oct 2024 14:29:54 +0200 Subject: [PATCH 062/124] On selection, don't use "numRequested" anymore (limit is wrt. "gas requested"). --- txcache/README.md | 12 ++++++++---- txcache/constants.go | 1 - txcache/diagnosis.go | 1 - txcache/txCache.go | 11 +++++------ txcache/txCache_test.go | 14 +++++--------- 5 files changed, 18 insertions(+), 21 deletions(-) diff --git a/txcache/README.md b/txcache/README.md index 10fd08d4..e0e542fd 100644 --- a/txcache/README.md +++ b/txcache/README.md @@ -9,7 +9,6 @@ ### Configuration 1. **maxSenderScore:** `100`, the maximum score a sender can have. The minimum score is `0`. -2. **numRequested:** `30_000`, the maximum number of transactions to be returned to a proposer (one _selection session_). 3. **gasRequested:** `10_000_000_000`, the maximum total gas limit of the transactions to be returned to a proposer (one _selection session_). 4. 
**baseNumPerSenderBatch:**: `100`, defines the maximum number of transactions to be selected from the transactions pool, for a sender with the maximum possible score, in a _single pass_. Senders with lower scores will have fewer transactions selected in a single pass. 5. **baseGasPerSenderBatch:**: `120_000_000`, defines the maximum gas for transactions to be selected from the transactions pool, for a sender with the maximum possible score, in a single pass. Senders with lower scores will have less gas selected in a single pass. @@ -20,7 +19,6 @@ When a proposer asks the mempool for transactions, it provides the following parameters: - - `numRequested`: the maximum number of transactions to be returned - `gasRequested`: the maximum total gas limit of the transactions to be returned - `baseNumPerSenderBatch`: a base value for the number of transactions to be returned per sender, per selection _pass_. This value is used to compute the actual number of transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**). - `baseGasPerSenderBatch`: a base value for the total gas limit of the transactions to be returned per sender, per selection _pass_. This value is used to compute the actual total gas limit of the transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**). Due to how the selection is performed, the theoretical maximum gas might be exceeded (a bit), as follows: `theoretical maximum = (baseGasPerSenderBatch - 1) + max(baseGasPerSenderBatch, max gas limit of a transaction)`. Think of a sender with maximum score, having two transactions, one with `gasLimit = baseGasPerSenderBatch - 1`, and the other with `gasLimit = max gas limit of a transaction`. @@ -45,9 +43,9 @@ Examples: The mempool selects transactions as follows: - before starting the selection loop, get a snapshot of the senders (sorted by score, descending) - - in the selection loop, do as many _passes_ as needed to satisfy `numRequested` and `gasRequested` (see **Paragraph 1**). + - in the selection loop, do as many _passes_ as needed to satisfy `gasRequested` (see **Paragraph 1**). - within a _pass_, go through all the senders (appropriately sorted) and select a batch of transactions from each sender. The size of the batch is computed as described in **Paragraph 2**. - - if either `numRequested` or `gasRequested` is satisfied, stop the _pass_ early. + - if `gasRequested` is satisfied, stop the _pass_ early. ### Paragraph 4 @@ -77,6 +75,12 @@ worstPpu = (50000 * 1_000_000_000 + (600_000_000 - 50000) * (1_000_000_000 / 100 ``` excellentPpu = 1_000_000_000 * 5 = 5_000_000_000 ``` + +Examples: + - ... 
+ +#### Spotless sequence of transactions + ### Account nonce notifications ### Transactions addition diff --git a/txcache/constants.go b/txcache/constants.go index 3fe20ae6..aa8d4bd0 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -4,7 +4,6 @@ const excellentGasPriceFactor = 5 const maxSenderScore = 100 const diagnosisMaxSendersToDisplay = 1000 const diagnosisMaxTransactionsToDisplay = 10000 -const diagnosisSelectionNumRequested = 30_000 const diagnosisSelectionGasRequested = 10_000_000_000 const diagnosisSelectionBaseNumPerSenderBatch = 100 const diagnosisSelectionBaseGasPerSenderBatch = 120000000 diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index e3c17dd1..7f96d466 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -82,7 +82,6 @@ func (cache *TxCache) diagnoseSelection() { senders, transactions := cache.doSelectTransactions( logDiagnoseSelection, - diagnosisSelectionNumRequested, diagnosisSelectionGasRequested, diagnosisSelectionBaseNumPerSenderBatch, diagnosisSelectionBaseGasPerSenderBatch, diff --git a/txcache/txCache.go b/txcache/txCache.go index 8f2f671f..ad1c0d20 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -96,15 +96,14 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { } // SelectTransactions selects a reasonably fair list of transactions to be included in the next miniblock -// It returns at most "numRequested" transactions, with total gas ~ "gasRequested". +// It returns transactions with total gas ~ "gasRequested". // // Selection is performed in more passes. // In each pass, each sender is allowed to contribute a batch of transactions, // with a number of transactions and total gas proportional to the sender's score. -func (cache *TxCache) SelectTransactions(numRequested int, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) []*WrappedTransaction { +func (cache *TxCache) SelectTransactions(gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) []*WrappedTransaction { senders, transactions := cache.doSelectTransactions( logSelect, - numRequested, gasRequested, baseNumPerSenderBatch, baseGasPerSenderBatch, @@ -116,7 +115,7 @@ func (cache *TxCache) SelectTransactions(numRequested int, gasRequested uint64, return transactions } -func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, numRequested int, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) ([]*txListForSender, []*WrappedTransaction) { +func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) ([]*txListForSender, []*WrappedTransaction) { stopWatch := core.NewStopWatch() stopWatch.Start("selection") @@ -128,7 +127,7 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, numRe ) senders := cache.getSendersEligibleForSelection() - transactions := make([]*WrappedTransaction, numRequested) + transactions := make([]*WrappedTransaction, 0) shouldContinueSelection := true selectedGas := uint64(0) @@ -151,7 +150,7 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, numRe selectedNum += batchSelectionJournal.selectedNum selectedNumInThisPass += batchSelectionJournal.selectedNum - shouldContinueSelection := selectedNum < numRequested && selectedGas < gasRequested + shouldContinueSelection := selectedGas < gasRequested if !shouldContinueSelection { break } diff --git a/txcache/txCache_test.go 
b/txcache/txCache_test.go index ae9a8f6f..5b1afa88 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-storage-go/common" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" @@ -312,7 +311,7 @@ func Test_SelectTransactions_Dummy(t *testing.T) { cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) - sorted := cache.SelectTransactions(10, math.MaxUint64, 2, math.MaxUint64) + sorted := cache.SelectTransactions(math.MaxUint64, 2, math.MaxUint64) require.Len(t, sorted, 8) } @@ -327,7 +326,7 @@ func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) - sorted := cache.SelectTransactions(5, math.MaxUint64, 2, 200000) + sorted := cache.SelectTransactions(math.MaxUint64, 2, 200000) numSelected := 1 + 1 + 3 // 1 alice, 1 carol, 3 bob require.Len(t, sorted, numSelected) @@ -350,7 +349,7 @@ func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { numSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol - sorted := cache.SelectTransactions(10, math.MaxUint64, 2, math.MaxUint64) + sorted := cache.SelectTransactions(math.MaxUint64, 2, math.MaxUint64) require.Len(t, sorted, numSelected) } @@ -361,7 +360,6 @@ func Test_SelectTransactions(t *testing.T) { nSenders := 1000 nTransactionsPerSender := 100 nTotalTransactions := nSenders * nTransactionsPerSender - nRequestedTransactions := math.MaxInt16 for senderTag := 0; senderTag < nSenders; senderTag++ { sender := fmt.Sprintf("sender:%d", senderTag) @@ -375,9 +373,7 @@ func Test_SelectTransactions(t *testing.T) { require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) - sorted := cache.SelectTransactions(nRequestedTransactions, math.MaxUint64, 2, math.MaxUint64) - - require.Len(t, sorted, core.MinInt(nRequestedTransactions, nTotalTransactions)) + sorted := cache.SelectTransactions(math.MaxUint64, 2, math.MaxUint64) // Check order nonces := make(map[string]uint64, nSenders) @@ -493,7 +489,7 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { go func() { for i := 0; i < 100; i++ { fmt.Println("Selection", i) - cache.SelectTransactions(100, math.MaxUint64, 100, math.MaxUint64) + cache.SelectTransactions(math.MaxUint64, 100, math.MaxUint64) } wg.Done() From a30472f666ecb8d8b34b3b5a4d28cba6123d363a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 29 Oct 2024 23:32:21 +0200 Subject: [PATCH 063/124] Preparatory cleanup. 
--- txcache/constants.go | 2 - txcache/diagnosis.go | 4 +- txcache/eviction.go | 69 +------ txcache/eviction_test.go | 175 ----------------- txcache/interface.go | 4 - txcache/noncesTracker.go | 94 ---------- txcache/noncesTracker_test.go | 176 ------------------ txcache/printing.go | 20 +- txcache/score.go | 93 ---------- txcache/score_test.go | 180 ------------------ txcache/testutils_test.go | 9 +- txcache/txCache.go | 69 +------ txcache/txCache_test.go | 40 +--- txcache/txListBySenderMap.go | 62 +------ txcache/txListBySenderMap_test.go | 107 +---------- txcache/txListForSender.go | 155 +--------------- txcache/txListForSender_test.go | 289 +++++------------------------ txcache/wrappedTransaction.go | 13 +- txcache/wrappedTransaction_test.go | 5 +- 19 files changed, 99 insertions(+), 1467 deletions(-) delete mode 100644 txcache/noncesTracker.go delete mode 100644 txcache/noncesTracker_test.go delete mode 100644 txcache/score.go delete mode 100644 txcache/score_test.go diff --git a/txcache/constants.go b/txcache/constants.go index aa8d4bd0..59a6d97f 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -5,5 +5,3 @@ const maxSenderScore = 100 const diagnosisMaxSendersToDisplay = 1000 const diagnosisMaxTransactionsToDisplay = 10000 const diagnosisSelectionGasRequested = 10_000_000_000 -const diagnosisSelectionBaseNumPerSenderBatch = 100 -const diagnosisSelectionBaseGasPerSenderBatch = 120000000 diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index 7f96d466..ffe66085 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -48,7 +48,7 @@ func (cache *TxCache) diagnoseSenders() { return } - senders := cache.txListBySender.getSnapshotDescending() + senders := cache.txListBySender.getSenders() if len(senders) == 0 { return @@ -83,8 +83,6 @@ func (cache *TxCache) diagnoseSelection() { senders, transactions := cache.doSelectTransactions( logDiagnoseSelection, diagnosisSelectionGasRequested, - diagnosisSelectionBaseNumPerSenderBatch, - diagnosisSelectionBaseGasPerSenderBatch, ) displaySelectionOutcome(logDiagnoseSelection, senders, transactions) diff --git a/txcache/eviction.go b/txcache/eviction.go index c91bb493..109f3b03 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -7,9 +7,7 @@ import ( // evictionJournal keeps a short journal about the eviction process // This is useful for debugging and reasoning about the eviction type evictionJournal struct { - numTxs uint32 - numSenders uint32 - numSteps uint32 + numTxs uint32 } // doEviction does cache eviction @@ -42,8 +40,8 @@ func (cache *TxCache) doEviction() *evictionJournal { stopWatch := core.NewStopWatch() stopWatch.Start("eviction") - sendersSnapshot := cache.txListBySender.getSnapshotAscending() - evictionJournal := cache.evictSendersInLoop(sendersSnapshot) + // TODO: reimplement. 
+ evictionJournal := evictionJournal{} stopWatch.Stop("eviction") @@ -54,8 +52,6 @@ func (cache *TxCache) doEviction() *evictionJournal { "num senders", cache.CountSenders(), "duration", stopWatch.GetMeasurement("eviction"), "evicted txs", evictionJournal.numTxs, - "evicted senders", evictionJournal.numSenders, - "eviction steps", evictionJournal.numSteps, ) return &evictionJournal @@ -82,62 +78,3 @@ func (cache *TxCache) areThereTooManyTxs() bool { tooManyTxs := numTxs > uint64(cache.config.CountThreshold) return tooManyTxs } - -func (cache *TxCache) doEvictItems(txsToEvict [][]byte, sendersToEvict []string) (countTxs uint32, countSenders uint32) { - countTxs = cache.txByHash.RemoveTxsBulk(txsToEvict) - countSenders = cache.txListBySender.RemoveSendersBulk(sendersToEvict) - return -} - -func (cache *TxCache) evictSendersInLoop(sendersSnapshot []*txListForSender) evictionJournal { - return cache.evictSendersWhile(sendersSnapshot, cache.isCapacityExceeded) -} - -// evictSendersWhileTooManyTxs removes transactions in a loop, as long as "shouldContinue" is true -// One batch of senders is removed in each step -func (cache *TxCache) evictSendersWhile(sendersSnapshot []*txListForSender, shouldContinue func() bool) evictionJournal { - if !shouldContinue() { - return evictionJournal{} - } - - snapshotLength := uint32(len(sendersSnapshot)) - batchSize := cache.config.NumSendersToPreemptivelyEvict - batchStart := uint32(0) - - journal := evictionJournal{} - - for ; shouldContinue(); journal.numSteps++ { - batchEnd := batchStart + batchSize - batchEndBounded := core.MinUint32(batchEnd, snapshotLength) - batch := sendersSnapshot[batchStart:batchEndBounded] - - numTxsEvictedInStep, numSendersEvictedInStep := cache.evictSendersAndTheirTxs(batch) - - journal.numTxs += numTxsEvictedInStep - journal.numSenders += numSendersEvictedInStep - batchStart += batchSize - - reachedEnd := batchStart >= snapshotLength - noTxsEvicted := numTxsEvictedInStep == 0 - incompleteBatch := numSendersEvictedInStep < batchSize - - shouldBreak := noTxsEvicted || incompleteBatch || reachedEnd - if shouldBreak { - break - } - } - - return journal -} - -func (cache *TxCache) evictSendersAndTheirTxs(listsToEvict []*txListForSender) (uint32, uint32) { - sendersToEvict := make([]string, 0, len(listsToEvict)) - txsToEvict := make([][]byte, 0, approximatelyCountTxInLists(listsToEvict)) - - for _, txList := range listsToEvict { - sendersToEvict = append(sendersToEvict, txList.sender) - txsToEvict = append(txsToEvict, txList.getTxsHashes()...) 
- } - - return cache.doEvictItems(txsToEvict, sendersToEvict) -} diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index 65a6d870..417eb3a6 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -2,86 +2,12 @@ package txcache import ( "math" - "sync" "testing" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) -func TestTxCache_EvictSendersInLoop_BecauseOfCount(t *testing.T) { - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - CountThreshold: 100, - CountPerSenderThreshold: math.MaxUint32, - NumSendersToPreemptivelyEvict: 20, - NumBytesThreshold: maxNumBytesUpperBound, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - } - - txGasHandler := txcachemocks.NewTxGasHandlerMock() - - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) - - // 200 senders, each with 1 transaction - for index := 0; index < 200; index++ { - sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTx([]byte{byte(index)}, sender, 1)) - } - - require.Equal(t, int64(200), cache.txListBySender.counter.Get()) - require.Equal(t, int64(200), cache.txByHash.counter.Get()) - - sendersSnapshot := cache.txListBySender.getSnapshotAscending() - journal := cache.evictSendersInLoop(sendersSnapshot) - - require.Equal(t, uint32(5), journal.numSteps) - require.Equal(t, uint32(100), journal.numTxs) - require.Equal(t, uint32(100), journal.numSenders) - require.Equal(t, int64(100), cache.txListBySender.counter.Get()) - require.Equal(t, int64(100), cache.txByHash.counter.Get()) -} - -func TestTxCache_EvictSendersInLoop_BecauseOfSize(t *testing.T) { - numBytesPerTx := uint32(200) - - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - CountThreshold: math.MaxUint32, - CountPerSenderThreshold: math.MaxUint32, - NumBytesThreshold: numBytesPerTx * 100, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - NumSendersToPreemptivelyEvict: 20, - } - txGasHandler := txcachemocks.NewTxGasHandlerMock() - - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) - - // 200 senders, each with 1 transaction - for index := 0; index < 200; index++ { - sender := string(createFakeSenderAddress(index)) - cache.AddTx(createTx([]byte{byte(index)}, sender, 1).withSize(uint64(numBytesPerTx)).withGasLimit(250000)) - } - - require.Equal(t, int64(200), cache.txListBySender.counter.Get()) - require.Equal(t, int64(200), cache.txByHash.counter.Get()) - - sendersSnapshot := cache.txListBySender.getSnapshotAscending() - journal := cache.evictSendersInLoop(sendersSnapshot) - - require.Equal(t, uint32(5), journal.numSteps) - require.Equal(t, uint32(100), journal.numTxs) - require.Equal(t, uint32(100), journal.numSenders) - require.Equal(t, int64(100), cache.txListBySender.counter.Get()) - require.Equal(t, int64(100), cache.txByHash.counter.Get()) -} - func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", @@ -103,8 +29,6 @@ func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { journal := cache.doEviction() require.Equal(t, uint32(2), journal.numTxs) - require.Equal(t, uint32(2), journal.numSenders) - require.Equal(t, uint32(1), journal.numSteps) // Alice and Bob evicted. Carol still there (better score). 
_, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -134,20 +58,8 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withSize(256).withGasLimit(500000).withGasPrice(1.5 * oneBillion)) cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withSize(256).withGasLimit(500000).withGasPrice(3 * oneBillion)) - scoreAlice := cache.getScoreOfSender("alice") - scoreBob := cache.getScoreOfSender("bob") - scoreCarol := cache.getScoreOfSender("carol") - scoreEve := cache.getScoreOfSender("eve") - - require.Equal(t, 62, scoreAlice) - require.Equal(t, 62, scoreBob) - require.Equal(t, 69, scoreCarol) - require.Equal(t, 80, scoreEve) - journal := cache.doEviction() require.Equal(t, uint32(2), journal.numTxs) - require.Equal(t, uint32(2), journal.numSenders) - require.Equal(t, uint32(1), journal.numSteps) // Alice and Bob evicted (lower score). Carol and Eve still there. _, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -180,60 +92,6 @@ func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { require.Nil(t, journal) } -func TestTxCache_EvictSendersInLoop_CodeCoverageForLoopBreak_WhenSmallBatch(t *testing.T) { - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 1, - CountThreshold: 0, - NumSendersToPreemptivelyEvict: 42, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - } - - txGasHandler := txcachemocks.NewTxGasHandlerMock() - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) - - cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) - - sendersSnapshot := cache.txListBySender.getSnapshotAscending() - journal := cache.evictSendersInLoop(sendersSnapshot) - - require.Equal(t, uint32(0), journal.numSteps) - require.Equal(t, uint32(1), journal.numTxs) - require.Equal(t, uint32(1), journal.numSenders) -} - -func TestTxCache_EvictSendersWhile_ShouldContinueBreak(t *testing.T) { - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 1, - CountThreshold: 0, - NumSendersToPreemptivelyEvict: 1, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - } - - txGasHandler := txcachemocks.NewTxGasHandlerMock() - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) - - cache.AddTx(createTx([]byte("hash-alice"), "alice", 1)) - cache.AddTx(createTx([]byte("hash-bob"), "bob", 1)) - - sendersSnapshot := cache.txListBySender.getSnapshotAscending() - - journal := cache.evictSendersWhile(sendersSnapshot, func() bool { - return false - }) - - require.Equal(t, uint32(0), journal.numSteps) - require.Equal(t, uint32(0), journal.numTxs) - require.Equal(t, uint32(0), journal.numSenders) -} - // This seems to be the most reasonable "bad-enough" (not worst) scenario to benchmark: // 25000 senders with 10 transactions each, with default "NumSendersToPreemptivelyEvict". // ~1 second on average laptop. 
@@ -263,36 +121,3 @@ func TestTxCache_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { require.LessOrEqual(t, uint32(cache.CountTx()), config.CountThreshold) require.GreaterOrEqual(t, uint32(cache.CountTx()), config.CountThreshold-config.NumSendersToPreemptivelyEvict*uint32(numTxsPerSender)) } - -func TestTxCache_EvictSendersAndTheirTxs_Concurrently(t *testing.T) { - cache := newUnconstrainedCacheToTest() - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(3) - - go func() { - cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) - cache.AddTx(createTx([]byte("alice-y"), "alice", 43)) - cache.AddTx(createTx([]byte("bob-x"), "bob", 42)) - cache.AddTx(createTx([]byte("bob-y"), "bob", 43)) - cache.Remove([]byte("alice-x")) - cache.Remove([]byte("bob-x")) - wg.Done() - }() - - go func() { - snapshot := cache.txListBySender.getSnapshotAscending() - cache.evictSendersAndTheirTxs(snapshot) - wg.Done() - }() - - go func() { - snapshot := cache.txListBySender.getSnapshotAscending() - cache.evictSendersAndTheirTxs(snapshot) - wg.Done() - }() - } - - wg.Wait() -} diff --git a/txcache/interface.go b/txcache/interface.go index 6037b087..be3b0bc4 100644 --- a/txcache/interface.go +++ b/txcache/interface.go @@ -6,10 +6,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data" ) -type scoreComputer interface { - computeScore(scoreParams senderScoreParams) int -} - // TxGasHandler handles a transaction gas and gas cost type TxGasHandler interface { MinGasPrice() uint64 diff --git a/txcache/noncesTracker.go b/txcache/noncesTracker.go deleted file mode 100644 index 6a7fd56b..00000000 --- a/txcache/noncesTracker.go +++ /dev/null @@ -1,94 +0,0 @@ -package txcache - -import ( - "math" -) - -var six = uint64(6) -var nonceModulus = uint64(math.MaxUint32) - -// noncesTracker is a helper struct to track nonces for a sender, -// so we can check if the sequence of nonces "is spotless" (has no gaps and no duplicates). -// -// Notes: -// -// (a) math.MaxUint32 * math.MaxUint32 < math.MaxUint64. -// (b) however, math.MaxUint32 * (2 * math.MaxUint32 + 1) > math.MaxUint64 -// (c) we use modular arithmetic, with modulus = nonceModulus (see above). -// (d) memory footprint: 4 * 8 bytes = 32 bytes. 
-type noncesTracker struct { - sumOfAddedNonces uint64 - sumOfRemovedNonces uint64 - sumOfSquaresOfAddedNonces uint64 - sumOfSquaresOfRemovedNonces uint64 -} - -func newNoncesTracker() *noncesTracker { - return &noncesTracker{} -} - -func (tracker *noncesTracker) addNonce(nonce uint64) { - nonce = tracker.mod(nonce) - nonceSquared := tracker.mod(nonce * nonce) - - tracker.sumOfAddedNonces = tracker.mod(tracker.sumOfAddedNonces + nonce) - tracker.sumOfSquaresOfAddedNonces = tracker.mod(tracker.sumOfSquaresOfAddedNonces + nonceSquared) -} - -func (tracker *noncesTracker) removeNonce(nonce uint64) { - nonce = tracker.mod(nonce) - nonceSquared := tracker.mod(nonce * nonce) - - tracker.sumOfRemovedNonces = tracker.mod(tracker.sumOfRemovedNonces + nonce) - tracker.sumOfSquaresOfRemovedNonces = tracker.mod(tracker.sumOfSquaresOfRemovedNonces + nonceSquared) -} - -func (tracker *noncesTracker) computeExpectedSumOfNonces(firstNonce uint64, count uint64) uint64 { - firstNonce = tracker.mod(firstNonce) - lastNonce := firstNonce + count - 1 - result := (firstNonce + lastNonce) * count / 2 - return tracker.mod(result) -} - -// Computes [lastNonce * (lastNonce + 1) * (2 * lastNonce + 1) - firstNonce * (firstNonce + 1) * (2 * firstNonce + 1)] / 6 * 6 -func (tracker *noncesTracker) computeExpectedSumOfSquaresOfNoncesTimesSix(firstNonce uint64, count uint64) uint64 { - firstNonce = tracker.mod(firstNonce) - lastNonce := firstNonce + count - 1 - nonceBeforeFirst := firstNonce - 1 - - firstTerm := lastNonce - firstTerm = tracker.mod(firstTerm * (lastNonce + 1)) - // See note (b) above. - firstTerm = tracker.mod(firstTerm * tracker.mod(2*lastNonce+1)) - - secondTerm := nonceBeforeFirst - secondTerm = tracker.mod(secondTerm * (nonceBeforeFirst + 1)) - // See note (b) above. 
- secondTerm = tracker.mod(secondTerm * tracker.mod(2*nonceBeforeFirst+1)) - - result := tracker.modStrict(int64(firstTerm) - int64(secondTerm)) - return uint64(result) -} - -func (tracker *noncesTracker) mod(value uint64) uint64 { - return value % nonceModulus -} - -// See: -// - https://stackoverflow.com/questions/43018206/modulo-of-negative-integers-in-go -func (tracker *noncesTracker) modStrict(value int64) uint64 { - return uint64((value%int64(nonceModulus) + int64(nonceModulus)) % int64(nonceModulus)) -} - -func (tracker *noncesTracker) isSpotlessSequence(firstNonce uint64, count uint64) bool { - sumOfNonces := tracker.modStrict(int64(tracker.sumOfAddedNonces) - int64(tracker.sumOfRemovedNonces)) - expectedSumOfNonces := tracker.computeExpectedSumOfNonces(firstNonce, count) - if sumOfNonces != expectedSumOfNonces { - return false - } - - sumOfSquaresOfNonces := tracker.modStrict(int64(tracker.sumOfSquaresOfAddedNonces) - int64(tracker.sumOfSquaresOfRemovedNonces)) - sumOfSquaresOfNoncesTimesSix := tracker.mod(sumOfSquaresOfNonces * six) - expectedSumOfSquaresOfNoncesTimesSix := tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(firstNonce, count) - return sumOfSquaresOfNoncesTimesSix == expectedSumOfSquaresOfNoncesTimesSix -} diff --git a/txcache/noncesTracker_test.go b/txcache/noncesTracker_test.go deleted file mode 100644 index dca2d6e7..00000000 --- a/txcache/noncesTracker_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package txcache - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNoncesTracker_computeExpectedSumOfNonces(t *testing.T) { - tracker := newNoncesTracker() - - sum := tracker.computeExpectedSumOfNonces(0, 0) - require.Equal(t, uint64(0), sum) - - sum = tracker.computeExpectedSumOfNonces(0, 1) - require.Equal(t, uint64(0), sum) - - sum = tracker.computeExpectedSumOfNonces(0, 4) - require.Equal(t, uint64(6), sum) - - sum = tracker.computeExpectedSumOfNonces(1, 4) - require.Equal(t, uint64(10), sum) - - // https://www.wolframalpha.com/input?i=sum+of+consecutive+integers+between+100000+and+100041 - sum = tracker.computeExpectedSumOfNonces(100000, 42) - require.Equal(t, uint64(4200861), sum) - - // https://www.wolframalpha.com/input?i=sum+of+consecutive+integers+between+1000000000000+and+1000000065534 - sum = tracker.computeExpectedSumOfNonces(oneTrillion, 65535) - require.Equal(t, uint64(65535002147385345)%nonceModulus, sum) -} - -func TestNoncesTracker_computeExpectedSumOfSquaresOfNonces(t *testing.T) { - tracker := newNoncesTracker() - - sum := tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(0, 0) - require.Equal(t, uint64(0), sum) - - sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(0, 1) - require.Equal(t, uint64(0), sum) - - sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(0, 4) - require.Equal(t, uint64(14)*six, sum) - - sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(1, 4) - require.Equal(t, uint64(30)*six, sum) - - // https://www.wolframalpha.com/input?i=sum+of+consecutive+squares+between+100000+and+100041 - sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(100000, 42) - require.Equal(t, (uint64(420172223821)*six)%nonceModulus, sum) - - // Python: (sum([i * i for i in range(1000000000, 1000065535)]) * 6) % 4294967295 = 92732025 - sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(oneBillion, 65535) - require.Equal(t, uint64(92732025), sum) - - // Python: (sum([i * i for i in range(1000000000000, 1000000000042)]) * 6) % 4294967295 = 307941426 - sum = 
tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(oneTrillion, 42) - require.Equal(t, uint64(307941426), sum) - - // Python: (sum([i * i for i in range(1000000000000, 1000000065535)]) * 6) % 4294967295 = 445375860 - sum = tracker.computeExpectedSumOfSquaresOfNoncesTimesSix(oneTrillion, 65535) - require.Equal(t, uint64(445375860), sum) -} - -func TestNoncesTracker_isSpotlessSequence(t *testing.T) { - t.Run("empty sequence", func(t *testing.T) { - tracker := newNoncesTracker() - - // A little bit of ambiguity (a sequence holding the nonce zero only behaves like an empty sequence): - require.True(t, tracker.isSpotlessSequence(0, 0)) - require.True(t, tracker.isSpotlessSequence(0, 1)) - - require.False(t, tracker.isSpotlessSequence(0, 2)) - require.False(t, tracker.isSpotlessSequence(7, 3)) - }) - - t.Run("1-item sequence", func(t *testing.T) { - tracker := newNoncesTracker() - tracker.addNonce(0) - - // A little bit of ambiguity (a sequence holding the nonce zero only behaves like an empty sequence): - require.True(t, tracker.isSpotlessSequence(0, 1)) - require.True(t, tracker.isSpotlessSequence(0, 0)) - - require.False(t, tracker.isSpotlessSequence(0, 2)) - require.False(t, tracker.isSpotlessSequence(7, 3)) - - tracker.removeNonce(0) - tracker.addNonce(5) - require.True(t, tracker.isSpotlessSequence(5, 1)) - require.False(t, tracker.isSpotlessSequence(5, 2)) - require.False(t, tracker.isSpotlessSequence(7, 1)) - require.False(t, tracker.isSpotlessSequence(7, 2)) - - tracker.removeNonce(5) - tracker.addNonce(42) - require.True(t, tracker.isSpotlessSequence(42, 1)) - require.False(t, tracker.isSpotlessSequence(42, 2)) - require.False(t, tracker.isSpotlessSequence(7, 1)) - require.False(t, tracker.isSpotlessSequence(7, 2)) - }) - - t.Run("with spotless addition and removal", func(t *testing.T) { - t.Parallel() - - tracker := newNoncesTracker() - numTotalTxsSender := uint64(100) - firstNonce := uint64(oneBillion) - lastNonce := firstNonce + numTotalTxsSender - 1 - numCurrentTxs := uint64(0) - - // We add nonces in increasing order: - for nonce := firstNonce; nonce < firstNonce+numTotalTxsSender; nonce++ { - tracker.addNonce(nonce) - numCurrentTxs++ - - isSpotless := tracker.isSpotlessSequence(firstNonce, numCurrentTxs) - if !isSpotless { - require.Fail(t, "nonce sequence is not spotless (after add)", "nonce: %d", nonce) - } - } - - // We remove nonces in decreasing order: - for nonce := lastNonce; nonce >= firstNonce; nonce-- { - tracker.removeNonce(nonce) - numCurrentTxs-- - - isSpotless := tracker.isSpotlessSequence(firstNonce, numCurrentTxs) - if !isSpotless { - require.Fail(t, "nonce sequence is not spotless (after remove)", "nonce: %d", nonce) - } - } - }) - - t.Run("with initial gap", func(t *testing.T) { - tracker := newNoncesTracker() - - tracker.addNonce(5) - tracker.addNonce(6) - tracker.addNonce(7) - - require.False(t, tracker.isSpotlessSequence(2, 3)) - }) - - t.Run("with initial duplicate", func(t *testing.T) { - tracker := newNoncesTracker() - - tracker.addNonce(5) - tracker.addNonce(5) - tracker.addNonce(6) - - require.False(t, tracker.isSpotlessSequence(2, 3)) - }) - - t.Run("with middle gap", func(t *testing.T) { - tracker := newNoncesTracker() - - tracker.addNonce(5) - tracker.addNonce(6) - tracker.addNonce(8) - - require.False(t, tracker.isSpotlessSequence(5, 3)) - }) - - t.Run("with middle duplicate", func(t *testing.T) { - tracker := newNoncesTracker() - - tracker.addNonce(5) - tracker.addNonce(6) - tracker.addNonce(6) - tracker.addNonce(8) - - require.False(t, 
tracker.isSpotlessSequence(5, 4)) - require.False(t, tracker.isSpotlessSequence(5, 3)) - }) -} diff --git a/txcache/printing.go b/txcache/printing.go index 8dd44cda..4d137439 100644 --- a/txcache/printing.go +++ b/txcache/printing.go @@ -8,19 +8,18 @@ import ( ) type printedTransaction struct { - Hash string `json:"hash"` - Nonce uint64 `json:"nonce"` - GasPrice uint64 `json:"gasPrice"` - GasLimit uint64 `json:"gasLimit"` - Sender string `json:"sender"` - Receiver string `json:"receiver"` - DataLength int `json:"dataLength"` - Fee float64 `json:"fee"` + Hash string `json:"hash"` + Nonce uint64 `json:"nonce"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Sender string `json:"sender"` + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` + Fee string `json:"fee"` } type printedSender struct { Address string `json:"address"` - Score int `json:"score"` Nonce uint64 `json:"nonce"` IsNonceKnown bool `json:"isNonceKnown"` HasInitialGap bool `json:"hasInitialGap"` @@ -76,14 +75,13 @@ func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction GasPrice: transaction.GetGasPrice(), GasLimit: transaction.GetGasLimit(), DataLength: len(transaction.GetData()), - Fee: wrappedTx.TxFee, + Fee: wrappedTx.TxFee.String(), } } func convertTxListForSenderToPrintedSender(txListForSender *txListForSender) *printedSender { return &printedSender{ Address: hex.EncodeToString([]byte(txListForSender.sender)), - Score: txListForSender.getScore(), Nonce: txListForSender.accountNonce.Get(), IsNonceKnown: txListForSender.accountNonceKnown.IsSet(), HasInitialGap: txListForSender.hasInitialGapWithLock(), diff --git a/txcache/score.go b/txcache/score.go deleted file mode 100644 index ef7bae04..00000000 --- a/txcache/score.go +++ /dev/null @@ -1,93 +0,0 @@ -package txcache - -import ( - "math" - - "github.com/multiversx/mx-chain-core-go/data/transaction" -) - -var _ scoreComputer = (*defaultScoreComputer)(nil) - -type senderScoreParams struct { - avgPpuNumerator float64 - avgPpuDenominator uint64 - isAccountNonceKnown bool - hasSpotlessSequenceOfNonces bool -} - -type defaultScoreComputer struct { - worstPpuLog float64 - scoreScalingFactor float64 -} - -func newDefaultScoreComputer(txGasHandler TxGasHandler) *defaultScoreComputer { - worstPpu := computeWorstPpu(txGasHandler) - worstPpuLog := math.Log(worstPpu) - excellentPpu := float64(txGasHandler.MinGasPrice()) * excellentGasPriceFactor - excellentPpuNormalized := excellentPpu / worstPpu - excellentPpuNormalizedLog := math.Log(excellentPpuNormalized) - scoreScalingFactor := float64(maxSenderScore) / excellentPpuNormalizedLog - - log.Debug("newDefaultScoreComputer()", - "maxGasLimitPerTx", txGasHandler.MaxGasLimitPerTx(), - "minGasPrice", txGasHandler.MinGasPrice(), - "worstPpu", worstPpu, - "worstPpuLog", worstPpuLog, - "excellentPpu", excellentPpu, - "excellentPpuNormalized", excellentPpuNormalized, - "excellentPpuNormalizedLog", excellentPpuNormalizedLog, - "scoreScalingFactor", scoreScalingFactor, - ) - - return &defaultScoreComputer{ - worstPpuLog: worstPpuLog, - scoreScalingFactor: scoreScalingFactor, - } -} - -func computeWorstPpu(txGasHandler TxGasHandler) float64 { - gasLimit := txGasHandler.MaxGasLimitPerTx() - gasPrice := txGasHandler.MinGasPrice() - - worstPpuTx := &WrappedTransaction{ - Tx: &transaction.Transaction{ - GasLimit: gasLimit, - GasPrice: gasPrice, - }, - } - - return worstPpuTx.computeFee(txGasHandler) / float64(gasLimit) -} - -// computeScore computes the score of the sender, as an 
integer in [0, numberOfScoreChunks] -func (computer *defaultScoreComputer) computeScore(scoreParams senderScoreParams) int { - rawScore := computer.computeRawScore(scoreParams) - truncatedScore := int(rawScore) - - if truncatedScore < 0 { - return 0 - } - if truncatedScore > maxSenderScore { - return maxSenderScore - } - - return truncatedScore -} - -// computeRawScore computes the score of a sender, as follows: -// score = log(sender's average price per unit / worst price per unit) * scoreScalingFactor, -// where scoreScalingFactor = highest score / log(excellent price per unit / worst price per unit) -func (computer *defaultScoreComputer) computeRawScore(params senderScoreParams) float64 { - if params.isAccountNonceKnown && !params.hasSpotlessSequenceOfNonces { - return 0 - } - - avgPpu := params.avgPpuNumerator / float64(params.avgPpuDenominator) - - // We use the worst possible price per unit for normalization. - // The expression below is same as log(avgPpu / worstPpu), but we precompute "worstPpuLog" in the constructor. - avgPpuNormalizedLog := math.Log(avgPpu) - computer.worstPpuLog - - score := avgPpuNormalizedLog * computer.scoreScalingFactor - return score -} diff --git a/txcache/score_test.go b/txcache/score_test.go deleted file mode 100644 index 2e28a2df..00000000 --- a/txcache/score_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package txcache - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" - "github.com/stretchr/testify/require" -) - -func TestNewDefaultScoreComputer(t *testing.T) { - gasHandler := txcachemocks.NewTxGasHandlerMock() - computer := newDefaultScoreComputer(gasHandler) - - require.NotNil(t, computer) - require.Equal(t, float64(16.12631180572966), computer.worstPpuLog) - require.Equal(t, float64(16.112421018189185), computer.scoreScalingFactor) -} - -func TestComputeWorstPpu(t *testing.T) { - gasHandler := txcachemocks.NewTxGasHandlerMock() - require.Equal(t, float64(10082500), computeWorstPpu(gasHandler)) -} - -func TestDefaultScoreComputer_computeRawScore(t *testing.T) { - gasHandler := txcachemocks.NewTxGasHandlerMock() - computer := newDefaultScoreComputer(gasHandler) - - require.Equal(t, 74.06805875222626, computer.computeRawScore(senderScoreParams{ - avgPpuNumerator: 57500000000000, - avgPpuDenominator: 57500, - isAccountNonceKnown: false, - hasSpotlessSequenceOfNonces: true, - })) - - require.Equal(t, 135.40260746155397, computer.computeRawScore(senderScoreParams{ - avgPpuNumerator: 57500000000000 * 45, - avgPpuDenominator: 57500, - isAccountNonceKnown: false, - hasSpotlessSequenceOfNonces: true, - })) -} - -func TestDefaultScoreComputer_computeScore(t *testing.T) { - gasHandler := txcachemocks.NewTxGasHandlerMock() - worstPpu := computeWorstPpu(gasHandler) - excellentPpu := float64(gasHandler.MinGasPrice()) * excellentGasPriceFactor - - require.Equal(t, 0, computeScoreGivenAvgPpu(worstPpu)) - require.Equal(t, 11, computeScoreGivenAvgPpu(worstPpu*2)) - require.Equal(t, 31, computeScoreGivenAvgPpu(worstPpu*7)) - require.Equal(t, 74, computeScoreGivenAvgPpu(worstPpu*100)) - require.Equal(t, 90, computeScoreGivenAvgPpu(worstPpu*270)) - require.Equal(t, 99, computeScoreGivenAvgPpu(worstPpu*495)) - require.Equal(t, 100, computeScoreGivenAvgPpu(worstPpu*500)) - - require.Equal(t, 55, computeScoreGivenAvgPpu(excellentPpu/16)) - require.Equal(t, 66, computeScoreGivenAvgPpu(excellentPpu/8)) - require.Equal(t, 77, computeScoreGivenAvgPpu(excellentPpu/4)) - 
require.Equal(t, 88, computeScoreGivenAvgPpu(excellentPpu/2)) - require.Equal(t, 99, computeScoreGivenAvgPpu(excellentPpu)) - require.Equal(t, 100, computeScoreGivenAvgPpu(excellentPpu+1)) -} - -func computeScoreGivenAvgPpu(avgPpu float64) int { - gasHandler := txcachemocks.NewTxGasHandlerMock() - computer := newDefaultScoreComputer(gasHandler) - - return computer.computeScore(senderScoreParams{ - avgPpuNumerator: avgPpu, - avgPpuDenominator: 1, - isAccountNonceKnown: true, - hasSpotlessSequenceOfNonces: true, - }) -} - -func TestDefaultScoreComputer_computeScore_consideringOneTransaction(t *testing.T) { - // Simple transfers: - require.Equal(t, 74, computeScoreOfTransaction(0, 50000, oneBillion)) - require.Equal(t, 80, computeScoreOfTransaction(0, 50000, 1.5*oneBillion)) - require.Equal(t, 85, computeScoreOfTransaction(0, 50000, 2*oneBillion)) - require.Equal(t, 99, computeScoreOfTransaction(0, 50000, 5*oneBillion)) - require.Equal(t, 100, computeScoreOfTransaction(0, 50000, 10*oneBillion)) - - // Simple transfers, with some data (same scores as above): - require.Equal(t, 74, computeScoreOfTransaction(100, 50000+1500*100, oneBillion)) - require.Equal(t, 80, computeScoreOfTransaction(100, 50000+1500*100, 1.5*oneBillion)) - require.Equal(t, 85, computeScoreOfTransaction(100, 50000+1500*100, 2*oneBillion)) - require.Equal(t, 99, computeScoreOfTransaction(100, 50000+1500*100, 5*oneBillion)) - require.Equal(t, 100, computeScoreOfTransaction(100, 50000+1500*100, 10*oneBillion)) - - // Smart contract calls: - require.Equal(t, 28, computeScoreOfTransaction(1, 1000000, oneBillion)) - require.Equal(t, 40, computeScoreOfTransaction(42, 1000000, oneBillion)) - // Even though the gas price is high, it does not compensate the network's contract execution subsidies (thus, score is not excellent). 
- require.Equal(t, 46, computeScoreOfTransaction(42, 1000000, 1.5*oneBillion)) - require.Equal(t, 51, computeScoreOfTransaction(42, 1000000, 2*oneBillion)) - require.Equal(t, 66, computeScoreOfTransaction(42, 1000000, 5*oneBillion)) - require.Equal(t, 77, computeScoreOfTransaction(42, 1000000, 10*oneBillion)) - require.Equal(t, 88, computeScoreOfTransaction(42, 1000000, 20*oneBillion)) - require.Equal(t, 94, computeScoreOfTransaction(42, 1000000, 30*oneBillion)) - require.Equal(t, 99, computeScoreOfTransaction(42, 1000000, 40*oneBillion)) - require.Equal(t, 100, computeScoreOfTransaction(42, 1000000, 50*oneBillion)) - - // Smart contract calls with extremely large gas limit: - require.Equal(t, 0, computeScoreOfTransaction(3, 150000000, oneBillion)) - require.Equal(t, 0, computeScoreOfTransaction(3, 300000000, oneBillion)) - require.Equal(t, 6, computeScoreOfTransaction(3, 150000000, 1.5*oneBillion)) - require.Equal(t, 11, computeScoreOfTransaction(3, 150000000, 2*oneBillion)) - require.Equal(t, 26, computeScoreOfTransaction(3, 150000000, 5*oneBillion)) - require.Equal(t, 37, computeScoreOfTransaction(3, 150000000, 10*oneBillion)) - require.Equal(t, 48, computeScoreOfTransaction(3, 150000000, 20*oneBillion)) - require.Equal(t, 55, computeScoreOfTransaction(3, 150000000, 30*oneBillion)) - // With a very high gas price, the transaction reaches the score of a simple transfer: - require.Equal(t, 74, computeScoreOfTransaction(3, 150000000, 100*oneBillion)) - - // Smart contract calls with max gas limit: - require.Equal(t, 0, computeScoreOfTransaction(3, 600000000, oneBillion)) - require.Equal(t, 37, computeScoreOfTransaction(3, 600000000, 10*oneBillion)) - require.Equal(t, 63, computeScoreOfTransaction(3, 600000000, 50*oneBillion)) - // With a very high gas price, the transaction reaches the score of a simple transfer: - require.Equal(t, 74, computeScoreOfTransaction(3, 600000000, 100*oneBillion)) - require.Equal(t, 85, computeScoreOfTransaction(3, 600000000, 200*oneBillion)) -} - -// Generally speaking, the score is computed for a sender, not for a single transaction. -// However, for the sake of testing, we consider a sender with a single transaction. 
-func computeScoreOfTransaction(dataLength int, gasLimit uint64, gasPrice uint64) int { - gasHandler := txcachemocks.NewTxGasHandlerMock() - computer := newDefaultScoreComputer(gasHandler) - - tx := &WrappedTransaction{ - Tx: &transaction.Transaction{ - Data: make([]byte, dataLength), - GasLimit: gasLimit, - GasPrice: gasPrice, - }, - } - - txFee := tx.computeFee(gasHandler) - - scoreParams := senderScoreParams{ - avgPpuNumerator: txFee, - avgPpuDenominator: gasLimit, - hasSpotlessSequenceOfNonces: true, - } - - return int(computer.computeScore(scoreParams)) -} - -func BenchmarkScoreComputer_computeScore(b *testing.B) { - gasHandler := txcachemocks.NewTxGasHandlerMock() - computer := newDefaultScoreComputer(gasHandler) - - tx := &WrappedTransaction{ - Tx: &transaction.Transaction{ - Data: make([]byte, 42), - GasLimit: 50000000, - GasPrice: 1000000000, - }, - } - - for i := 0; i < b.N; i++ { - txFee := tx.computeFee(gasHandler) - - for j := uint64(0); j < 1_000_000; j++ { - computer.computeScore(senderScoreParams{ - avgPpuNumerator: txFee, - avgPpuDenominator: tx.Tx.GetGasLimit(), - hasSpotlessSequenceOfNonces: true, - }) - } - } - - // Results: - // - // (a) 12 ms to compute the score 1 million times: - // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz - // BenchmarkScoreComputer_computeScore-8 100 11895452 ns/op 297 B/op 12 allocs/op -} diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 0bb3a16f..3cd21895 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -17,7 +17,7 @@ func (cache *TxCache) areInternalMapsConsistent() bool { internalMapByHash := cache.txByHash internalMapBySender := cache.txListBySender - senders := internalMapBySender.getSnapshotAscending() + senders := internalMapBySender.getSenders() numInMapByHash := len(internalMapByHash.keys()) numInMapBySender := 0 numMissingInMapByHash := 0 @@ -54,13 +54,6 @@ func (txMap *txListBySenderMap) testGetListForSender(sender string) *txListForSe return list } -func (cache *TxCache) getScoreOfSender(sender string) int { - list := cache.getListForSender(sender) - scoreParams := list.getScoreParams() - computer := cache.txListBySender.scoreComputer - return computer.computeScore(scoreParams) -} - func (listForSender *txListForSender) getTxHashesAsStrings() []string { hashes := listForSender.getTxsHashes() return hashesAsStrings(hashes) diff --git a/txcache/txCache.go b/txcache/txCache.go index ad1c0d20..8aa65d9e 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -41,11 +41,10 @@ func NewTxCache(config ConfigSourceMe, txGasHandler TxGasHandler) (*TxCache, err // Note: for simplicity, we use the same "numChunks" for both internal concurrent maps numChunks := config.NumChunks senderConstraintsObj := config.getSenderConstraints() - scoreComputerObj := newDefaultScoreComputer(txGasHandler) txCache := &TxCache{ name: config.Name, - txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj, scoreComputerObj, txGasHandler), + txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj, txGasHandler), txByHash: newTxByHashMap(numChunks), config: config, } @@ -97,16 +96,10 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { // SelectTransactions selects a reasonably fair list of transactions to be included in the next miniblock // It returns transactions with total gas ~ "gasRequested". -// -// Selection is performed in more passes. 
-// In each pass, each sender is allowed to contribute a batch of transactions, -// with a number of transactions and total gas proportional to the sender's score. -func (cache *TxCache) SelectTransactions(gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) []*WrappedTransaction { +func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransaction { senders, transactions := cache.doSelectTransactions( logSelect, gasRequested, - baseNumPerSenderBatch, - baseGasPerSenderBatch, ) go cache.diagnoseCounters() @@ -115,7 +108,7 @@ func (cache *TxCache) SelectTransactions(gasRequested uint64, baseNumPerSenderBa return transactions } -func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRequested uint64, baseNumPerSenderBatch int, baseGasPerSenderBatch uint64) ([]*txListForSender, []*WrappedTransaction) { +func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRequested uint64) ([]*txListForSender, []*WrappedTransaction) { stopWatch := core.NewStopWatch() stopWatch.Start("selection") @@ -126,70 +119,22 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRe "num senders", cache.CountSenders(), ) - senders := cache.getSendersEligibleForSelection() + senders := cache.getSenders() transactions := make([]*WrappedTransaction, 0) - shouldContinueSelection := true - selectedGas := uint64(0) - selectedNum := 0 - - for pass := 0; shouldContinueSelection; pass++ { - selectedNumInThisPass := 0 - - for _, txList := range senders { - score := txList.getScore() - - // Slighly suboptimal: we recompute the constraints for each pass, - // even though they are constant with respect to a sender, in the scope of a selection. - // However, this is not a performance bottleneck. 
- numPerBatch, gasPerBatch := cache.computeSelectionSenderConstraints(score, baseNumPerSenderBatch, baseGasPerSenderBatch) - - isFirstBatch := pass == 0 - batchSelectionJournal := txList.selectBatchTo(isFirstBatch, transactions[selectedNum:], numPerBatch, gasPerBatch) - selectedGas += batchSelectionJournal.selectedGas - selectedNum += batchSelectionJournal.selectedNum - selectedNumInThisPass += batchSelectionJournal.selectedNum - - shouldContinueSelection := selectedGas < gasRequested - if !shouldContinueSelection { - break - } - } - - nothingSelectedInThisPass := selectedNumInThisPass == 0 - if nothingSelectedInThisPass { - // No more passes needed - break - } - } - - transactions = transactions[:selectedNum] - stopWatch.Stop("selection") contextualLogger.Debug( "doSelectTransactions(): end", "duration", stopWatch.GetMeasurement("selection"), - "num txs selected", selectedNum, + "num txs selected", len(transactions), ) return senders, transactions } -func (cache *TxCache) getSendersEligibleForSelection() []*txListForSender { - return cache.txListBySender.getSnapshotDescending() -} - -func (cache *TxCache) computeSelectionSenderConstraints(score int, baseNumPerBatch int, baseGasPerBatch uint64) (int, uint64) { - if score == 0 { - return 1, 1 - } - - scoreDivision := float64(score) / float64(maxSenderScore) - numPerBatch := int(float64(baseNumPerBatch) * scoreDivision) - gasPerBatch := uint64(float64(baseGasPerBatch) * scoreDivision) - - return numPerBatch, gasPerBatch +func (cache *TxCache) getSenders() []*txListForSender { + return cache.txListBySender.getSenders() } // RemoveTxByHash removes tx by hash diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 5b1afa88..b287cc9c 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -311,7 +311,7 @@ func Test_SelectTransactions_Dummy(t *testing.T) { cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) - sorted := cache.SelectTransactions(math.MaxUint64, 2, math.MaxUint64) + sorted := cache.SelectTransactions(math.MaxUint64) require.Len(t, sorted, 8) } @@ -326,7 +326,7 @@ func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) - sorted := cache.SelectTransactions(math.MaxUint64, 2, 200000) + sorted := cache.SelectTransactions(math.MaxUint64) numSelected := 1 + 1 + 3 // 1 alice, 1 carol, 3 bob require.Len(t, sorted, numSelected) @@ -349,7 +349,7 @@ func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { numSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol - sorted := cache.SelectTransactions(math.MaxUint64, 2, math.MaxUint64) + sorted := cache.SelectTransactions(math.MaxUint64) require.Len(t, sorted, numSelected) } @@ -373,7 +373,7 @@ func Test_SelectTransactions(t *testing.T) { require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) - sorted := cache.SelectTransactions(math.MaxUint64, 2, math.MaxUint64) + sorted := cache.SelectTransactions(math.MaxUint64) // Check order nonces := make(map[string]uint64, nSenders) @@ -489,7 +489,7 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { go func() { for i := 0; i < 100; i++ { fmt.Println("Selection", i) - cache.SelectTransactions(math.MaxUint64, 100, math.MaxUint64) + cache.SelectTransactions(math.MaxUint64) } wg.Done() @@ -624,36 +624,6 @@ func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *t 
cache.Clear() } -func TestTxCache_computeSelectionSenderConstraints(t *testing.T) { - cache := newUnconstrainedCacheToTest() - baseBatchSize := 100 - baseBandwidth := uint64(120000000) - - batchSize, bandwidth := cache.computeSelectionSenderConstraints(100, baseBatchSize, baseBandwidth) - require.Equal(t, 100, batchSize) - require.Equal(t, 120000000, int(bandwidth)) - - batchSize, bandwidth = cache.computeSelectionSenderConstraints(99, baseBatchSize, baseBandwidth) - require.Equal(t, 99, batchSize) - require.Equal(t, 118800000, int(bandwidth)) - - batchSize, bandwidth = cache.computeSelectionSenderConstraints(74, baseBatchSize, baseBandwidth) - require.Equal(t, 74, batchSize) - require.Equal(t, 88800000, int(bandwidth)) - - batchSize, bandwidth = cache.computeSelectionSenderConstraints(74, baseBatchSize, baseBandwidth) - require.Equal(t, 74, batchSize) - require.Equal(t, 88800000, int(bandwidth)) - - batchSize, bandwidth = cache.computeSelectionSenderConstraints(1, baseBatchSize, baseBandwidth) - require.Equal(t, 1, batchSize) - require.Equal(t, 1200000, int(bandwidth)) - - batchSize, bandwidth = cache.computeSelectionSenderConstraints(0, baseBatchSize, baseBandwidth) - require.Equal(t, 1, batchSize) - require.Equal(t, 1, int(bandwidth)) -} - func newUnconstrainedCacheToTest() *TxCache { txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(ConfigSourceMe{ diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 0cee9cdb..6feb19b5 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -1,7 +1,6 @@ package txcache import ( - "math/rand" "sync" "github.com/multiversx/mx-chain-core-go/core/atomic" @@ -13,7 +12,6 @@ type txListBySenderMap struct { backingMap *maps.ConcurrentMap senderConstraints senderConstraints counter atomic.Counter - scoreComputer scoreComputer txGasHandler TxGasHandler mutex sync.Mutex } @@ -22,7 +20,6 @@ type txListBySenderMap struct { func newTxListBySenderMap( nChunksHint uint32, senderConstraints senderConstraints, - scoreComputer scoreComputer, txGasHandler TxGasHandler, ) *txListBySenderMap { backingMap := maps.NewConcurrentMap(nChunksHint) @@ -30,7 +27,6 @@ func newTxListBySenderMap( return &txListBySenderMap{ backingMap: backingMap, senderConstraints: senderConstraints, - scoreComputer: scoreComputer, txGasHandler: txGasHandler, } } @@ -39,7 +35,8 @@ func newTxListBySenderMap( func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) (bool, [][]byte) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) - return listForSender.AddTx(tx, txMap.txGasHandler) + tx.computeFee(txMap.txGasHandler) + return listForSender.AddTx(tx) } // getOrAddListForSender gets or lazily creates a list (using double-checked locking pattern) @@ -71,7 +68,7 @@ func (txMap *txListBySenderMap) getListForSender(sender string) (*txListForSende } func (txMap *txListBySenderMap) addSender(sender string) *txListForSender { - listForSender := newTxListForSender(sender, &txMap.senderConstraints, txMap.scoreComputer) + listForSender := newTxListForSender(sender, &txMap.senderConstraints) txMap.backingMap.Set(sender, listForSender) txMap.counter.Increment() @@ -140,60 +137,15 @@ func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint return evictedTxHashes } -func (txMap *txListBySenderMap) getSnapshotAscending() []*txListForSender { - scoreGroups := txMap.getSendersGroupedByScore() - listsSnapshot := make([]*txListForSender, 0, txMap.counter.Get()) - - for i := 0; i 
< len(scoreGroups); i++ { - listsSnapshot = append(listsSnapshot, scoreGroups[i]...) - } - - return listsSnapshot -} - -func (txMap *txListBySenderMap) getSnapshotDescending() []*txListForSender { - scoreGroups := txMap.getSendersGroupedByScore() - listsSnapshot := make([]*txListForSender, 0, txMap.counter.Get()) - - for i := len(scoreGroups) - 1; i >= 0; i-- { - listsSnapshot = append(listsSnapshot, scoreGroups[i]...) - } - - return listsSnapshot -} - -func (txMap *txListBySenderMap) getSendersGroupedByScore() [][]*txListForSender { - groups := make([][]*txListForSender, maxSenderScore+1) - // Hint for pre-allocating slice for each group (imprecise, but reasonable). - groupSizeHint := txMap.counter.Get() / int64(maxSenderScore) / 2 +func (txMap *txListBySenderMap) getSenders() []*txListForSender { + senders := make([]*txListForSender, 0, txMap.counter.Get()) txMap.backingMap.IterCb(func(key string, item interface{}) { listForSender := item.(*txListForSender) - score := listForSender.getScore() - - if groups[score] == nil { - groups[score] = make([]*txListForSender, 0, groupSizeHint) - } - - groups[score] = append(groups[score], listForSender) + senders = append(senders, listForSender) }) - txMap.shuffleSendersWithinScoreGroups(groups) - displaySendersScoreHistogram(groups) - - return groups -} - -func (txMap *txListBySenderMap) shuffleSendersWithinScoreGroups(groups [][]*txListForSender) { - for _, group := range groups { - if group == nil { - continue - } - - rand.Shuffle(len(group), func(j, k int) { - group[j], group[k] = group[k], group[j] - }) - } + return senders } func (txMap *txListBySenderMap) clear() { diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index a591e6ee..e7cde461 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -1,12 +1,10 @@ package txcache import ( - "fmt" "math" "sync" "testing" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) @@ -115,113 +113,10 @@ func TestSendersMap_notifyAccountNonce(t *testing.T) { require.True(t, alice.accountNonceKnown.IsSet()) } -func TestBenchmarkSendersMap_GetSnapshotAscending(t *testing.T) { - numSendersValues := []int{50000, 100000, 300000} - - t.Run("scores with uniform distribution", func(t *testing.T) { - fmt.Println(t.Name()) - - for _, numSenders := range numSendersValues { - myMap := newSendersMapToTest() - - // Many senders, each with a single transaction - for i := 0; i < numSenders; i++ { - sender := fmt.Sprintf("sender-%d", i) - hash := []byte(fmt.Sprintf("transaction-%d", i)) - myMap.addTx(createTx(hash, sender, 1)) - - // Artificially set a score to each sender: - txList, _ := myMap.getListForSender(sender) - txList.score.Set(uint32(i % (maxSenderScore + 1))) - } - - sw := core.NewStopWatch() - sw.Start("time") - snapshot := myMap.getSnapshotAscending() - sw.Stop("time") - - require.Len(t, snapshot, numSenders) - fmt.Printf("took %v to sort %d senders\n", sw.GetMeasurementsMap()["time"], numSenders) - } - - // Results: - // - // (a) Summary: 0.02s to sort 300k senders: - // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz - // took 0.003156466 to sort 50000 senders - // took 0.007549091 to sort 100000 senders - // took 0.022103215 to sort 300000 senders - }) - - t.Run("scores with skewed distribution", func(t *testing.T) { - fmt.Println(t.Name()) - - for _, numSenders := range numSendersValues { - myMap := newSendersMapToTest() - - // Many 
senders, each with a single transaction - for i := 0; i < numSenders; i++ { - sender := fmt.Sprintf("sender-%d", i) - hash := []byte(fmt.Sprintf("transaction-%d", i)) - myMap.addTx(createTx(hash, sender, 1)) - - // Artificially set a score to each sender: - txList, _ := myMap.getListForSender(sender) - txList.score.Set(uint32(i % 3)) - } - - sw := core.NewStopWatch() - sw.Start("time") - snapshot := myMap.getSnapshotAscending() - sw.Stop("time") - - require.Len(t, snapshot, numSenders) - fmt.Printf("took %v to sort %d senders\n", sw.GetMeasurementsMap()["time"], numSenders) - } - - // Results: - // - // (a) Summary: 0.02s to sort 300k senders: - // cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz - // took 0.00423772 to sort 50000 senders - // took 0.00683838 to sort 100000 senders - // took 0.025094983 to sort 300000 senders - }) -} - -func TestSendersMap_GetSnapshots_NoPanic_IfAlsoConcurrentMutation(t *testing.T) { - myMap := newSendersMapToTest() - - var wg sync.WaitGroup - - for i := 0; i < 100; i++ { - wg.Add(2) - - go func() { - for j := 0; j < 100; j++ { - myMap.getSnapshotAscending() - } - - wg.Done() - }() - - go func() { - for j := 0; j < 1000; j++ { - sender := fmt.Sprintf("Sender-%d", j) - myMap.removeSender(sender) - } - - wg.Done() - }() - } - - wg.Wait() -} - func newSendersMapToTest() *txListBySenderMap { txGasHandler := txcachemocks.NewTxGasHandlerMock() return newTxListBySenderMap(4, senderConstraints{ maxNumBytes: math.MaxUint32, maxNumTxs: math.MaxUint32, - }, newDefaultScoreComputer(txGasHandler), txGasHandler) + }, txGasHandler) } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 4ccb367b..405e45ed 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -18,38 +18,21 @@ type txListForSender struct { totalBytes atomic.Counter constraints *senderConstraints - selectionPointer *list.Element - selectionPreviousNonce uint64 - selectionDetectedGap bool - - score atomic.Uint32 - avgPpuNumerator float64 - avgPpuDenominator uint64 - noncesTracker *noncesTracker - scoreComputer scoreComputer - mutex sync.RWMutex } -type batchSelectionJournal struct { - selectedNum int - selectedGas uint64 -} - // newTxListForSender creates a new (sorted) list of transactions -func newTxListForSender(sender string, constraints *senderConstraints, scoreComputer scoreComputer) *txListForSender { +func newTxListForSender(sender string, constraints *senderConstraints) *txListForSender { return &txListForSender{ - items: list.New(), - sender: sender, - constraints: constraints, - noncesTracker: newNoncesTracker(), - scoreComputer: scoreComputer, + items: list.New(), + sender: sender, + constraints: constraints, } } // AddTx adds a transaction in sender's list // This is a "sorted" insert -func (listForSender *txListForSender) AddTx(tx *WrappedTransaction, gasHandler TxGasHandler) (bool, [][]byte) { +func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) (bool, [][]byte) { // We don't allow concurrent interceptor goroutines to mutate a given sender's list listForSender.mutex.Lock() defer listForSender.mutex.Unlock() @@ -65,11 +48,10 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction, gasHandler T listForSender.items.InsertAfter(tx, insertionPlace) } - listForSender.onAddedTransaction(tx, gasHandler) + listForSender.onAddedTransaction(tx) // TODO: Check how does the sender get removed if empty afterwards (maybe the answer is: "it never gets empty after applySizeConstraints()"). 
evicted := listForSender.applySizeConstraints() - listForSender.recomputeScore() return true, evicted } @@ -103,41 +85,8 @@ func (listForSender *txListForSender) isCapacityExceeded() bool { return tooManyBytes || tooManyTxs } -func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction, gasHandler TxGasHandler) { - nonce := tx.Tx.GetNonce() - gasLimit := tx.Tx.GetGasLimit() - +func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction) { listForSender.totalBytes.Add(tx.Size) - listForSender.avgPpuNumerator += tx.computeFee(gasHandler) - listForSender.avgPpuDenominator += gasLimit - listForSender.noncesTracker.addNonce(nonce) -} - -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) recomputeScore() { - scoreParams := listForSender.getScoreParams() - score := listForSender.scoreComputer.computeScore(scoreParams) - listForSender.score.Set(uint32(score)) -} - -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) getScoreParams() senderScoreParams { - numTxs := listForSender.countTx() - minTransactionNonce := uint64(0) - firstTx := listForSender.getLowestNonceTx() - - if firstTx != nil { - minTransactionNonce = firstTx.Tx.GetNonce() - } - - hasSpotlessSequenceOfNonces := listForSender.noncesTracker.isSpotlessSequence(minTransactionNonce, numTxs) - - return senderScoreParams{ - avgPpuNumerator: listForSender.avgPpuNumerator, - avgPpuDenominator: listForSender.avgPpuDenominator, - isAccountNonceKnown: listForSender.accountNonceKnown.IsSet(), - hasSpotlessSequenceOfNonces: hasSpotlessSequenceOfNonces, - } } // This function should only be used in critical section (listForSender.mutex) @@ -194,7 +143,6 @@ func (listForSender *txListForSender) RemoveTx(tx *WrappedTransaction) bool { if isFound { listForSender.items.Remove(marker) listForSender.onRemovedListElement(marker) - listForSender.recomputeScore() } return isFound @@ -202,13 +150,7 @@ func (listForSender *txListForSender) RemoveTx(tx *WrappedTransaction) bool { func (listForSender *txListForSender) onRemovedListElement(element *list.Element) { tx := element.Value.(*WrappedTransaction) - nonce := tx.Tx.GetNonce() - gasLimit := tx.Tx.GetGasLimit() - listForSender.totalBytes.Subtract(tx.Size) - listForSender.avgPpuNumerator -= tx.TxFee - listForSender.avgPpuDenominator -= gasLimit - listForSender.noncesTracker.removeNonce(nonce) } // This function should only be used in critical section (listForSender.mutex) @@ -241,85 +183,6 @@ func (listForSender *txListForSender) IsEmpty() bool { return listForSender.countTxWithLock() == 0 } -// selectBatchTo copies a batch (usually small) of transactions of a limited gas bandwidth and limited number of transactions to a destination slice -// It also updates the internal state used for copy operations -func (listForSender *txListForSender) selectBatchTo(isFirstBatch bool, destination []*WrappedTransaction, numPerBatch int, gasPerBatch uint64) batchSelectionJournal { - // We can't read from multiple goroutines at the same time - // And we can't mutate the sender's list while reading it - listForSender.mutex.Lock() - defer listForSender.mutex.Unlock() - - if isFirstBatch { - // Reset the internal state used for copy operations - listForSender.selectionPreviousNonce = 0 - listForSender.selectionPointer = listForSender.items.Front() - - accountNonce, firstTxNonce, hasInitialGap := listForSender.hasInitialGap() - if hasInitialGap { - 
log.Trace("selectBatchTo(): initial gap detected", - "sender", listForSender.sender, - "accountNonce", accountNonce, - "firstTxNonce", firstTxNonce, - ) - } - - listForSender.selectionDetectedGap = hasInitialGap - } - - // If a nonce gap is detected, no transaction is returned in this read. - if listForSender.selectionDetectedGap { - return batchSelectionJournal{} - } - - selectedGas := uint64(0) - selectedNum := 0 - - for { - if listForSender.selectionPointer == nil { - break - } - - // End because of count - if selectedNum == numPerBatch || selectedNum == len(destination) { - break - } - - // End because of gas limit - if selectedGas >= gasPerBatch { - break - } - - tx := listForSender.selectionPointer.Value.(*WrappedTransaction) - nonce := tx.Tx.GetNonce() - gasLimit := tx.Tx.GetGasLimit() - - isMiddleGap := listForSender.selectionPreviousNonce > 0 && nonce > listForSender.selectionPreviousNonce+1 - if isMiddleGap { - log.Trace("selectBatchTo(): middle gap detected", - "sender", listForSender.sender, - "previousNonce", listForSender.selectionPreviousNonce, - "nonce", nonce, - ) - - listForSender.selectionDetectedGap = true - break - } - - destination[selectedNum] = tx - - listForSender.selectionPreviousNonce = nonce - listForSender.selectionPointer = listForSender.selectionPointer.Next() - - selectedNum += 1 - selectedGas += gasLimit - } - - return batchSelectionJournal{ - selectedNum: selectedNum, - selectedGas: selectedGas, - } -} - // getTxsHashes returns the hashes of transactions in the list func (listForSender *txListForSender) getTxsHashes() [][]byte { listForSender.mutex.RLock() @@ -448,10 +311,6 @@ func (listForSender *txListForSender) getLowestNonceTx() *WrappedTransaction { return value } -func (listForSender *txListForSender) getScore() int { - return int(listForSender.score.Get()) -} - // GetKey returns the key func (listForSender *txListForSender) GetKey() string { return listForSender.sender diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 3e5bfc81..ba56f57c 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -4,46 +4,42 @@ import ( "math" "testing" - "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) func TestListForSender_AddTx_Sorts(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("a"), ".", 1), txGasHandler) - list.AddTx(createTx([]byte("c"), ".", 3), txGasHandler) - list.AddTx(createTx([]byte("d"), ".", 4), txGasHandler) - list.AddTx(createTx([]byte("b"), ".", 2), txGasHandler) + list.AddTx(createTx([]byte("a"), ".", 1)) + list.AddTx(createTx([]byte("c"), ".", 3)) + list.AddTx(createTx([]byte("d"), ".", 4)) + list.AddTx(createTx([]byte("b"), ".", 2)) require.Equal(t, []string{"a", "b", "c", "d"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("a"), ".", 1), txGasHandler) - list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(1.2*oneBillion), txGasHandler) - list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(1.1*oneBillion), txGasHandler) - list.AddTx(createTx([]byte("d"), ".", 2), txGasHandler) - list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(1.3*oneBillion), txGasHandler) + list.AddTx(createTx([]byte("a"), ".", 1)) + list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(1.2 * 
oneBillion)) + list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(1.1 * oneBillion)) + list.AddTx(createTx([]byte("d"), ".", 2)) + list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(1.3 * oneBillion)) require.Equal(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) } func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("a"), ".", 1).withGasPrice(oneBillion), txGasHandler) - list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(3*oneBillion), txGasHandler) - list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(3*oneBillion), txGasHandler) - list.AddTx(createTx([]byte("d"), ".", 3).withGasPrice(2*oneBillion), txGasHandler) - list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(3.5*oneBillion), txGasHandler) - list.AddTx(createTx([]byte("f"), ".", 2).withGasPrice(oneBillion), txGasHandler) - list.AddTx(createTx([]byte("g"), ".", 3).withGasPrice(2.5*oneBillion), txGasHandler) + list.AddTx(createTx([]byte("a"), ".", 1).withGasPrice(oneBillion)) + list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(3 * oneBillion)) + list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(3 * oneBillion)) + list.AddTx(createTx([]byte("d"), ".", 3).withGasPrice(2 * oneBillion)) + list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(3.5 * oneBillion)) + list.AddTx(createTx([]byte("f"), ".", 2).withGasPrice(oneBillion)) + list.AddTx(createTx([]byte("g"), ".", 3).withGasPrice(2.5 * oneBillion)) // In case of same-nonce, same-price transactions, the newer one has priority require.Equal(t, []string{"a", "f", "e", "b", "c", "g", "d"}, list.getTxHashesAsStrings()) @@ -51,79 +47,75 @@ func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1), txGasHandler) + added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1)) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3), txGasHandler) + added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3)) require.True(t, added) - added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) require.False(t, added) } func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing.T) { list := newListToTest(math.MaxUint32, 3) - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("tx1"), ".", 1), txGasHandler) - list.AddTx(createTx([]byte("tx5"), ".", 5), txGasHandler) - list.AddTx(createTx([]byte("tx4"), ".", 4), txGasHandler) - list.AddTx(createTx([]byte("tx2"), ".", 2), txGasHandler) + list.AddTx(createTx([]byte("tx1"), ".", 1)) + list.AddTx(createTx([]byte("tx5"), ".", 5)) + list.AddTx(createTx([]byte("tx4"), ".", 4)) + list.AddTx(createTx([]byte("tx2"), ".", 2)) require.Equal(t, []string{"tx1", "tx2", "tx4"}, list.getTxHashesAsStrings()) - _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3), txGasHandler) + _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3)) require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) // 
Gives priority to higher gas - though undesirably to some extent, "tx3" is evicted - _, evicted = list.AddTx(createTx([]byte("tx2++"), ".", 2).withGasPrice(1.5*oneBillion), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx2++"), ".", 2).withGasPrice(1.5 * oneBillion)) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3"}, hashesAsStrings(evicted)) // Though Undesirably to some extent, "tx3++"" is added, then evicted - _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withGasPrice(1.5*oneBillion), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withGasPrice(1.5 * oneBillion)) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3++"}, hashesAsStrings(evicted)) } func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { list := newListToTest(1024, math.MaxUint32) - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("tx1"), ".", 1).withSize(128).withGasLimit(50000), txGasHandler) - list.AddTx(createTx([]byte("tx2"), ".", 2).withSize(512).withGasLimit(1500000), txGasHandler) - list.AddTx(createTx([]byte("tx3"), ".", 3).withSize(256).withGasLimit(1500000), txGasHandler) - _, evicted := list.AddTx(createTx([]byte("tx5"), ".", 4).withSize(256).withGasLimit(1500000), txGasHandler) + list.AddTx(createTx([]byte("tx1"), ".", 1).withSize(128).withGasLimit(50000)) + list.AddTx(createTx([]byte("tx2"), ".", 2).withSize(512).withGasLimit(1500000)) + list.AddTx(createTx([]byte("tx3"), ".", 3).withSize(256).withGasLimit(1500000)) + _, evicted := list.AddTx(createTx([]byte("tx5"), ".", 4).withSize(256).withGasLimit(1500000)) require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx5"}, hashesAsStrings(evicted)) - _, evicted = list.AddTx(createTx([]byte("tx5--"), ".", 4).withSize(128).withGasLimit(50000), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx5--"), ".", 4).withSize(128).withGasLimit(50000)) require.Equal(t, []string{"tx1", "tx2", "tx3", "tx5--"}, list.getTxHashesAsStrings()) require.Equal(t, []string{}, hashesAsStrings(evicted)) - _, evicted = list.AddTx(createTx([]byte("tx4"), ".", 4).withSize(128).withGasLimit(50000), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx4"), ".", 4).withSize(128).withGasLimit(50000)) require.Equal(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx5--"}, hashesAsStrings(evicted)) // Gives priority to higher gas - though undesirably to some extent, "tx4" is evicted - _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withSize(256).withGasLimit(1500000).withGasPrice(1.5*oneBillion), txGasHandler) + _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withSize(256).withGasLimit(1500000).withGasPrice(1.5 * oneBillion)) require.Equal(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) } func TestListForSender_findTx(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() txA := createTx([]byte("A"), ".", 41) txANewer := createTx([]byte("ANewer"), ".", 41) txB := createTx([]byte("B"), ".", 42) txD := createTx([]byte("none"), ".", 43) - list.AddTx(txA, txGasHandler) - list.AddTx(txANewer, txGasHandler) - list.AddTx(txB, txGasHandler) + list.AddTx(txA) + list.AddTx(txANewer) + list.AddTx(txB) elementWithA := 
list.findListElementWithTx(txA) elementWithANewer := list.findListElementWithTx(txANewer) @@ -142,8 +134,8 @@ func TestListForSender_findTx(t *testing.T) { func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("A"), ".", 42), txGasHandler) + + list.AddTx(createTx([]byte("A"), ".", 42)) // Find one with a lower nonce, not added to cache noElement := list.findListElementWithTx(createTx(nil, ".", 41)) @@ -153,9 +145,8 @@ func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { func TestListForSender_RemoveTransaction(t *testing.T) { list := newUnconstrainedListToTest() tx := createTx([]byte("a"), ".", 1) - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(tx, txGasHandler) + list.AddTx(tx) require.Equal(t, 1, list.items.Len()) list.RemoveTx(tx) @@ -170,107 +161,6 @@ func TestListForSender_RemoveTransaction_NoPanicWhenTxMissing(t *testing.T) { require.Equal(t, 0, list.items.Len()) } -func TestListForSender_SelectBatchTo(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - - for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) - } - - destination := make([]*WrappedTransaction, 1000) - - // 1st batch - journal := list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 50, journal.selectedNum) - require.NotNil(t, destination[49]) - require.Nil(t, destination[50]) - - // 2nd batch - journal = list.selectBatchTo(false, destination[50:], 50, math.MaxUint64) - require.Equal(t, 50, journal.selectedNum) - require.NotNil(t, destination[99]) - - // No 3rd batch - journal = list.selectBatchTo(false, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.selectedNum) -} - -func TestListForSender_SelectBatchToWithLimitedGasPerBatch(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - - for index := 0; index < 40; index++ { - tx := createTx([]byte{byte(index)}, ".", uint64(index)).withGasLimit(oneMilion) - list.AddTx(tx, txGasHandler) - } - - destination := make([]*WrappedTransaction, 1000) - - // 1st batch - journal := list.selectBatchTo(true, destination, 50, oneMilion-1) - require.Equal(t, 1, journal.selectedNum) - require.NotNil(t, destination[0]) - require.Nil(t, destination[1]) - - // 2nd batch - journal = list.selectBatchTo(false, destination[1:], 50, oneMilion) - require.Equal(t, 1, journal.selectedNum) - require.NotNil(t, destination[1]) - require.Nil(t, destination[2]) - - // 3nd batch - journal = list.selectBatchTo(false, destination[2:], 50, oneMilion*20) - require.Equal(t, 20, journal.selectedNum) - require.NotNil(t, destination[21]) - require.Nil(t, destination[22]) - - // 4th batch - journal = list.selectBatchTo(false, destination[22:], 20, math.MaxUint64) - require.Equal(t, 18, journal.selectedNum) -} - -func TestListForSender_SelectBatchTo_NoPanicWhenCornerCases(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - - for index := 0; index < 100; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) - } - - // When empty destination - destination := make([]*WrappedTransaction, 0) - journal := list.selectBatchTo(true, destination, 10, math.MaxUint64) - require.Equal(t, 0, journal.selectedNum) - - // When small destination - 
destination = make([]*WrappedTransaction, 5) - journal = list.selectBatchTo(false, destination, 10, math.MaxUint64) - require.Equal(t, 5, journal.selectedNum) -} - -func TestListForSender_SelectBatchTo_WhenInitialGap(t *testing.T) { - list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.notifyAccountNonce(1) - - for index := 10; index < 20; index++ { - list.AddTx(createTx([]byte{byte(index)}, ".", uint64(index)), txGasHandler) - } - - destination := make([]*WrappedTransaction, 1000) - - // First batch of selection (failed to select) - journal := list.selectBatchTo(true, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.selectedNum) - require.Nil(t, destination[0]) - - // Second batch of selection (the same, failed to select) - journal = list.selectBatchTo(false, destination, 50, math.MaxUint64) - require.Equal(t, 0, journal.selectedNum) - require.Nil(t, destination[0]) -} - func TestListForSender_NotifyAccountNonce(t *testing.T) { list := newUnconstrainedListToTest() @@ -285,12 +175,11 @@ func TestListForSender_NotifyAccountNonce(t *testing.T) { func TestListForSender_evictTransactionsWithLowerNonces(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("tx-42"), ".", 42), txGasHandler) - list.AddTx(createTx([]byte("tx-43"), ".", 43), txGasHandler) - list.AddTx(createTx([]byte("tx-44"), ".", 44), txGasHandler) - list.AddTx(createTx([]byte("tx-45"), ".", 45), txGasHandler) + list.AddTx(createTx([]byte("tx-42"), ".", 42)) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) + list.AddTx(createTx([]byte("tx-44"), ".", 44)) + list.AddTx(createTx([]byte("tx-45"), ".", 45)) require.Equal(t, 4, list.items.Len()) @@ -307,19 +196,18 @@ func TestListForSender_evictTransactionsWithLowerNonces(t *testing.T) { func TestListForSender_hasInitialGap(t *testing.T) { list := newUnconstrainedListToTest() list.notifyAccountNonce(42) - txGasHandler := txcachemocks.NewTxGasHandlerMock() // No transaction, no gap _, _, hasInitialGap := list.hasInitialGap() require.False(t, hasInitialGap) // One gap - list.AddTx(createTx([]byte("tx-43"), ".", 43), txGasHandler) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) _, _, hasInitialGap = list.hasInitialGap() require.True(t, hasInitialGap) // Resolve gap - list.AddTx(createTx([]byte("tx-42"), ".", 42), txGasHandler) + list.AddTx(createTx([]byte("tx-42"), ".", 42)) _, _, hasInitialGap = list.hasInitialGap() require.False(t, hasInitialGap) } @@ -327,19 +215,17 @@ func TestListForSender_hasInitialGap(t *testing.T) { func TestListForSender_getTxHashes(t *testing.T) { list := newUnconstrainedListToTest() require.Len(t, list.getTxsHashes(), 0) - txGasHandler := txcachemocks.NewTxGasHandlerMock() - list.AddTx(createTx([]byte("A"), ".", 1), txGasHandler) + list.AddTx(createTx([]byte("A"), ".", 1)) require.Len(t, list.getTxsHashes(), 1) - list.AddTx(createTx([]byte("B"), ".", 2), txGasHandler) - list.AddTx(createTx([]byte("C"), ".", 3), txGasHandler) + list.AddTx(createTx([]byte("B"), ".", 2)) + list.AddTx(createTx([]byte("C"), ".", 3)) require.Len(t, list.getTxsHashes(), 3) } func TestListForSender_DetectRaceConditions(t *testing.T) { list := newUnconstrainedListToTest() - txGasHandler := txcachemocks.NewTxGasHandlerMock() go func() { // These are called concurrently with addition: during eviction, during removal etc. 
@@ -348,80 +234,10 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { }() go func() { - list.AddTx(createTx([]byte("test"), ".", 42), txGasHandler) + list.AddTx(createTx([]byte("test"), ".", 42)) }() } -func TestListForSender_transactionAddAndRemove_updateScore(t *testing.T) { - txGasHandler := txcachemocks.NewTxGasHandlerMock() - alice := newUnconstrainedListToTest() - bob := newUnconstrainedListToTest() - - alice.notifyAccountNonce(1) - bob.notifyAccountNonce(1) - - a := createTx([]byte("a"), ".", 1) - b := createTx([]byte("b"), ".", 1) - c := createTx([]byte("c"), ".", 2).withDataLength(42).withGasLimit(50000 + 1500*42) - d := createTx([]byte("d"), ".", 2).withDataLength(84).withGasLimit(50000 + 1500*84) - e := createTx([]byte("e"), ".", 3).withDataLength(1).withGasLimit(50000000).withGasPrice(oneBillion) - f := createTx([]byte("f"), ".", 3).withDataLength(1).withGasLimit(150000000).withGasPrice(oneBillion) - g := createTx([]byte("g"), ".", 4).withDataLength(7).withGasLimit(5000000).withGasPrice(oneBillion) - h := createTx([]byte("h"), ".", 4).withDataLength(7).withGasLimit(5000000).withGasPrice(oneBillion) - i := createTx([]byte("i"), ".", 5).withDataLength(42).withGasLimit(5000000).withGasPrice(2 * oneBillion) - j := createTx([]byte("j"), ".", 5).withDataLength(42).withGasLimit(5000000).withGasPrice(3 * oneBillion) - k := createTx([]byte("k"), ".", 5).withDataLength(42).withGasLimit(5000000).withGasPrice(2 * oneBillion) - l := createTx([]byte("l"), ".", 8) - - alice.AddTx(a, txGasHandler) - bob.AddTx(b, txGasHandler) - - require.Equal(t, 74, alice.getScore()) - require.Equal(t, 74, bob.getScore()) - - alice.AddTx(c, txGasHandler) - bob.AddTx(d, txGasHandler) - - require.Equal(t, 74, alice.getScore()) - require.Equal(t, 74, bob.getScore()) - - alice.AddTx(e, txGasHandler) - bob.AddTx(f, txGasHandler) - - require.Equal(t, 5, alice.getScore()) - require.Equal(t, 2, bob.getScore()) - - alice.AddTx(g, txGasHandler) - bob.AddTx(h, txGasHandler) - - require.Equal(t, 6, alice.getScore()) - require.Equal(t, 3, bob.getScore()) - - alice.AddTx(i, txGasHandler) - bob.AddTx(j, txGasHandler) - - require.Equal(t, 10, alice.getScore()) - require.Equal(t, 6, bob.getScore()) - - // Bob adds a transaction with duplicated nonce - bob.AddTx(k, txGasHandler) - - require.Equal(t, 10, alice.getScore()) - require.Equal(t, 0, bob.getScore()) - - require.True(t, alice.RemoveTx(a)) - require.True(t, alice.RemoveTx(c)) - - require.Equal(t, 7, alice.getScore()) - require.Equal(t, 0, bob.getScore()) - - // Alice comes with a nonce gap - alice.AddTx(l, txGasHandler) - - require.Equal(t, 0, alice.getScore()) - require.Equal(t, 0, bob.getScore()) -} - func newUnconstrainedListToTest() *txListForSender { return newListToTest(math.MaxUint32, math.MaxUint32) } @@ -432,8 +248,5 @@ func newListToTest(maxNumBytes uint32, maxNumTxs uint32) *txListForSender { maxNumTxs: maxNumTxs, } - txGasHandler := txcachemocks.NewTxGasHandlerMock() - scoreComputer := newDefaultScoreComputer(txGasHandler) - - return newTxListForSender(".", senderConstraints, scoreComputer) + return newTxListForSender(".", senderConstraints) } diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index 11b60c7a..ab8b7255 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -13,14 +13,11 @@ type WrappedTransaction struct { SenderShardID uint32 ReceiverShardID uint32 Size int64 - TxFee float64 + + TxFee *big.Int } -// computeFee computes the transaction fee. 
-// The returned fee is also held on the transaction object. -func (wrappedTx *WrappedTransaction) computeFee(txGasHandler TxGasHandler) float64 { - fee := txGasHandler.ComputeTxFee(wrappedTx.Tx) - feeAsFloat, _ := new(big.Float).SetInt(fee).Float64() - wrappedTx.TxFee = feeAsFloat - return feeAsFloat +// computeFee computes (and caches) the transaction fee. +func (wrappedTx *WrappedTransaction) computeFee(txGasHandler TxGasHandler) { + wrappedTx.TxFee = txGasHandler.ComputeTxFee(wrappedTx.Tx) } diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index 56f212d9..4794bc61 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -10,8 +10,7 @@ import ( func Test_computeTxFee(t *testing.T) { txGasHandler := txcachemocks.NewTxGasHandlerMock() tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) - txFee := tx.computeFee(txGasHandler) + tx.computeFee(txGasHandler) - require.Equal(t, float64(51500000000000), txFee) - require.Equal(t, txFee, tx.TxFee) + require.Equal(t, "51500000000000", tx.TxFee.String()) } From a71a512863e3e706c0ccb104e817025c12491450 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 30 Oct 2024 17:47:41 +0200 Subject: [PATCH 064/124] Sketch selection using merges. --- txcache/selectionUsingMerges.go | 110 ++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 txcache/selectionUsingMerges.go diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go new file mode 100644 index 00000000..bf395c41 --- /dev/null +++ b/txcache/selectionUsingMerges.go @@ -0,0 +1,110 @@ +package txcache + +type BunchOfTransactions []*WrappedTransaction + +func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) BunchOfTransactions { + senders := cache.getSenders() + bunches := make([]BunchOfTransactions, 0, len(senders)) + + for _, sender := range senders { + bunches = append(bunches, sender.getTxsWithoutGaps()) + } + + // If number of bunches is odd, add a phony bunch (to ease pairing logic). + if len(bunches)%2 == 1 { + bunches = append(bunches, make(BunchOfTransactions, 0)) + } + + mergedBunch := mergeBunchesOfTransactions(bunches)[0] + selection := selectUntilReachedGasRequested(mergedBunch, gasRequested) + return selection +} + +func selectUntilReachedGasRequested(bunch BunchOfTransactions, gasRequested uint64) BunchOfTransactions { + accumulatedGas := uint64(0) + + for index, transaction := range bunch { + accumulatedGas += transaction.Tx.GetGasLimit() + + if accumulatedGas > gasRequested { + return bunch[0:index] + } + } + + return bunch +} + +func mergeBunchesOfTransactions(bunches []BunchOfTransactions) []BunchOfTransactions { + if len(bunches) == 1 { + return bunches + } + + // Make pairs of bunches, merge a pair into one bunch. 
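+	// For example, 4 bunches are merged pairwise into 2 bunches,
+	// which the recursive call below merges into a single bunch.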
+ newBunches := make([]BunchOfTransactions, 0, len(bunches)/2) + + for i := 0; i < len(bunches); i += 2 { + first := bunches[i] + second := bunches[i+1] + + newBunch := mergeTwoBunchesOfTransactions(first, second) + newBunches = append(newBunches, newBunch) + } + + // Recursive call: + return mergeBunchesOfTransactions(newBunches) +} + +func mergeTwoBunchesOfTransactions(first BunchOfTransactions, second BunchOfTransactions) BunchOfTransactions { + result := make(BunchOfTransactions, len(first)+len(second)) + + resultIndex := 0 + firstIndex := 0 + secondIndex := 0 + + for resultIndex < len(result) { + a := first[firstIndex] + b := second[secondIndex] + + if isTransactionGreater(a, b) { + result[resultIndex] = a + firstIndex++ + } else { + result[resultIndex] = b + secondIndex++ + } + + resultIndex++ + } + + return result +} + +// Equality is out of scope (not possible in our case). +func isTransactionGreater(transaction *WrappedTransaction, otherTransaction *WrappedTransaction) bool { + // First, compare by fee (PLS CHANGE TO PPU) + cmpFee := transaction.TxFee.Cmp(otherTransaction.TxFee) + if cmpFee > 0 { + return true + } else if cmpFee < 0 { + return false + } + + // Then, compare by gas price (to promote the practice of a higher gas price) + if transaction.Tx.GetGasPrice() > otherTransaction.Tx.GetGasPrice() { + return true + } + if transaction.Tx.GetGasPrice() < otherTransaction.Tx.GetGasPrice() { + return false + } + + // Then, compare by gas limit (promote the practice of lower gas limit) + if transaction.Tx.GetGasLimit() < otherTransaction.Tx.GetGasLimit() { + return true + } + if transaction.Tx.GetGasLimit() > otherTransaction.Tx.GetGasLimit() { + return false + } + + // In the end, compare by transaction hash + return string(transaction.TxHash) > string(otherTransaction.TxHash) +} From a8c39b704484b6584e7515f8c312bf2a21efd15e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 30 Oct 2024 18:00:07 +0200 Subject: [PATCH 065/124] Sketch getTxsWithoutGaps (not tested). --- txcache/txListForSender.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 405e45ed..d909ce96 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -213,6 +213,38 @@ func (listForSender *txListForSender) getTxs() []*WrappedTransaction { return result } +// getTxsWithoutGaps returns the transactions in the list (gaps are handled, affected transactions are excluded) +func (listForSender *txListForSender) getTxsWithoutGaps() []*WrappedTransaction { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + + accountNonce := listForSender.accountNonce.Get() + accountNonceKnown := listForSender.accountNonceKnown.IsSet() + + result := make([]*WrappedTransaction, 0, listForSender.countTx()) + previousNonce := uint64(0) + + for element := listForSender.items.Front(); element != nil; element = element.Next() { + value := element.Value.(*WrappedTransaction) + nonce := value.Tx.GetNonce() + + // Detect initial gaps. + if len(result) == 0 && accountNonceKnown && accountNonce > nonce { + break + } + + // Detect middle gaps. 
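+		// For example, if the account nonce is 42 and the list holds nonces 42, 43 and 45,
+		// only the transactions with nonces 42 and 43 are returned: the jump from 43 to 45
+		// is a middle gap, so the transaction with nonce 45 (and any following it) is excluded.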
+ if len(result) > 0 && nonce != previousNonce+1 { + break + } + + result = append(result, value) + previousNonce = nonce + } + + return result +} + // This function should only be used in critical section (listForSender.mutex) func (listForSender *txListForSender) countTx() uint64 { return uint64(listForSender.items.Len()) From 5a4210221382dfcdb558b6f5cb90689ded4a8b54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 14:28:22 +0200 Subject: [PATCH 066/124] Sketch computePricePerGasUnit. --- txcache/printing.go | 4 ++-- txcache/txListBySenderMap.go | 2 +- txcache/wrappedTransaction.go | 17 +++++++++++++---- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/txcache/printing.go b/txcache/printing.go index 4d137439..d549d763 100644 --- a/txcache/printing.go +++ b/txcache/printing.go @@ -15,7 +15,7 @@ type printedTransaction struct { Sender string `json:"sender"` Receiver string `json:"receiver"` DataLength int `json:"dataLength"` - Fee string `json:"fee"` + PPU uint64 `json:"ppu"` } type printedSender struct { @@ -75,7 +75,7 @@ func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction GasPrice: transaction.GetGasPrice(), GasLimit: transaction.GetGasLimit(), DataLength: len(transaction.GetData()), - Fee: wrappedTx.TxFee.String(), + PPU: wrappedTx.PricePerGasUnitQuotient, } } diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 6feb19b5..b1180b6e 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -35,7 +35,7 @@ func newTxListBySenderMap( func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) (bool, [][]byte) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) - tx.computeFee(txMap.txGasHandler) + tx.computePricePerGasUnit(txMap.txGasHandler) return listForSender.AddTx(tx) } diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index ab8b7255..f8b4d641 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -14,10 +14,19 @@ type WrappedTransaction struct { ReceiverShardID uint32 Size int64 - TxFee *big.Int + PricePerGasUnitQuotient uint64 + PricePerGasUnitRemainder uint64 } -// computeFee computes (and caches) the transaction fee. -func (wrappedTx *WrappedTransaction) computeFee(txGasHandler TxGasHandler) { - wrappedTx.TxFee = txGasHandler.ComputeTxFee(wrappedTx.Tx) +// computePricePerGasUnit computes (and caches) the (average) price per gas unit. +func (wrappedTx *WrappedTransaction) computePricePerGasUnit(txGasHandler TxGasHandler) { + fee := txGasHandler.ComputeTxFee(wrappedTx.Tx) + gasLimit := big.NewInt(0).SetUint64(wrappedTx.Tx.GetGasLimit()) + + quotient := new(big.Int) + remainder := new(big.Int) + quotient, remainder = quotient.QuoRem(fee, gasLimit, remainder) + + wrappedTx.PricePerGasUnitQuotient = quotient.Uint64() + wrappedTx.PricePerGasUnitRemainder = remainder.Uint64() } From d1063bed4810460620b6a69e37923ae32ec5c4d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 14:28:51 +0200 Subject: [PATCH 067/124] Fix isTransactionGreater(). 
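
Context for this change: the previous patch caches, per transaction, the price per gas unit (the fee divided by the gas limit) as an integer quotient and remainder, so that ordering needs no floating point. A minimal sketch of the resulting comparison, with hypothetical names standing in for PricePerGasUnitQuotient / PricePerGasUnitRemainder:

    // ppu is a hypothetical stand-in for the cached price-per-gas-unit parts:
    // quotient = fee / gasLimit, remainder = fee % gasLimit.
    type ppu struct {
        quotient  uint64
        remainder uint64
    }

    // greaterPPU compares the quotients first and uses the remainders as a tie-breaker.
    func greaterPPU(a, b ppu) bool {
        if a.quotient != b.quotient {
            return a.quotient > b.quotient
        }
        return a.remainder > b.remainder
    }

In the patch below, the comparison then falls back to gas price, gas limit and, finally, the transaction hash.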
--- txcache/selectionUsingMerges.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index bf395c41..9eaab5e1 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -81,11 +81,17 @@ func mergeTwoBunchesOfTransactions(first BunchOfTransactions, second BunchOfTran // Equality is out of scope (not possible in our case). func isTransactionGreater(transaction *WrappedTransaction, otherTransaction *WrappedTransaction) bool { - // First, compare by fee (PLS CHANGE TO PPU) - cmpFee := transaction.TxFee.Cmp(otherTransaction.TxFee) - if cmpFee > 0 { + // First, compare by price per unit + if transaction.PricePerGasUnitQuotient > otherTransaction.PricePerGasUnitQuotient { return true - } else if cmpFee < 0 { + } + if transaction.PricePerGasUnitQuotient < otherTransaction.PricePerGasUnitQuotient { + return false + } + if transaction.PricePerGasUnitRemainder > otherTransaction.PricePerGasUnitRemainder { + return true + } + if transaction.PricePerGasUnitRemainder < otherTransaction.PricePerGasUnitRemainder { return false } From 0d96e18985b7633d3232a67a00afb37236834d65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 16:53:22 +0200 Subject: [PATCH 068/124] Sketch parallelization (not tested). --- txcache/selectionUsingMerges.go | 58 +++++++++++++++++++++++++++++---- 1 file changed, 52 insertions(+), 6 deletions(-) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 9eaab5e1..57c13245 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -1,7 +1,16 @@ package txcache +import "sync" + type BunchOfTransactions []*WrappedTransaction +const numJobsForMerging = 4 + +type mergingJob struct { + input []BunchOfTransactions + output BunchOfTransactions +} + func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) BunchOfTransactions { senders := cache.getSenders() bunches := make([]BunchOfTransactions, 0, len(senders)) @@ -10,12 +19,7 @@ func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) BunchOf bunches = append(bunches, sender.getTxsWithoutGaps()) } - // If number of bunches is odd, add a phony bunch (to ease pairing logic). - if len(bunches)%2 == 1 { - bunches = append(bunches, make(BunchOfTransactions, 0)) - } - - mergedBunch := mergeBunchesOfTransactions(bunches)[0] + mergedBunch := mergeBunchesOfTransactionsInParallel(bunches) selection := selectUntilReachedGasRequested(mergedBunch, gasRequested) return selection } @@ -34,6 +38,48 @@ func selectUntilReachedGasRequested(bunch BunchOfTransactions, gasRequested uint return bunch } +func mergeBunchesOfTransactionsInParallel(bunches []BunchOfTransactions) BunchOfTransactions { + // If number of bunches is odd, add a phony bunch (to ease pairing logic). 
+ if len(bunches)%2 == 1 { + bunches = append(bunches, make(BunchOfTransactions, 0)) + } + + jobs := make([]*mergingJob, numJobsForMerging) + + for i := 0; i < numJobsForMerging; i++ { + jobs[i] = &mergingJob{ + input: make([]BunchOfTransactions, 0, len(bunches)/numJobsForMerging), + } + } + + for i, bunch := range bunches { + jobs[i%numJobsForMerging].input = append(jobs[i%numJobsForMerging].input, bunch) + } + + // Run jobs in parallel + wg := sync.WaitGroup{} + + for _, job := range jobs { + wg.Add(1) + + go func(job *mergingJob) { + job.output = mergeBunchesOfTransactions(job.input)[0] + defer wg.Done() + }(job) + } + + wg.Wait() + + // Merge the results of the jobs + outputBunchesOfJobs := make([]BunchOfTransactions, 0, numJobsForMerging) + + for _, job := range jobs { + outputBunchesOfJobs = append(outputBunchesOfJobs, job.output) + } + + return mergeBunchesOfTransactions(outputBunchesOfJobs)[0] +} + func mergeBunchesOfTransactions(bunches []BunchOfTransactions) []BunchOfTransactions { if len(bunches) == 1 { return bunches From b8cf28a50654ad6e3981fdd53f37f9cbb8d00ef0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 18:20:48 +0200 Subject: [PATCH 069/124] Fix & simplify mergeTwoBunchesOfTransactions. --- txcache/selectionUsingMerges.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 57c13245..391f3ac5 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -101,27 +101,28 @@ func mergeBunchesOfTransactions(bunches []BunchOfTransactions) []BunchOfTransact } func mergeTwoBunchesOfTransactions(first BunchOfTransactions, second BunchOfTransactions) BunchOfTransactions { - result := make(BunchOfTransactions, len(first)+len(second)) + result := make(BunchOfTransactions, 0, len(first)+len(second)) - resultIndex := 0 firstIndex := 0 secondIndex := 0 - for resultIndex < len(result) { + for firstIndex < len(first) && secondIndex < len(second) { a := first[firstIndex] b := second[secondIndex] if isTransactionGreater(a, b) { - result[resultIndex] = a + result = append(result, a) firstIndex++ } else { - result[resultIndex] = b + result = append(result, b) secondIndex++ } - - resultIndex++ } + // Append any remaining elements. + result = append(result, first[firstIndex:]...) + result = append(result, second[secondIndex:]...) + return result } From f40df764daeced86de25508afc1c3dd47ae1aea1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 18:21:26 +0200 Subject: [PATCH 070/124] Integrate new selection (work in progress). 
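
For reference, the pairwise merge fixed in the previous patch is the classic two-pointer merge of two already-sorted slices: the earlier version indexed both slices unconditionally and went out of range once one side was exhausted, whereas bounding the loop by both indices and appending the leftovers avoids that. A generic sketch (plain ints instead of *WrappedTransaction, with a caller-supplied comparison):

    // mergeTwoSorted merges two slices that are each already sorted according to
    // "greater" (highest priority first), preserving that order in the result.
    func mergeTwoSorted(first, second []int, greater func(a, b int) bool) []int {
        result := make([]int, 0, len(first)+len(second))
        i, j := 0, 0

        for i < len(first) && j < len(second) {
            if greater(first[i], second[j]) {
                result = append(result, first[i])
                i++
            } else {
                result = append(result, second[j])
                j++
            }
        }

        // At most one of these appends anything: the slice that was not exhausted.
        result = append(result, first[i:]...)
        result = append(result, second[j:]...)
        return result
    }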
--- txcache/selectionUsingMerges.go | 4 ++-- txcache/txCache.go | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 391f3ac5..9c922a7a 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -11,7 +11,7 @@ type mergingJob struct { output BunchOfTransactions } -func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) BunchOfTransactions { +func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) (BunchOfTransactions, []*txListForSender) { senders := cache.getSenders() bunches := make([]BunchOfTransactions, 0, len(senders)) @@ -21,7 +21,7 @@ func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) BunchOf mergedBunch := mergeBunchesOfTransactionsInParallel(bunches) selection := selectUntilReachedGasRequested(mergedBunch, gasRequested) - return selection + return selection, senders } func selectUntilReachedGasRequested(bunch BunchOfTransactions, gasRequested uint64) BunchOfTransactions { diff --git a/txcache/txCache.go b/txcache/txCache.go index 8aa65d9e..253fee13 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -119,8 +119,7 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRe "num senders", cache.CountSenders(), ) - senders := cache.getSenders() - transactions := make([]*WrappedTransaction, 0) + transactions, senders := cache.selectTransactionsUsingMerges(gasRequested) stopWatch.Stop("selection") From 5e67c02f3740dd466787730684ab29c1bef49a76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 21:07:03 +0200 Subject: [PATCH 071/124] Sketch / fix merging. --- txcache/selectionUsingMerges.go | 71 ++++++++++++++++----------------- 1 file changed, 34 insertions(+), 37 deletions(-) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 9c922a7a..435b92b4 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -24,26 +24,7 @@ func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) (BunchO return selection, senders } -func selectUntilReachedGasRequested(bunch BunchOfTransactions, gasRequested uint64) BunchOfTransactions { - accumulatedGas := uint64(0) - - for index, transaction := range bunch { - accumulatedGas += transaction.Tx.GetGasLimit() - - if accumulatedGas > gasRequested { - return bunch[0:index] - } - } - - return bunch -} - func mergeBunchesOfTransactionsInParallel(bunches []BunchOfTransactions) BunchOfTransactions { - // If number of bunches is odd, add a phony bunch (to ease pairing logic). 
- if len(bunches)%2 == 1 { - bunches = append(bunches, make(BunchOfTransactions, 0)) - } - jobs := make([]*mergingJob, numJobsForMerging) for i := 0; i < numJobsForMerging; i++ { @@ -63,7 +44,7 @@ func mergeBunchesOfTransactionsInParallel(bunches []BunchOfTransactions) BunchOf wg.Add(1) go func(job *mergingJob) { - job.output = mergeBunchesOfTransactions(job.input)[0] + job.output = mergeBunches(job.input) defer wg.Done() }(job) } @@ -77,30 +58,32 @@ func mergeBunchesOfTransactionsInParallel(bunches []BunchOfTransactions) BunchOf outputBunchesOfJobs = append(outputBunchesOfJobs, job.output) } - return mergeBunchesOfTransactions(outputBunchesOfJobs)[0] + return mergeBunches(outputBunchesOfJobs) } -func mergeBunchesOfTransactions(bunches []BunchOfTransactions) []BunchOfTransactions { - if len(bunches) == 1 { - return bunches - } - - // Make pairs of bunches, merge a pair into one bunch. - newBunches := make([]BunchOfTransactions, 0, len(bunches)/2) - - for i := 0; i < len(bunches); i += 2 { - first := bunches[i] - second := bunches[i+1] +func mergeBunches(bunches []BunchOfTransactions) BunchOfTransactions { + return mergeTwoBunchesOfBunches(bunches, make([]BunchOfTransactions, 0)) +} - newBunch := mergeTwoBunchesOfTransactions(first, second) - newBunches = append(newBunches, newBunch) +func mergeTwoBunchesOfBunches(first []BunchOfTransactions, second []BunchOfTransactions) BunchOfTransactions { + if len(first) == 0 && len(second) == 1 { + return second[0] + } + if len(first) == 1 && len(second) == 0 { + return first[0] + } + if len(first) == 0 && len(second) == 0 { + return make(BunchOfTransactions, 0) } - // Recursive call: - return mergeBunchesOfTransactions(newBunches) + return mergeTwoBunches( + mergeTwoBunchesOfBunches(first[0:len(first)/2], first[len(first)/2:]), + mergeTwoBunchesOfBunches(second[0:len(second)/2], second[len(second)/2:]), + ) } -func mergeTwoBunchesOfTransactions(first BunchOfTransactions, second BunchOfTransactions) BunchOfTransactions { +// Empty bunches are handled. +func mergeTwoBunches(first BunchOfTransactions, second BunchOfTransactions) BunchOfTransactions { result := make(BunchOfTransactions, 0, len(first)+len(second)) firstIndex := 0 @@ -161,3 +144,17 @@ func isTransactionGreater(transaction *WrappedTransaction, otherTransaction *Wra // In the end, compare by transaction hash return string(transaction.TxHash) > string(otherTransaction.TxHash) } + +func selectUntilReachedGasRequested(bunch BunchOfTransactions, gasRequested uint64) BunchOfTransactions { + accumulatedGas := uint64(0) + + for index, transaction := range bunch { + accumulatedGas += transaction.Tx.GetGasLimit() + + if accumulatedGas > gasRequested { + return bunch[0:index] + } + } + + return bunch +} From ce285074c0574bede4360e83de5772826caf4fa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 21:11:20 +0200 Subject: [PATCH 072/124] Improve isTransactionGreater(). --- txcache/selectionUsingMerges.go | 45 +++++++++++++++++---------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 435b92b4..41f828b3 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -1,6 +1,9 @@ package txcache -import "sync" +import ( + "bytes" + "sync" +) type BunchOfTransactions []*WrappedTransaction @@ -112,37 +115,35 @@ func mergeTwoBunches(first BunchOfTransactions, second BunchOfTransactions) Bunc // Equality is out of scope (not possible in our case). 
func isTransactionGreater(transaction *WrappedTransaction, otherTransaction *WrappedTransaction) bool { // First, compare by price per unit - if transaction.PricePerGasUnitQuotient > otherTransaction.PricePerGasUnitQuotient { - return true + ppuQuotient := transaction.PricePerGasUnitQuotient + ppuQuotientOther := otherTransaction.PricePerGasUnitQuotient + if ppuQuotient != ppuQuotientOther { + return ppuQuotient > ppuQuotientOther } - if transaction.PricePerGasUnitQuotient < otherTransaction.PricePerGasUnitQuotient { - return false - } - if transaction.PricePerGasUnitRemainder > otherTransaction.PricePerGasUnitRemainder { - return true - } - if transaction.PricePerGasUnitRemainder < otherTransaction.PricePerGasUnitRemainder { - return false + + ppuRemainder := transaction.PricePerGasUnitRemainder + ppuRemainderOther := otherTransaction.PricePerGasUnitRemainder + if ppuRemainder != ppuRemainderOther { + return ppuRemainder > ppuRemainderOther } // Then, compare by gas price (to promote the practice of a higher gas price) - if transaction.Tx.GetGasPrice() > otherTransaction.Tx.GetGasPrice() { - return true - } - if transaction.Tx.GetGasPrice() < otherTransaction.Tx.GetGasPrice() { - return false + gasPrice := transaction.Tx.GetGasPrice() + gasPriceOther := otherTransaction.Tx.GetGasPrice() + if gasPrice != gasPriceOther { + return gasPrice > gasPriceOther } // Then, compare by gas limit (promote the practice of lower gas limit) - if transaction.Tx.GetGasLimit() < otherTransaction.Tx.GetGasLimit() { - return true - } - if transaction.Tx.GetGasLimit() > otherTransaction.Tx.GetGasLimit() { - return false + // Compare Gas Limits (promote lower gas limit) + gasLimit := transaction.Tx.GetGasLimit() + gasLimitOther := otherTransaction.Tx.GetGasLimit() + if gasLimit != gasLimitOther { + return gasLimit < gasLimitOther } // In the end, compare by transaction hash - return string(transaction.TxHash) > string(otherTransaction.TxHash) + return bytes.Compare(transaction.TxHash, otherTransaction.TxHash) > 0 } func selectUntilReachedGasRequested(bunch BunchOfTransactions, gasRequested uint64) BunchOfTransactions { From e50d419a54d7c767a21f848bac34af1b84dce134 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 21:39:00 +0200 Subject: [PATCH 073/124] Add some tests. 
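
The tests added below exercise both the ordering and the gas-budget cutoff applied by selectUntilReachedGasRequested: selection keeps the longest prefix of the merged order whose accumulated gas limit stays within the requested budget. A generic sketch of the cutoff, with hypothetical names:

    // takeWithinGasBudget returns the longest prefix of gasLimits whose sum does not
    // exceed gasRequested; the element that would cross the budget is excluded.
    func takeWithinGasBudget(gasLimits []uint64, gasRequested uint64) []uint64 {
        accumulated := uint64(0)

        for i, gasLimit := range gasLimits {
            accumulated += gasLimit
            if accumulated > gasRequested {
                return gasLimits[:i]
            }
        }

        return gasLimits
    }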
--- txcache/selection_test.go | 30 ++++++++++++++ txcache/txCache_test.go | 87 ++++++++++++++++++++++++++++----------- 2 files changed, 92 insertions(+), 25 deletions(-) create mode 100644 txcache/selection_test.go diff --git a/txcache/selection_test.go b/txcache/selection_test.go new file mode 100644 index 00000000..233028ab --- /dev/null +++ b/txcache/selection_test.go @@ -0,0 +1,30 @@ +package txcache + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_mergeTwoBunches(t *testing.T) { + t.Run("empty bunches", func(t *testing.T) { + merged := mergeTwoBunches(BunchOfTransactions{}, BunchOfTransactions{}) + require.Len(t, merged, 0) + }) + + t.Run("alice and bob (1)", func(t *testing.T) { + first := BunchOfTransactions{ + createTx([]byte("hash-alice-1"), "alice", 1).withGasPrice(42), + } + + second := BunchOfTransactions{ + createTx([]byte("hash-bob-1"), "bob", 1).withGasPrice(43), + } + + merged := mergeTwoBunches(first, second) + + require.Len(t, merged, 2) + require.Equal(t, "hash-bob-1", string(merged[0].TxHash)) + require.Equal(t, "hash-alice-1", string(merged[1].TxHash)) + }) +} diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index b287cc9c..94d3508f 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -300,36 +300,73 @@ func Test_GetTransactionsPoolForSender(t *testing.T) { } func Test_SelectTransactions_Dummy(t *testing.T) { - cache := newUnconstrainedCacheToTest() + t.Run("all having same PPU", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) + + selected := cache.SelectTransactions(math.MaxUint64) + require.Len(t, selected, 8) + + // Check order + require.Equal(t, "hash-carol-1", string(selected[0].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[1].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[2].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[3].TxHash)) + require.Equal(t, "hash-alice-1", string(selected[4].TxHash)) + require.Equal(t, "hash-alice-2", string(selected[5].TxHash)) + require.Equal(t, "hash-alice-3", string(selected[6].TxHash)) + require.Equal(t, "hash-alice-4", string(selected[7].TxHash)) + }) - cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) - cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) - cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) - cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) - cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) - cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) - cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) + t.Run("alice > carol > bob", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() - sorted := cache.SelectTransactions(math.MaxUint64) - require.Len(t, sorted, 8) -} + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasPrice(100)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasPrice(50)) + cache.AddTx(createTx([]byte("hash-carol-3"), "carol", 3).withGasPrice(75)) -func Test_SelectTransactionsWithBandwidth_Dummy(t 
*testing.T) { - cache := newUnconstrainedCacheToTest() - cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4).withGasLimit(100000)) - cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3).withGasLimit(100000)) - cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2).withGasLimit(500000)) - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasLimit(200000)) - cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7).withGasLimit(100000)) - cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6).withGasLimit(50000)) - cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) - cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) + selected := cache.SelectTransactions(math.MaxUint64) + require.Len(t, selected, 3) - sorted := cache.SelectTransactions(math.MaxUint64) - numSelected := 1 + 1 + 3 // 1 alice, 1 carol, 3 bob + // Check order + require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) + require.Equal(t, "hash-carol-3", string(selected[1].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[2].TxHash)) + }) +} - require.Len(t, sorted, numSelected) +func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { + t.Run("transactions with no data field", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasLimit(200000)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7).withGasLimit(400000)) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) + + selected := cache.SelectTransactions(760000) + + require.Len(t, selected, 5) + + // Check order + require.Equal(t, "hash-carol-1", string(selected[0].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[1].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[2].TxHash)) + require.Equal(t, "hash-alice-1", string(selected[3].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[4].TxHash)) + }) } func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { From fc80fe3b3df016c4ca41c5378c4f9f78bccf60c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 31 Oct 2024 23:29:50 +0200 Subject: [PATCH 074/124] Sketch eviction (not tested). 
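
The eviction sketched here reuses the selection order: merge all transactions (including those after nonce gaps), treat the tail of that order as the least likely to be selected, and then, for each affected sender, evict everything from the lowest evicted nonce upwards, so a sender is trimmed by a whole nonce suffix rather than left with holes in the middle. A minimal sketch of the per-sender bookkeeping, with hypothetical names:

    // evictionCandidate is a hypothetical stand-in for the data needed per evicted transaction.
    type evictionCandidate struct {
        sender string
        nonce  uint64
    }

    // lowestNonceToEvictBySender records, for each sender present in the tail chosen
    // for eviction, the lowest nonce found there; the cache then evicts that nonce
    // and everything above it for that sender.
    func lowestNonceToEvictBySender(tail []evictionCandidate) map[string]uint64 {
        lowest := make(map[string]uint64)

        for _, candidate := range tail {
            nonce, ok := lowest[candidate.sender]
            if !ok || candidate.nonce < nonce {
                lowest[candidate.sender] = candidate.nonce
            }
        }

        return lowest
    }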
--- txcache/config.go | 19 +++++------- txcache/eviction.go | 52 +++++++++++++++++++++++++++++-- txcache/eviction_test.go | 55 +++++++++++++++------------------ txcache/txCache_test.go | 49 +++++++++++++---------------- txcache/txListForSender.go | 25 +++++++++++++-- txcache/txListForSender_test.go | 8 ++--- 6 files changed, 129 insertions(+), 79 deletions(-) diff --git a/txcache/config.go b/txcache/config.go index 40236d3d..e9f11475 100644 --- a/txcache/config.go +++ b/txcache/config.go @@ -16,18 +16,16 @@ const maxNumItemsPerSenderLowerBound = 1 const maxNumBytesPerSenderLowerBound = maxNumItemsPerSenderLowerBound * 1 const maxNumBytesPerSenderUpperBound = 33_554_432 // 32 MB const numTxsToPreemptivelyEvictLowerBound = 1 -const numSendersToPreemptivelyEvictLowerBound = 1 // ConfigSourceMe holds cache configuration type ConfigSourceMe struct { - Name string - NumChunks uint32 - EvictionEnabled bool - NumBytesThreshold uint32 - NumBytesPerSenderThreshold uint32 - CountThreshold uint32 - CountPerSenderThreshold uint32 - NumSendersToPreemptivelyEvict uint32 + Name string + NumChunks uint32 + EvictionEnabled bool + NumBytesThreshold uint32 + NumBytesPerSenderThreshold uint32 + CountThreshold uint32 + CountPerSenderThreshold uint32 } type senderConstraints struct { @@ -56,9 +54,6 @@ func (config *ConfigSourceMe) verify() error { if config.CountThreshold < maxNumItemsLowerBound { return fmt.Errorf("%w: config.CountThreshold is invalid", common.ErrInvalidConfig) } - if config.NumSendersToPreemptivelyEvict < numSendersToPreemptivelyEvictLowerBound { - return fmt.Errorf("%w: config.NumSendersToPreemptivelyEvict is invalid", common.ErrInvalidConfig) - } } return nil diff --git a/txcache/eviction.go b/txcache/eviction.go index 109f3b03..ca930bd0 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -40,8 +40,7 @@ func (cache *TxCache) doEviction() *evictionJournal { stopWatch := core.NewStopWatch() stopWatch.Start("eviction") - // TODO: reimplement. - evictionJournal := evictionJournal{} + evictionJournal := cache.evictLeastLikelyToSelectTransactions() stopWatch.Stop("eviction") @@ -78,3 +77,52 @@ func (cache *TxCache) areThereTooManyTxs() bool { tooManyTxs := numTxs > uint64(cache.config.CountThreshold) return tooManyTxs } + +func (cache *TxCache) evictLeastLikelyToSelectTransactions() evictionJournal { + senders := cache.getSenders() + bunches := make([]BunchOfTransactions, 0, len(senders)) + + for _, sender := range senders { + // Include transactions after gaps, as well (important), unlike when selecting transactions for processing. + bunches = append(bunches, sender.getTxs()) + } + + mergedBunch := mergeBunchesOfTransactionsInParallel(bunches) + + // Select a reasonable number of transactions to evict. + transactionsToEvict := mergedBunch[3*len(mergedBunch)/4:] + transactionsToEvictHashes := make([][]byte, len(transactionsToEvict)) + + // For each sender, find the "lowest" (in nonce) transaction to evict. + lowestToEvictBySender := make(map[string]uint64) + + for _, tx := range transactionsToEvict { + transactionsToEvictHashes = append(transactionsToEvictHashes, tx.TxHash) + sender := string(tx.Tx.GetSndAddr()) + + if _, ok := lowestToEvictBySender[sender]; ok { + continue + } + + lowestToEvictBySender[sender] = tx.Tx.GetNonce() + } + + // Remove those transactions from "txListBySender". 
+ for sender, nonce := range lowestToEvictBySender { + list, ok := cache.txListBySender.getListForSender(sender) + if !ok { + continue + } + + list.evictTransactionsWithHigherNonces(nonce - 1) + } + + // Remove those transactions from "txByHash". + cache.txByHash.RemoveTxsBulk(transactionsToEvictHashes) + + evictionJournal := evictionJournal{ + numTxs: uint32(len(transactionsToEvict)), + } + + return evictionJournal +} diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index 417eb3a6..a170f3fc 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -10,13 +10,12 @@ import ( func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - NumBytesThreshold: maxNumBytesUpperBound, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: 2, - CountPerSenderThreshold: math.MaxUint32, - NumSendersToPreemptivelyEvict: 2, + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 2, + CountPerSenderThreshold: math.MaxUint32, } txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) @@ -39,13 +38,12 @@ func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - CountThreshold: math.MaxUint32, - CountPerSenderThreshold: math.MaxUint32, - NumBytesThreshold: 1000, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - NumSendersToPreemptivelyEvict: 2, + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, } txGasHandler := txcachemocks.NewTxGasHandlerMock() @@ -72,12 +70,11 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 1, - CountThreshold: 0, - NumSendersToPreemptivelyEvict: 1, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 1, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 0, + CountPerSenderThreshold: math.MaxUint32, } txGasHandler := txcachemocks.NewTxGasHandlerMock() @@ -93,18 +90,17 @@ func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { } // This seems to be the most reasonable "bad-enough" (not worst) scenario to benchmark: -// 25000 senders with 10 transactions each, with default "NumSendersToPreemptivelyEvict". +// 25000 senders with 10 transactions each, with default "NumItemsToPreemptivelyEvict". // ~1 second on average laptop. 
func TestTxCache_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: 1000000000, - CountThreshold: 240000, - NumSendersToPreemptivelyEvict: 1000, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 240000, + CountPerSenderThreshold: math.MaxUint32, } txGasHandler := txcachemocks.NewTxGasHandlerMock() @@ -119,5 +115,4 @@ func TestTxCache_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { // Sometimes (due to map iteration non-determinism), more eviction happens - one more step of 100 senders. require.LessOrEqual(t, uint32(cache.CountTx()), config.CountThreshold) - require.GreaterOrEqual(t, uint32(cache.CountTx()), config.CountThreshold-config.NumSendersToPreemptivelyEvict*uint32(numTxsPerSender)) } diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 94d3508f..2047ca80 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -26,14 +26,13 @@ func Test_NewTxCache(t *testing.T) { } withEvictionConfig := ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - CountThreshold: math.MaxUint32, - NumSendersToPreemptivelyEvict: 100, + Name: "test", + NumChunks: 16, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + CountThreshold: math.MaxUint32, } txGasHandler := txcachemocks.NewTxGasHandlerMock() @@ -70,10 +69,6 @@ func Test_NewTxCache(t *testing.T) { badConfig = withEvictionConfig badConfig.CountThreshold = 0 requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.CountThreshold", txGasHandler) - - badConfig = withEvictionConfig - badConfig.NumSendersToPreemptivelyEvict = 0 - requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumSendersToPreemptivelyEvict", txGasHandler) } func requireErrorOnNewTxCache(t *testing.T, config ConfigSourceMe, errExpected error, errPartialMessage string, txGasHandler TxGasHandler) { @@ -443,14 +438,13 @@ func Test_Keys(t *testing.T) { func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { txGasHandler := txcachemocks.NewTxGasHandlerMock() config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - CountThreshold: 100, - NumSendersToPreemptivelyEvict: 1, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, } // 11 * 10 @@ -462,14 +456,13 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { require.LessOrEqual(t, cache.CountTx(), uint64(100)) config = ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - CountThreshold: 250000, - NumSendersToPreemptivelyEvict: 1, - NumBytesPerSenderThreshold: 
maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 250000, + CountPerSenderThreshold: math.MaxUint32, } // 100 * 1000 diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index d909ce96..f60752d1 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -279,18 +279,18 @@ func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) [][]byte listForSender.accountNonce.Set(nonce) _ = listForSender.accountNonceKnown.SetReturningPrevious() - return listForSender.evictTransactionsWithLowerNonces(nonce) + return listForSender.evictTransactionsWithLowerNoncesNoLock(nonce) } // This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) evictTransactionsWithLowerNonces(accountNonce uint64) [][]byte { +func (listForSender *txListForSender) evictTransactionsWithLowerNoncesNoLock(givenNonce uint64) [][]byte { evictedTxHashes := make([][]byte, 0) for element := listForSender.items.Front(); element != nil; { tx := element.Value.(*WrappedTransaction) txNonce := tx.Tx.GetNonce() - if txNonce >= accountNonce { + if txNonce >= givenNonce { break } @@ -306,6 +306,25 @@ func (listForSender *txListForSender) evictTransactionsWithLowerNonces(accountNo return evictedTxHashes } +func (listForSender *txListForSender) evictTransactionsWithHigherNonces(givenNonce uint64) { + listForSender.mutex.Lock() + defer listForSender.mutex.Unlock() + + for element := listForSender.items.Back(); element != nil; { + tx := element.Value.(*WrappedTransaction) + txNonce := tx.Tx.GetNonce() + + if txNonce <= givenNonce { + break + } + + prevElement := element.Prev() + _ = listForSender.items.Remove(element) + listForSender.onRemovedListElement(element) + element = prevElement + } +} + // This function should only be used in critical section (listForSender.mutex). // When a gap is detected, the (known) account nonce and the first transactio nonce are also returned. func (listForSender *txListForSender) hasInitialGap() (uint64, uint64, bool) { diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index ba56f57c..8fe7344d 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -173,7 +173,7 @@ func TestListForSender_NotifyAccountNonce(t *testing.T) { require.True(t, list.accountNonceKnown.IsSet()) } -func TestListForSender_evictTransactionsWithLowerNonces(t *testing.T) { +func TestListForSender_evictTransactionsWithLowerNoncesNoLock(t *testing.T) { list := newUnconstrainedListToTest() list.AddTx(createTx([]byte("tx-42"), ".", 42)) @@ -183,13 +183,13 @@ func TestListForSender_evictTransactionsWithLowerNonces(t *testing.T) { require.Equal(t, 4, list.items.Len()) - list.evictTransactionsWithLowerNonces(43) + list.evictTransactionsWithLowerNoncesNoLock(43) require.Equal(t, 3, list.items.Len()) - list.evictTransactionsWithLowerNonces(44) + list.evictTransactionsWithLowerNoncesNoLock(44) require.Equal(t, 2, list.items.Len()) - list.evictTransactionsWithLowerNonces(99) + list.evictTransactionsWithLowerNoncesNoLock(99) require.Equal(t, 0, list.items.Len()) } From 430744fd3d29d81894127c2e72de2978d81d8468 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 10:41:01 +0200 Subject: [PATCH 075/124] Simplify / refactor. 
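
A side note on evictTransactionsWithHigherNonces from the previous patch: when removing elements from a container/list.List while walking it backwards, the previous element has to be captured before calling Remove, since the removed element no longer links to its neighbours afterwards. A generic, self-contained sketch of the pattern:

    package main

    import "container/list"

    // removeGreaterThan removes, from an ascending-ordered list of ints, every element
    // strictly greater than threshold, walking backwards from the end of the list.
    func removeGreaterThan(items *list.List, threshold int) {
        for element := items.Back(); element != nil; {
            if element.Value.(int) <= threshold {
                break
            }

            previous := element.Prev() // capture before Remove unlinks the element
            items.Remove(element)
            element = previous
        }
    }

    func main() {
        items := list.New()
        for _, value := range []int{40, 41, 42, 43, 44, 45} {
            items.PushBack(value)
        }

        removeGreaterThan(items, 42)
        // items now holds 40, 41, 42.
    }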
--- txcache/README.md | 10 ++-------- txcache/diagnosis.go | 4 ++-- txcache/monitoring.go | 9 +-------- txcache/selectionUsingMerges.go | 4 ++-- txcache/txCache.go | 10 +++++----- txcache/txListForSender.go | 4 ++-- txcache/txListForSender_test.go | 6 +++--- 7 files changed, 17 insertions(+), 30 deletions(-) diff --git a/txcache/README.md b/txcache/README.md index e0e542fd..16494448 100644 --- a/txcache/README.md +++ b/txcache/README.md @@ -3,15 +3,11 @@ ### Glossary 1. **selection session:** an ephemeral session during which the mempool selects transactions for a proposer. A session starts when a proposer asks the mempool for transactions and ends when the mempool returns the transactions. The most important part of a session is the _selection loop_. -2. **selection pass:** a single iteration of the _selection loop_. In an iteration, the algorithm goes through all the senders (appropriately sorted) and selects a batch of transactions from each sender. A _pass_ can stop early (see **Paragraph 3**). -3. **sender score:** a score assigned to a sender based on her's behavior. The score is used to determine the order in which senders are considered within a _selection pass_, plus the size and capacity of a _sender's transactions batch_. The score is a number in `[0, maxSenderScore]`. +2. **transaction PPU:** the price per unit of computation, for a transaction. It's computed as `fee / gasLimit`. ### Configuration -1. **maxSenderScore:** `100`, the maximum score a sender can have. The minimum score is `0`. -3. **gasRequested:** `10_000_000_000`, the maximum total gas limit of the transactions to be returned to a proposer (one _selection session_). -4. **baseNumPerSenderBatch:**: `100`, defines the maximum number of transactions to be selected from the transactions pool, for a sender with the maximum possible score, in a _single pass_. Senders with lower scores will have fewer transactions selected in a single pass. -5. **baseGasPerSenderBatch:**: `120_000_000`, defines the maximum gas for transactions to be selected from the transactions pool, for a sender with the maximum possible score, in a single pass. Senders with lower scores will have less gas selected in a single pass. +1. **gasRequested:** `10_000_000_000`, the maximum total gas limit of the transactions to be returned to a proposer (one _selection session_). ### Transactions selection @@ -20,8 +16,6 @@ When a proposer asks the mempool for transactions, it provides the following parameters: - `gasRequested`: the maximum total gas limit of the transactions to be returned - - `baseNumPerSenderBatch`: a base value for the number of transactions to be returned per sender, per selection _pass_. This value is used to compute the actual number of transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**). - - `baseGasPerSenderBatch`: a base value for the total gas limit of the transactions to be returned per sender, per selection _pass_. This value is used to compute the actual total gas limit of the transactions to be returned per sender, per selection _pass_, based on the sender's score (see **Paragraph 2**). Due to how the selection is performed, the theoretical maximum gas might be exceeded (a bit), as follows: `theoretical maximum = (baseGasPerSenderBatch - 1) + max(baseGasPerSenderBatch, max gas limit of a transaction)`. 
Think of a sender with maximum score, having two transactions, one with `gasLimit = baseGasPerSenderBatch - 1`, and the other with `gasLimit = max gas limit of a transaction`. ### Paragraph 2 diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index ffe66085..79fac6f6 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -80,10 +80,10 @@ func (cache *TxCache) diagnoseSelection() { return } - senders, transactions := cache.doSelectTransactions( + transactions := cache.doSelectTransactions( logDiagnoseSelection, diagnosisSelectionGasRequested, ) - displaySelectionOutcome(logDiagnoseSelection, senders, transactions) + displaySelectionOutcome(logDiagnoseSelection, transactions) } diff --git a/txcache/monitoring.go b/txcache/monitoring.go index 3f245e98..b15e8727 100644 --- a/txcache/monitoring.go +++ b/txcache/monitoring.go @@ -25,18 +25,11 @@ func displaySendersScoreHistogram(scoreGroups [][]*txListForSender) { log.Debug("displaySendersScoreHistogram()", "histogram", stringBuilder.String()) } -func displaySelectionOutcome(contextualLogger logger.Logger, sortedSenders []*txListForSender, selection []*WrappedTransaction) { +func displaySelectionOutcome(contextualLogger logger.Logger, selection []*WrappedTransaction) { if contextualLogger.GetLevel() > logger.LogTrace { return } - if len(sortedSenders) > 0 { - contextualLogger.Trace("displaySelectionOutcome() - senders (as newline-separated JSON):") - contextualLogger.Trace(marshalSendersToNewlineDelimitedJson(sortedSenders)) - } else { - contextualLogger.Trace("displaySelectionOutcome() - senders: none") - } - if len(selection) > 0 { contextualLogger.Trace("displaySelectionOutcome() - transactions (as newline-separated JSON):") contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 41f828b3..9fefc1bd 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -14,7 +14,7 @@ type mergingJob struct { output BunchOfTransactions } -func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) (BunchOfTransactions, []*txListForSender) { +func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) BunchOfTransactions { senders := cache.getSenders() bunches := make([]BunchOfTransactions, 0, len(senders)) @@ -24,7 +24,7 @@ func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) (BunchO mergedBunch := mergeBunchesOfTransactionsInParallel(bunches) selection := selectUntilReachedGasRequested(mergedBunch, gasRequested) - return selection, senders + return selection } func mergeBunchesOfTransactionsInParallel(bunches []BunchOfTransactions) BunchOfTransactions { diff --git a/txcache/txCache.go b/txcache/txCache.go index 253fee13..41f41ef3 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -97,18 +97,18 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { // SelectTransactions selects a reasonably fair list of transactions to be included in the next miniblock // It returns transactions with total gas ~ "gasRequested". 
func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransaction { - senders, transactions := cache.doSelectTransactions( + transactions := cache.doSelectTransactions( logSelect, gasRequested, ) go cache.diagnoseCounters() - go displaySelectionOutcome(logSelect, senders, transactions) + go displaySelectionOutcome(logSelect, transactions) return transactions } -func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRequested uint64) ([]*txListForSender, []*WrappedTransaction) { +func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRequested uint64) []*WrappedTransaction { stopWatch := core.NewStopWatch() stopWatch.Start("selection") @@ -119,7 +119,7 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRe "num senders", cache.CountSenders(), ) - transactions, senders := cache.selectTransactionsUsingMerges(gasRequested) + transactions := cache.selectTransactionsUsingMerges(gasRequested) stopWatch.Stop("selection") @@ -129,7 +129,7 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRe "num txs selected", len(transactions), ) - return senders, transactions + return transactions } func (cache *TxCache) getSenders() []*txListForSender { diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index f60752d1..52344383 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -279,11 +279,11 @@ func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) [][]byte listForSender.accountNonce.Set(nonce) _ = listForSender.accountNonceKnown.SetReturningPrevious() - return listForSender.evictTransactionsWithLowerNoncesNoLock(nonce) + return listForSender.evictTransactionsWithLowerNoncesNoLockReturnEvicted(nonce) } // This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) evictTransactionsWithLowerNoncesNoLock(givenNonce uint64) [][]byte { +func (listForSender *txListForSender) evictTransactionsWithLowerNoncesNoLockReturnEvicted(givenNonce uint64) [][]byte { evictedTxHashes := make([][]byte, 0) for element := listForSender.items.Front(); element != nil; { diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 8fe7344d..08e15bfe 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -183,13 +183,13 @@ func TestListForSender_evictTransactionsWithLowerNoncesNoLock(t *testing.T) { require.Equal(t, 4, list.items.Len()) - list.evictTransactionsWithLowerNoncesNoLock(43) + list.evictTransactionsWithLowerNoncesNoLockReturnEvicted(43) require.Equal(t, 3, list.items.Len()) - list.evictTransactionsWithLowerNoncesNoLock(44) + list.evictTransactionsWithLowerNoncesNoLockReturnEvicted(44) require.Equal(t, 2, list.items.Len()) - list.evictTransactionsWithLowerNoncesNoLock(99) + list.evictTransactionsWithLowerNoncesNoLockReturnEvicted(99) require.Equal(t, 0, list.items.Len()) } From 71de44fe087afcd0c3d5b6acde180f3270043a33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 14:45:16 +0200 Subject: [PATCH 076/124] Optimize / simplify merging. 
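
The rewrite below replaces the pair-up-and-loop merging with a recursive binary split: merge the left half of the bunches, merge the right half, then merge the two results, so each element takes part in a logarithmic number of pairwise merges. In generic form (ints instead of transactions, with a caller-supplied pairwise merge):

    // mergeAll merges any number of already-sorted slices by splitting the set of slices
    // in half, merging each half recursively, and then merging the two partial results.
    func mergeAll(sorted [][]int, mergeTwo func(a, b []int) []int) []int {
        if len(sorted) == 0 {
            return nil
        }
        if len(sorted) == 1 {
            return sorted[0]
        }

        mid := len(sorted) / 2
        return mergeTwo(mergeAll(sorted[:mid], mergeTwo), mergeAll(sorted[mid:], mergeTwo))
    }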
--- txcache/selectionUsingMerges.go | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 9fefc1bd..7cb88406 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -65,24 +65,17 @@ func mergeBunchesOfTransactionsInParallel(bunches []BunchOfTransactions) BunchOf } func mergeBunches(bunches []BunchOfTransactions) BunchOfTransactions { - return mergeTwoBunchesOfBunches(bunches, make([]BunchOfTransactions, 0)) -} - -func mergeTwoBunchesOfBunches(first []BunchOfTransactions, second []BunchOfTransactions) BunchOfTransactions { - if len(first) == 0 && len(second) == 1 { - return second[0] - } - if len(first) == 1 && len(second) == 0 { - return first[0] - } - if len(first) == 0 && len(second) == 0 { + if len(bunches) == 0 { return make(BunchOfTransactions, 0) } + if len(bunches) == 1 { + return bunches[0] + } - return mergeTwoBunches( - mergeTwoBunchesOfBunches(first[0:len(first)/2], first[len(first)/2:]), - mergeTwoBunchesOfBunches(second[0:len(second)/2], second[len(second)/2:]), - ) + mid := len(bunches) / 2 + left := mergeBunches(bunches[:mid]) + right := mergeBunches(bunches[mid:]) + return mergeTwoBunches(left, right) } // Empty bunches are handled. From 9099c66ba7bfef0dd0a396af6f85079ba4eb4160 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 14:50:11 +0200 Subject: [PATCH 077/124] Fix eviction, add some tests. --- txcache/config.go | 24 +++++---- txcache/eviction.go | 81 +++++++++++++++++------------ txcache/txCache_test.go | 109 +++++++++++++++++++++++++++++----------- 3 files changed, 141 insertions(+), 73 deletions(-) diff --git a/txcache/config.go b/txcache/config.go index e9f11475..a0752044 100644 --- a/txcache/config.go +++ b/txcache/config.go @@ -15,17 +15,18 @@ const maxNumBytesUpperBound = 1_073_741_824 // one GB const maxNumItemsPerSenderLowerBound = 1 const maxNumBytesPerSenderLowerBound = maxNumItemsPerSenderLowerBound * 1 const maxNumBytesPerSenderUpperBound = 33_554_432 // 32 MB -const numTxsToPreemptivelyEvictLowerBound = 1 +const numItemsToPreemptivelyEvictLowerBound = 1 // ConfigSourceMe holds cache configuration type ConfigSourceMe struct { - Name string - NumChunks uint32 - EvictionEnabled bool - NumBytesThreshold uint32 - NumBytesPerSenderThreshold uint32 - CountThreshold uint32 - CountPerSenderThreshold uint32 + Name string + NumChunks uint32 + EvictionEnabled bool + NumBytesThreshold uint32 + NumBytesPerSenderThreshold uint32 + CountThreshold uint32 + CountPerSenderThreshold uint32 + NumItemsToPreemptivelyEvict uint32 } type senderConstraints struct { @@ -33,7 +34,6 @@ type senderConstraints struct { maxNumBytes uint32 } -// TODO: Upon further analysis and brainstorming, add some sensible minimum accepted values for the appropriate fields. 
func (config *ConfigSourceMe) verify() error { if len(config.Name) == 0 { return fmt.Errorf("%w: config.Name is invalid", common.ErrInvalidConfig) @@ -54,6 +54,9 @@ func (config *ConfigSourceMe) verify() error { if config.CountThreshold < maxNumItemsLowerBound { return fmt.Errorf("%w: config.CountThreshold is invalid", common.ErrInvalidConfig) } + if config.NumItemsToPreemptivelyEvict < numItemsToPreemptivelyEvictLowerBound { + return fmt.Errorf("%w: config.NumItemsToPreemptivelyEvict is invalid", common.ErrInvalidConfig) + } } return nil @@ -85,7 +88,6 @@ type ConfigDestinationMe struct { NumItemsToPreemptivelyEvict uint32 } -// TODO: Upon further analysis and brainstorming, add some sensible minimum accepted values for the appropriate fields. func (config *ConfigDestinationMe) verify() error { if len(config.Name) == 0 { return fmt.Errorf("%w: config.Name is invalid", common.ErrInvalidConfig) @@ -99,7 +101,7 @@ func (config *ConfigDestinationMe) verify() error { if config.MaxNumBytes < maxNumBytesLowerBound || config.MaxNumBytes > maxNumBytesUpperBound { return fmt.Errorf("%w: config.MaxNumBytes is invalid", common.ErrInvalidConfig) } - if config.NumItemsToPreemptivelyEvict < numTxsToPreemptivelyEvictLowerBound { + if config.NumItemsToPreemptivelyEvict < numItemsToPreemptivelyEvictLowerBound { return fmt.Errorf("%w: config.NumItemsToPreemptivelyEvict is invalid", common.ErrInvalidConfig) } diff --git a/txcache/eviction.go b/txcache/eviction.go index ca930bd0..68394408 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -7,11 +7,12 @@ import ( // evictionJournal keeps a short journal about the eviction process // This is useful for debugging and reasoning about the eviction type evictionJournal struct { - numTxs uint32 + numTxs int + numPasses int } -// doEviction does cache eviction -// We do not allow more evictions to start concurrently +// doEviction does cache eviction. +// We do not allow more evictions to start concurrently. func (cache *TxCache) doEviction() *evictionJournal { if cache.isEvictionInProgress.IsSet() { return nil @@ -53,11 +54,12 @@ func (cache *TxCache) doEviction() *evictionJournal { "evicted txs", evictionJournal.numTxs, ) - return &evictionJournal + return evictionJournal } func (cache *TxCache) isCapacityExceeded() bool { - return cache.areThereTooManyBytes() || cache.areThereTooManySenders() || cache.areThereTooManyTxs() + exceeded := cache.areThereTooManyBytes() || cache.areThereTooManySenders() || cache.areThereTooManyTxs() + return exceeded } func (cache *TxCache) areThereTooManyBytes() bool { @@ -78,7 +80,7 @@ func (cache *TxCache) areThereTooManyTxs() bool { return tooManyTxs } -func (cache *TxCache) evictLeastLikelyToSelectTransactions() evictionJournal { +func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { senders := cache.getSenders() bunches := make([]BunchOfTransactions, 0, len(senders)) @@ -87,42 +89,57 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() evictionJournal { bunches = append(bunches, sender.getTxs()) } - mergedBunch := mergeBunchesOfTransactionsInParallel(bunches) - - // Select a reasonable number of transactions to evict. - transactionsToEvict := mergedBunch[3*len(mergedBunch)/4:] - transactionsToEvictHashes := make([][]byte, len(transactionsToEvict)) + transactions := mergeBunchesOfTransactionsInParallel(bunches) + transactionsHashes := make([][]byte, len(transactions)) - // For each sender, find the "lowest" (in nonce) transaction to evict. 
- lowestToEvictBySender := make(map[string]uint64) + for i, tx := range transactions { + transactionsHashes[i] = tx.TxHash + } - for _, tx := range transactionsToEvict { - transactionsToEvictHashes = append(transactionsToEvictHashes, tx.TxHash) - sender := string(tx.Tx.GetSndAddr()) + journal := &evictionJournal{} - if _, ok := lowestToEvictBySender[sender]; ok { - continue + for pass := 1; cache.isCapacityExceeded(); pass++ { + cutoffIndex := len(transactions) - int(cache.config.NumItemsToPreemptivelyEvict)*pass + if cutoffIndex <= 0 { + cutoffIndex = 0 } - lowestToEvictBySender[sender] = tx.Tx.GetNonce() - } + transactionsToEvict := transactions[cutoffIndex:] + transactionsToEvictHashes := transactionsHashes[cutoffIndex:] + + transactions = transactions[:cutoffIndex] + transactionsHashes = transactionsHashes[:cutoffIndex] + + // For each sender, find the "lowest" (in nonce) transaction to evict. + lowestToEvictBySender := make(map[string]uint64) - // Remove those transactions from "txListBySender". - for sender, nonce := range lowestToEvictBySender { - list, ok := cache.txListBySender.getListForSender(sender) - if !ok { - continue + for _, tx := range transactionsToEvict { + transactionsToEvictHashes = append(transactionsToEvictHashes, tx.TxHash) + sender := string(tx.Tx.GetSndAddr()) + + if _, ok := lowestToEvictBySender[sender]; ok { + continue + } + + lowestToEvictBySender[sender] = tx.Tx.GetNonce() } - list.evictTransactionsWithHigherNonces(nonce - 1) - } + // Remove those transactions from "txListBySender". + for sender, nonce := range lowestToEvictBySender { + list, ok := cache.txListBySender.getListForSender(sender) + if !ok { + continue + } - // Remove those transactions from "txByHash". - cache.txByHash.RemoveTxsBulk(transactionsToEvictHashes) + list.evictTransactionsWithHigherNonces(nonce - 1) + } + + // Remove those transactions from "txByHash". 
+ cache.txByHash.RemoveTxsBulk(transactionsToEvictHashes) - evictionJournal := evictionJournal{ - numTxs: uint32(len(transactionsToEvict)), + journal.numPasses = pass + journal.numTxs += len(transactionsToEvict) } - return evictionJournal + return journal } diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 2047ca80..26be1b17 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -437,41 +437,90 @@ func Test_Keys(t *testing.T) { func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { txGasHandler := txcachemocks.NewTxGasHandlerMock() - config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: 100, - CountPerSenderThreshold: math.MaxUint32, - } - // 11 * 10 - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + t.Run("numSenders = 11, numTransactions = 10, countThreshold = 100, numItemsToPreemptivelyEvict = 1", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 1, + } - addManyTransactionsWithUniformDistribution(cache, 11, 10) - require.LessOrEqual(t, cache.CountTx(), uint64(100)) + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + require.NotNil(t, cache) - config = ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: 250000, - CountPerSenderThreshold: math.MaxUint32, - } + addManyTransactionsWithUniformDistribution(cache, 11, 10) - // 100 * 1000 - cache, err = NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + // Eviction happens if the cache capacity is already exceeded, + // but not if the capacity will be exceeded after the addition. + // Thus, for the given value of "NumItemsToPreemptivelyEvict", there will be "countThreshold" + 1 transactions in the cache. 
+ require.Equal(t, 101, int(cache.CountTx())) + }) + + t.Run("numSenders = 11, numTransactions = 10, countThreshold = 100, numItemsToPreemptivelyEvict = 2", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 2, + } - addManyTransactionsWithUniformDistribution(cache, 100, 1000) - require.LessOrEqual(t, cache.CountTx(), uint64(250000)) + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 11, 10) + require.Equal(t, 100, int(cache.CountTx())) + }) + + t.Run("numSenders = 100, numTransactions = 1000, countThreshold = 250000 (no eviction)", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 250000, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 1, + } + + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 100, 1000) + require.Equal(t, 100000, int(cache.CountTx())) + }) + + t.Run("numSenders = 1000, numTransactions = 500, countThreshold = 250000, NumItemsToPreemptivelyEvict = 50000", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 250000, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 50000, + } + + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 1000, 500) + require.Equal(t, 250000, int(cache.CountTx())) + }) } func Test_NotImplementedFunctions(t *testing.T) { From 688b98531229d543eaf271f59b4bab92c8d5ddd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 15:02:17 +0200 Subject: [PATCH 078/124] Fix call of wg.Done(). --- txcache/selectionUsingMerges.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 7cb88406..8f2fd29f 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -48,7 +48,7 @@ func mergeBunchesOfTransactionsInParallel(bunches []BunchOfTransactions) BunchOf go func(job *mergingJob) { job.output = mergeBunches(job.input) - defer wg.Done() + wg.Done() }(job) } From 5fa37976892e0f9a0902c4dfb33c6e5b4e1dca03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 15:33:50 +0200 Subject: [PATCH 079/124] Tests, trial and error. 
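
Ad-hoc timing for the merging jobs: each job and the final merge are now
measured with a core.StopWatch, and the number of jobs is read from a new
constant, numJobsForMerging (set to 1 while experimenting). For reference,
the timings can be inspected by running the new test, with something along
the lines of (hypothetical invocation, adjust the package path to the checkout):

    go test -v -run Test_mergeBunches ./txcache/...
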
--- txcache/constants.go | 1 + txcache/eviction.go | 2 +- txcache/selectionUsingMerges.go | 46 +++++++++++++++++++++++---------- txcache/selection_test.go | 33 +++++++++++++++++++++++ 4 files changed, 68 insertions(+), 14 deletions(-) diff --git a/txcache/constants.go b/txcache/constants.go index 59a6d97f..448a06b2 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -5,3 +5,4 @@ const maxSenderScore = 100 const diagnosisMaxSendersToDisplay = 1000 const diagnosisMaxTransactionsToDisplay = 10000 const diagnosisSelectionGasRequested = 10_000_000_000 +const numJobsForMerging = 1 diff --git a/txcache/eviction.go b/txcache/eviction.go index 68394408..a30f89f4 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -89,7 +89,7 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { bunches = append(bunches, sender.getTxs()) } - transactions := mergeBunchesOfTransactionsInParallel(bunches) + transactions := mergeBunchesInParallel(bunches, numJobsForMerging) transactionsHashes := make([][]byte, len(transactions)) for i, tx := range transactions { diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 8f2fd29f..98dd1852 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -2,13 +2,14 @@ package txcache import ( "bytes" + "fmt" "sync" + + "github.com/multiversx/mx-chain-core-go/core" ) type BunchOfTransactions []*WrappedTransaction -const numJobsForMerging = 4 - type mergingJob struct { input []BunchOfTransactions output BunchOfTransactions @@ -22,46 +23,65 @@ func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) BunchOf bunches = append(bunches, sender.getTxsWithoutGaps()) } - mergedBunch := mergeBunchesOfTransactionsInParallel(bunches) + mergedBunch := mergeBunchesInParallel(bunches, numJobsForMerging) selection := selectUntilReachedGasRequested(mergedBunch, gasRequested) return selection } -func mergeBunchesOfTransactionsInParallel(bunches []BunchOfTransactions) BunchOfTransactions { - jobs := make([]*mergingJob, numJobsForMerging) +func mergeBunchesInParallel(bunches []BunchOfTransactions, numJobs int) BunchOfTransactions { + jobs := make([]*mergingJob, numJobs) - for i := 0; i < numJobsForMerging; i++ { + for i := 0; i < numJobs; i++ { jobs[i] = &mergingJob{ - input: make([]BunchOfTransactions, 0, len(bunches)/numJobsForMerging), + input: make([]BunchOfTransactions, 0, len(bunches)/numJobs), } } for i, bunch := range bunches { - jobs[i%numJobsForMerging].input = append(jobs[i%numJobsForMerging].input, bunch) + jobs[i%numJobs].input = append(jobs[i%numJobs].input, bunch) } // Run jobs in parallel wg := sync.WaitGroup{} - for _, job := range jobs { + stopWatch := core.NewStopWatch() + + for i, job := range jobs { wg.Add(1) - go func(job *mergingJob) { + go func(job *mergingJob, i int) { + stopWatch.Start(fmt.Sprintf("job %d", i)) + job.output = mergeBunches(job.input) + + stopWatch.Stop(fmt.Sprintf("job %d", i)) + wg.Done() - }(job) + }(job, i) } wg.Wait() // Merge the results of the jobs - outputBunchesOfJobs := make([]BunchOfTransactions, 0, numJobsForMerging) + outputBunchesOfJobs := make([]BunchOfTransactions, 0, numJobs) for _, job := range jobs { outputBunchesOfJobs = append(outputBunchesOfJobs, job.output) } - return mergeBunches(outputBunchesOfJobs) + stopWatch.Start("final merge") + + finalMerge := mergeBunches(outputBunchesOfJobs) + + stopWatch.Stop("final merge") + + for i := 0; i < numJobs; i++ { + fmt.Println("job", i, stopWatch.GetMeasurement(fmt.Sprintf("job 
%d", i))) + } + + fmt.Println("final merge", stopWatch.GetMeasurement("final merge")) + + return finalMerge } func mergeBunches(bunches []BunchOfTransactions) BunchOfTransactions { diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 233028ab..b996c1e5 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -28,3 +28,36 @@ func Test_mergeTwoBunches(t *testing.T) { require.Equal(t, "hash-alice-1", string(merged[1].TxHash)) }) } + +func Test_mergeBunches(t *testing.T) { + t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { + bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + merged := mergeBunches(bunches) + require.Len(t, merged, 1000*1000) + }) + + t.Run("numSenders = 1000, numTransactions = 1000, parallel (4 jobs)", func(t *testing.T) { + bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + merged := mergeBunchesInParallel(bunches, 4) + require.Len(t, merged, 1000*1000) + }) +} + +func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []BunchOfTransactions { + bunches := make([]BunchOfTransactions, 0, nSenders) + + for senderTag := 0; senderTag < nSenders; senderTag++ { + bunch := make(BunchOfTransactions, 0, nTransactionsPerSender) + sender := createFakeSenderAddress(senderTag) + + for txNonce := nTransactionsPerSender; txNonce > 0; txNonce-- { + transactionHash := createFakeTxHash(sender, txNonce) + transaction := createTx(transactionHash, string(sender), uint64(txNonce)) + bunch = append(bunch, transaction) + } + + bunches = append(bunches, bunch) + } + + return bunches +} From 0b2e20135226b493e1cb00efa415e3edb0f7b7c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 16:08:13 +0200 Subject: [PATCH 080/124] Additional testing. 
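
More stopwatch-based measurements: Test_mergeBunches now reports its timings,
and a new test exercises selectTransactionsFromBunchesUsingHeap on the same
1000 x 1000 input. As a rough sanity check on the expected selection size,
assuming each generated transfer carries the base 50_000 gas limit:

    10_000_000_000 gas requested / 50_000 gas per transaction = 200_000 transactions

Whether the transaction that crosses the requested gas is also kept decides
between 200_000 and 200_001 selected transactions.
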
--- txcache/selection_test.go | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/txcache/selection_test.go b/txcache/selection_test.go index b996c1e5..f29f15eb 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -1,8 +1,10 @@ package txcache import ( + "fmt" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/stretchr/testify/require" ) @@ -30,17 +32,49 @@ func Test_mergeTwoBunches(t *testing.T) { } func Test_mergeBunches(t *testing.T) { + sw := core.NewStopWatch() + t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + + sw.Start(t.Name()) merged := mergeBunches(bunches) + sw.Stop(t.Name()) + require.Len(t, merged, 1000*1000) }) t.Run("numSenders = 1000, numTransactions = 1000, parallel (4 jobs)", func(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + + sw.Start(t.Name()) merged := mergeBunchesInParallel(bunches, 4) + sw.Stop(t.Name()) + require.Len(t, merged, 1000*1000) }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } +} + +func TestTxCache_selectTransactionsFromBunchesUsingHeap(t *testing.T) { + sw := core.NewStopWatch() + + t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { + bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + + sw.Start(t.Name()) + merged := selectTransactionsFromBunchesUsingHeap(bunches, 10_000_000_000) + sw.Stop(t.Name()) + + require.Equal(t, 200001, len(merged)) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } } func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []BunchOfTransactions { From 65d495392ddbc01270a19d383ea545e1308c3bb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 17:37:28 +0200 Subject: [PATCH 081/124] selectTransactionsUsingHeap(). --- txcache/selectionUsingHeap.go | 94 +++++++++++++++++++++++++++++++++++ txcache/txCache.go | 2 +- 2 files changed, 95 insertions(+), 1 deletion(-) create mode 100644 txcache/selectionUsingHeap.go diff --git a/txcache/selectionUsingHeap.go b/txcache/selectionUsingHeap.go new file mode 100644 index 00000000..d3cf592d --- /dev/null +++ b/txcache/selectionUsingHeap.go @@ -0,0 +1,94 @@ +package txcache + +import "container/heap" + +func (cache *TxCache) selectTransactionsUsingHeap(gasRequested uint64) BunchOfTransactions { + senders := cache.getSenders() + bunches := make([]BunchOfTransactions, 0, len(senders)) + + for _, sender := range senders { + bunches = append(bunches, sender.getTxsWithoutGaps()) + } + + return selectTransactionsFromBunchesUsingHeap(bunches, gasRequested) +} + +func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRequested uint64) BunchOfTransactions { + selectedTransactions := make(BunchOfTransactions, 0, 30000) + + // Items popped from the heap are added to "selectedTransactions". + transactionsHeap := &TransactionHeap{} + heap.Init(transactionsHeap) + + // Initialize the heap with the first transaction of each bunch + for i, bunch := range bunches { + if len(bunch) == 0 { + // Some senders may have no eligible transactions (initial gaps). 
+ continue + } + + heap.Push(transactionsHeap, &HeapItem{ + bunchIndex: i, + transactionIndex: 0, + transaction: bunch[0], + }) + } + + accumulatedGas := uint64(0) + + // Select transactions (sorted). + for transactionsHeap.Len() > 0 { + // Always pick the best transaction. + item := heap.Pop(transactionsHeap).(*HeapItem) + + accumulatedGas += item.transaction.Tx.GetGasLimit() + if accumulatedGas > gasRequested { + break + } + + selectedTransactions = append(selectedTransactions, item.transaction) + + // If there are more transactions in the same bunch (same sender as the popped item), + // add the next one to the heap (to compete with the others). + item.transactionIndex++ + + if item.transactionIndex < len(bunches[item.bunchIndex]) { + item.transaction = bunches[item.bunchIndex][item.transactionIndex] + heap.Push(transactionsHeap, item) + } + } + + return selectedTransactions +} + +type HeapItem struct { + bunchIndex int + transactionIndex int + transaction *WrappedTransaction +} + +type TransactionHeap []*HeapItem + +func (h TransactionHeap) Len() int { return len(h) } + +func (h TransactionHeap) Less(i, j int) bool { + return isTransactionGreater(h[i].transaction, h[j].transaction) +} + +func (h TransactionHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *TransactionHeap) Push(x interface{}) { + *h = append(*h, x.(*HeapItem)) +} + +func (h *TransactionHeap) Pop() interface{} { + // Standard code when storing the heap in a slice: + // https://pkg.go.dev/container/heap + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} diff --git a/txcache/txCache.go b/txcache/txCache.go index 41f41ef3..93dd8f3e 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -119,7 +119,7 @@ func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRe "num senders", cache.CountSenders(), ) - transactions := cache.selectTransactionsUsingMerges(gasRequested) + transactions := cache.selectTransactionsUsingHeap(gasRequested) stopWatch.Stop("selection") From 4a9fa7109f5b9972c0159fc27acf683fc255c328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 17:54:28 +0200 Subject: [PATCH 082/124] Refactor; isTransactionMoreDesirableByProtocol(). 
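
isTransactionGreater moves from selectionUsingMerges.go onto WrappedTransaction,
under a name that spells out the intent: the protocol prefers the transaction
with the better price per gas unit, then the higher gas price, then the lower
gas limit, and finally falls back to the transaction hash as a deterministic
tie-breaker. A minimal sketch of the method in use (it reuses the existing test
helpers of this package and is not part of the diff below):

    func TestWrappedTransaction_desirability_sketch(t *testing.T) {
        txGasHandler := txcachemocks.NewTxGasHandlerMock()

        a := createTx([]byte("hash-a"), "a", 1).withGasPrice(oneBillion + 1)
        b := createTx([]byte("hash-b"), "b", 1).withGasPrice(oneBillion)

        a.computePricePerGasUnit(txGasHandler)
        b.computePricePerGasUnit(txGasHandler)

        // "a" pays more per gas unit, so the protocol prefers it.
        require.True(t, a.isTransactionMoreDesirableByProtocol(b))
        require.False(t, b.isTransactionMoreDesirableByProtocol(a))
    }
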
--- txcache/selectionUsingHeap.go | 2 +- txcache/selectionUsingMerges.go | 58 +-------------------------------- txcache/wrappedTransaction.go | 45 ++++++++++++++++++++++--- 3 files changed, 42 insertions(+), 63 deletions(-) diff --git a/txcache/selectionUsingHeap.go b/txcache/selectionUsingHeap.go index d3cf592d..d12ff3be 100644 --- a/txcache/selectionUsingHeap.go +++ b/txcache/selectionUsingHeap.go @@ -72,7 +72,7 @@ type TransactionHeap []*HeapItem func (h TransactionHeap) Len() int { return len(h) } func (h TransactionHeap) Less(i, j int) bool { - return isTransactionGreater(h[i].transaction, h[j].transaction) + return h[i].transaction.isTransactionMoreDesirableByProtocol(h[j].transaction) } func (h TransactionHeap) Swap(i, j int) { diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go index 98dd1852..3c4ad523 100644 --- a/txcache/selectionUsingMerges.go +++ b/txcache/selectionUsingMerges.go @@ -1,11 +1,7 @@ package txcache import ( - "bytes" - "fmt" "sync" - - "github.com/multiversx/mx-chain-core-go/core" ) type BunchOfTransactions []*WrappedTransaction @@ -44,18 +40,11 @@ func mergeBunchesInParallel(bunches []BunchOfTransactions, numJobs int) BunchOfT // Run jobs in parallel wg := sync.WaitGroup{} - stopWatch := core.NewStopWatch() - for i, job := range jobs { wg.Add(1) go func(job *mergingJob, i int) { - stopWatch.Start(fmt.Sprintf("job %d", i)) - job.output = mergeBunches(job.input) - - stopWatch.Stop(fmt.Sprintf("job %d", i)) - wg.Done() }(job, i) } @@ -69,18 +58,7 @@ func mergeBunchesInParallel(bunches []BunchOfTransactions, numJobs int) BunchOfT outputBunchesOfJobs = append(outputBunchesOfJobs, job.output) } - stopWatch.Start("final merge") - finalMerge := mergeBunches(outputBunchesOfJobs) - - stopWatch.Stop("final merge") - - for i := 0; i < numJobs; i++ { - fmt.Println("job", i, stopWatch.GetMeasurement(fmt.Sprintf("job %d", i))) - } - - fmt.Println("final merge", stopWatch.GetMeasurement("final merge")) - return finalMerge } @@ -109,7 +87,7 @@ func mergeTwoBunches(first BunchOfTransactions, second BunchOfTransactions) Bunc a := first[firstIndex] b := second[secondIndex] - if isTransactionGreater(a, b) { + if a.isTransactionMoreDesirableByProtocol(b) { result = append(result, a) firstIndex++ } else { @@ -125,40 +103,6 @@ func mergeTwoBunches(first BunchOfTransactions, second BunchOfTransactions) Bunc return result } -// Equality is out of scope (not possible in our case). 
-func isTransactionGreater(transaction *WrappedTransaction, otherTransaction *WrappedTransaction) bool { - // First, compare by price per unit - ppuQuotient := transaction.PricePerGasUnitQuotient - ppuQuotientOther := otherTransaction.PricePerGasUnitQuotient - if ppuQuotient != ppuQuotientOther { - return ppuQuotient > ppuQuotientOther - } - - ppuRemainder := transaction.PricePerGasUnitRemainder - ppuRemainderOther := otherTransaction.PricePerGasUnitRemainder - if ppuRemainder != ppuRemainderOther { - return ppuRemainder > ppuRemainderOther - } - - // Then, compare by gas price (to promote the practice of a higher gas price) - gasPrice := transaction.Tx.GetGasPrice() - gasPriceOther := otherTransaction.Tx.GetGasPrice() - if gasPrice != gasPriceOther { - return gasPrice > gasPriceOther - } - - // Then, compare by gas limit (promote the practice of lower gas limit) - // Compare Gas Limits (promote lower gas limit) - gasLimit := transaction.Tx.GetGasLimit() - gasLimitOther := otherTransaction.Tx.GetGasLimit() - if gasLimit != gasLimitOther { - return gasLimit < gasLimitOther - } - - // In the end, compare by transaction hash - return bytes.Compare(transaction.TxHash, otherTransaction.TxHash) > 0 -} - func selectUntilReachedGasRequested(bunch BunchOfTransactions, gasRequested uint64) BunchOfTransactions { accumulatedGas := uint64(0) diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index f8b4d641..155722cc 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -1,6 +1,7 @@ package txcache import ( + "bytes" "math/big" "github.com/multiversx/mx-chain-core-go/data" @@ -19,14 +20,48 @@ type WrappedTransaction struct { } // computePricePerGasUnit computes (and caches) the (average) price per gas unit. -func (wrappedTx *WrappedTransaction) computePricePerGasUnit(txGasHandler TxGasHandler) { - fee := txGasHandler.ComputeTxFee(wrappedTx.Tx) - gasLimit := big.NewInt(0).SetUint64(wrappedTx.Tx.GetGasLimit()) +func (transaction *WrappedTransaction) computePricePerGasUnit(txGasHandler TxGasHandler) { + fee := txGasHandler.ComputeTxFee(transaction.Tx) + gasLimit := big.NewInt(0).SetUint64(transaction.Tx.GetGasLimit()) quotient := new(big.Int) remainder := new(big.Int) quotient, remainder = quotient.QuoRem(fee, gasLimit, remainder) - wrappedTx.PricePerGasUnitQuotient = quotient.Uint64() - wrappedTx.PricePerGasUnitRemainder = remainder.Uint64() + transaction.PricePerGasUnitQuotient = quotient.Uint64() + transaction.PricePerGasUnitRemainder = remainder.Uint64() +} + +// Equality is out of scope (not possible in our case). 
+func (transaction *WrappedTransaction) isTransactionMoreDesirableByProtocol(otherTransaction *WrappedTransaction) bool { + // First, compare by price per unit + ppuQuotient := transaction.PricePerGasUnitQuotient + ppuQuotientOther := otherTransaction.PricePerGasUnitQuotient + if ppuQuotient != ppuQuotientOther { + return ppuQuotient > ppuQuotientOther + } + + ppuRemainder := transaction.PricePerGasUnitRemainder + ppuRemainderOther := otherTransaction.PricePerGasUnitRemainder + if ppuRemainder != ppuRemainderOther { + return ppuRemainder > ppuRemainderOther + } + + // Then, compare by gas price (to promote the practice of a higher gas price) + gasPrice := transaction.Tx.GetGasPrice() + gasPriceOther := otherTransaction.Tx.GetGasPrice() + if gasPrice != gasPriceOther { + return gasPrice > gasPriceOther + } + + // Then, compare by gas limit (promote the practice of lower gas limit) + // Compare Gas Limits (promote lower gas limit) + gasLimit := transaction.Tx.GetGasLimit() + gasLimitOther := otherTransaction.Tx.GetGasLimit() + if gasLimit != gasLimitOther { + return gasLimit < gasLimitOther + } + + // In the end, compare by transaction hash + return bytes.Compare(transaction.TxHash, otherTransaction.TxHash) > 0 } From 5a3fb748757a9a07a0406b0f25699f1a96125a2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 19:51:12 +0200 Subject: [PATCH 083/124] More unit tests. --- txcache/wrappedTransaction_test.go | 45 +++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index 4794bc61..fec82681 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -7,10 +7,47 @@ import ( "github.com/stretchr/testify/require" ) -func Test_computeTxFee(t *testing.T) { +func TestWrappedTransaction_computePricePerGasUnit(t *testing.T) { txGasHandler := txcachemocks.NewTxGasHandlerMock() - tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) - tx.computeFee(txGasHandler) - require.Equal(t, "51500000000000", tx.TxFee.String()) + t.Run("only move balance gas limit", func(t *testing.T) { + tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) + tx.computePricePerGasUnit(txGasHandler) + + require.Equal(t, oneBillion, int(tx.PricePerGasUnitQuotient)) + require.Equal(t, 0, int(tx.PricePerGasUnitRemainder)) + }) + + t.Run("move balance gas limit and execution gas limit (1)", func(t *testing.T) { + tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) + tx.computePricePerGasUnit(txGasHandler) + + require.Equal(t, 999980777, int(tx.PricePerGasUnitQuotient)) + require.Equal(t, 3723, int(tx.PricePerGasUnitRemainder)) + }) + + t.Run("move balance gas limit and execution gas limit (2)", func(t *testing.T) { + tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(oneMilion).withGasPrice(oneBillion) + tx.computePricePerGasUnit(txGasHandler) + + actualFee := 51500*oneBillion + (oneMilion-51500)*oneBillion/100 + require.Equal(t, 60985000000000, actualFee) + + require.Equal(t, actualFee/oneMilion, int(tx.PricePerGasUnitQuotient)) + require.Equal(t, 0, int(tx.PricePerGasUnitRemainder)) + }) +} + +func TestWrappedTransaction_isTransactionMoreDesirableByProtocol(t *testing.T) { + txGasHandler := txcachemocks.NewTxGasHandlerMock() + + t.Run("decide by price per unit", func(t *testing.T) { + a := 
createTx([]byte("a-1"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) + a.computePricePerGasUnit(txGasHandler) + + b := createTx([]byte("b-1"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) + b.computePricePerGasUnit(txGasHandler) + + require.True(t, a.isTransactionMoreDesirableByProtocol(b)) + }) } From 4b1ec6d6b9552b50ff6855dd729846438213ccaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 20:14:33 +0200 Subject: [PATCH 084/124] More unit tests. --- txcache/wrappedTransaction_test.go | 31 ++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index fec82681..ac75103b 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -50,4 +50,35 @@ func TestWrappedTransaction_isTransactionMoreDesirableByProtocol(t *testing.T) { require.True(t, a.isTransactionMoreDesirableByProtocol(b)) }) + + t.Run("decide by gas price (set them up to have the same PPU)", func(t *testing.T) { + a := createTx([]byte("a-2"), "a", 1).withGasPrice(oneBillion + 1) + b := createTx([]byte("b-2"), "b", 1).withGasPrice(oneBillion) + + a.PricePerGasUnitQuotient = 42 + b.PricePerGasUnitQuotient = 42 + a.PricePerGasUnitRemainder = 0 + b.PricePerGasUnitRemainder = 0 + + require.True(t, a.isTransactionMoreDesirableByProtocol(b)) + }) + + t.Run("decide by gas limit (set them up to have the same PPU and gas price)", func(t *testing.T) { + a := createTx([]byte("a-2"), "a", 1).withGasLimit(55000) + b := createTx([]byte("b-2"), "b", 1).withGasLimit(60000) + + a.PricePerGasUnitQuotient = 42 + b.PricePerGasUnitQuotient = 42 + a.PricePerGasUnitRemainder = 0 + b.PricePerGasUnitRemainder = 0 + + require.True(t, a.isTransactionMoreDesirableByProtocol(b)) + }) + + t.Run("decide by transaction hash (set them up to have the same PPU, gas price and gas limit)", func(t *testing.T) { + a := createTx([]byte("a-2"), "a", 1) + b := createTx([]byte("b-2"), "b", 1) + + require.True(t, b.isTransactionMoreDesirableByProtocol(a)) + }) } From 8d28cc6a15c998b3c24fd1f347169cb02aec36f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 20:28:46 +0200 Subject: [PATCH 085/124] Refactor. Split min/max heaps. 
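
The heap machinery gets its own file: a TransactionsMaxHeap that pops the most
desirable transaction first (used by selection) and a mirrored TransactionsMinHeap
that pops the least desirable one first, both built on TransactionsHeapItem.
A small sketch of the max-heap behaviour (same package; container/heap and the
existing test helpers are assumed; not part of the diff below):

    func TestTransactionsMaxHeap_sketch(t *testing.T) {
        txGasHandler := txcachemocks.NewTxGasHandlerMock()

        cheap := createTx([]byte("hash-cheap"), "a", 1).withGasPrice(oneBillion)
        generous := createTx([]byte("hash-generous"), "b", 1).withGasPrice(2 * oneBillion)
        cheap.computePricePerGasUnit(txGasHandler)
        generous.computePricePerGasUnit(txGasHandler)

        maxHeap := &TransactionsMaxHeap{}
        heap.Init(maxHeap)
        heap.Push(maxHeap, &TransactionsHeapItem{transaction: cheap})
        heap.Push(maxHeap, &TransactionsHeapItem{transaction: generous})

        // The better-paying transaction comes out first.
        first := heap.Pop(maxHeap).(*TransactionsHeapItem)
        require.Equal(t, "hash-generous", string(first.transaction.TxHash))
    }
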
--- txcache/heaps.go | 58 +++++++++++++++++++++++++++++++++++ txcache/selectionUsingHeap.go | 47 ++++++---------------------- 2 files changed, 67 insertions(+), 38 deletions(-) create mode 100644 txcache/heaps.go diff --git a/txcache/heaps.go b/txcache/heaps.go new file mode 100644 index 00000000..bde84f94 --- /dev/null +++ b/txcache/heaps.go @@ -0,0 +1,58 @@ +package txcache + +type TransactionsHeapItem struct { + senderIndex int + transactionIndex int + transaction *WrappedTransaction +} + +type TransactionsMaxHeap []*TransactionsHeapItem +type TransactionsMinHeap []*TransactionsHeapItem + +func (maxHeap TransactionsMaxHeap) Len() int { return len(maxHeap) } + +func (maxHeap TransactionsMaxHeap) Less(i, j int) bool { + return maxHeap[i].transaction.isTransactionMoreDesirableByProtocol(maxHeap[j].transaction) +} + +func (maxHeap TransactionsMaxHeap) Swap(i, j int) { + maxHeap[i], maxHeap[j] = maxHeap[j], maxHeap[i] +} + +func (maxHeap *TransactionsMaxHeap) Push(x interface{}) { + *maxHeap = append(*maxHeap, x.(*TransactionsHeapItem)) +} + +func (maxHeap *TransactionsMaxHeap) Pop() interface{} { + // Standard code when storing the heap in a slice: + // https://pkg.go.dev/container/heap + old := *maxHeap + n := len(old) + item := old[n-1] + *maxHeap = old[0 : n-1] + return item +} + +func (minHeap TransactionsMinHeap) Len() int { return len(minHeap) } + +func (minHeap TransactionsMinHeap) Less(i, j int) bool { + return minHeap[j].transaction.isTransactionMoreDesirableByProtocol(minHeap[i].transaction) +} + +func (minHeap TransactionsMinHeap) Swap(i, j int) { + minHeap[i], minHeap[j] = minHeap[j], minHeap[i] +} + +func (minHeap *TransactionsMinHeap) Push(x interface{}) { + *minHeap = append(*minHeap, x.(*TransactionsHeapItem)) +} + +func (minHeap *TransactionsMinHeap) Pop() interface{} { + // Standard code when storing the heap in a slice: + // https://pkg.go.dev/container/heap + old := *minHeap + n := len(old) + item := old[n-1] + *minHeap = old[0 : n-1] + return item +} diff --git a/txcache/selectionUsingHeap.go b/txcache/selectionUsingHeap.go index d12ff3be..1d92d9c7 100644 --- a/txcache/selectionUsingHeap.go +++ b/txcache/selectionUsingHeap.go @@ -17,7 +17,7 @@ func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRe selectedTransactions := make(BunchOfTransactions, 0, 30000) // Items popped from the heap are added to "selectedTransactions". - transactionsHeap := &TransactionHeap{} + transactionsHeap := &TransactionsMaxHeap{} heap.Init(transactionsHeap) // Initialize the heap with the first transaction of each bunch @@ -27,8 +27,10 @@ func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRe continue } - heap.Push(transactionsHeap, &HeapItem{ - bunchIndex: i, + // Items are reused (see below). + // Each sender gets one (and only one) item in the heap. + heap.Push(transactionsHeap, &TransactionsHeapItem{ + senderIndex: i, transactionIndex: 0, transaction: bunch[0], }) @@ -39,7 +41,7 @@ func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRe // Select transactions (sorted). for transactionsHeap.Len() > 0 { // Always pick the best transaction. - item := heap.Pop(transactionsHeap).(*HeapItem) + item := heap.Pop(transactionsHeap).(*TransactionsHeapItem) accumulatedGas += item.transaction.Tx.GetGasLimit() if accumulatedGas > gasRequested { @@ -52,43 +54,12 @@ func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRe // add the next one to the heap (to compete with the others). 
item.transactionIndex++ - if item.transactionIndex < len(bunches[item.bunchIndex]) { - item.transaction = bunches[item.bunchIndex][item.transactionIndex] + if item.transactionIndex < len(bunches[item.senderIndex]) { + // Items are reused (same originating sender). + item.transaction = bunches[item.senderIndex][item.transactionIndex] heap.Push(transactionsHeap, item) } } return selectedTransactions } - -type HeapItem struct { - bunchIndex int - transactionIndex int - transaction *WrappedTransaction -} - -type TransactionHeap []*HeapItem - -func (h TransactionHeap) Len() int { return len(h) } - -func (h TransactionHeap) Less(i, j int) bool { - return h[i].transaction.isTransactionMoreDesirableByProtocol(h[j].transaction) -} - -func (h TransactionHeap) Swap(i, j int) { - h[i], h[j] = h[j], h[i] -} - -func (h *TransactionHeap) Push(x interface{}) { - *h = append(*h, x.(*HeapItem)) -} - -func (h *TransactionHeap) Pop() interface{} { - // Standard code when storing the heap in a slice: - // https://pkg.go.dev/container/heap - old := *h - n := len(old) - item := old[n-1] - *h = old[0 : n-1] - return item -} From 7565d6e115101ada2afd9f67fb957ce3c012617e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 23:13:09 +0200 Subject: [PATCH 086/124] Fix config, fix tests. --- txcache/config.go | 21 +++++----- txcache/selectionUsingHeap.go | 6 +-- txcache/slices.go | 7 ++++ txcache/txCache.go | 4 +- txcache/txCache_test.go | 73 ++++++++++++++++++++++------------- txcache/txListForSender.go | 4 +- 6 files changed, 71 insertions(+), 44 deletions(-) create mode 100644 txcache/slices.go diff --git a/txcache/config.go b/txcache/config.go index a0752044..d02c2142 100644 --- a/txcache/config.go +++ b/txcache/config.go @@ -15,7 +15,7 @@ const maxNumBytesUpperBound = 1_073_741_824 // one GB const maxNumItemsPerSenderLowerBound = 1 const maxNumBytesPerSenderLowerBound = maxNumItemsPerSenderLowerBound * 1 const maxNumBytesPerSenderUpperBound = 33_554_432 // 32 MB -const numItemsToPreemptivelyEvictLowerBound = 1 +const numItemsToPreemptivelyEvictLowerBound = uint32(1) // ConfigSourceMe holds cache configuration type ConfigSourceMe struct { @@ -47,16 +47,15 @@ func (config *ConfigSourceMe) verify() error { if config.CountPerSenderThreshold < maxNumItemsPerSenderLowerBound { return fmt.Errorf("%w: config.CountPerSenderThreshold is invalid", common.ErrInvalidConfig) } - if config.EvictionEnabled { - if config.NumBytesThreshold < maxNumBytesLowerBound || config.NumBytesThreshold > maxNumBytesUpperBound { - return fmt.Errorf("%w: config.NumBytesThreshold is invalid", common.ErrInvalidConfig) - } - if config.CountThreshold < maxNumItemsLowerBound { - return fmt.Errorf("%w: config.CountThreshold is invalid", common.ErrInvalidConfig) - } - if config.NumItemsToPreemptivelyEvict < numItemsToPreemptivelyEvictLowerBound { - return fmt.Errorf("%w: config.NumItemsToPreemptivelyEvict is invalid", common.ErrInvalidConfig) - } + + if config.NumBytesThreshold < maxNumBytesLowerBound || config.NumBytesThreshold > maxNumBytesUpperBound { + return fmt.Errorf("%w: config.NumBytesThreshold is invalid", common.ErrInvalidConfig) + } + if config.CountThreshold < maxNumItemsLowerBound { + return fmt.Errorf("%w: config.CountThreshold is invalid", common.ErrInvalidConfig) + } + if config.NumItemsToPreemptivelyEvict < numItemsToPreemptivelyEvictLowerBound { + return fmt.Errorf("%w: config.NumItemsToPreemptivelyEvict is invalid", common.ErrInvalidConfig) } return nil diff --git a/txcache/selectionUsingHeap.go 
b/txcache/selectionUsingHeap.go index 1d92d9c7..553bde72 100644 --- a/txcache/selectionUsingHeap.go +++ b/txcache/selectionUsingHeap.go @@ -13,6 +13,7 @@ func (cache *TxCache) selectTransactionsUsingHeap(gasRequested uint64) BunchOfTr return selectTransactionsFromBunchesUsingHeap(bunches, gasRequested) } +// Selection tolerates concurrent transaction additions / removals. func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRequested uint64) BunchOfTransactions { selectedTransactions := make(BunchOfTransactions, 0, 30000) @@ -27,8 +28,7 @@ func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRe continue } - // Items are reused (see below). - // Each sender gets one (and only one) item in the heap. + // Items will be reused (see below). Each sender gets one (and only one) item in the heap. heap.Push(transactionsHeap, &TransactionsHeapItem{ senderIndex: i, transactionIndex: 0, @@ -55,7 +55,7 @@ func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRe item.transactionIndex++ if item.transactionIndex < len(bunches[item.senderIndex]) { - // Items are reused (same originating sender). + // Item is reused (same originating sender), pushed back on the heap. item.transaction = bunches[item.senderIndex][item.transactionIndex] heap.Push(transactionsHeap, item) } diff --git a/txcache/slices.go b/txcache/slices.go new file mode 100644 index 00000000..4f6c7c6f --- /dev/null +++ b/txcache/slices.go @@ -0,0 +1,7 @@ +package txcache + +func reverseSlice[T any](s []T) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/txcache/txCache.go b/txcache/txCache.go index 93dd8f3e..3f4ad7d9 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -270,9 +270,9 @@ func (cache *TxCache) Keys() [][]byte { return cache.txByHash.keys() } -// MaxSize is not implemented +// MaxSize returns the maximum number of transactions that can be stored in the cache. 
+// See: https://github.com/multiversx/mx-chain-go/blob/v1.8.4/dataRetriever/txpool/shardedTxPool.go#L55 func (cache *TxCache) MaxSize() int { - // TODO: Should be analyzed if the returned value represents the max size of one cache in sharded cache configuration return int(cache.config.CountThreshold) } diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 26be1b17..132535cd 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -19,20 +19,14 @@ import ( func Test_NewTxCache(t *testing.T) { config := ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - } - - withEvictionConfig := ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, - EvictionEnabled: true, - NumBytesThreshold: maxNumBytesUpperBound, - CountThreshold: math.MaxUint32, + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, } txGasHandler := txcachemocks.NewTxGasHandlerMock() @@ -62,11 +56,11 @@ func Test_NewTxCache(t *testing.T) { require.Nil(t, cache) require.Equal(t, common.ErrNilTxGasHandler, err) - badConfig = withEvictionConfig + badConfig = config badConfig.NumBytesThreshold = 0 requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumBytesThreshold", txGasHandler) - badConfig = withEvictionConfig + badConfig = config badConfig.CountThreshold = 0 requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.CountThreshold", txGasHandler) } @@ -462,6 +456,26 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { require.Equal(t, 101, int(cache.CountTx())) }) + t.Run("numSenders = 3, numTransactions = 5, countThreshold = 4, numItemsToPreemptivelyEvict = 3", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 4, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 3, + } + + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 3, 5) + require.Equal(t, 3, int(cache.CountTx())) + }) + t.Run("numSenders = 11, numTransactions = 10, countThreshold = 100, numItemsToPreemptivelyEvict = 2", func(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", @@ -511,7 +525,7 @@ func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, CountThreshold: 250000, CountPerSenderThreshold: math.MaxUint32, - NumItemsToPreemptivelyEvict: 50000, + NumItemsToPreemptivelyEvict: 10000, } cache, err := NewTxCache(config, txGasHandler) @@ -534,7 +548,6 @@ func Test_NotImplementedFunctions(t *testing.T) { require.False(t, added) require.NotPanics(t, func() { cache.RegisterHandler(nil, "") }) - require.Zero(t, cache.MaxSize()) err := cache.Close() require.Nil(t, err) @@ -706,10 +719,14 @@ func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *t func newUnconstrainedCacheToTest() *TxCache { txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := 
NewTxCache(ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountPerSenderThreshold: math.MaxUint32, + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, }, txGasHandler) if err != nil { panic(fmt.Sprintf("newUnconstrainedCacheToTest(): %s", err)) @@ -721,10 +738,14 @@ func newUnconstrainedCacheToTest() *TxCache { func newCacheToTest(numBytesPerSenderThreshold uint32, countPerSenderThreshold uint32) *TxCache { txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(ConfigSourceMe{ - Name: "test", - NumChunks: 16, - NumBytesPerSenderThreshold: numBytesPerSenderThreshold, - CountPerSenderThreshold: countPerSenderThreshold, + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: numBytesPerSenderThreshold, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: countPerSenderThreshold, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, }, txGasHandler) if err != nil { panic(fmt.Sprintf("newCacheToTest(): %s", err)) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 52344383..21f05641 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -306,7 +306,7 @@ func (listForSender *txListForSender) evictTransactionsWithLowerNoncesNoLockRetu return evictedTxHashes } -func (listForSender *txListForSender) evictTransactionsWithHigherNonces(givenNonce uint64) { +func (listForSender *txListForSender) evictTransactionsWithHigherOrEqualNonces(givenNonce uint64) { listForSender.mutex.Lock() defer listForSender.mutex.Unlock() @@ -314,7 +314,7 @@ func (listForSender *txListForSender) evictTransactionsWithHigherNonces(givenNon tx := element.Value.(*WrappedTransaction) txNonce := tx.Tx.GetNonce() - if txNonce <= givenNonce { + if txNonce < givenNonce { break } From a6faf2e4bf3008f61daf8d3f60d8b34932ddba87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 23:13:31 +0200 Subject: [PATCH 087/124] Reimplement eviction, using min heap. --- txcache/eviction.go | 86 ++++++++++++++++++++++++++++++--------------- 1 file changed, 58 insertions(+), 28 deletions(-) diff --git a/txcache/eviction.go b/txcache/eviction.go index a30f89f4..9bf9c53b 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -1,14 +1,16 @@ package txcache import ( + "container/heap" + "github.com/multiversx/mx-chain-core-go/core" ) // evictionJournal keeps a short journal about the eviction process // This is useful for debugging and reasoning about the eviction type evictionJournal struct { - numTxs int - numPasses int + numEvicted int + numEvictedByPass []int } // doEviction does cache eviction. @@ -51,7 +53,7 @@ func (cache *TxCache) doEviction() *evictionJournal { "num now", cache.CountTx(), "num senders", cache.CountSenders(), "duration", stopWatch.GetMeasurement("eviction"), - "evicted txs", evictionJournal.numTxs, + "evicted txs", evictionJournal.numEvicted, ) return evictionJournal @@ -80,47 +82,75 @@ func (cache *TxCache) areThereTooManyTxs() bool { return tooManyTxs } +// Eviction tolerates concurrent transaction additions / removals. 
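+// It proceeds in passes:
+//   - transactions are laid out in per-sender bunches, reversed so that a sender's
+//     highest-nonce transactions are considered first;
+//   - a min-heap holds one candidate per sender (the sender's highest not-yet-considered
+//     nonce), and the least desirable of these candidates is popped first;
+//   - each pass evicts up to "NumItemsToPreemptivelyEvict" of the popped transactions,
+//     removing them from "txListBySender" (by nonce) and from "txByHash";
+//   - passes repeat until the cache capacity is no longer exceeded.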
func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { senders := cache.getSenders() bunches := make([]BunchOfTransactions, 0, len(senders)) for _, sender := range senders { // Include transactions after gaps, as well (important), unlike when selecting transactions for processing. - bunches = append(bunches, sender.getTxs()) - } - - transactions := mergeBunchesInParallel(bunches, numJobsForMerging) - transactionsHashes := make([][]byte, len(transactions)) + bunch := sender.getTxs() + // Reverse the order of transactions (will come in handy later, when creating the min-heap). + reverseSlice(bunch) - for i, tx := range transactions { - transactionsHashes[i] = tx.TxHash + bunches = append(bunches, bunch) } journal := &evictionJournal{} - for pass := 1; cache.isCapacityExceeded(); pass++ { - cutoffIndex := len(transactions) - int(cache.config.NumItemsToPreemptivelyEvict)*pass - if cutoffIndex <= 0 { - cutoffIndex = 0 + // Heap is reused among passes. + // Items popped from the heap are added to "transactionsToEvict" (slice is re-created in each pass). + transactionsHeap := &TransactionsMinHeap{} + heap.Init(transactionsHeap) + + // Initialize the heap with the first transaction of each bunch + for i, bunch := range bunches { + if len(bunch) == 0 { + // Some senders may have no transaction anymore (hazardous concurrent removals). + continue } - transactionsToEvict := transactions[cutoffIndex:] - transactionsToEvictHashes := transactionsHashes[cutoffIndex:] + // Items will be reused (see below). Each sender gets one (and only one) item in the heap. + heap.Push(transactionsHeap, &TransactionsHeapItem{ + senderIndex: i, + transactionIndex: 0, + transaction: bunch[0], + }) + } + + for pass := 0; cache.isCapacityExceeded(); pass++ { + transactionsToEvict := make(BunchOfTransactions, 0, cache.config.NumItemsToPreemptivelyEvict) + transactionsToEvictHashes := make([][]byte, 0, cache.config.NumItemsToPreemptivelyEvict) + + // Select transactions (sorted). + for transactionsHeap.Len() > 0 { + // Always pick the "worst" transaction. + item := heap.Pop(transactionsHeap).(*TransactionsHeapItem) + + if len(transactionsToEvict) >= int(cache.config.NumItemsToPreemptivelyEvict) { + // We have enough transactions to evict in this pass. + break + } - transactions = transactions[:cutoffIndex] - transactionsHashes = transactionsHashes[:cutoffIndex] + transactionsToEvict = append(transactionsToEvict, item.transaction) + transactionsToEvictHashes = append(transactionsToEvictHashes, item.transaction.TxHash) + + // If there are more transactions in the same bunch (same sender as the popped item), + // add the next one to the heap (to compete with the others in being "the worst"). + item.transactionIndex++ + + if item.transactionIndex < len(bunches[item.senderIndex]) { + // Item is reused (same originating sender), pushed back on the heap. + item.transaction = bunches[item.senderIndex][item.transactionIndex] + heap.Push(transactionsHeap, item) + } + } // For each sender, find the "lowest" (in nonce) transaction to evict. 
lowestToEvictBySender := make(map[string]uint64) for _, tx := range transactionsToEvict { - transactionsToEvictHashes = append(transactionsToEvictHashes, tx.TxHash) sender := string(tx.Tx.GetSndAddr()) - - if _, ok := lowestToEvictBySender[sender]; ok { - continue - } - lowestToEvictBySender[sender] = tx.Tx.GetNonce() } @@ -131,14 +161,14 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { continue } - list.evictTransactionsWithHigherNonces(nonce - 1) + list.evictTransactionsWithHigherOrEqualNonces(nonce) } // Remove those transactions from "txByHash". - cache.txByHash.RemoveTxsBulk(transactionsToEvictHashes) + _ = cache.txByHash.RemoveTxsBulk(transactionsToEvictHashes) - journal.numPasses = pass - journal.numTxs += len(transactionsToEvict) + journal.numEvictedByPass = append(journal.numEvictedByPass, len(transactionsToEvict)) + journal.numEvicted += len(transactionsToEvict) } return journal From 1920a2f7af6f9ee444848364a7ff847b2a8f8d30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 23:31:49 +0200 Subject: [PATCH 088/124] Fix eviction, work in progress. --- txcache/eviction.go | 5 +++ txcache/eviction_test.go | 68 +++++++++++++++++++++++++++------------- 2 files changed, 51 insertions(+), 22 deletions(-) diff --git a/txcache/eviction.go b/txcache/eviction.go index 9bf9c53b..0283eb2f 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -146,6 +146,11 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { } } + if len(transactionsToEvict) == 0 { + // No more transactions to evict. + break + } + // For each sender, find the "lowest" (in nonce) transaction to evict. lowestToEvictBySender := make(map[string]uint64) diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index a170f3fc..3c72fc3b 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -10,12 +10,14 @@ import ( func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - NumBytesThreshold: maxNumBytesUpperBound, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: 2, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 4, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, } txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(config, txGasHandler) @@ -23,11 +25,13 @@ func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { require.NotNil(t, cache) cache.AddTx(createTx([]byte("hash-alice"), "alice", 1).withGasPrice(1 * oneBillion)) - cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withGasPrice(1 * oneBillion)) + cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withGasPrice(2 * oneBillion)) cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withGasPrice(3 * oneBillion)) + cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withGasPrice(4 * oneBillion)) + cache.AddTx(createTx([]byte("hash-dan"), "dan", 1).withGasPrice(5 * oneBillion)) journal := cache.doEviction() - require.Equal(t, uint32(2), journal.numTxs) + require.Equal(t, uint32(1), journal.numEvicted) // Alice and Bob evicted. Carol still there (better score). 
_, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -38,12 +42,14 @@ func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - NumBytesThreshold: 1000, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: math.MaxUint32, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, } txGasHandler := txcachemocks.NewTxGasHandlerMock() @@ -57,7 +63,7 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withSize(256).withGasLimit(500000).withGasPrice(3 * oneBillion)) journal := cache.doEviction() - require.Equal(t, uint32(2), journal.numTxs) + require.Equal(t, 2, journal.numEvicted) // Alice and Bob evicted (lower score). Carol and Eve still there. _, ok := cache.GetByTxHash([]byte("hash-carol")) @@ -70,11 +76,14 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 1, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: 0, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 1, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 4, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, } txGasHandler := txcachemocks.NewTxGasHandlerMock() @@ -82,11 +91,26 @@ func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { require.Nil(t, err) require.NotNil(t, cache) - cache.AddTx(createTx([]byte("hash-alice"), "alice", uint64(1))) - _ = cache.isEvictionInProgress.SetReturningPrevious() + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", uint64(1))) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", uint64(2))) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", uint64(3))) + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", uint64(4))) + cache.AddTx(createTx([]byte("hash-alice-5"), "alice", uint64(5))) + + // Nothing is evicted because eviction is already in progress. journal := cache.doEviction() require.Nil(t, journal) + require.Equal(t, uint64(5), cache.CountTx()) + + cache.isEvictionInProgress.Reset() + + // Now eviction can happen. + journal = cache.doEviction() + require.NotNil(t, journal) + require.Equal(t, 1, journal.numEvicted) + require.Equal(t, 4, int(cache.CountTx())) } // This seems to be the most reasonable "bad-enough" (not worst) scenario to benchmark: From 608bda0369b735a02910edef9d3d5b758cf60725 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Fri, 1 Nov 2024 23:33:45 +0200 Subject: [PATCH 089/124] Remove selection using merges / merge sort. 
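
With both selection and eviction now going through the heaps, the merge-sort
based approach becomes dead code: selectionUsingMerges.go and its tests are
removed, and the BunchOfTransactions type moves next to WrappedTransaction.
The heap does O(log numSenders) work per selected transaction, instead of
merging all transactions of all senders up front.
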
--- txcache/selectionUsingMerges.go | 118 -------------------------------- txcache/selection_test.go | 51 -------------- txcache/wrappedTransaction.go | 2 + 3 files changed, 2 insertions(+), 169 deletions(-) delete mode 100644 txcache/selectionUsingMerges.go diff --git a/txcache/selectionUsingMerges.go b/txcache/selectionUsingMerges.go deleted file mode 100644 index 3c4ad523..00000000 --- a/txcache/selectionUsingMerges.go +++ /dev/null @@ -1,118 +0,0 @@ -package txcache - -import ( - "sync" -) - -type BunchOfTransactions []*WrappedTransaction - -type mergingJob struct { - input []BunchOfTransactions - output BunchOfTransactions -} - -func (cache *TxCache) selectTransactionsUsingMerges(gasRequested uint64) BunchOfTransactions { - senders := cache.getSenders() - bunches := make([]BunchOfTransactions, 0, len(senders)) - - for _, sender := range senders { - bunches = append(bunches, sender.getTxsWithoutGaps()) - } - - mergedBunch := mergeBunchesInParallel(bunches, numJobsForMerging) - selection := selectUntilReachedGasRequested(mergedBunch, gasRequested) - return selection -} - -func mergeBunchesInParallel(bunches []BunchOfTransactions, numJobs int) BunchOfTransactions { - jobs := make([]*mergingJob, numJobs) - - for i := 0; i < numJobs; i++ { - jobs[i] = &mergingJob{ - input: make([]BunchOfTransactions, 0, len(bunches)/numJobs), - } - } - - for i, bunch := range bunches { - jobs[i%numJobs].input = append(jobs[i%numJobs].input, bunch) - } - - // Run jobs in parallel - wg := sync.WaitGroup{} - - for i, job := range jobs { - wg.Add(1) - - go func(job *mergingJob, i int) { - job.output = mergeBunches(job.input) - wg.Done() - }(job, i) - } - - wg.Wait() - - // Merge the results of the jobs - outputBunchesOfJobs := make([]BunchOfTransactions, 0, numJobs) - - for _, job := range jobs { - outputBunchesOfJobs = append(outputBunchesOfJobs, job.output) - } - - finalMerge := mergeBunches(outputBunchesOfJobs) - return finalMerge -} - -func mergeBunches(bunches []BunchOfTransactions) BunchOfTransactions { - if len(bunches) == 0 { - return make(BunchOfTransactions, 0) - } - if len(bunches) == 1 { - return bunches[0] - } - - mid := len(bunches) / 2 - left := mergeBunches(bunches[:mid]) - right := mergeBunches(bunches[mid:]) - return mergeTwoBunches(left, right) -} - -// Empty bunches are handled. -func mergeTwoBunches(first BunchOfTransactions, second BunchOfTransactions) BunchOfTransactions { - result := make(BunchOfTransactions, 0, len(first)+len(second)) - - firstIndex := 0 - secondIndex := 0 - - for firstIndex < len(first) && secondIndex < len(second) { - a := first[firstIndex] - b := second[secondIndex] - - if a.isTransactionMoreDesirableByProtocol(b) { - result = append(result, a) - firstIndex++ - } else { - result = append(result, b) - secondIndex++ - } - } - - // Append any remaining elements. - result = append(result, first[firstIndex:]...) - result = append(result, second[secondIndex:]...) 
- - return result -} - -func selectUntilReachedGasRequested(bunch BunchOfTransactions, gasRequested uint64) BunchOfTransactions { - accumulatedGas := uint64(0) - - for index, transaction := range bunch { - accumulatedGas += transaction.Tx.GetGasLimit() - - if accumulatedGas > gasRequested { - return bunch[0:index] - } - } - - return bunch -} diff --git a/txcache/selection_test.go b/txcache/selection_test.go index f29f15eb..3307ab3a 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -8,57 +8,6 @@ import ( "github.com/stretchr/testify/require" ) -func Test_mergeTwoBunches(t *testing.T) { - t.Run("empty bunches", func(t *testing.T) { - merged := mergeTwoBunches(BunchOfTransactions{}, BunchOfTransactions{}) - require.Len(t, merged, 0) - }) - - t.Run("alice and bob (1)", func(t *testing.T) { - first := BunchOfTransactions{ - createTx([]byte("hash-alice-1"), "alice", 1).withGasPrice(42), - } - - second := BunchOfTransactions{ - createTx([]byte("hash-bob-1"), "bob", 1).withGasPrice(43), - } - - merged := mergeTwoBunches(first, second) - - require.Len(t, merged, 2) - require.Equal(t, "hash-bob-1", string(merged[0].TxHash)) - require.Equal(t, "hash-alice-1", string(merged[1].TxHash)) - }) -} - -func Test_mergeBunches(t *testing.T) { - sw := core.NewStopWatch() - - t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { - bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) - - sw.Start(t.Name()) - merged := mergeBunches(bunches) - sw.Stop(t.Name()) - - require.Len(t, merged, 1000*1000) - }) - - t.Run("numSenders = 1000, numTransactions = 1000, parallel (4 jobs)", func(t *testing.T) { - bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) - - sw.Start(t.Name()) - merged := mergeBunchesInParallel(bunches, 4) - sw.Stop(t.Name()) - - require.Len(t, merged, 1000*1000) - }) - - for name, measurement := range sw.GetMeasurementsMap() { - fmt.Printf("%fs (%s)\n", measurement, name) - } -} - func TestTxCache_selectTransactionsFromBunchesUsingHeap(t *testing.T) { sw := core.NewStopWatch() diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index 155722cc..5bed2c05 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -7,6 +7,8 @@ import ( "github.com/multiversx/mx-chain-core-go/data" ) +type BunchOfTransactions []*WrappedTransaction + // WrappedTransaction contains a transaction, its hash and extra information type WrappedTransaction struct { Tx data.TransactionHandler From 786f4e2c0e052b5dcb25377585d35ec4cfd45318 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 09:17:09 +0200 Subject: [PATCH 090/124] Refactor, fix tests. --- txcache/eviction.go | 7 +------ txcache/eviction_test.go | 33 +++++++++++++++++-------------- txcache/selection_test.go | 2 +- txcache/txCache.go | 2 +- txcache/txListBySenderMap.go | 23 +++++++++++++++++---- txcache/txListBySenderMap_test.go | 4 ++-- txcache/txListForSender.go | 2 +- txcache/txListForSender_test.go | 4 ++-- 8 files changed, 45 insertions(+), 32 deletions(-) diff --git a/txcache/eviction.go b/txcache/eviction.go index 0283eb2f..b230c968 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -161,12 +161,7 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { // Remove those transactions from "txListBySender". 
for sender, nonce := range lowestToEvictBySender { - list, ok := cache.txListBySender.getListForSender(sender) - if !ok { - continue - } - - list.evictTransactionsWithHigherOrEqualNonces(nonce) + cache.txListBySender.evictTransactionsWithHigherOrEqualNonces([]byte(sender), nonce) } // Remove those transactions from "txByHash". diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index 3c72fc3b..1b4f49eb 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -31,13 +31,14 @@ func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { cache.AddTx(createTx([]byte("hash-dan"), "dan", 1).withGasPrice(5 * oneBillion)) journal := cache.doEviction() - require.Equal(t, uint32(1), journal.numEvicted) + require.Equal(t, 1, journal.numEvicted) + require.Equal(t, []int{1}, journal.numEvictedByPass) // Alice and Bob evicted. Carol still there (better score). _, ok := cache.GetByTxHash([]byte("hash-carol")) require.True(t, ok) - require.Equal(t, uint64(1), cache.CountSenders()) - require.Equal(t, uint64(1), cache.CountTx()) + require.Equal(t, uint64(4), cache.CountSenders()) + require.Equal(t, uint64(4), cache.CountTx()) } func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { @@ -63,15 +64,16 @@ func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) { cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withSize(256).withGasLimit(500000).withGasPrice(3 * oneBillion)) journal := cache.doEviction() - require.Equal(t, 2, journal.numEvicted) + require.Equal(t, 1, journal.numEvicted) + require.Equal(t, []int{1}, journal.numEvictedByPass) // Alice and Bob evicted (lower score). Carol and Eve still there. _, ok := cache.GetByTxHash([]byte("hash-carol")) require.True(t, ok) _, ok = cache.GetByTxHash([]byte("hash-eve")) require.True(t, ok) - require.Equal(t, uint64(2), cache.CountSenders()) - require.Equal(t, uint64(2), cache.CountTx()) + require.Equal(t, uint64(3), cache.CountSenders()) + require.Equal(t, uint64(3), cache.CountTx()) } func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { @@ -114,17 +116,18 @@ func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { } // This seems to be the most reasonable "bad-enough" (not worst) scenario to benchmark: -// 25000 senders with 10 transactions each, with default "NumItemsToPreemptivelyEvict". -// ~1 second on average laptop. +// 25000 senders with 10 transactions each, with "NumItemsToPreemptivelyEvict" = 50000. +// ~0.5 seconds on average laptop. 
func TestTxCache_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { config := ConfigSourceMe{ - Name: "untitled", - NumChunks: 16, - EvictionEnabled: true, - NumBytesThreshold: 1000000000, - NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: 240000, - CountPerSenderThreshold: math.MaxUint32, + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 240000, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 50000, } txGasHandler := txcachemocks.NewTxGasHandlerMock() diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 3307ab3a..4c60692f 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -18,7 +18,7 @@ func TestTxCache_selectTransactionsFromBunchesUsingHeap(t *testing.T) { merged := selectTransactionsFromBunchesUsingHeap(bunches, 10_000_000_000) sw.Stop(t.Name()) - require.Equal(t, 200001, len(merged)) + require.Equal(t, 200000, len(merged)) }) for name, measurement := range sw.GetMeasurementsMap() { diff --git a/txcache/txCache.go b/txcache/txCache.go index 3f4ad7d9..43a4691c 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -289,7 +289,7 @@ func (cache *TxCache) UnRegisterHandler(string) { // NotifyAccountNonce should be called by external components (such as interceptors and transactions processor) // in order to inform the cache about initial nonce gap phenomena func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { - evicted := cache.txListBySender.notifyAccountNonce(accountKey, nonce) + evicted := cache.txListBySender.notifyAccountNonceReturnEvictedTransactions(accountKey, nonce) if len(evicted) > 0 { logRemove.Trace("NotifyAccountNonce() with eviction", "sender", accountKey, "nonce", nonce, "num evicted txs", len(evicted)) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index b1180b6e..8fb5b2c0 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -89,8 +89,8 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { } isFound := listForSender.RemoveTx(tx) - isEmpty := listForSender.IsEmpty() - if isEmpty { + + if listForSender.IsEmpty() { txMap.removeSender(sender) } @@ -122,14 +122,15 @@ func (txMap *txListBySenderMap) RemoveSendersBulk(senders []string) uint32 { return numRemoved } -func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint64) [][]byte { +func (txMap *txListBySenderMap) notifyAccountNonceReturnEvictedTransactions(accountKey []byte, nonce uint64) [][]byte { sender := string(accountKey) listForSender, ok := txMap.getListForSender(sender) if !ok { return nil } - evictedTxHashes := listForSender.notifyAccountNonce(nonce) + evictedTxHashes := listForSender.notifyAccountNonceReturnEvictedTransactions(nonce) + if listForSender.IsEmpty() { txMap.removeSender(sender) } @@ -137,6 +138,20 @@ func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint return evictedTxHashes } +func (txMap *txListBySenderMap) evictTransactionsWithHigherOrEqualNonces(accountKey []byte, nonce uint64) { + sender := string(accountKey) + listForSender, ok := txMap.getListForSender(sender) + if !ok { + return + } + + listForSender.evictTransactionsWithHigherOrEqualNonces(nonce) + + if listForSender.IsEmpty() { + txMap.removeSender(sender) + } +} + func (txMap *txListBySenderMap) getSenders() []*txListForSender { senders := 
make([]*txListForSender, 0, txMap.counter.Get()) diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index e7cde461..325fae0f 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -101,14 +101,14 @@ func TestSendersMap_notifyAccountNonce(t *testing.T) { myMap := newSendersMapToTest() // Discarded notification, since sender not added yet - myMap.notifyAccountNonce([]byte("alice"), 42) + myMap.notifyAccountNonceReturnEvictedTransactions([]byte("alice"), 42) myMap.addTx(createTx([]byte("tx-42"), "alice", 42)) alice, _ := myMap.getListForSender("alice") require.Equal(t, uint64(0), alice.accountNonce.Get()) require.False(t, alice.accountNonceKnown.IsSet()) - myMap.notifyAccountNonce([]byte("alice"), 42) + myMap.notifyAccountNonceReturnEvictedTransactions([]byte("alice"), 42) require.Equal(t, uint64(42), alice.accountNonce.Get()) require.True(t, alice.accountNonceKnown.IsSet()) } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 21f05641..40ccbd6f 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -267,7 +267,7 @@ func approximatelyCountTxInLists(lists []*txListForSender) uint64 { } // Removes transactions with lower nonces and returns their hashes. -func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) [][]byte { +func (listForSender *txListForSender) notifyAccountNonceReturnEvictedTransactions(nonce uint64) [][]byte { // Optimization: if nonce is the same, do nothing. if listForSender.accountNonce.Get() == nonce { return nil diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 08e15bfe..9484a849 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -167,7 +167,7 @@ func TestListForSender_NotifyAccountNonce(t *testing.T) { require.Equal(t, uint64(0), list.accountNonce.Get()) require.False(t, list.accountNonceKnown.IsSet()) - list.notifyAccountNonce(42) + list.notifyAccountNonceReturnEvictedTransactions(42) require.Equal(t, uint64(42), list.accountNonce.Get()) require.True(t, list.accountNonceKnown.IsSet()) @@ -195,7 +195,7 @@ func TestListForSender_evictTransactionsWithLowerNoncesNoLock(t *testing.T) { func TestListForSender_hasInitialGap(t *testing.T) { list := newUnconstrainedListToTest() - list.notifyAccountNonce(42) + list.notifyAccountNonceReturnEvictedTransactions(42) // No transaction, no gap _, _, hasInitialGap := list.hasInitialGap() From 8b89f4c1fa666646dc5083d289d9a3c833e255d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 10:27:42 +0200 Subject: [PATCH 091/124] Fix disabled cache. 
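
Align the no-op DisabledCache with the simplified selection API: the stub now
mirrors SelectTransactions(gasRequested uint64) and keeps returning an empty,
non-nil slice, while the test drops the extra bandwidth/count arguments and the
"math" import they required. A minimal sketch of the expected behavior
(assuming this package's NewDisabledCache constructor; the test name below is
illustrative, not part of the patch):

	package txcache

	import "testing"

	func TestDisabledCache_SelectTransactions_sketch(t *testing.T) {
		cache := NewDisabledCache()

		// The disabled cache selects nothing, regardless of the requested gas.
		selection := cache.SelectTransactions(10_000_000_000)
		if len(selection) != 0 {
			t.Fatal("disabled cache must select nothing")
		}
	}
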
--- txcache/disabledCache.go | 2 +- txcache/disabledCache_test.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/txcache/disabledCache.go b/txcache/disabledCache.go index 45eb8b93..cf4b2049 100644 --- a/txcache/disabledCache.go +++ b/txcache/disabledCache.go @@ -26,7 +26,7 @@ func (cache *DisabledCache) GetByTxHash(_ []byte) (*WrappedTransaction, bool) { } // SelectTransactionsWithBandwidth returns an empty slice -func (cache *DisabledCache) SelectTransactions(_ int, _ uint64, _ int, _ uint64) []*WrappedTransaction { +func (cache *DisabledCache) SelectTransactions(uint64) []*WrappedTransaction { return make([]*WrappedTransaction, 0) } diff --git a/txcache/disabledCache_test.go b/txcache/disabledCache_test.go index 656b5528..c71130d9 100644 --- a/txcache/disabledCache_test.go +++ b/txcache/disabledCache_test.go @@ -1,7 +1,6 @@ package txcache import ( - "math" "testing" "github.com/stretchr/testify/require" @@ -18,7 +17,7 @@ func TestDisabledCache_DoesNothing(t *testing.T) { require.Nil(t, tx) require.False(t, ok) - selection := cache.SelectTransactions(42, math.MaxUint64, 42, math.MaxUint64) + selection := cache.SelectTransactions(42) require.Equal(t, 0, len(selection)) removed := cache.RemoveTxByHash([]byte{}) From 81bb99dd01b44065d1d9a61281f4ab2a9edf6a04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 11:34:42 +0200 Subject: [PATCH 092/124] Use float for PPU. --- txcache/printing.go | 18 +++++++++--------- txcache/wrappedTransaction.go | 27 ++++++--------------------- txcache/wrappedTransaction_test.go | 21 +++++++-------------- 3 files changed, 22 insertions(+), 44 deletions(-) diff --git a/txcache/printing.go b/txcache/printing.go index d549d763..b9cb4b2f 100644 --- a/txcache/printing.go +++ b/txcache/printing.go @@ -8,14 +8,14 @@ import ( ) type printedTransaction struct { - Hash string `json:"hash"` - Nonce uint64 `json:"nonce"` - GasPrice uint64 `json:"gasPrice"` - GasLimit uint64 `json:"gasLimit"` - Sender string `json:"sender"` - Receiver string `json:"receiver"` - DataLength int `json:"dataLength"` - PPU uint64 `json:"ppu"` + Hash string `json:"hash"` + Nonce uint64 `json:"nonce"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Sender string `json:"sender"` + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` + PPU float64 `json:"ppu"` } type printedSender struct { @@ -75,7 +75,7 @@ func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction GasPrice: transaction.GetGasPrice(), GasLimit: transaction.GetGasLimit(), DataLength: len(transaction.GetData()), - PPU: wrappedTx.PricePerGasUnitQuotient, + PPU: wrappedTx.PricePerUnit, } } diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index 5bed2c05..a0734820 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -2,7 +2,6 @@ package txcache import ( "bytes" - "math/big" "github.com/multiversx/mx-chain-core-go/data" ) @@ -17,36 +16,22 @@ type WrappedTransaction struct { ReceiverShardID uint32 Size int64 - PricePerGasUnitQuotient uint64 - PricePerGasUnitRemainder uint64 + PricePerUnit float64 } // computePricePerGasUnit computes (and caches) the (average) price per gas unit. 
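 // The cached value is the fee-to-gas-limit ratio, kept as a float64 in "PricePerUnit".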
func (transaction *WrappedTransaction) computePricePerGasUnit(txGasHandler TxGasHandler) { fee := txGasHandler.ComputeTxFee(transaction.Tx) - gasLimit := big.NewInt(0).SetUint64(transaction.Tx.GetGasLimit()) - - quotient := new(big.Int) - remainder := new(big.Int) - quotient, remainder = quotient.QuoRem(fee, gasLimit, remainder) - - transaction.PricePerGasUnitQuotient = quotient.Uint64() - transaction.PricePerGasUnitRemainder = remainder.Uint64() + transaction.PricePerUnit = float64(fee.Uint64()) / float64(transaction.Tx.GetGasLimit()) } // Equality is out of scope (not possible in our case). func (transaction *WrappedTransaction) isTransactionMoreDesirableByProtocol(otherTransaction *WrappedTransaction) bool { // First, compare by price per unit - ppuQuotient := transaction.PricePerGasUnitQuotient - ppuQuotientOther := otherTransaction.PricePerGasUnitQuotient - if ppuQuotient != ppuQuotientOther { - return ppuQuotient > ppuQuotientOther - } - - ppuRemainder := transaction.PricePerGasUnitRemainder - ppuRemainderOther := otherTransaction.PricePerGasUnitRemainder - if ppuRemainder != ppuRemainderOther { - return ppuRemainder > ppuRemainderOther + ppu := transaction.PricePerUnit + ppuOther := otherTransaction.PricePerUnit + if ppu != ppuOther { + return ppu > ppuOther } // Then, compare by gas price (to promote the practice of a higher gas price) diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index ac75103b..04c8cd92 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -14,16 +14,14 @@ func TestWrappedTransaction_computePricePerGasUnit(t *testing.T) { tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) tx.computePricePerGasUnit(txGasHandler) - require.Equal(t, oneBillion, int(tx.PricePerGasUnitQuotient)) - require.Equal(t, 0, int(tx.PricePerGasUnitRemainder)) + require.Equal(t, float64(oneBillion), tx.PricePerUnit) }) t.Run("move balance gas limit and execution gas limit (1)", func(t *testing.T) { tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) tx.computePricePerGasUnit(txGasHandler) - require.Equal(t, 999980777, int(tx.PricePerGasUnitQuotient)) - require.Equal(t, 3723, int(tx.PricePerGasUnitRemainder)) + require.InDelta(t, float64(999980777), tx.PricePerUnit, 0.1) }) t.Run("move balance gas limit and execution gas limit (2)", func(t *testing.T) { @@ -33,8 +31,7 @@ func TestWrappedTransaction_computePricePerGasUnit(t *testing.T) { actualFee := 51500*oneBillion + (oneMilion-51500)*oneBillion/100 require.Equal(t, 60985000000000, actualFee) - require.Equal(t, actualFee/oneMilion, int(tx.PricePerGasUnitQuotient)) - require.Equal(t, 0, int(tx.PricePerGasUnitRemainder)) + require.InDelta(t, actualFee/oneMilion, tx.PricePerUnit, 0.1) }) } @@ -55,10 +52,8 @@ func TestWrappedTransaction_isTransactionMoreDesirableByProtocol(t *testing.T) { a := createTx([]byte("a-2"), "a", 1).withGasPrice(oneBillion + 1) b := createTx([]byte("b-2"), "b", 1).withGasPrice(oneBillion) - a.PricePerGasUnitQuotient = 42 - b.PricePerGasUnitQuotient = 42 - a.PricePerGasUnitRemainder = 0 - b.PricePerGasUnitRemainder = 0 + a.PricePerUnit = 42 + b.PricePerUnit = 42 require.True(t, a.isTransactionMoreDesirableByProtocol(b)) }) @@ -67,10 +62,8 @@ func TestWrappedTransaction_isTransactionMoreDesirableByProtocol(t *testing.T) { a := createTx([]byte("a-2"), "a", 1).withGasLimit(55000) b := createTx([]byte("b-2"), "b", 1).withGasLimit(60000) - 
a.PricePerGasUnitQuotient = 42 - b.PricePerGasUnitQuotient = 42 - a.PricePerGasUnitRemainder = 0 - b.PricePerGasUnitRemainder = 0 + a.PricePerUnit = 42 + b.PricePerUnit = 42 require.True(t, a.isTransactionMoreDesirableByProtocol(b)) }) From 068118448bc9a35106d0e28bf3b39b965f199a7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 11:45:01 +0200 Subject: [PATCH 093/124] Additional guard for gas limit. --- txcache/wrappedTransaction.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index a0734820..5d623536 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -21,8 +21,14 @@ type WrappedTransaction struct { // computePricePerGasUnit computes (and caches) the (average) price per gas unit. func (transaction *WrappedTransaction) computePricePerGasUnit(txGasHandler TxGasHandler) { - fee := txGasHandler.ComputeTxFee(transaction.Tx) - transaction.PricePerUnit = float64(fee.Uint64()) / float64(transaction.Tx.GetGasLimit()) + fee := txGasHandler.ComputeTxFee(transaction.Tx).Uint64() + + gasLimit := transaction.Tx.GetGasLimit() + if gasLimit == 0 { + return + } + + transaction.PricePerUnit = float64(fee) / float64(gasLimit) } // Equality is out of scope (not possible in our case). From 15c28eb450a95a9f770745c5fb519bdceae49fab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 16:12:13 +0200 Subject: [PATCH 094/124] Refactor, simplify monitoring. --- txcache/config.go | 4 ++-- txcache/constants.go | 1 + txcache/crossTxCache.go | 2 +- txcache/diagnosis.go | 34 +++++++++++++---------------- txcache/monitoring.go | 39 ---------------------------------- txcache/printing.go | 39 +--------------------------------- txcache/selectionUsingHeap.go | 8 +++---- txcache/selection_test.go | 4 ++-- txcache/txCache.go | 40 +++++++++++++---------------------- txcache/txListBySenderMap.go | 4 ++-- 10 files changed, 43 insertions(+), 132 deletions(-) delete mode 100644 txcache/monitoring.go diff --git a/txcache/config.go b/txcache/config.go index d02c2142..1e371d41 100644 --- a/txcache/config.go +++ b/txcache/config.go @@ -72,7 +72,7 @@ func (config *ConfigSourceMe) getSenderConstraints() senderConstraints { func (config *ConfigSourceMe) String() string { bytes, err := json.Marshal(config) if err != nil { - log.Error("ConfigSourceMe.String()", "err", err) + log.Error("ConfigSourceMe.String", "err", err) } return string(bytes) @@ -111,7 +111,7 @@ func (config *ConfigDestinationMe) verify() error { func (config *ConfigDestinationMe) String() string { bytes, err := json.Marshal(config) if err != nil { - log.Error("ConfigDestinationMe.String()", "err", err) + log.Error("ConfigDestinationMe.String", "err", err) } return string(bytes) diff --git a/txcache/constants.go b/txcache/constants.go index 448a06b2..274c9e99 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -6,3 +6,4 @@ const diagnosisMaxSendersToDisplay = 1000 const diagnosisMaxTransactionsToDisplay = 10000 const diagnosisSelectionGasRequested = 10_000_000_000 const numJobsForMerging = 1 +const initialCapacityOfSelectionSlice = 30000 diff --git a/txcache/crossTxCache.go b/txcache/crossTxCache.go index ccd1aa05..9d89c18f 100644 --- a/txcache/crossTxCache.go +++ b/txcache/crossTxCache.go @@ -46,7 +46,7 @@ func NewCrossTxCache(config ConfigDestinationMe) (*CrossTxCache, error) { // ImmunizeTxsAgainstEviction marks items as non-evictable func (cache *CrossTxCache) 
ImmunizeTxsAgainstEviction(keys [][]byte) { numNow, numFuture := cache.ImmunityCache.ImmunizeKeys(keys) - log.Trace("CrossTxCache.ImmunizeTxsAgainstEviction()", + log.Trace("CrossTxCache.ImmunizeTxsAgainstEviction", "name", cache.config.Name, "len(keys)", len(keys), "numNow", numNow, diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index 79fac6f6..e0d60ecc 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -8,7 +8,6 @@ import ( // Diagnose checks the state of the cache for inconsistencies and displays a summary, senders and transactions. func (cache *TxCache) Diagnose(_ bool) { cache.diagnoseCounters() - cache.diagnoseSenders() cache.diagnoseTransactions() cache.diagnoseSelection() } @@ -30,7 +29,7 @@ func (cache *TxCache) diagnoseCounters() { fine = fine && (int(numSendersEstimate) == len(sendersKeys)) fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) - log.Debug("diagnoseCounters()", + log.Debug("diagnoseCounters", "fine", fine, "numTxsEstimate", numTxsEstimate, "numTxsInChunks", numTxsInChunks, @@ -43,22 +42,6 @@ func (cache *TxCache) diagnoseCounters() { ) } -func (cache *TxCache) diagnoseSenders() { - if logDiagnoseSenders.GetLevel() > logger.LogTrace { - return - } - - senders := cache.txListBySender.getSenders() - - if len(senders) == 0 { - return - } - - numToDisplay := core.MinInt(diagnosisMaxSendersToDisplay, len(senders)) - logDiagnoseSenders.Trace("diagnoseSenders()", "numSenders", len(senders), "numToDisplay", numToDisplay) - logDiagnoseSenders.Trace(marshalSendersToNewlineDelimitedJson(senders[:numToDisplay])) -} - func (cache *TxCache) diagnoseTransactions() { if logDiagnoseTransactions.GetLevel() > logger.LogTrace { return @@ -71,7 +54,7 @@ func (cache *TxCache) diagnoseTransactions() { } numToDisplay := core.MinInt(diagnosisMaxTransactionsToDisplay, len(transactions)) - logDiagnoseTransactions.Trace("diagnoseTransactions()", "numTransactions", len(transactions), "numToDisplay", numToDisplay) + logDiagnoseTransactions.Trace("diagnoseTransactions", "numTransactions", len(transactions), "numToDisplay", numToDisplay) logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJson(transactions[:numToDisplay])) } @@ -87,3 +70,16 @@ func (cache *TxCache) diagnoseSelection() { displaySelectionOutcome(logDiagnoseSelection, transactions) } + +func displaySelectionOutcome(contextualLogger logger.Logger, selection []*WrappedTransaction) { + if contextualLogger.GetLevel() > logger.LogTrace { + return + } + + if len(selection) > 0 { + contextualLogger.Trace("displaySelectionOutcome - transactions (as newline-separated JSON):") + contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) + } else { + contextualLogger.Trace("displaySelectionOutcome - transactions: none") + } +} diff --git a/txcache/monitoring.go b/txcache/monitoring.go deleted file mode 100644 index b15e8727..00000000 --- a/txcache/monitoring.go +++ /dev/null @@ -1,39 +0,0 @@ -package txcache - -import ( - "fmt" - "strings" - - logger "github.com/multiversx/mx-chain-logger-go" -) - -func displaySendersScoreHistogram(scoreGroups [][]*txListForSender) { - if log.GetLevel() > logger.LogDebug { - return - } - - stringBuilder := strings.Builder{} - - for i, group := range scoreGroups { - if len(group) == 0 { - continue - } - - stringBuilder.WriteString(fmt.Sprintf("#%d: %d; ", i, len(group))) - } - - log.Debug("displaySendersScoreHistogram()", "histogram", stringBuilder.String()) -} - -func displaySelectionOutcome(contextualLogger logger.Logger, 
selection []*WrappedTransaction) { - if contextualLogger.GetLevel() > logger.LogTrace { - return - } - - if len(selection) > 0 { - contextualLogger.Trace("displaySelectionOutcome() - transactions (as newline-separated JSON):") - contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) - } else { - contextualLogger.Trace("displaySelectionOutcome() - transactions: none") - } -} diff --git a/txcache/printing.go b/txcache/printing.go index b9cb4b2f..faa2b1bb 100644 --- a/txcache/printing.go +++ b/txcache/printing.go @@ -10,39 +10,12 @@ import ( type printedTransaction struct { Hash string `json:"hash"` Nonce uint64 `json:"nonce"` + PPU float64 `json:"ppu"` GasPrice uint64 `json:"gasPrice"` GasLimit uint64 `json:"gasLimit"` Sender string `json:"sender"` Receiver string `json:"receiver"` DataLength int `json:"dataLength"` - PPU float64 `json:"ppu"` -} - -type printedSender struct { - Address string `json:"address"` - Nonce uint64 `json:"nonce"` - IsNonceKnown bool `json:"isNonceKnown"` - HasInitialGap bool `json:"hasInitialGap"` - NumTxs uint64 `json:"numTxs"` -} - -// marshalSendersToNewlineDelimitedJson converts a list of senders to a newline-delimited JSON string. -// Note: each line is indexed, to improve readability. The index is easily removable for if separate analysis is needed. -func marshalSendersToNewlineDelimitedJson(senders []*txListForSender) string { - builder := strings.Builder{} - builder.WriteString("\n") - - for i, txListForSender := range senders { - printedSender := convertTxListForSenderToPrintedSender(txListForSender) - printedSenderJson, _ := json.Marshal(printedSender) - - builder.WriteString(fmt.Sprintf("#%d: ", i)) - builder.WriteString(string(printedSenderJson)) - builder.WriteString("\n") - } - - builder.WriteString("\n") - return builder.String() } // marshalTransactionsToNewlineDelimitedJson converts a list of transactions to a newline-delimited JSON string. @@ -78,13 +51,3 @@ func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction PPU: wrappedTx.PricePerUnit, } } - -func convertTxListForSenderToPrintedSender(txListForSender *txListForSender) *printedSender { - return &printedSender{ - Address: hex.EncodeToString([]byte(txListForSender.sender)), - Nonce: txListForSender.accountNonce.Get(), - IsNonceKnown: txListForSender.accountNonceKnown.IsSet(), - HasInitialGap: txListForSender.hasInitialGapWithLock(), - NumTxs: txListForSender.countTxWithLock(), - } -} diff --git a/txcache/selectionUsingHeap.go b/txcache/selectionUsingHeap.go index 553bde72..023ec783 100644 --- a/txcache/selectionUsingHeap.go +++ b/txcache/selectionUsingHeap.go @@ -2,7 +2,7 @@ package txcache import "container/heap" -func (cache *TxCache) selectTransactionsUsingHeap(gasRequested uint64) BunchOfTransactions { +func (cache *TxCache) doSelectTransactions(gasRequested uint64) BunchOfTransactions { senders := cache.getSenders() bunches := make([]BunchOfTransactions, 0, len(senders)) @@ -10,12 +10,12 @@ func (cache *TxCache) selectTransactionsUsingHeap(gasRequested uint64) BunchOfTr bunches = append(bunches, sender.getTxsWithoutGaps()) } - return selectTransactionsFromBunchesUsingHeap(bunches, gasRequested) + return selectTransactionsFromBunches(bunches, gasRequested) } // Selection tolerates concurrent transaction additions / removals. 
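 // Under the hood, this is a k-way merge over the per-sender bunches (each bunch being ordered
 // by nonce): the heap holds at most one candidate per sender, and popping the best candidate
 // promotes that sender's next transaction onto the heap.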
-func selectTransactionsFromBunchesUsingHeap(bunches []BunchOfTransactions, gasRequested uint64) BunchOfTransactions { - selectedTransactions := make(BunchOfTransactions, 0, 30000) +func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested uint64) BunchOfTransactions { + selectedTransactions := make(BunchOfTransactions, 0, initialCapacityOfSelectionSlice) // Items popped from the heap are added to "selectedTransactions". transactionsHeap := &TransactionsMaxHeap{} diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 4c60692f..1294e8f5 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -8,14 +8,14 @@ import ( "github.com/stretchr/testify/require" ) -func TestTxCache_selectTransactionsFromBunchesUsingHeap(t *testing.T) { +func TestTxCache_selectTransactionsFromBunches(t *testing.T) { sw := core.NewStopWatch() t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) sw.Start(t.Name()) - merged := selectTransactionsFromBunchesUsingHeap(bunches, 10_000_000_000) + merged := selectTransactionsFromBunches(bunches, 10_000_000_000) sw.Stop(t.Name()) require.Equal(t, 200000, len(merged)) diff --git a/txcache/txCache.go b/txcache/txCache.go index 43a4691c..19b96505 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -6,7 +6,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" - logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-storage-go/common" "github.com/multiversx/mx-chain-storage-go/monitoring" "github.com/multiversx/mx-chain-storage-go/types" @@ -59,7 +58,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { return false, false } - logAdd.Trace("AddTx()", "tx", tx.TxHash, "nonce", tx.Tx.GetNonce(), "sender", tx.Tx.GetSndAddr()) + logAdd.Trace("AddTx", "tx", tx.TxHash, "nonce", tx.Tx.GetNonce(), "sender", tx.Tx.GetSndAddr()) if cache.config.EvictionEnabled { _ = cache.doEviction() @@ -75,11 +74,11 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { // - B won't add to "txByHash" (duplicate) // - B adds to "txListBySender" // - A won't add to "txListBySender" (duplicate) - logAdd.Debug("AddTx(): slight inconsistency detected:", "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) + logAdd.Debug("AddTx: slight inconsistency detected:", "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) } if len(evicted) > 0 { - logRemove.Debug("AddTx() with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted)) + logRemove.Debug("AddTx with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } @@ -97,38 +96,29 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { // SelectTransactions selects a reasonably fair list of transactions to be included in the next miniblock // It returns transactions with total gas ~ "gasRequested". 
func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransaction { - transactions := cache.doSelectTransactions( - logSelect, - gasRequested, - ) - - go cache.diagnoseCounters() - go displaySelectionOutcome(logSelect, transactions) - - return transactions -} - -func (cache *TxCache) doSelectTransactions(contextualLogger logger.Logger, gasRequested uint64) []*WrappedTransaction { stopWatch := core.NewStopWatch() stopWatch.Start("selection") - contextualLogger.Debug( - "doSelectTransactions(): begin", + logSelect.Debug( + "doSelectTransactions: begin", "num bytes", cache.NumBytes(), "num txs", cache.CountTx(), "num senders", cache.CountSenders(), ) - transactions := cache.selectTransactionsUsingHeap(gasRequested) + transactions := cache.doSelectTransactions(gasRequested) stopWatch.Stop("selection") - contextualLogger.Debug( - "doSelectTransactions(): end", + logSelect.Debug( + "doSelectTransactions: end", "duration", stopWatch.GetMeasurement("selection"), "num txs selected", len(transactions), ) + go cache.diagnoseCounters() + go displaySelectionOutcome(logSelect, transactions) + return transactions } @@ -143,7 +133,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { tx, foundInByHash := cache.txByHash.removeTx(string(txHash)) if !foundInByHash { - logRemove.Trace("RemoveTxByHash(), but !foundInByHash", "tx", txHash) + // Could have been previously removed (e.g. due to NotifyAccountNonce). return false } @@ -157,10 +147,10 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { // - B reaches "cache.txByHash.RemoveTxsBulk()" // - B reaches "cache.txListBySender.RemoveSendersBulk()" // - A reaches "cache.txListBySender.removeTx()", but sender does not exist anymore - logRemove.Debug("RemoveTxByHash(), but !foundInBySender", "tx", txHash) + logRemove.Debug("RemoveTxByHash, but !foundInBySender", "tx", txHash) } - logRemove.Trace("RemoveTxByHash()", "tx", txHash) + logRemove.Trace("RemoveTxByHash", "tx", txHash) return true } @@ -292,7 +282,7 @@ func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { evicted := cache.txListBySender.notifyAccountNonceReturnEvictedTransactions(accountKey, nonce) if len(evicted) > 0 { - logRemove.Trace("NotifyAccountNonce() with eviction", "sender", accountKey, "nonce", nonce, "num evicted txs", len(evicted)) + logRemove.Trace("NotifyAccountNonce with eviction", "sender", accountKey, "nonce", nonce, "num evicted txs", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } } diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 8fb5b2c0..d4698b7b 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -84,7 +84,7 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { if !ok { // This happens when a sender whose transactions were selected for processing is removed from cache in the meantime. // When it comes to remove one if its transactions due to processing (commited / finalized block), they don't exist in cache anymore. - log.Debug("txListBySenderMap.removeTx() detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) + log.Debug("txListBySenderMap.removeTx detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) return false } @@ -99,7 +99,7 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { // Important: this doesn't remove the transactions from txCache.txByHash. That's done by the caller. 
func (txMap *txListBySenderMap) removeSender(sender string) bool { - logRemove.Trace("txListBySenderMap.removeSender()", "sender", sender) + logRemove.Trace("txListBySenderMap.removeSender", "sender", sender) _, removed := txMap.backingMap.Remove(sender) if removed { From 62c0f96c724452628b664d3d182d46ac1573b80e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 16:12:31 +0200 Subject: [PATCH 095/124] Rename file. --- txcache/{selectionUsingHeap.go => selection.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename txcache/{selectionUsingHeap.go => selection.go} (100%) diff --git a/txcache/selectionUsingHeap.go b/txcache/selection.go similarity index 100% rename from txcache/selectionUsingHeap.go rename to txcache/selection.go From 8348e116d6f964054ed5bcdbf0860b4812eed020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 17:02:03 +0200 Subject: [PATCH 096/124] Refactor, fix logging. --- txcache/diagnosis.go | 67 ++++++++++++++++++++++++++----- txcache/printing.go | 53 ------------------------ txcache/txCache.go | 4 +- txcache/txCache_test.go | 2 +- txcache/txListBySenderMap.go | 16 ++++++-- txcache/txListBySenderMap_test.go | 22 +++++----- txcache/txListForSender.go | 1 - 7 files changed, 83 insertions(+), 82 deletions(-) delete mode 100644 txcache/printing.go diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index e0d60ecc..0b790923 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -1,10 +1,26 @@ package txcache import ( + "encoding/hex" + "encoding/json" + "fmt" + "strings" + "github.com/multiversx/mx-chain-core-go/core" logger "github.com/multiversx/mx-chain-logger-go" ) +type printedTransaction struct { + Hash string `json:"hash"` + Nonce uint64 `json:"nonce"` + PPU float64 `json:"ppu"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Sender string `json:"sender"` + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` +} + // Diagnose checks the state of the cache for inconsistencies and displays a summary, senders and transactions. func (cache *TxCache) Diagnose(_ bool) { cache.diagnoseCounters() @@ -48,14 +64,47 @@ func (cache *TxCache) diagnoseTransactions() { } transactions := cache.getAllTransactions() - if len(transactions) == 0 { return } numToDisplay := core.MinInt(diagnosisMaxTransactionsToDisplay, len(transactions)) logDiagnoseTransactions.Trace("diagnoseTransactions", "numTransactions", len(transactions), "numToDisplay", numToDisplay) - logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJson(transactions[:numToDisplay])) + logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJson(transactions[:numToDisplay], "diagnoseTransactions")) +} + +// marshalTransactionsToNewlineDelimitedJson converts a list of transactions to a newline-delimited JSON string. +// Note: each line is indexed, to improve readability. The index is easily removable for if separate analysis is needed. 
+func marshalTransactionsToNewlineDelimitedJson(transactions []*WrappedTransaction, linePrefix string) string { + builder := strings.Builder{} + builder.WriteString("\n") + + for i, wrappedTx := range transactions { + printedTx := convertWrappedTransactionToPrintedTransaction(wrappedTx) + printedTxJson, _ := json.Marshal(printedTx) + + builder.WriteString(fmt.Sprintf("%s#%d: ", linePrefix, i)) + builder.WriteString(string(printedTxJson)) + builder.WriteString("\n") + } + + builder.WriteString("\n") + return builder.String() +} + +func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction) *printedTransaction { + transaction := wrappedTx.Tx + + return &printedTransaction{ + Hash: hex.EncodeToString(wrappedTx.TxHash), + Nonce: transaction.GetNonce(), + Receiver: hex.EncodeToString(transaction.GetRcvAddr()), + Sender: hex.EncodeToString(transaction.GetSndAddr()), + GasPrice: transaction.GetGasPrice(), + GasLimit: transaction.GetGasLimit(), + DataLength: len(transaction.GetData()), + PPU: wrappedTx.PricePerUnit, + } } func (cache *TxCache) diagnoseSelection() { @@ -63,22 +112,18 @@ func (cache *TxCache) diagnoseSelection() { return } - transactions := cache.doSelectTransactions( - logDiagnoseSelection, - diagnosisSelectionGasRequested, - ) - - displaySelectionOutcome(logDiagnoseSelection, transactions) + transactions := cache.doSelectTransactions(diagnosisSelectionGasRequested) + displaySelectionOutcome(logDiagnoseSelection, "diagnoseSelection", transactions) } -func displaySelectionOutcome(contextualLogger logger.Logger, selection []*WrappedTransaction) { +func displaySelectionOutcome(contextualLogger logger.Logger, linePrefix string, transactions []*WrappedTransaction) { if contextualLogger.GetLevel() > logger.LogTrace { return } - if len(selection) > 0 { + if len(transactions) > 0 { contextualLogger.Trace("displaySelectionOutcome - transactions (as newline-separated JSON):") - contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(selection)) + contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(transactions, linePrefix)) } else { contextualLogger.Trace("displaySelectionOutcome - transactions: none") } diff --git a/txcache/printing.go b/txcache/printing.go deleted file mode 100644 index faa2b1bb..00000000 --- a/txcache/printing.go +++ /dev/null @@ -1,53 +0,0 @@ -package txcache - -import ( - "encoding/hex" - "encoding/json" - "fmt" - "strings" -) - -type printedTransaction struct { - Hash string `json:"hash"` - Nonce uint64 `json:"nonce"` - PPU float64 `json:"ppu"` - GasPrice uint64 `json:"gasPrice"` - GasLimit uint64 `json:"gasLimit"` - Sender string `json:"sender"` - Receiver string `json:"receiver"` - DataLength int `json:"dataLength"` -} - -// marshalTransactionsToNewlineDelimitedJson converts a list of transactions to a newline-delimited JSON string. -// Note: each line is indexed, to improve readability. The index is easily removable for if separate analysis is needed. 
-func marshalTransactionsToNewlineDelimitedJson(transactions []*WrappedTransaction) string { - builder := strings.Builder{} - builder.WriteString("\n") - - for i, wrappedTx := range transactions { - printedTx := convertWrappedTransactionToPrintedTransaction(wrappedTx) - printedTxJson, _ := json.Marshal(printedTx) - - builder.WriteString(fmt.Sprintf("#%d: ", i)) - builder.WriteString(string(printedTxJson)) - builder.WriteString("\n") - } - - builder.WriteString("\n") - return builder.String() -} - -func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction) *printedTransaction { - transaction := wrappedTx.Tx - - return &printedTransaction{ - Hash: hex.EncodeToString(wrappedTx.TxHash), - Nonce: transaction.GetNonce(), - Receiver: hex.EncodeToString(transaction.GetRcvAddr()), - Sender: hex.EncodeToString(transaction.GetSndAddr()), - GasPrice: transaction.GetGasPrice(), - GasLimit: transaction.GetGasLimit(), - DataLength: len(transaction.GetData()), - PPU: wrappedTx.PricePerUnit, - } -} diff --git a/txcache/txCache.go b/txcache/txCache.go index 19b96505..72855b86 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -66,7 +66,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { cache.mutTxOperation.Lock() addedInByHash := cache.txByHash.addTx(tx) - addedInBySender, evicted := cache.txListBySender.addTx(tx) + addedInBySender, evicted := cache.txListBySender.addTxReturnEvicted(tx) cache.mutTxOperation.Unlock() if addedInByHash != addedInBySender { // This can happen when two go-routines concur to add the same transaction: @@ -117,7 +117,7 @@ func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransact ) go cache.diagnoseCounters() - go displaySelectionOutcome(logSelect, transactions) + go displaySelectionOutcome(logSelect, "selection", transactions) return transactions } diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 132535cd..5b854d1e 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -619,7 +619,7 @@ func TestTxCache_TransactionIsAdded_EvenWhenInternalMapsAreInconsistent(t *testi cache.Clear() // Setup inconsistency: transaction already exists in map by sender, but not in map by hash - cache.txListBySender.addTx(createTx([]byte("alice-x"), "alice", 42)) + cache.txListBySender.addTxReturnEvicted(createTx([]byte("alice-x"), "alice", 42)) require.False(t, cache.Has([]byte("alice-x"))) ok, added = cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index d4698b7b..55c8bf62 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -31,12 +31,22 @@ func newTxListBySenderMap( } } -// addTx adds a transaction in the map, in the corresponding list (selected by its sender) -func (txMap *txListBySenderMap) addTx(tx *WrappedTransaction) (bool, [][]byte) { +// addTxReturnEvicted adds a transaction in the map, in the corresponding list (selected by its sender). +// This function returns a boolean indicating whether the transaction was added, and a slice of evicted transaction hashes (upon applying sender-level constraints). 
+func (txMap *txListBySenderMap) addTxReturnEvicted(tx *WrappedTransaction) (bool, [][]byte) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) tx.computePricePerGasUnit(txMap.txGasHandler) - return listForSender.AddTx(tx) + + added, evictedHashes := listForSender.AddTx(tx) + + if listForSender.IsEmpty() { + // Generally speaking, a sender cannot become empty after upon applying sender-level constraints. + // However: + txMap.removeSender(sender) + } + + return added, evictedHashes } // getOrAddListForSender gets or lazily creates a list (using double-checked locking pattern) diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index 325fae0f..cb937e61 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -12,9 +12,9 @@ import ( func TestSendersMap_AddTx_IncrementsCounter(t *testing.T) { myMap := newSendersMapToTest() - myMap.addTx(createTx([]byte("a"), "alice", 1)) - myMap.addTx(createTx([]byte("aa"), "alice", 2)) - myMap.addTx(createTx([]byte("b"), "bob", 1)) + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", 1)) + myMap.addTxReturnEvicted(createTx([]byte("aa"), "alice", 2)) + myMap.addTxReturnEvicted(createTx([]byte("b"), "bob", 1)) // There are 2 senders require.Equal(t, int64(2), myMap.counter.Get()) @@ -27,9 +27,9 @@ func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T txAlice2 := createTx([]byte("a2"), "alice", 2) txBob := createTx([]byte("b"), "bob", 1) - myMap.addTx(txAlice1) - myMap.addTx(txAlice2) - myMap.addTx(txBob) + myMap.addTxReturnEvicted(txAlice1) + myMap.addTxReturnEvicted(txAlice2) + myMap.addTxReturnEvicted(txBob) require.Equal(t, int64(2), myMap.counter.Get()) require.Equal(t, uint64(2), myMap.testGetListForSender("alice").countTx()) require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) @@ -51,7 +51,7 @@ func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T func TestSendersMap_RemoveSender(t *testing.T) { myMap := newSendersMapToTest() - myMap.addTx(createTx([]byte("a"), "alice", 1)) + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", 1)) require.Equal(t, int64(1), myMap.counter.Get()) // Bob is unknown @@ -86,9 +86,9 @@ func TestSendersMap_RemoveSendersBulk_ConcurrentWithAddition(t *testing.T) { wg.Add(100) for i := 0; i < 100; i++ { go func(i int) { - myMap.addTx(createTx([]byte("a"), "alice", uint64(i))) - myMap.addTx(createTx([]byte("b"), "bob", uint64(i))) - myMap.addTx(createTx([]byte("c"), "carol", uint64(i))) + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", uint64(i))) + myMap.addTxReturnEvicted(createTx([]byte("b"), "bob", uint64(i))) + myMap.addTxReturnEvicted(createTx([]byte("c"), "carol", uint64(i))) wg.Done() }(i) @@ -103,7 +103,7 @@ func TestSendersMap_notifyAccountNonce(t *testing.T) { // Discarded notification, since sender not added yet myMap.notifyAccountNonceReturnEvictedTransactions([]byte("alice"), 42) - myMap.addTx(createTx([]byte("tx-42"), "alice", 42)) + myMap.addTxReturnEvicted(createTx([]byte("tx-42"), "alice", 42)) alice, _ := myMap.getListForSender("alice") require.Equal(t, uint64(0), alice.accountNonce.Get()) require.False(t, alice.accountNonceKnown.IsSet()) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 40ccbd6f..bf8ad5fb 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -50,7 +50,6 @@ func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) (bool, [][]b 
listForSender.onAddedTransaction(tx) - // TODO: Check how does the sender get removed if empty afterwards (maybe the answer is: "it never gets empty after applySizeConstraints()"). evicted := listForSender.applySizeConstraints() return true, evicted } From 4240502e64f34518f6fcebddb172970b5f4bf849 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 17:07:42 +0200 Subject: [PATCH 097/124] Additional logging. --- txcache/eviction.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/txcache/eviction.go b/txcache/eviction.go index b230c968..40cff622 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -34,7 +34,7 @@ func (cache *TxCache) doEviction() *evictionJournal { return nil } - logRemove.Debug("doEviction(): before eviction", + logRemove.Debug("doEviction: before eviction", "num bytes", cache.NumBytes(), "num txs", cache.CountTx(), "num senders", cache.CountSenders(), @@ -48,7 +48,7 @@ func (cache *TxCache) doEviction() *evictionJournal { stopWatch.Stop("eviction") logRemove.Debug( - "doEviction(): after eviction", + "doEviction: after eviction", "num bytes", cache.NumBytes(), "num now", cache.CountTx(), "num senders", cache.CountSenders(), @@ -169,6 +169,8 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { journal.numEvictedByPass = append(journal.numEvictedByPass, len(transactionsToEvict)) journal.numEvicted += len(transactionsToEvict) + + logRemove.Debug("evictLeastLikelyToSelectTransactions", "pass", pass, "num evicted", len(transactionsToEvict)) } return journal From be56be9d49379937bd282ec9b6b1f1f607b42321 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 17:42:55 +0200 Subject: [PATCH 098/124] Fix selection (accumulated gas condition). --- txcache/selection.go | 11 ++++++----- txcache/selection_test.go | 3 ++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/txcache/selection.go b/txcache/selection.go index 023ec783..5ed95fab 100644 --- a/txcache/selection.go +++ b/txcache/selection.go @@ -2,7 +2,7 @@ package txcache import "container/heap" -func (cache *TxCache) doSelectTransactions(gasRequested uint64) BunchOfTransactions { +func (cache *TxCache) doSelectTransactions(gasRequested uint64) (BunchOfTransactions, uint64) { senders := cache.getSenders() bunches := make([]BunchOfTransactions, 0, len(senders)) @@ -14,7 +14,7 @@ func (cache *TxCache) doSelectTransactions(gasRequested uint64) BunchOfTransacti } // Selection tolerates concurrent transaction additions / removals. -func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested uint64) BunchOfTransactions { +func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested uint64) (BunchOfTransactions, uint64) { selectedTransactions := make(BunchOfTransactions, 0, initialCapacityOfSelectionSlice) // Items popped from the heap are added to "selectedTransactions". @@ -42,12 +42,13 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u for transactionsHeap.Len() > 0 { // Always pick the best transaction. 
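 		// The candidate is kept only if it still fits within "gasRequested": the check below now
 		// runs before accumulation, so the selected gas never exceeds the request.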
item := heap.Pop(transactionsHeap).(*TransactionsHeapItem) + gasLimit := item.transaction.Tx.GetGasLimit() - accumulatedGas += item.transaction.Tx.GetGasLimit() - if accumulatedGas > gasRequested { + if accumulatedGas+gasLimit > gasRequested { break } + accumulatedGas += gasLimit selectedTransactions = append(selectedTransactions, item.transaction) // If there are more transactions in the same bunch (same sender as the popped item), @@ -61,5 +62,5 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u } } - return selectedTransactions + return selectedTransactions, accumulatedGas } diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 1294e8f5..8fff79f3 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -15,10 +15,11 @@ func TestTxCache_selectTransactionsFromBunches(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) sw.Start(t.Name()) - merged := selectTransactionsFromBunches(bunches, 10_000_000_000) + merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000) sw.Stop(t.Name()) require.Equal(t, 200000, len(merged)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) }) for name, measurement := range sw.GetMeasurementsMap() { From ff895963d0a884cd045e9264ee20545cceacd291 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 17:43:23 +0200 Subject: [PATCH 099/124] Log accumulated gas. --- txcache/diagnosis.go | 19 ++++++++++--------- txcache/txCache.go | 3 ++- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index 0b790923..58b49951 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -11,14 +11,15 @@ import ( ) type printedTransaction struct { - Hash string `json:"hash"` - Nonce uint64 `json:"nonce"` - PPU float64 `json:"ppu"` - GasPrice uint64 `json:"gasPrice"` - GasLimit uint64 `json:"gasLimit"` - Sender string `json:"sender"` - Receiver string `json:"receiver"` - DataLength int `json:"dataLength"` + Hash string `json:"hash"` + PPU float64 `json:"ppu"` + Nonce uint64 `json:"nonce"` + Sender string `json:"sender"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` } // Diagnose checks the state of the cache for inconsistencies and displays a summary, senders and transactions. 
@@ -112,7 +113,7 @@ func (cache *TxCache) diagnoseSelection() { return } - transactions := cache.doSelectTransactions(diagnosisSelectionGasRequested) + transactions, _ := cache.doSelectTransactions(diagnosisSelectionGasRequested) displaySelectionOutcome(logDiagnoseSelection, "diagnoseSelection", transactions) } diff --git a/txcache/txCache.go b/txcache/txCache.go index 72855b86..86a3a623 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -106,7 +106,7 @@ func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransact "num senders", cache.CountSenders(), ) - transactions := cache.doSelectTransactions(gasRequested) + transactions, accumulatedGas := cache.doSelectTransactions(gasRequested) stopWatch.Stop("selection") @@ -114,6 +114,7 @@ func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransact "doSelectTransactions: end", "duration", stopWatch.GetMeasurement("selection"), "num txs selected", len(transactions), + "gas", accumulatedGas, ) go cache.diagnoseCounters() From e0332d2a4d533862569902cbf0581b4fa71b73a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 17:44:55 +0200 Subject: [PATCH 100/124] Fix detection of initial gaps. --- txcache/txListForSender.go | 2 +- txcache/txListForSender_test.go | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index bf8ad5fb..1eb84a32 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -228,7 +228,7 @@ func (listForSender *txListForSender) getTxsWithoutGaps() []*WrappedTransaction nonce := value.Tx.GetNonce() // Detect initial gaps. - if len(result) == 0 && accountNonceKnown && accountNonce > nonce { + if len(result) == 0 && accountNonceKnown && accountNonce != nonce { break } diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 9484a849..1791ddda 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -200,16 +200,19 @@ func TestListForSender_hasInitialGap(t *testing.T) { // No transaction, no gap _, _, hasInitialGap := list.hasInitialGap() require.False(t, hasInitialGap) + require.Len(t, list.getTxsWithoutGaps(), 0) // One gap list.AddTx(createTx([]byte("tx-43"), ".", 43)) _, _, hasInitialGap = list.hasInitialGap() require.True(t, hasInitialGap) + require.Len(t, list.getTxsWithoutGaps(), 0) // Resolve gap list.AddTx(createTx([]byte("tx-42"), ".", 42)) _, _, hasInitialGap = list.hasInitialGap() require.False(t, hasInitialGap) + require.Len(t, list.getTxsWithoutGaps(), 2) } func TestListForSender_getTxHashes(t *testing.T) { From 8f53bbff491a0e0795604e958f88201b58e4d667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 21:31:39 +0200 Subject: [PATCH 101/124] Cleanup, logging, benchmarks, minor optimizations. 
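
Summary of the changes:

 - The eviction and selection heaps are now preallocated as values, with capacity equal to
   the number of senders (bunches). Roughly, the pattern used in both eviction.go and
   selection.go (max-heap shown; eviction uses the min-heap analogously) is:

	transactionsHeap := make(TransactionsMaxHeap, 0, len(bunches))
	heap.Init(&transactionsHeap)
	// later: heap.Push(&transactionsHeap, item) and heap.Pop(&transactionsHeap)

 - The old "AddWithEviction_UniformDistribution_25000x10" test is replaced by
   TestTxCache_DoEviction_Benchmark, and selection gains additional benchmark cases;
   indicative timings are recorded as comments in the tests.
 - The now-unused "txcache/diagnose/senders" logger is dropped, and a small getTxsHashes
   helper is added to the test utilities.
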
--- txcache/diagnosis.go | 17 +++---- txcache/eviction.go | 10 ++-- txcache/eviction_test.go | 87 +++++++++++++++++++++++++++------ txcache/loggers.go | 1 - txcache/selection.go | 10 ++-- txcache/selection_test.go | 53 ++++++++++++++++++++ txcache/testutils_test.go | 14 ++++++ txcache/txListForSender.go | 64 +----------------------- txcache/txListForSender_test.go | 42 +++++++--------- 9 files changed, 178 insertions(+), 120 deletions(-) diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index 58b49951..56e64f2c 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -11,15 +11,14 @@ import ( ) type printedTransaction struct { - Hash string `json:"hash"` - PPU float64 `json:"ppu"` - Nonce uint64 `json:"nonce"` - Sender string `json:"sender"` - GasPrice uint64 `json:"gasPrice"` - GasLimit uint64 `json:"gasLimit"` - - Receiver string `json:"receiver"` - DataLength int `json:"dataLength"` + Hash string `json:"hash"` + PPU float64 `json:"ppu"` + Nonce uint64 `json:"nonce"` + Sender string `json:"sender"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` } // Diagnose checks the state of the cache for inconsistencies and displays a summary, senders and transactions. diff --git a/txcache/eviction.go b/txcache/eviction.go index 40cff622..75384644 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -100,8 +100,8 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { // Heap is reused among passes. // Items popped from the heap are added to "transactionsToEvict" (slice is re-created in each pass). - transactionsHeap := &TransactionsMinHeap{} - heap.Init(transactionsHeap) + transactionsHeap := make(TransactionsMinHeap, 0, len(bunches)) + heap.Init(&transactionsHeap) // Initialize the heap with the first transaction of each bunch for i, bunch := range bunches { @@ -111,7 +111,7 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { } // Items will be reused (see below). Each sender gets one (and only one) item in the heap. - heap.Push(transactionsHeap, &TransactionsHeapItem{ + heap.Push(&transactionsHeap, &TransactionsHeapItem{ senderIndex: i, transactionIndex: 0, transaction: bunch[0], @@ -125,7 +125,7 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { // Select transactions (sorted). for transactionsHeap.Len() > 0 { // Always pick the "worst" transaction. - item := heap.Pop(transactionsHeap).(*TransactionsHeapItem) + item := heap.Pop(&transactionsHeap).(*TransactionsHeapItem) if len(transactionsToEvict) >= int(cache.config.NumItemsToPreemptivelyEvict) { // We have enough transactions to evict in this pass. @@ -142,7 +142,7 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { if item.transactionIndex < len(bunches[item.senderIndex]) { // Item is reused (same originating sender), pushed back on the heap. 
item.transaction = bunches[item.senderIndex][item.transactionIndex] - heap.Push(transactionsHeap, item) + heap.Push(&transactionsHeap, item) } } diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index 1b4f49eb..db34d5bc 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -1,9 +1,11 @@ package txcache import ( + "fmt" "math" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) @@ -115,31 +117,88 @@ func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { require.Equal(t, 4, int(cache.CountTx())) } -// This seems to be the most reasonable "bad-enough" (not worst) scenario to benchmark: -// 25000 senders with 10 transactions each, with "NumItemsToPreemptivelyEvict" = 50000. -// ~0.5 seconds on average laptop. -func TestTxCache_AddWithEviction_UniformDistribution_25000x10(t *testing.T) { +func TestTxCache_DoEviction_Benchmark(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 16, NumBytesThreshold: 1000000000, NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, - CountThreshold: 240000, + CountThreshold: 300000, CountPerSenderThreshold: math.MaxUint32, - EvictionEnabled: true, NumItemsToPreemptivelyEvict: 50000, } txGasHandler := txcachemocks.NewTxGasHandlerMock() - numSenders := 25000 - numTxsPerSender := 10 - cache, err := NewTxCache(config, txGasHandler) - require.Nil(t, err) - require.NotNil(t, cache) + sw := core.NewStopWatch() + + t.Run("numSenders = 35000, numTransactions = 10", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 35000, 10) + cache.config.EvictionEnabled = true + + require.Equal(t, uint64(350000), cache.CountTx()) + + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 50000, journal.numEvicted) + require.Equal(t, 1, len(journal.numEvictedByPass)) + }) + + t.Run("numSenders = 100000, numTransactions = 5", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) - addManyTransactionsWithUniformDistribution(cache, numSenders, numTxsPerSender) + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 100000, 5) + cache.config.EvictionEnabled = true + + require.Equal(t, uint64(500000), cache.CountTx()) + + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 200000, journal.numEvicted) + require.Equal(t, 4, len(journal.numEvictedByPass)) + }) + + t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 10000, 100) + cache.config.EvictionEnabled = true + + require.Equal(t, uint64(1000000), cache.CountTx()) + + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 700000, journal.numEvicted) + require.Equal(t, 14, len(journal.numEvictedByPass)) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } - // Sometimes (due to map iteration non-determinism), more eviction happens - one more step of 100 senders. 
- require.LessOrEqual(t, uint32(cache.CountTx()), config.CountThreshold) + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.079401s (TestTxCache_DoEviction_Benchmark/numSenders_=_35000,_numTransactions_=_10) + // 0.366044s (TestTxCache_DoEviction_Benchmark/numSenders_=_100000,_numTransactions_=_5) + // 0.611849s (TestTxCache_DoEviction_Benchmark/numSenders_=_10000,_numTransactions_=_100) } diff --git a/txcache/loggers.go b/txcache/loggers.go index af55e5b0..d829a1e0 100644 --- a/txcache/loggers.go +++ b/txcache/loggers.go @@ -7,5 +7,4 @@ var logAdd = logger.GetOrCreate("txcache/add") var logRemove = logger.GetOrCreate("txcache/remove") var logSelect = logger.GetOrCreate("txcache/select") var logDiagnoseSelection = logger.GetOrCreate("txcache/diagnose/selection") -var logDiagnoseSenders = logger.GetOrCreate("txcache/diagnose/senders") var logDiagnoseTransactions = logger.GetOrCreate("txcache/diagnose/transactions") diff --git a/txcache/selection.go b/txcache/selection.go index 5ed95fab..b1b0204e 100644 --- a/txcache/selection.go +++ b/txcache/selection.go @@ -18,8 +18,8 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u selectedTransactions := make(BunchOfTransactions, 0, initialCapacityOfSelectionSlice) // Items popped from the heap are added to "selectedTransactions". - transactionsHeap := &TransactionsMaxHeap{} - heap.Init(transactionsHeap) + transactionsHeap := make(TransactionsMaxHeap, 0, len(bunches)) + heap.Init(&transactionsHeap) // Initialize the heap with the first transaction of each bunch for i, bunch := range bunches { @@ -29,7 +29,7 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u } // Items will be reused (see below). Each sender gets one (and only one) item in the heap. - heap.Push(transactionsHeap, &TransactionsHeapItem{ + heap.Push(&transactionsHeap, &TransactionsHeapItem{ senderIndex: i, transactionIndex: 0, transaction: bunch[0], @@ -41,7 +41,7 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u // Select transactions (sorted). for transactionsHeap.Len() > 0 { // Always pick the best transaction. - item := heap.Pop(transactionsHeap).(*TransactionsHeapItem) + item := heap.Pop(&transactionsHeap).(*TransactionsHeapItem) gasLimit := item.transaction.Tx.GetGasLimit() if accumulatedGas+gasLimit > gasRequested { @@ -58,7 +58,7 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u if item.transactionIndex < len(bunches[item.senderIndex]) { // Item is reused (same originating sender), pushed back on the heap. 
item.transaction = bunches[item.senderIndex][item.transactionIndex] - heap.Push(transactionsHeap, item) + heap.Push(&transactionsHeap, item) } } diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 8fff79f3..94ca1825 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -11,6 +11,13 @@ import ( func TestTxCache_selectTransactionsFromBunches(t *testing.T) { sw := core.NewStopWatch() + t.Run("empty cache", func(t *testing.T) { + merged, accumulatedGas := selectTransactionsFromBunches([]BunchOfTransactions{}, 10_000_000_000) + + require.Equal(t, 0, len(merged)) + require.Equal(t, uint64(0), accumulatedGas) + }) + t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) @@ -22,9 +29,55 @@ func TestTxCache_selectTransactionsFromBunches(t *testing.T) { require.Equal(t, uint64(10_000_000_000), accumulatedGas) }) + t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { + bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + + sw.Start(t.Name()) + merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(merged)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + t.Run("numSenders = 100000, numTransactions = 3", func(t *testing.T) { + bunches := createBunchesOfTransactionsWithUniformDistribution(100000, 3) + + sw.Start(t.Name()) + merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(merged)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + t.Run("numSenders = 300000, numTransactions = 1", func(t *testing.T) { + bunches := createBunchesOfTransactionsWithUniformDistribution(300000, 1) + + sw.Start(t.Name()) + merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(merged)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + for name, measurement := range sw.GetMeasurementsMap() { fmt.Printf("%fs (%s)\n", measurement, name) } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.059954s (TestTxCache_selectTransactionsFromBunches/numSenders_=_1000,_numTransactions_=_1000) + // 0.087949s (TestTxCache_selectTransactionsFromBunches/numSenders_=_10000,_numTransactions_=_100) + // 0.204968s (TestTxCache_selectTransactionsFromBunches/numSenders_=_100000,_numTransactions_=_3) + // 0.506842s (TestTxCache_selectTransactionsFromBunches/numSenders_=_300000,_numTransactions_=_1) } func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []BunchOfTransactions { diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 3cd21895..b0a2c056 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -59,6 +59,20 @@ func (listForSender *txListForSender) getTxHashesAsStrings() []string { return hashesAsStrings(hashes) } +func (listForSender *txListForSender) getTxsHashes() [][]byte { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + + result := make([][]byte, 0, listForSender.countTx()) + + for element := listForSender.items.Front(); element != nil; element = element.Next() { + value := element.Value.(*WrappedTransaction) + result = 
append(result, value.TxHash) + } + + return result +} + func hashesAsStrings(hashes [][]byte) []string { result := make([]string, len(hashes)) for i := 0; i < len(hashes); i++ { diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 1eb84a32..b87cd7b6 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -182,21 +182,6 @@ func (listForSender *txListForSender) IsEmpty() bool { return listForSender.countTxWithLock() == 0 } -// getTxsHashes returns the hashes of transactions in the list -func (listForSender *txListForSender) getTxsHashes() [][]byte { - listForSender.mutex.RLock() - defer listForSender.mutex.RUnlock() - - result := make([][]byte, 0, listForSender.countTx()) - - for element := listForSender.items.Front(); element != nil; element = element.Next() { - value := element.Value.(*WrappedTransaction) - result = append(result, value.TxHash) - } - - return result -} - // getTxs returns the transactions in the list func (listForSender *txListForSender) getTxs() []*WrappedTransaction { listForSender.mutex.RLock() @@ -229,11 +214,13 @@ func (listForSender *txListForSender) getTxsWithoutGaps() []*WrappedTransaction // Detect initial gaps. if len(result) == 0 && accountNonceKnown && accountNonce != nonce { + log.Trace("txListForSender.getTxsWithoutGaps, initial gap", "sender", listForSender.sender, "nonce", nonce, "accountNonce", accountNonce) break } // Detect middle gaps. if len(result) > 0 && nonce != previousNonce+1 { + log.Trace("txListForSender.getTxsWithoutGaps, middle gap", "sender", listForSender.sender, "nonce", nonce, "previousNonce", previousNonce) break } @@ -255,16 +242,6 @@ func (listForSender *txListForSender) countTxWithLock() uint64 { return uint64(listForSender.items.Len()) } -func approximatelyCountTxInLists(lists []*txListForSender) uint64 { - count := uint64(0) - - for _, listForSender := range lists { - count += listForSender.countTxWithLock() - } - - return count -} - // Removes transactions with lower nonces and returns their hashes. func (listForSender *txListForSender) notifyAccountNonceReturnEvictedTransactions(nonce uint64) [][]byte { // Optimization: if nonce is the same, do nothing. @@ -324,43 +301,6 @@ func (listForSender *txListForSender) evictTransactionsWithHigherOrEqualNonces(g } } -// This function should only be used in critical section (listForSender.mutex). -// When a gap is detected, the (known) account nonce and the first transactio nonce are also returned. 
-func (listForSender *txListForSender) hasInitialGap() (uint64, uint64, bool) { - accountNonceKnown := listForSender.accountNonceKnown.IsSet() - if !accountNonceKnown { - return 0, 0, false - } - - firstTx := listForSender.getLowestNonceTx() - if firstTx == nil { - return 0, 0, false - } - - accountNonce := listForSender.accountNonce.Get() - firstTxNonce := firstTx.Tx.GetNonce() - hasGap := firstTxNonce > accountNonce - return accountNonce, firstTxNonce, hasGap -} - -func (listForSender *txListForSender) hasInitialGapWithLock() bool { - listForSender.mutex.RLock() - defer listForSender.mutex.RUnlock() - _, _, hasInitialGap := listForSender.hasInitialGap() - return hasInitialGap -} - -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) getLowestNonceTx() *WrappedTransaction { - front := listForSender.items.Front() - if front == nil { - return nil - } - - value := front.Value.(*WrappedTransaction) - return value -} - // GetKey returns the key func (listForSender *txListForSender) GetKey() string { return listForSender.sender diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 1791ddda..d8bf9262 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -2,6 +2,7 @@ package txcache import ( "math" + "sync" "testing" "github.com/stretchr/testify/require" @@ -198,47 +199,40 @@ func TestListForSender_hasInitialGap(t *testing.T) { list.notifyAccountNonceReturnEvictedTransactions(42) // No transaction, no gap - _, _, hasInitialGap := list.hasInitialGap() - require.False(t, hasInitialGap) require.Len(t, list.getTxsWithoutGaps(), 0) // One gap list.AddTx(createTx([]byte("tx-43"), ".", 43)) - _, _, hasInitialGap = list.hasInitialGap() - require.True(t, hasInitialGap) require.Len(t, list.getTxsWithoutGaps(), 0) // Resolve gap list.AddTx(createTx([]byte("tx-42"), ".", 42)) - _, _, hasInitialGap = list.hasInitialGap() - require.False(t, hasInitialGap) require.Len(t, list.getTxsWithoutGaps(), 2) } -func TestListForSender_getTxHashes(t *testing.T) { +func TestListForSender_DetectRaceConditions(t *testing.T) { list := newUnconstrainedListToTest() - require.Len(t, list.getTxsHashes(), 0) - list.AddTx(createTx([]byte("A"), ".", 1)) - require.Len(t, list.getTxsHashes(), 1) + wg := sync.WaitGroup{} - list.AddTx(createTx([]byte("B"), ".", 2)) - list.AddTx(createTx([]byte("C"), ".", 3)) - require.Len(t, list.getTxsHashes(), 3) -} + doOperations := func() { + // These might be called concurrently: + _ = list.IsEmpty() + _ = list.getTxs() + _ = list.getTxsWithoutGaps() + _ = list.countTxWithLock() + _ = list.notifyAccountNonceReturnEvictedTransactions(42) + _, _ = list.AddTx(createTx([]byte("test"), ".", 42)) -func TestListForSender_DetectRaceConditions(t *testing.T) { - list := newUnconstrainedListToTest() + wg.Done() + } - go func() { - // These are called concurrently with addition: during eviction, during removal etc. - approximatelyCountTxInLists([]*txListForSender{list}) - list.IsEmpty() - }() + for i := 0; i < 100; i++ { + wg.Add(1) + go doOperations() + } - go func() { - list.AddTx(createTx([]byte("test"), ".", 42)) - }() + wg.Wait() } func newUnconstrainedListToTest() *txListForSender { From 9b2b959bfce9d6eadc0cb943157d4a0e53653a27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 21:36:45 +0200 Subject: [PATCH 102/124] Refactor / split file. 
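This is a pure code-move: the shared heap item and the two heap flavours are
split out of heaps.go into dedicated files, with no behaviour change. For
reference, the two flavours differ only in the direction of the comparison
(excerpts from the new files):

    // TransactionsMaxHeap keeps the most desirable transaction on top
    // (used by selection):
    func (maxHeap TransactionsMaxHeap) Less(i, j int) bool {
        return maxHeap[i].transaction.isTransactionMoreDesirableByProtocol(maxHeap[j].transaction)
    }

    // TransactionsMinHeap swaps the operands of the same comparison, thus
    // keeping the least desirable transaction on top (used by eviction):
    func (minHeap TransactionsMinHeap) Less(i, j int) bool {
        return minHeap[j].transaction.isTransactionMoreDesirableByProtocol(minHeap[i].transaction)
    }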
--- txcache/heaps.go | 58 --------------------------------- txcache/transactionsHeapItem.go | 7 ++++ txcache/transactionsMaxHeap.go | 27 +++++++++++++++ txcache/transactionsMinHeap.go | 27 +++++++++++++++ 4 files changed, 61 insertions(+), 58 deletions(-) delete mode 100644 txcache/heaps.go create mode 100644 txcache/transactionsHeapItem.go create mode 100644 txcache/transactionsMaxHeap.go create mode 100644 txcache/transactionsMinHeap.go diff --git a/txcache/heaps.go b/txcache/heaps.go deleted file mode 100644 index bde84f94..00000000 --- a/txcache/heaps.go +++ /dev/null @@ -1,58 +0,0 @@ -package txcache - -type TransactionsHeapItem struct { - senderIndex int - transactionIndex int - transaction *WrappedTransaction -} - -type TransactionsMaxHeap []*TransactionsHeapItem -type TransactionsMinHeap []*TransactionsHeapItem - -func (maxHeap TransactionsMaxHeap) Len() int { return len(maxHeap) } - -func (maxHeap TransactionsMaxHeap) Less(i, j int) bool { - return maxHeap[i].transaction.isTransactionMoreDesirableByProtocol(maxHeap[j].transaction) -} - -func (maxHeap TransactionsMaxHeap) Swap(i, j int) { - maxHeap[i], maxHeap[j] = maxHeap[j], maxHeap[i] -} - -func (maxHeap *TransactionsMaxHeap) Push(x interface{}) { - *maxHeap = append(*maxHeap, x.(*TransactionsHeapItem)) -} - -func (maxHeap *TransactionsMaxHeap) Pop() interface{} { - // Standard code when storing the heap in a slice: - // https://pkg.go.dev/container/heap - old := *maxHeap - n := len(old) - item := old[n-1] - *maxHeap = old[0 : n-1] - return item -} - -func (minHeap TransactionsMinHeap) Len() int { return len(minHeap) } - -func (minHeap TransactionsMinHeap) Less(i, j int) bool { - return minHeap[j].transaction.isTransactionMoreDesirableByProtocol(minHeap[i].transaction) -} - -func (minHeap TransactionsMinHeap) Swap(i, j int) { - minHeap[i], minHeap[j] = minHeap[j], minHeap[i] -} - -func (minHeap *TransactionsMinHeap) Push(x interface{}) { - *minHeap = append(*minHeap, x.(*TransactionsHeapItem)) -} - -func (minHeap *TransactionsMinHeap) Pop() interface{} { - // Standard code when storing the heap in a slice: - // https://pkg.go.dev/container/heap - old := *minHeap - n := len(old) - item := old[n-1] - *minHeap = old[0 : n-1] - return item -} diff --git a/txcache/transactionsHeapItem.go b/txcache/transactionsHeapItem.go new file mode 100644 index 00000000..c458115f --- /dev/null +++ b/txcache/transactionsHeapItem.go @@ -0,0 +1,7 @@ +package txcache + +type TransactionsHeapItem struct { + senderIndex int + transactionIndex int + transaction *WrappedTransaction +} diff --git a/txcache/transactionsMaxHeap.go b/txcache/transactionsMaxHeap.go new file mode 100644 index 00000000..145d17c2 --- /dev/null +++ b/txcache/transactionsMaxHeap.go @@ -0,0 +1,27 @@ +package txcache + +type TransactionsMaxHeap []*TransactionsHeapItem + +func (maxHeap TransactionsMaxHeap) Len() int { return len(maxHeap) } + +func (maxHeap TransactionsMaxHeap) Less(i, j int) bool { + return maxHeap[i].transaction.isTransactionMoreDesirableByProtocol(maxHeap[j].transaction) +} + +func (maxHeap TransactionsMaxHeap) Swap(i, j int) { + maxHeap[i], maxHeap[j] = maxHeap[j], maxHeap[i] +} + +func (maxHeap *TransactionsMaxHeap) Push(x interface{}) { + *maxHeap = append(*maxHeap, x.(*TransactionsHeapItem)) +} + +func (maxHeap *TransactionsMaxHeap) Pop() interface{} { + // Standard code when storing the heap in a slice: + // https://pkg.go.dev/container/heap + old := *maxHeap + n := len(old) + item := old[n-1] + *maxHeap = old[0 : n-1] + return item +} diff --git 
a/txcache/transactionsMinHeap.go b/txcache/transactionsMinHeap.go new file mode 100644 index 00000000..e69b00b0 --- /dev/null +++ b/txcache/transactionsMinHeap.go @@ -0,0 +1,27 @@ +package txcache + +type TransactionsMinHeap []*TransactionsHeapItem + +func (minHeap TransactionsMinHeap) Len() int { return len(minHeap) } + +func (minHeap TransactionsMinHeap) Less(i, j int) bool { + return minHeap[j].transaction.isTransactionMoreDesirableByProtocol(minHeap[i].transaction) +} + +func (minHeap TransactionsMinHeap) Swap(i, j int) { + minHeap[i], minHeap[j] = minHeap[j], minHeap[i] +} + +func (minHeap *TransactionsMinHeap) Push(x interface{}) { + *minHeap = append(*minHeap, x.(*TransactionsHeapItem)) +} + +func (minHeap *TransactionsMinHeap) Pop() interface{} { + // Standard code when storing the heap in a slice: + // https://pkg.go.dev/container/heap + old := *minHeap + n := len(old) + item := old[n-1] + *minHeap = old[0 : n-1] + return item +} From 105701d80646cba019cd4a64eedc15a4efa0e0a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 4 Nov 2024 23:27:00 +0200 Subject: [PATCH 103/124] Optimize isTransactionMoreDesirableByProtocol(). --- txcache/diagnosis.go | 16 ++++----- txcache/txCache_test.go | 31 ++++++++++++----- txcache/txListBySenderMap.go | 2 +- txcache/wrappedTransaction.go | 42 ++++++++++------------ txcache/wrappedTransaction_test.go | 56 ++++++++++++------------------ 5 files changed, 73 insertions(+), 74 deletions(-) diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index 56e64f2c..4b5fedaf 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -11,14 +11,14 @@ import ( ) type printedTransaction struct { - Hash string `json:"hash"` - PPU float64 `json:"ppu"` - Nonce uint64 `json:"nonce"` - Sender string `json:"sender"` - GasPrice uint64 `json:"gasPrice"` - GasLimit uint64 `json:"gasLimit"` - Receiver string `json:"receiver"` - DataLength int `json:"dataLength"` + Hash string `json:"hash"` + PPU uint64 `json:"ppu"` + Nonce uint64 `json:"nonce"` + Sender string `json:"sender"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` } // Diagnose checks the state of the cache for inconsistencies and displays a summary, senders and transactions. 
diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 5b854d1e..7052b359 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -293,26 +293,41 @@ func Test_SelectTransactions_Dummy(t *testing.T) { cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) + require.Equal(t, 3193030061, int(fnv32("hash-alice-4"))) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + require.Equal(t, 3193030058, int(fnv32("hash-alice-3"))) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + require.Equal(t, 3193030059, int(fnv32("hash-alice-2"))) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + require.Equal(t, 3193030056, int(fnv32("hash-alice-1"))) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) + require.Equal(t, 187766579, int(fnv32("hash-bob-7"))) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) + require.Equal(t, 187766578, int(fnv32("hash-bob-6"))) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) + require.Equal(t, 187766577, int(fnv32("hash-bob-5"))) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) + require.Equal(t, 3082288595, int(fnv32("hash-carol-1"))) selected := cache.SelectTransactions(math.MaxUint64) require.Len(t, selected, 8) // Check order - require.Equal(t, "hash-carol-1", string(selected[0].TxHash)) - require.Equal(t, "hash-bob-5", string(selected[1].TxHash)) - require.Equal(t, "hash-bob-6", string(selected[2].TxHash)) - require.Equal(t, "hash-bob-7", string(selected[3].TxHash)) - require.Equal(t, "hash-alice-1", string(selected[4].TxHash)) - require.Equal(t, "hash-alice-2", string(selected[5].TxHash)) - require.Equal(t, "hash-alice-3", string(selected[6].TxHash)) - require.Equal(t, "hash-alice-4", string(selected[7].TxHash)) + require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) + require.Equal(t, "hash-alice-2", string(selected[1].TxHash)) + require.Equal(t, "hash-alice-3", string(selected[2].TxHash)) + require.Equal(t, "hash-alice-4", string(selected[3].TxHash)) + require.Equal(t, "hash-carol-1", string(selected[4].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[5].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[6].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[7].TxHash)) }) t.Run("alice > carol > bob", func(t *testing.T) { diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 55c8bf62..ca53bd7f 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -36,7 +36,7 @@ func newTxListBySenderMap( func (txMap *txListBySenderMap) addTxReturnEvicted(tx *WrappedTransaction) (bool, [][]byte) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) - tx.computePricePerGasUnit(txMap.txGasHandler) + tx.precomputeFields(txMap.txGasHandler) added, evictedHashes := listForSender.AddTx(tx) diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index 5d623536..ec29bf81 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -1,8 +1,6 @@ package txcache import ( - "bytes" - "github.com/multiversx/mx-chain-core-go/data" ) @@ -16,11 +14,12 @@ type WrappedTransaction struct { ReceiverShardID uint32 Size int64 - PricePerUnit float64 + PricePerUnit uint64 + HashFnv32 uint32 } -// computePricePerGasUnit computes (and caches) the (average) price per gas unit. 
-func (transaction *WrappedTransaction) computePricePerGasUnit(txGasHandler TxGasHandler) { +// precomputeFields computes (and caches) the (average) price per gas unit. +func (transaction *WrappedTransaction) precomputeFields(txGasHandler TxGasHandler) { fee := txGasHandler.ComputeTxFee(transaction.Tx).Uint64() gasLimit := transaction.Tx.GetGasLimit() @@ -28,7 +27,19 @@ func (transaction *WrappedTransaction) computePricePerGasUnit(txGasHandler TxGas return } - transaction.PricePerUnit = float64(fee) / float64(gasLimit) + transaction.PricePerUnit = fee / gasLimit + transaction.HashFnv32 = fnv32(string(transaction.TxHash)) +} + +// fnv32 implements https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function for 32 bits +func fnv32(key string) uint32 { + hash := uint32(2166136261) + const prime32 = uint32(16777619) + for i := 0; i < len(key); i++ { + hash *= prime32 + hash ^= uint32(key[i]) + } + return hash } // Equality is out of scope (not possible in our case). @@ -40,21 +51,6 @@ func (transaction *WrappedTransaction) isTransactionMoreDesirableByProtocol(othe return ppu > ppuOther } - // Then, compare by gas price (to promote the practice of a higher gas price) - gasPrice := transaction.Tx.GetGasPrice() - gasPriceOther := otherTransaction.Tx.GetGasPrice() - if gasPrice != gasPriceOther { - return gasPrice > gasPriceOther - } - - // Then, compare by gas limit (promote the practice of lower gas limit) - // Compare Gas Limits (promote lower gas limit) - gasLimit := transaction.Tx.GetGasLimit() - gasLimitOther := otherTransaction.Tx.GetGasLimit() - if gasLimit != gasLimitOther { - return gasLimit < gasLimitOther - } - - // In the end, compare by transaction hash - return bytes.Compare(transaction.TxHash, otherTransaction.TxHash) > 0 + // In the end, compare by hash number of transaction hash + return transaction.HashFnv32 > otherTransaction.HashFnv32 } diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index 04c8cd92..1c894426 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -7,31 +7,34 @@ import ( "github.com/stretchr/testify/require" ) -func TestWrappedTransaction_computePricePerGasUnit(t *testing.T) { +func TestWrappedTransaction_precomputeFields(t *testing.T) { txGasHandler := txcachemocks.NewTxGasHandlerMock() t.Run("only move balance gas limit", func(t *testing.T) { tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) - tx.computePricePerGasUnit(txGasHandler) + tx.precomputeFields(txGasHandler) - require.Equal(t, float64(oneBillion), tx.PricePerUnit) + require.Equal(t, oneBillion, int(tx.PricePerUnit)) + require.Equal(t, 84696446, int(tx.HashFnv32)) }) t.Run("move balance gas limit and execution gas limit (1)", func(t *testing.T) { - tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) - tx.computePricePerGasUnit(txGasHandler) + tx := createTx([]byte("b"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) + tx.precomputeFields(txGasHandler) - require.InDelta(t, float64(999980777), tx.PricePerUnit, 0.1) + require.Equal(t, 999_980_777, int(tx.PricePerUnit)) + require.Equal(t, 84696445, int(tx.HashFnv32)) }) t.Run("move balance gas limit and execution gas limit (2)", func(t *testing.T) { - tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(oneMilion).withGasPrice(oneBillion) - tx.computePricePerGasUnit(txGasHandler) + tx := createTx([]byte("c"), "c", 
1).withDataLength(1).withGasLimit(oneMilion).withGasPrice(oneBillion) + tx.precomputeFields(txGasHandler) actualFee := 51500*oneBillion + (oneMilion-51500)*oneBillion/100 - require.Equal(t, 60985000000000, actualFee) + require.Equal(t, 60_985_000_000_000, actualFee) - require.InDelta(t, actualFee/oneMilion, tx.PricePerUnit, 0.1) + require.Equal(t, actualFee/oneMilion, int(tx.PricePerUnit)) + require.Equal(t, 84696444, int(tx.HashFnv32)) }) } @@ -40,38 +43,23 @@ func TestWrappedTransaction_isTransactionMoreDesirableByProtocol(t *testing.T) { t.Run("decide by price per unit", func(t *testing.T) { a := createTx([]byte("a-1"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) - a.computePricePerGasUnit(txGasHandler) + a.precomputeFields(txGasHandler) b := createTx([]byte("b-1"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) - b.computePricePerGasUnit(txGasHandler) + b.precomputeFields(txGasHandler) require.True(t, a.isTransactionMoreDesirableByProtocol(b)) }) - t.Run("decide by gas price (set them up to have the same PPU)", func(t *testing.T) { - a := createTx([]byte("a-2"), "a", 1).withGasPrice(oneBillion + 1) - b := createTx([]byte("b-2"), "b", 1).withGasPrice(oneBillion) + t.Run("decide by transaction hash (set them up to have the same PPU)", func(t *testing.T) { + a := createTx([]byte("a-7"), "a", 7) + a.precomputeFields(txGasHandler) + require.Equal(t, 2191299170, int(a.HashFnv32)) - a.PricePerUnit = 42 - b.PricePerUnit = 42 + b := createTx([]byte("b-7"), "b", 7) + b.precomputeFields(txGasHandler) + require.Equal(t, 1654268265, int(b.HashFnv32)) require.True(t, a.isTransactionMoreDesirableByProtocol(b)) }) - - t.Run("decide by gas limit (set them up to have the same PPU and gas price)", func(t *testing.T) { - a := createTx([]byte("a-2"), "a", 1).withGasLimit(55000) - b := createTx([]byte("b-2"), "b", 1).withGasLimit(60000) - - a.PricePerUnit = 42 - b.PricePerUnit = 42 - - require.True(t, a.isTransactionMoreDesirableByProtocol(b)) - }) - - t.Run("decide by transaction hash (set them up to have the same PPU, gas price and gas limit)", func(t *testing.T) { - a := createTx([]byte("a-2"), "a", 1) - b := createTx([]byte("b-2"), "b", 1) - - require.True(t, b.isTransactionMoreDesirableByProtocol(a)) - }) } From 0257f64803d7b30dac81c835f61b8bc3d0390167 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 10:36:45 +0200 Subject: [PATCH 104/124] Limit selection by "maxNum" transactions. 
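Selection now takes an upper bound on the number of returned transactions, in
addition to the requested gas, and also reports the accumulated gas. A usage
sketch (the concrete numbers below are illustrative, not taken from the code):

    // Selection stops at ~10 billion gas units or at 30000 transactions,
    // whichever limit is reached first.
    selected, accumulatedGas := cache.SelectTransactions(10_000_000_000, 30_000)

    // Both bounds are respected:
    //   len(selected) <= 30000
    //   accumulatedGas <= 10_000_000_000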
--- txcache/diagnosis.go | 3 ++- txcache/selection.go | 9 ++++++--- txcache/selection_test.go | 19 ------------------- txcache/testutils_test.go | 35 +++++++++++++++++++++++++++++++---- txcache/txCache.go | 6 +++--- txcache/txCache_test.go | 19 ++++++++++++------- 6 files changed, 54 insertions(+), 37 deletions(-) diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index 4b5fedaf..34a768f1 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math" "strings" "github.com/multiversx/mx-chain-core-go/core" @@ -112,7 +113,7 @@ func (cache *TxCache) diagnoseSelection() { return } - transactions, _ := cache.doSelectTransactions(diagnosisSelectionGasRequested) + transactions, _ := cache.doSelectTransactions(diagnosisSelectionGasRequested, math.MaxInt) displaySelectionOutcome(logDiagnoseSelection, "diagnoseSelection", transactions) } diff --git a/txcache/selection.go b/txcache/selection.go index b1b0204e..08bc7c67 100644 --- a/txcache/selection.go +++ b/txcache/selection.go @@ -2,7 +2,7 @@ package txcache import "container/heap" -func (cache *TxCache) doSelectTransactions(gasRequested uint64) (BunchOfTransactions, uint64) { +func (cache *TxCache) doSelectTransactions(gasRequested uint64, maxNum int) (BunchOfTransactions, uint64) { senders := cache.getSenders() bunches := make([]BunchOfTransactions, 0, len(senders)) @@ -10,11 +10,11 @@ func (cache *TxCache) doSelectTransactions(gasRequested uint64) (BunchOfTransact bunches = append(bunches, sender.getTxsWithoutGaps()) } - return selectTransactionsFromBunches(bunches, gasRequested) + return selectTransactionsFromBunches(bunches, gasRequested, maxNum) } // Selection tolerates concurrent transaction additions / removals. -func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested uint64) (BunchOfTransactions, uint64) { +func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested uint64, maxNum int) (BunchOfTransactions, uint64) { selectedTransactions := make(BunchOfTransactions, 0, initialCapacityOfSelectionSlice) // Items popped from the heap are added to "selectedTransactions". 
@@ -47,6 +47,9 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u if accumulatedGas+gasLimit > gasRequested { break } + if len(selectedTransactions) >= maxNum { + break + } accumulatedGas += gasLimit selectedTransactions = append(selectedTransactions, item.transaction) diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 94ca1825..16c1d64e 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -79,22 +79,3 @@ func TestTxCache_selectTransactionsFromBunches(t *testing.T) { // 0.204968s (TestTxCache_selectTransactionsFromBunches/numSenders_=_100000,_numTransactions_=_3) // 0.506842s (TestTxCache_selectTransactionsFromBunches/numSenders_=_300000,_numTransactions_=_1) } - -func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []BunchOfTransactions { - bunches := make([]BunchOfTransactions, 0, nSenders) - - for senderTag := 0; senderTag < nSenders; senderTag++ { - bunch := make(BunchOfTransactions, 0, nTransactionsPerSender) - sender := createFakeSenderAddress(senderTag) - - for txNonce := nTransactionsPerSender; txNonce > 0; txNonce-- { - transactionHash := createFakeTxHash(sender, txNonce) - transaction := createTx(transactionHash, string(sender), uint64(txNonce)) - bunch = append(bunch, transaction) - } - - bunches = append(bunches, bunch) - } - - return bunches -} diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index b0a2c056..51e16773 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -2,10 +2,12 @@ package txcache import ( "encoding/binary" + "math/rand" "sync" "time" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" ) const oneMilion = 1000000 @@ -95,12 +97,37 @@ func addManyTransactionsWithUniformDistribution(cache *TxCache, nSenders int, nT for senderTag := 0; senderTag < nSenders; senderTag++ { sender := createFakeSenderAddress(senderTag) - for txNonce := nTransactionsPerSender; txNonce > 0; txNonce-- { - txHash := createFakeTxHash(sender, txNonce) - tx := createTx(txHash, string(sender), uint64(txNonce)) - cache.AddTx(tx) + for nonce := nTransactionsPerSender; nonce > 0; nonce-- { + transactionHash := createFakeTxHash(sender, nonce) + gasPrice := oneBillion + rand.Intn(3*oneBillion) + transaction := createTx(transactionHash, string(sender), uint64(nonce)).withGasPrice(uint64(gasPrice)) + + cache.AddTx(transaction) + } + } +} + +func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []BunchOfTransactions { + bunches := make([]BunchOfTransactions, 0, nSenders) + txGasHandler := txcachemocks.NewTxGasHandlerMock() + + for senderTag := 0; senderTag < nSenders; senderTag++ { + bunch := make(BunchOfTransactions, 0, nTransactionsPerSender) + sender := createFakeSenderAddress(senderTag) + + for nonce := nTransactionsPerSender; nonce > 0; nonce-- { + transactionHash := createFakeTxHash(sender, nonce) + gasPrice := oneBillion + rand.Intn(3*oneBillion) + transaction := createTx(transactionHash, string(sender), uint64(nonce)).withGasPrice(uint64(gasPrice)) + transaction.precomputeFields(txGasHandler) + + bunch = append(bunch, transaction) } + + bunches = append(bunches, bunch) } + + return bunches } func createTx(hash []byte, sender string, nonce uint64) *WrappedTransaction { diff --git a/txcache/txCache.go b/txcache/txCache.go index 86a3a623..08c1343f 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -95,7 +95,7 
@@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { // SelectTransactions selects a reasonably fair list of transactions to be included in the next miniblock // It returns transactions with total gas ~ "gasRequested". -func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransaction { +func (cache *TxCache) SelectTransactions(gasRequested uint64, maxNum int) ([]*WrappedTransaction, uint64) { stopWatch := core.NewStopWatch() stopWatch.Start("selection") @@ -106,7 +106,7 @@ func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransact "num senders", cache.CountSenders(), ) - transactions, accumulatedGas := cache.doSelectTransactions(gasRequested) + transactions, accumulatedGas := cache.doSelectTransactions(gasRequested, maxNum) stopWatch.Stop("selection") @@ -120,7 +120,7 @@ func (cache *TxCache) SelectTransactions(gasRequested uint64) []*WrappedTransact go cache.diagnoseCounters() go displaySelectionOutcome(logSelect, "selection", transactions) - return transactions + return transactions, accumulatedGas } func (cache *TxCache) getSenders() []*txListForSender { diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 7052b359..6dc07f2b 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -316,8 +316,9 @@ func Test_SelectTransactions_Dummy(t *testing.T) { cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) require.Equal(t, 3082288595, int(fnv32("hash-carol-1"))) - selected := cache.SelectTransactions(math.MaxUint64) + selected, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) require.Len(t, selected, 8) + require.Equal(t, 400000, int(accumulatedGas)) // Check order require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) @@ -337,8 +338,9 @@ func Test_SelectTransactions_Dummy(t *testing.T) { cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasPrice(50)) cache.AddTx(createTx([]byte("hash-carol-3"), "carol", 3).withGasPrice(75)) - selected := cache.SelectTransactions(math.MaxUint64) + selected, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) require.Len(t, selected, 3) + require.Equal(t, 150000, int(accumulatedGas)) // Check order require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) @@ -360,9 +362,9 @@ func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) - selected := cache.SelectTransactions(760000) - + selected, accumulatedGas := cache.SelectTransactions(760000, math.MaxInt) require.Len(t, selected, 5) + require.Equal(t, 750000, int(accumulatedGas)) // Check order require.Equal(t, "hash-carol-1", string(selected[0].TxHash)) @@ -390,8 +392,9 @@ func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { numSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol - sorted := cache.SelectTransactions(math.MaxUint64) + sorted, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) require.Len(t, sorted, numSelected) + require.Equal(t, 300000, int(accumulatedGas)) } func Test_SelectTransactions(t *testing.T) { @@ -414,7 +417,9 @@ func Test_SelectTransactions(t *testing.T) { require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) - sorted := cache.SelectTransactions(math.MaxUint64) + sorted, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) + require.Len(t, sorted, nTotalTransactions) + require.Equal(t, 5_000_000_000, 
int(accumulatedGas)) // Check order nonces := make(map[string]uint64, nSenders) @@ -596,7 +601,7 @@ func TestTxCache_ConcurrentMutationAndSelection(t *testing.T) { go func() { for i := 0; i < 100; i++ { fmt.Println("Selection", i) - cache.SelectTransactions(math.MaxUint64) + _, _ = cache.SelectTransactions(math.MaxUint64, math.MaxInt) } wg.Done() From 3d7c01c3738ab499fbf797422ec490eae72348d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 11:52:51 +0200 Subject: [PATCH 105/124] Additional tests and benchmarks. --- txcache/eviction_test.go | 27 ++++++++-- txcache/selection_test.go | 107 ++++++++++++++++++++++++++++++++++---- 2 files changed, 119 insertions(+), 15 deletions(-) diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index db34d5bc..cc38adfe 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -117,7 +117,7 @@ func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { require.Equal(t, 4, int(cache.CountTx())) } -func TestTxCache_DoEviction_Benchmark(t *testing.T) { +func TestBenchmarkTxCache_DoEviction_Benchmark(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 16, @@ -168,6 +168,24 @@ func TestTxCache_DoEviction_Benchmark(t *testing.T) { require.Equal(t, 4, len(journal.numEvictedByPass)) }) + t.Run("numSenders = 400000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 400000, 1) + cache.config.EvictionEnabled = true + + require.Equal(t, uint64(400000), cache.CountTx()) + + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 100000, journal.numEvicted) + require.Equal(t, 2, len(journal.numEvictedByPass)) + }) + t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { cache, err := NewTxCache(config, txGasHandler) require.Nil(t, err) @@ -198,7 +216,8 @@ func TestTxCache_DoEviction_Benchmark(t *testing.T) { // Thread(s) per core: 2 // Core(s) per socket: 4 // - // 0.079401s (TestTxCache_DoEviction_Benchmark/numSenders_=_35000,_numTransactions_=_10) - // 0.366044s (TestTxCache_DoEviction_Benchmark/numSenders_=_100000,_numTransactions_=_5) - // 0.611849s (TestTxCache_DoEviction_Benchmark/numSenders_=_10000,_numTransactions_=_100) + // 0.093771s (TestBenchmarkTxCache_DoEviction_Benchmark/numSenders_=_35000,_numTransactions_=_10) + // 0.424683s (TestBenchmarkTxCache_DoEviction_Benchmark/numSenders_=_100000,_numTransactions_=_5) + // 0.448017s (TestBenchmarkTxCache_DoEviction_Benchmark/numSenders_=_10000,_numTransactions_=_100) + // 0.476738s (TestBenchmarkTxCache_DoEviction_Benchmark/numSenders_=_400000,_numTransactions_=_1) } diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 16c1d64e..1161f368 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -2,27 +2,31 @@ package txcache import ( "fmt" + "math" "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) func TestTxCache_selectTransactionsFromBunches(t *testing.T) { - sw := core.NewStopWatch() - t.Run("empty cache", func(t *testing.T) { - merged, accumulatedGas := selectTransactionsFromBunches([]BunchOfTransactions{}, 10_000_000_000) + merged, accumulatedGas := selectTransactionsFromBunches([]BunchOfTransactions{}, 10_000_000_000, math.MaxInt) require.Equal(t, 0, 
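The new benchmark-style tests measure the hot paths (eviction, selection) with
core.StopWatch and print the recorded timings at the end of each run; reference
measurements are kept as comments next to the tests. The measurement pattern,
as used in the tests (sketch):

    sw := core.NewStopWatch()

    sw.Start(t.Name())
    // ... code under measurement, e.g. cache.doEviction() ...
    sw.Stop(t.Name())

    for name, measurement := range sw.GetMeasurementsMap() {
        fmt.Printf("%fs (%s)\n", measurement, name)
    }

These tests can be run in isolation with the standard "go test -run" filter,
e.g. with a regular expression matching the "TestBenchmark" prefix.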
len(merged)) require.Equal(t, uint64(0), accumulatedGas) }) +} + +func TestBenchmarkTxCache_selectTransactionsFromBunches(t *testing.T) { + sw := core.NewStopWatch() t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) sw.Start(t.Name()) - merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000) + merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000, math.MaxInt) sw.Stop(t.Name()) require.Equal(t, 200000, len(merged)) @@ -33,7 +37,7 @@ func TestTxCache_selectTransactionsFromBunches(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) sw.Start(t.Name()) - merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000) + merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000, math.MaxInt) sw.Stop(t.Name()) require.Equal(t, 200000, len(merged)) @@ -44,7 +48,7 @@ func TestTxCache_selectTransactionsFromBunches(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(100000, 3) sw.Start(t.Name()) - merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000) + merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000, math.MaxInt) sw.Stop(t.Name()) require.Equal(t, 200000, len(merged)) @@ -55,7 +59,7 @@ func TestTxCache_selectTransactionsFromBunches(t *testing.T) { bunches := createBunchesOfTransactionsWithUniformDistribution(300000, 1) sw.Start(t.Name()) - merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000) + merged, accumulatedGas := selectTransactionsFromBunches(bunches, 10_000_000_000, math.MaxInt) sw.Stop(t.Name()) require.Equal(t, 200000, len(merged)) @@ -74,8 +78,89 @@ func TestTxCache_selectTransactionsFromBunches(t *testing.T) { // Thread(s) per core: 2 // Core(s) per socket: 4 // - // 0.059954s (TestTxCache_selectTransactionsFromBunches/numSenders_=_1000,_numTransactions_=_1000) - // 0.087949s (TestTxCache_selectTransactionsFromBunches/numSenders_=_10000,_numTransactions_=_100) - // 0.204968s (TestTxCache_selectTransactionsFromBunches/numSenders_=_100000,_numTransactions_=_3) - // 0.506842s (TestTxCache_selectTransactionsFromBunches/numSenders_=_300000,_numTransactions_=_1) + // 0.029651s (TestTxCache_selectTransactionsFromBunches/numSenders_=_1000,_numTransactions_=_1000) + // 0.026440s (TestTxCache_selectTransactionsFromBunches/numSenders_=_10000,_numTransactions_=_100) + // 0.122592s (TestTxCache_selectTransactionsFromBunches/numSenders_=_100000,_numTransactions_=_3) + // 0.219072s (TestTxCache_selectTransactionsFromBunches/numSenders_=_300000,_numTransactions_=_1) +} + +func TestBenchmarktTxCache_doSelectTransactions(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 300001, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + } + + txGasHandler := txcachemocks.NewTxGasHandlerMock() + + sw := core.NewStopWatch() + + t.Run("numSenders = 50000, numTransactions = 2, maxNum = 50_000", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 50000, 2) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + merged, accumulatedGas := cache.SelectTransactions(10_000_000_000, 50_000) 
+ sw.Stop(t.Name()) + + require.Equal(t, 50000, len(merged)) + require.Equal(t, uint64(2_500_000_000), accumulatedGas) + }) + + t.Run("numSenders = 100000, numTransactions = 1, maxNum = 50_000", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 100000, 1) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + merged, accumulatedGas := cache.SelectTransactions(10_000_000_000, 50_000) + sw.Stop(t.Name()) + + require.Equal(t, 50000, len(merged)) + require.Equal(t, uint64(2_500_000_000), accumulatedGas) + }) + + t.Run("numSenders = 300000, numTransactions = 1, maxNum = 50_000", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 300000, 1) + + require.Equal(t, 300000, int(cache.CountTx())) + + sw.Start(t.Name()) + merged, accumulatedGas := cache.SelectTransactions(10_000_000_000, 50_000) + sw.Stop(t.Name()) + + require.Equal(t, 50000, len(merged)) + require.Equal(t, uint64(2_500_000_000), accumulatedGas) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.060508s (TestBenchmarktTxCache_doSelectTransactions/numSenders_=_50000,_numTransactions_=_2,_maxNum_=_50_000) + // 0.103369s (TestBenchmarktTxCache_doSelectTransactions/numSenders_=_100000,_numTransactions_=_1,_maxNum_=_50_000) + // 0.245621s (TestBenchmarktTxCache_doSelectTransactions/numSenders_=_300000,_numTransactions_=_1,_maxNum_=_50_000) } From 7ee693cd99cfa2e476b72ec067b7c3309ca9d31e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 11:55:51 +0200 Subject: [PATCH 106/124] Move tests. 
--- txcache/selection_test.go | 146 ++++++++++++++++++++++++++++++++++++++ txcache/txCache_test.go | 145 ------------------------------------- 2 files changed, 146 insertions(+), 145 deletions(-) diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 1161f368..7c0755a5 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -10,6 +10,152 @@ import ( "github.com/stretchr/testify/require" ) +func TestTxCache_SelectTransactions_Dummy(t *testing.T) { + t.Run("all having same PPU", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) + require.Equal(t, 3193030061, int(fnv32("hash-alice-4"))) + + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + require.Equal(t, 3193030058, int(fnv32("hash-alice-3"))) + + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + require.Equal(t, 3193030059, int(fnv32("hash-alice-2"))) + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + require.Equal(t, 3193030056, int(fnv32("hash-alice-1"))) + + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) + require.Equal(t, 187766579, int(fnv32("hash-bob-7"))) + + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) + require.Equal(t, 187766578, int(fnv32("hash-bob-6"))) + + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) + require.Equal(t, 187766577, int(fnv32("hash-bob-5"))) + + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) + require.Equal(t, 3082288595, int(fnv32("hash-carol-1"))) + + selected, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) + require.Len(t, selected, 8) + require.Equal(t, 400000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) + require.Equal(t, "hash-alice-2", string(selected[1].TxHash)) + require.Equal(t, "hash-alice-3", string(selected[2].TxHash)) + require.Equal(t, "hash-alice-4", string(selected[3].TxHash)) + require.Equal(t, "hash-carol-1", string(selected[4].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[5].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[6].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[7].TxHash)) + }) + + t.Run("alice > carol > bob", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasPrice(100)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasPrice(50)) + cache.AddTx(createTx([]byte("hash-carol-3"), "carol", 3).withGasPrice(75)) + + selected, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) + require.Len(t, selected, 3) + require.Equal(t, 150000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) + require.Equal(t, "hash-carol-3", string(selected[1].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[2].TxHash)) + }) +} + +func TestTxCache_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { + t.Run("transactions with no data field", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasLimit(200000)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7).withGasLimit(400000)) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 
6).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) + + selected, accumulatedGas := cache.SelectTransactions(760000, math.MaxInt) + require.Len(t, selected, 5) + require.Equal(t, 750000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-carol-1", string(selected[0].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[1].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[2].TxHash)) + require.Equal(t, "hash-alice-1", string(selected[3].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[4].TxHash)) + }) +} + +func TestTxCache_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-5"), "alice", 5)) + cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 42)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 44)) + cache.AddTx(createTx([]byte("hash-bob-45"), "bob", 45)) + cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) + cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) + cache.AddTx(createTx([]byte("hash-carol-10"), "carol", 10)) + cache.AddTx(createTx([]byte("hash-carol-11"), "carol", 11)) + + numSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol + + sorted, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) + require.Len(t, sorted, numSelected) + require.Equal(t, 300000, int(accumulatedGas)) +} + +func TestTxCache_SelectTransactions_WhenTransactionsAddedInReversedNonceOrder(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + // Add "nSenders" * "nTransactionsPerSender" transactions in the cache (in reversed nonce order) + nSenders := 1000 + nTransactionsPerSender := 100 + nTotalTransactions := nSenders * nTransactionsPerSender + + for senderTag := 0; senderTag < nSenders; senderTag++ { + sender := fmt.Sprintf("sender:%d", senderTag) + + for txNonce := nTransactionsPerSender; txNonce > 0; txNonce-- { + txHash := fmt.Sprintf("hash:%d:%d", senderTag, txNonce) + tx := createTx([]byte(txHash), sender, uint64(txNonce)) + cache.AddTx(tx) + } + } + + require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) + + sorted, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) + require.Len(t, sorted, nTotalTransactions) + require.Equal(t, 5_000_000_000, int(accumulatedGas)) + + // Check order + nonces := make(map[string]uint64, nSenders) + + for _, tx := range sorted { + nonce := tx.Tx.GetNonce() + sender := string(tx.Tx.GetSndAddr()) + previousNonce := nonces[sender] + + require.LessOrEqual(t, previousNonce, nonce) + nonces[sender] = nonce + } +} + func TestTxCache_selectTransactionsFromBunches(t *testing.T) { t.Run("empty cache", func(t *testing.T) { merged, accumulatedGas := selectTransactionsFromBunches([]BunchOfTransactions{}, 10_000_000_000, math.MaxInt) diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 6dc07f2b..14ed0db2 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -288,151 +288,6 @@ func Test_GetTransactionsPoolForSender(t *testing.T) { require.Equal(t, expectedTxs, txs) } -func Test_SelectTransactions_Dummy(t *testing.T) { - t.Run("all having same PPU", func(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("hash-alice-4"), 
"alice", 4)) - require.Equal(t, 3193030061, int(fnv32("hash-alice-4"))) - - cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) - require.Equal(t, 3193030058, int(fnv32("hash-alice-3"))) - - cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) - require.Equal(t, 3193030059, int(fnv32("hash-alice-2"))) - - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) - require.Equal(t, 3193030056, int(fnv32("hash-alice-1"))) - - cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) - require.Equal(t, 187766579, int(fnv32("hash-bob-7"))) - - cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) - require.Equal(t, 187766578, int(fnv32("hash-bob-6"))) - - cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) - require.Equal(t, 187766577, int(fnv32("hash-bob-5"))) - - cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) - require.Equal(t, 3082288595, int(fnv32("hash-carol-1"))) - - selected, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) - require.Len(t, selected, 8) - require.Equal(t, 400000, int(accumulatedGas)) - - // Check order - require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) - require.Equal(t, "hash-alice-2", string(selected[1].TxHash)) - require.Equal(t, "hash-alice-3", string(selected[2].TxHash)) - require.Equal(t, "hash-alice-4", string(selected[3].TxHash)) - require.Equal(t, "hash-carol-1", string(selected[4].TxHash)) - require.Equal(t, "hash-bob-5", string(selected[5].TxHash)) - require.Equal(t, "hash-bob-6", string(selected[6].TxHash)) - require.Equal(t, "hash-bob-7", string(selected[7].TxHash)) - }) - - t.Run("alice > carol > bob", func(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasPrice(100)) - cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasPrice(50)) - cache.AddTx(createTx([]byte("hash-carol-3"), "carol", 3).withGasPrice(75)) - - selected, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) - require.Len(t, selected, 3) - require.Equal(t, 150000, int(accumulatedGas)) - - // Check order - require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) - require.Equal(t, "hash-carol-3", string(selected[1].TxHash)) - require.Equal(t, "hash-bob-5", string(selected[2].TxHash)) - }) -} - -func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { - t.Run("transactions with no data field", func(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4).withGasLimit(100000)) - cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3).withGasLimit(100000)) - cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2).withGasLimit(500000)) - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasLimit(200000)) - cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7).withGasLimit(400000)) - cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6).withGasLimit(50000)) - cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) - cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) - - selected, accumulatedGas := cache.SelectTransactions(760000, math.MaxInt) - require.Len(t, selected, 5) - require.Equal(t, 750000, int(accumulatedGas)) - - // Check order - require.Equal(t, "hash-carol-1", string(selected[0].TxHash)) - require.Equal(t, "hash-bob-5", string(selected[1].TxHash)) - require.Equal(t, "hash-bob-6", string(selected[2].TxHash)) - require.Equal(t, "hash-alice-1", string(selected[3].TxHash)) - require.Equal(t, "hash-bob-7", 
string(selected[4].TxHash)) - }) -} - -func Test_SelectTransactions_BreaksAtNonceGaps(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) - cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) - cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) - cache.AddTx(createTx([]byte("hash-alice-5"), "alice", 5)) - cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 42)) - cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 44)) - cache.AddTx(createTx([]byte("hash-bob-45"), "bob", 45)) - cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) - cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) - cache.AddTx(createTx([]byte("hash-carol-10"), "carol", 10)) - cache.AddTx(createTx([]byte("hash-carol-11"), "carol", 11)) - - numSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol - - sorted, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) - require.Len(t, sorted, numSelected) - require.Equal(t, 300000, int(accumulatedGas)) -} - -func Test_SelectTransactions(t *testing.T) { - cache := newUnconstrainedCacheToTest() - - // Add "nSenders" * "nTransactionsPerSender" transactions in the cache (in reversed nonce order) - nSenders := 1000 - nTransactionsPerSender := 100 - nTotalTransactions := nSenders * nTransactionsPerSender - - for senderTag := 0; senderTag < nSenders; senderTag++ { - sender := fmt.Sprintf("sender:%d", senderTag) - - for txNonce := nTransactionsPerSender; txNonce > 0; txNonce-- { - txHash := fmt.Sprintf("hash:%d:%d", senderTag, txNonce) - tx := createTx([]byte(txHash), sender, uint64(txNonce)) - cache.AddTx(tx) - } - } - - require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) - - sorted, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) - require.Len(t, sorted, nTotalTransactions) - require.Equal(t, 5_000_000_000, int(accumulatedGas)) - - // Check order - nonces := make(map[string]uint64, nSenders) - for _, tx := range sorted { - nonce := tx.Tx.GetNonce() - sender := string(tx.Tx.GetSndAddr()) - previousNonce := nonces[sender] - - require.LessOrEqual(t, previousNonce, nonce) - nonces[sender] = nonce - } -} - func Test_Keys(t *testing.T) { cache := newUnconstrainedCacheToTest() From 6a69b3ed577f080eb71ff1b7ec14e851eff96afc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 12:45:23 +0200 Subject: [PATCH 107/124] Fix disabled cache. 
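The disabled cache is brought in line with the new selection signature, so that
both cache implementations keep the same shape. A sketch of the shared contract
(the interface name below is illustrative; it is not defined in the code):

    type transactionsSelector interface {
        SelectTransactions(gasRequested uint64, maxNum int) ([]*WrappedTransaction, uint64)
    }

For the disabled cache, the returned slice is always empty and the accumulated
gas is always zero.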
--- txcache/disabledCache.go | 4 ++-- txcache/disabledCache_test.go | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/txcache/disabledCache.go b/txcache/disabledCache.go index cf4b2049..26cd114b 100644 --- a/txcache/disabledCache.go +++ b/txcache/disabledCache.go @@ -26,8 +26,8 @@ func (cache *DisabledCache) GetByTxHash(_ []byte) (*WrappedTransaction, bool) { } // SelectTransactionsWithBandwidth returns an empty slice -func (cache *DisabledCache) SelectTransactions(uint64) []*WrappedTransaction { - return make([]*WrappedTransaction, 0) +func (cache *DisabledCache) SelectTransactions(uint64, int) ([]*WrappedTransaction, uint64) { + return make([]*WrappedTransaction, 0), 0 } // RemoveTxByHash does nothing diff --git a/txcache/disabledCache_test.go b/txcache/disabledCache_test.go index c71130d9..9725a01e 100644 --- a/txcache/disabledCache_test.go +++ b/txcache/disabledCache_test.go @@ -17,8 +17,9 @@ func TestDisabledCache_DoesNothing(t *testing.T) { require.Nil(t, tx) require.False(t, ok) - selection := cache.SelectTransactions(42) + selection, accumulatedGas := cache.SelectTransactions(42, 42) require.Equal(t, 0, len(selection)) + require.Equal(t, uint64(0), accumulatedGas) removed := cache.RemoveTxByHash([]byte{}) require.False(t, removed) From 7c5b85d97b0560c4b3c018c01c199e8b1b5b2786 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 12:57:58 +0200 Subject: [PATCH 108/124] Fix lint warnings. --- txcache/constants.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/txcache/constants.go b/txcache/constants.go index 274c9e99..0ff0b536 100644 --- a/txcache/constants.go +++ b/txcache/constants.go @@ -1,9 +1,5 @@ package txcache -const excellentGasPriceFactor = 5 -const maxSenderScore = 100 -const diagnosisMaxSendersToDisplay = 1000 const diagnosisMaxTransactionsToDisplay = 10000 const diagnosisSelectionGasRequested = 10_000_000_000 -const numJobsForMerging = 1 const initialCapacityOfSelectionSlice = 30000 From 8e3d20fcba461d163eb467d3942a3b15d75026c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 13:36:26 +0200 Subject: [PATCH 109/124] Remove unused constant (fix linter). --- txcache/testutils_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 51e16773..711e3006 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -12,7 +12,6 @@ import ( const oneMilion = 1000000 const oneBillion = oneMilion * 1000 -const oneTrillion = oneBillion * 1000 const estimatedSizeOfBoundedTxFields = uint64(128) func (cache *TxCache) areInternalMapsConsistent() bool { From 2bc824a16b2fa4602c79c7488e6fc2a6fb14e0b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 18:26:38 +0200 Subject: [PATCH 110/124] Minor simplification at eviction. Add function getTxsReversed(). --- txcache/eviction.go | 4 +--- txcache/eviction_test.go | 2 +- txcache/txListForSender.go | 15 +++++++++++++++ txcache/txListForSender_test.go | 16 +++++++++++++++- 4 files changed, 32 insertions(+), 5 deletions(-) diff --git a/txcache/eviction.go b/txcache/eviction.go index 75384644..1e9db011 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -89,10 +89,8 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { for _, sender := range senders { // Include transactions after gaps, as well (important), unlike when selecting transactions for processing. 
- bunch := sender.getTxs() // Reverse the order of transactions (will come in handy later, when creating the min-heap). - reverseSlice(bunch) - + bunch := sender.getTxsReversed() bunches = append(bunches, bunch) } diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go index cc38adfe..fe3de7d3 100644 --- a/txcache/eviction_test.go +++ b/txcache/eviction_test.go @@ -117,7 +117,7 @@ func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) { require.Equal(t, 4, int(cache.CountTx())) } -func TestBenchmarkTxCache_DoEviction_Benchmark(t *testing.T) { +func TestBenchmarkTxCache_DoEviction(t *testing.T) { config := ConfigSourceMe{ Name: "untitled", NumChunks: 16, diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index b87cd7b6..f3ab8d2b 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -197,6 +197,21 @@ func (listForSender *txListForSender) getTxs() []*WrappedTransaction { return result } +// getTxsReversed returns the transactions in the list, in reverse nonce order +func (listForSender *txListForSender) getTxsReversed() []*WrappedTransaction { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + + result := make([]*WrappedTransaction, 0, listForSender.countTx()) + + for element := listForSender.items.Back(); element != nil; element = element.Prev() { + value := element.Value.(*WrappedTransaction) + result = append(result, value) + } + + return result +} + // getTxsWithoutGaps returns the transactions in the list (gaps are handled, affected transactions are excluded) func (listForSender *txListForSender) getTxsWithoutGaps() []*WrappedTransaction { listForSender.mutex.RLock() diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index d8bf9262..41cb76e0 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -194,20 +194,33 @@ func TestListForSender_evictTransactionsWithLowerNoncesNoLock(t *testing.T) { require.Equal(t, 0, list.items.Len()) } -func TestListForSender_hasInitialGap(t *testing.T) { +func TestListForSender_getTxs(t *testing.T) { list := newUnconstrainedListToTest() list.notifyAccountNonceReturnEvictedTransactions(42) // No transaction, no gap + require.Len(t, list.getTxs(), 0) + require.Len(t, list.getTxsReversed(), 0) require.Len(t, list.getTxsWithoutGaps(), 0) // One gap list.AddTx(createTx([]byte("tx-43"), ".", 43)) + require.Len(t, list.getTxs(), 1) + require.Len(t, list.getTxsReversed(), 1) require.Len(t, list.getTxsWithoutGaps(), 0) // Resolve gap list.AddTx(createTx([]byte("tx-42"), ".", 42)) + require.Len(t, list.getTxs(), 2) + require.Len(t, list.getTxsReversed(), 2) require.Len(t, list.getTxsWithoutGaps(), 2) + + require.Equal(t, []byte("tx-42"), list.getTxs()[0].TxHash) + require.Equal(t, []byte("tx-43"), list.getTxs()[1].TxHash) + require.Equal(t, list.getTxs(), list.getTxsWithoutGaps()) + + require.Equal(t, []byte("tx-43"), list.getTxsReversed()[0].TxHash) + require.Equal(t, []byte("tx-42"), list.getTxsReversed()[1].TxHash) } func TestListForSender_DetectRaceConditions(t *testing.T) { @@ -219,6 +232,7 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { // These might be called concurrently: _ = list.IsEmpty() _ = list.getTxs() + _ = list.getTxsReversed() _ = list.getTxsWithoutGaps() _ = list.countTxWithLock() _ = list.notifyAccountNonceReturnEvictedTransactions(42) From 74bc0408a5b8aea0bf0465eb674bbedba500e4b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 20:43:56 +0200 Subject: 
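The README sections updated below explain how the price per gas unit (PPU) is derived from the initially paid fee. A rough, self-contained sketch of that computation, with constants mirroring the `TxGasHandlerMock` defaults (`minGasLimit = 50000`, `gasPerDataByte = 1500`, `gasPriceModifier = 0.01`); the helper name is illustrative only:

```go
package main

import "fmt"

// Defaults mirroring TxGasHandlerMock (illustrative, not part of this patch).
const (
	minGasLimit      = uint64(50_000)
	gasPerDataByte   = uint64(1_500)
	gasPriceModifier = 0.01
)

// initiallyPaidFee follows the formula described in the README below:
// the "data" component is charged at the full gas price, while the
// "execution" component is discounted by the gas price modifier.
// Assumes gasLimit >= dataCost (a well-formed transaction).
func initiallyPaidFee(gasLimit uint64, gasPrice uint64, dataLength uint64) float64 {
	dataCost := minGasLimit + dataLength*gasPerDataByte
	executionCost := gasLimit - dataCost
	return float64(dataCost*gasPrice) + float64(executionCost*gasPrice)*gasPriceModifier
}

func main() {
	// Simple native transfer: gasLimit = 50_000, gasPrice = 1_000_000_000.
	feeA := initiallyPaidFee(50_000, 1_000_000_000, 0)
	fmt.Println(feeA, feeA/50_000) // 5e+13 atoms, ppu = 1e+09

	// Same transfer, at gasPrice = 1_500_000_000.
	feeB := initiallyPaidFee(50_000, 1_500_000_000, 0)
	fmt.Println(feeB, feeB/50_000) // 7.5e+13 atoms, ppu = 1.5e+09

	// Transfer carrying 7 bytes of data: gasLimit = 50_000 + 7*1_500 = 60_500.
	feeC := initiallyPaidFee(60_500, 1_000_000_000, 7)
	fmt.Println(feeC, feeC/60_500) // 6.05e+13 atoms, ppu = 1e+09
}
```

For simple native transfers, the execution component is zero, so the PPU collapses to the gas price.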
[PATCH 111/124] Cleanup, some readme, refactoring. --- txcache/README.md | 100 ++++++++++++++++++----------- txcache/slices.go | 7 -- txcache/transactionsMaxHeap.go | 2 +- txcache/transactionsMinHeap.go | 2 +- txcache/wrappedTransaction.go | 2 +- txcache/wrappedTransaction_test.go | 6 +- 6 files changed, 69 insertions(+), 50 deletions(-) delete mode 100644 txcache/slices.go diff --git a/txcache/README.md b/txcache/README.md index 16494448..52703600 100644 --- a/txcache/README.md +++ b/txcache/README.md @@ -3,11 +3,13 @@ ### Glossary 1. **selection session:** an ephemeral session during which the mempool selects transactions for a proposer. A session starts when a proposer asks the mempool for transactions and ends when the mempool returns the transactions. The most important part of a session is the _selection loop_. -2. **transaction PPU:** the price per unit of computation, for a transaction. It's computed as `fee / gasLimit`. +2. **transaction PPU:** the price per unit of computation, for a transaction. It's computed as `initiallyPaidFee / gasLimit`. +3. **initially paid transaction fee:** the fee for processing a transaction, as known before its actual processing. That is, without knowing the _refund_ component. ### Configuration -1. **gasRequested:** `10_000_000_000`, the maximum total gas limit of the transactions to be returned to a proposer (one _selection session_). +1. **SelectTransactions::gasRequested:** `10_000_000_000`, the maximum total gas limit of the transactions to be returned to a proposer (one _selection session_). This value is provided by the Protocol. +2. **SelectTransactions::maxNum:** `50_000`, the maximum number of transactions to be returned to a proposer (one _selection session_). This value is provided by the Protocol. ### Transactions selection @@ -16,64 +18,88 @@ When a proposer asks the mempool for transactions, it provides the following parameters: - `gasRequested`: the maximum total gas limit of the transactions to be returned + - `maxNum`: the maximum number of transactions to be returned ### Paragraph 2 -How is the size of a sender batch computed? +The PPU (price per gas unit) of a transaction, is computed (once it enters the mempool) as follows: -1. If the score of the sender is **zero**, then the size of the sender batch is **1**, and the total gas limit of the sender batch is **1**. -2. If the score of the sender is **non-zero**, then the size of the sender batch is computed as follows: - - `scoreDivision = score / maxSenderScore` - - `numPerBatch = baseNumPerSenderBatch * scoreDivision` - - `gasPerBatch = baseGasPerSenderBatch * scoreDivision` +``` +ppu = initiallyPaidFee / gasLimit +``` -Examples: - - for `score == 100`, we have `numPerBatch == 100` and `gasPerBatch == 120000000` - - for `score == 74`, we have `numPerBatch == 74` and `gasPerBatch == 88800000` - - for `score == 1`, we have `numPerBatch == 1` and `gasPerBatch == 1200000` - - for `score == 0`, we have `numPerBatch == 1` and `gasPerBatch == 1` +In the formula above, -### Paragraph 3 +``` +initiallyPaidFee = + dataCost * gasPrice + + executionCost * gasPrice * network.gasPriceModifier -The mempool selects transactions as follows: - - before starting the selection loop, get a snapshot of the senders (sorted by score, descending) - - in the selection loop, do as many _passes_ as needed to satisfy `gasRequested` (see **Paragraph 1**). - - within a _pass_, go through all the senders (appropriately sorted) and select a batch of transactions from each sender. 
The size of the batch is computed as described in **Paragraph 2**. - - if `gasRequested` is satisfied, stop the _pass_ early. +dataCost = network.minGasLimit + len(data) * network.gasPerDataByte -### Paragraph 4 +executionCost = gasLimit - dataCost +``` -Within a _selection pass_, a batch of transactions from a sender is selected as follows: - - if it's the first pass, then reset the internal state used for copy operations (in the scope of a sender). Furthermore, attempt to **detect an initial nonces gap** (if enough information is available, that is, if the current account nonce is known - see section **Account nonce notifications**). - - if a nonces gap is detected, return an empty batch. Subsequent passes of the selection loop (within the same selection session) will skip this sender. The sender will be re-considered in a future selection session. - - go through the list of transactions of the sender (sorted by nonce, ascending) and select the first `numPerBatch` transactions that fit within `gasPerBatch`. - - in following passes (within the same selection session), the batch selection algorithm will continue from the last selected transaction of the sender (think of it as a cursor). +#### Examples -### Score computation +(a) A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_000_000_000`: -The score of a sender it's computed based on her transactions (as found in the mempool) and the account nonce (as learned through the _account nonce notifications_). +``` +initiallyPaidFee = 50_000_000_000 atoms +ppu = 1_000_000_000 atoms +``` -The score is strongly correlated with the average price paid by the sender per unit of computation - we'll call this **avgPpu**, as a property of the sender. +(b) A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_500_000_000`: -Additionally, we define two global properties: `worstPpu` and `excellentPpu`. A sender with an `avgPpu` of `excellentPpu + 1` gets the maximum score, while a sender with an `avgPpu` of `worstPpu` gets the minimum score. +``` +initiallyPaidFee = gasLimit * gasPrice = 75_000_000_000 atoms +ppu = 75_000_000_000 / 50_000 = 1_500_000_000 atoms +``` -`worstPpu` is computed as the average price per unit of the "worst" possible transaction - minimum gas price, maximum gas limit, and minimum data size (thus abusing the Protocol gas price subvention): +(c) A simple native transfer with a data payload of 7 bytes, with `gasLimit = 50_000 + 7 * 1500` and `gasPrice = 1_000_000_000`: ``` -worstPpu = (50000 * 1_000_000_000 + (600_000_000 - 50000) * (1_000_000_000 / 100)) / 600_000_000 - = 10082500 +initiallyPaidFee = 60_500_000_000_000 atoms +ppu = 60_500_000_000_000 / 60_500 = 1_000_000_000 atoms ``` -`excellentPpu` is set to `minGasPrice` times a _chosen_ factor: +That is, for simple native transfers (whether they hold a data payload or not), the PPU is equal to the gas price. + +(d) ... + +### Paragraph 4 + +Transaction **A** is considered more desirable (for the Network) than transaction **B** if **it has a higher PPU**. + +If two transactions have the same PPU, they are ordered using an arbitrary, but deterministic rule: the transaction with the higher [fvn32(transactionHash)](https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function) "wins" the comparison. 
+ +Pseudo-code: ``` -excellentPpu = 1_000_000_000 * 5 = 5_000_000_000 +func isTransactionMoreDesirableToNetwork(A, B): + if A.ppu > B.ppu: + return true + if A.ppu < B.ppu: + return false + return fvn32(A.hash) > fvn32(B.hash) ``` -Examples: - - ... +### Paragraph 3 + +The mempool selects transactions as follows: + - before starting the selection loop, get a snapshot of the senders, in an arbitrary order. + - in the selection loop, do as many _passes_ as needed to satisfy `gasRequested` (see **Paragraph 1**). + - within a _pass_, ... + - if `gasRequested` is satisfied, stop the _selection loop_ early. + - if `maxNum` is satisfied, stop the _selection loop_ early. + +### Paragraph 4 + +Within a _selection pass_, a batch of transactions from a sender is selected as follows: + - ..., attempt to **detect an initial nonces gap** (if enough information is available, that is, if the current account nonce is known - see section **Account nonce notifications**). + - if a nonces gap is detected, ... Subsequent passes of the selection loop (within the same selection session) will skip this sender. The sender will be re-considered in a future selection session. -#### Spotless sequence of transactions +#### Initial gaps and middle gaps ### Account nonce notifications diff --git a/txcache/slices.go b/txcache/slices.go deleted file mode 100644 index 4f6c7c6f..00000000 --- a/txcache/slices.go +++ /dev/null @@ -1,7 +0,0 @@ -package txcache - -func reverseSlice[T any](s []T) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } -} diff --git a/txcache/transactionsMaxHeap.go b/txcache/transactionsMaxHeap.go index 145d17c2..d3ebefec 100644 --- a/txcache/transactionsMaxHeap.go +++ b/txcache/transactionsMaxHeap.go @@ -5,7 +5,7 @@ type TransactionsMaxHeap []*TransactionsHeapItem func (maxHeap TransactionsMaxHeap) Len() int { return len(maxHeap) } func (maxHeap TransactionsMaxHeap) Less(i, j int) bool { - return maxHeap[i].transaction.isTransactionMoreDesirableByProtocol(maxHeap[j].transaction) + return maxHeap[i].transaction.isTransactionMoreDesirableToNetwork(maxHeap[j].transaction) } func (maxHeap TransactionsMaxHeap) Swap(i, j int) { diff --git a/txcache/transactionsMinHeap.go b/txcache/transactionsMinHeap.go index e69b00b0..6f70f79a 100644 --- a/txcache/transactionsMinHeap.go +++ b/txcache/transactionsMinHeap.go @@ -5,7 +5,7 @@ type TransactionsMinHeap []*TransactionsHeapItem func (minHeap TransactionsMinHeap) Len() int { return len(minHeap) } func (minHeap TransactionsMinHeap) Less(i, j int) bool { - return minHeap[j].transaction.isTransactionMoreDesirableByProtocol(minHeap[i].transaction) + return minHeap[j].transaction.isTransactionMoreDesirableToNetwork(minHeap[i].transaction) } func (minHeap TransactionsMinHeap) Swap(i, j int) { diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index ec29bf81..d3eee2b1 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -43,7 +43,7 @@ func fnv32(key string) uint32 { } // Equality is out of scope (not possible in our case). 
-func (transaction *WrappedTransaction) isTransactionMoreDesirableByProtocol(otherTransaction *WrappedTransaction) bool { +func (transaction *WrappedTransaction) isTransactionMoreDesirableToNetwork(otherTransaction *WrappedTransaction) bool { // First, compare by price per unit ppu := transaction.PricePerUnit ppuOther := otherTransaction.PricePerUnit diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index 1c894426..1970dd5e 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -38,7 +38,7 @@ func TestWrappedTransaction_precomputeFields(t *testing.T) { }) } -func TestWrappedTransaction_isTransactionMoreDesirableByProtocol(t *testing.T) { +func TestWrappedTransaction_isTransactionMoreDesirableToNetwork(t *testing.T) { txGasHandler := txcachemocks.NewTxGasHandlerMock() t.Run("decide by price per unit", func(t *testing.T) { @@ -48,7 +48,7 @@ func TestWrappedTransaction_isTransactionMoreDesirableByProtocol(t *testing.T) { b := createTx([]byte("b-1"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) b.precomputeFields(txGasHandler) - require.True(t, a.isTransactionMoreDesirableByProtocol(b)) + require.True(t, a.isTransactionMoreDesirableToNetwork(b)) }) t.Run("decide by transaction hash (set them up to have the same PPU)", func(t *testing.T) { @@ -60,6 +60,6 @@ func TestWrappedTransaction_isTransactionMoreDesirableByProtocol(t *testing.T) { b.precomputeFields(txGasHandler) require.Equal(t, 1654268265, int(b.HashFnv32)) - require.True(t, a.isTransactionMoreDesirableByProtocol(b)) + require.True(t, a.isTransactionMoreDesirableToNetwork(b)) }) } From 5ee99a2754aaa0d1d77119b89537d38330e687a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 20:45:55 +0200 Subject: [PATCH 112/124] Cleanup. --- txcache/interface.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/txcache/interface.go b/txcache/interface.go index be3b0bc4..f09dc457 100644 --- a/txcache/interface.go +++ b/txcache/interface.go @@ -8,8 +8,6 @@ import ( // TxGasHandler handles a transaction gas and gas cost type TxGasHandler interface { - MinGasPrice() uint64 - MaxGasLimitPerTx() uint64 ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int IsInterfaceNil() bool } From 09f30ed055218e9c1290aa2b00c038e702dd37e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 5 Nov 2024 21:10:25 +0200 Subject: [PATCH 113/124] Fix after self-review. 
--- testscommon/txcachemocks/txGasHandlerMock.go | 32 -------------------- txcache/txCache.go | 6 ++-- txcache/txListBySenderMap.go | 4 ++- txcache/txListForSender.go | 8 ++--- 4 files changed, 10 insertions(+), 40 deletions(-) diff --git a/testscommon/txcachemocks/txGasHandlerMock.go b/testscommon/txcachemocks/txGasHandlerMock.go index ba0e849a..46e18141 100644 --- a/testscommon/txcachemocks/txGasHandlerMock.go +++ b/testscommon/txcachemocks/txGasHandlerMock.go @@ -7,19 +7,10 @@ import ( "github.com/multiversx/mx-chain-core-go/data" ) -// TxGasHandler - -type TxGasHandler interface { - MinGasPrice() uint64 - MaxGasLimitPerTx() uint64 - ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int - IsInterfaceNil() bool -} - // TxGasHandlerMock - type TxGasHandlerMock struct { minGasLimit uint64 minGasPrice uint64 - maxGasLimitPerTx uint64 gasPerDataByte uint64 gasPriceModifier float64 } @@ -29,40 +20,17 @@ func NewTxGasHandlerMock() *TxGasHandlerMock { return &TxGasHandlerMock{ minGasLimit: 50000, minGasPrice: 1000000000, - maxGasLimitPerTx: 600000000, gasPerDataByte: 1500, gasPriceModifier: 0.01, } } -// WithMinGasLimit - -func (ghm *TxGasHandlerMock) WithMinGasLimit(minGasLimit uint64) *TxGasHandlerMock { - ghm.minGasLimit = minGasLimit - return ghm -} - -// WithMinGasPrice - -func (ghm *TxGasHandlerMock) WithMinGasPrice(minGasPrice uint64) *TxGasHandlerMock { - ghm.minGasPrice = minGasPrice - return ghm -} - // WithGasPriceModifier - func (ghm *TxGasHandlerMock) WithGasPriceModifier(gasPriceModifier float64) *TxGasHandlerMock { ghm.gasPriceModifier = gasPriceModifier return ghm } -// MinGasPrice - -func (ghm *TxGasHandlerMock) MinGasPrice() uint64 { - return ghm.minGasPrice -} - -// MaxGasLimitPerTx - -func (ghm *TxGasHandlerMock) MaxGasLimitPerTx() uint64 { - return ghm.maxGasLimitPerTx -} - // ComputeTxFee - func (ghm *TxGasHandlerMock) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int { dataLength := uint64(len(tx.GetData())) diff --git a/txcache/txCache.go b/txcache/txCache.go index 08c1343f..0a5e5de8 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -93,8 +93,8 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { return tx, ok } -// SelectTransactions selects a reasonably fair list of transactions to be included in the next miniblock -// It returns transactions with total gas ~ "gasRequested". +// SelectTransactions selects the best transactions to be included in the next miniblock. +// It returns up to "maxNum" transactions, with total gas <= "gasRequested". func (cache *TxCache) SelectTransactions(gasRequested uint64, maxNum int) ([]*WrappedTransaction, uint64) { stopWatch := core.NewStopWatch() stopWatch.Start("selection") @@ -134,7 +134,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { tx, foundInByHash := cache.txByHash.removeTx(string(txHash)) if !foundInByHash { - // Could have been previously removed (e.g. due to NotifyAccountNonce). + // Transaction might have been removed in the meantime (e.g. due to NotifyAccountNonce). return false } diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index ca53bd7f..ac3592f9 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -107,7 +107,7 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { return isFound } -// Important: this doesn't remove the transactions from txCache.txByHash. That's done by the caller. +// Important note: this doesn't remove the transactions from txCache.txByHash. 
That is the responsibility of the caller (of this function). func (txMap *txListBySenderMap) removeSender(sender string) bool { logRemove.Trace("txListBySenderMap.removeSender", "sender", sender) @@ -148,6 +148,8 @@ func (txMap *txListBySenderMap) notifyAccountNonceReturnEvictedTransactions(acco return evictedTxHashes } +// evictTransactionsWithHigherOrEqualNonces removes transactions with nonces higher or equal to the given nonce. +// Useful for the eviction flow. func (txMap *txListBySenderMap) evictTransactionsWithHigherOrEqualNonces(accountKey []byte, nonce uint64) { sender := string(accountKey) listForSender, ok := txMap.getListForSender(sender) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index f3ab8d2b..77f023ec 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -182,7 +182,7 @@ func (listForSender *txListForSender) IsEmpty() bool { return listForSender.countTxWithLock() == 0 } -// getTxs returns the transactions in the list +// getTxs returns the transactions of the sender func (listForSender *txListForSender) getTxs() []*WrappedTransaction { listForSender.mutex.RLock() defer listForSender.mutex.RUnlock() @@ -197,7 +197,7 @@ func (listForSender *txListForSender) getTxs() []*WrappedTransaction { return result } -// getTxsReversed returns the transactions in the list, in reverse nonce order +// getTxsReversed returns the transactions of the sender, in reverse nonce order func (listForSender *txListForSender) getTxsReversed() []*WrappedTransaction { listForSender.mutex.RLock() defer listForSender.mutex.RUnlock() @@ -212,7 +212,7 @@ func (listForSender *txListForSender) getTxsReversed() []*WrappedTransaction { return result } -// getTxsWithoutGaps returns the transactions in the list (gaps are handled, affected transactions are excluded) +// getTxsWithoutGaps returns the transactions of the sender (gaps are handled, affected transactions are excluded) func (listForSender *txListForSender) getTxsWithoutGaps() []*WrappedTransaction { listForSender.mutex.RLock() defer listForSender.mutex.RUnlock() @@ -257,7 +257,7 @@ func (listForSender *txListForSender) countTxWithLock() uint64 { return uint64(listForSender.items.Len()) } -// Removes transactions with lower nonces and returns their hashes. +// notifyAccountNonceReturnEvictedTransactions sets the known account nonce, removes the transactions with lower nonces, and returns their hashes func (listForSender *txListForSender) notifyAccountNonceReturnEvictedTransactions(nonce uint64) [][]byte { // Optimization: if nonce is the same, do nothing. if listForSender.accountNonce.Get() == nonce { From 9e31b0082692c878cc946204836581440324ba21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 6 Nov 2024 12:09:55 +0200 Subject: [PATCH 114/124] Adjust logging. 
--- txcache/txCache.go | 1 - txcache/txListForSender.go | 9 +++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/txcache/txCache.go b/txcache/txCache.go index 0a5e5de8..10723515 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -283,7 +283,6 @@ func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { evicted := cache.txListBySender.notifyAccountNonceReturnEvictedTransactions(accountKey, nonce) if len(evicted) > 0 { - logRemove.Trace("NotifyAccountNonce with eviction", "sender", accountKey, "nonce", nonce, "num evicted txs", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 77f023ec..583dbdd4 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -259,8 +259,9 @@ func (listForSender *txListForSender) countTxWithLock() uint64 { // notifyAccountNonceReturnEvictedTransactions sets the known account nonce, removes the transactions with lower nonces, and returns their hashes func (listForSender *txListForSender) notifyAccountNonceReturnEvictedTransactions(nonce uint64) [][]byte { - // Optimization: if nonce is the same, do nothing. + // Optimization: if nonce is the same, do nothing (good for heavy load). if listForSender.accountNonce.Get() == nonce { + logRemove.Trace("notifyAccountNonceReturnEvictedTransactions, nonce is the same", "sender", listForSender.sender, "nonce", nonce) return nil } @@ -270,7 +271,11 @@ func (listForSender *txListForSender) notifyAccountNonceReturnEvictedTransaction listForSender.accountNonce.Set(nonce) _ = listForSender.accountNonceKnown.SetReturningPrevious() - return listForSender.evictTransactionsWithLowerNoncesNoLockReturnEvicted(nonce) + evicted := listForSender.evictTransactionsWithLowerNoncesNoLockReturnEvicted(nonce) + + logRemove.Trace("notifyAccountNonceReturnEvictedTransactions, nonce changed", "sender", listForSender.sender, "nonce", nonce, "num evicted txs", len(evicted)) + + return evicted } // This function should only be used in critical section (listForSender.mutex) From 1a7e9f288c11a12b3d618e854999030195e9c80a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Sun, 10 Nov 2024 19:41:19 +0200 Subject: [PATCH 115/124] Fix after review. 
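Among other changes, the two heap types are merged into a single `transactionsHeap` driven by a `less` closure (see `txcache/transactionsHeap.go` below). A minimal, self-contained illustration of that `container/heap` pattern, using plain integers instead of wrapped transactions; the `closureHeap` type and values are illustrative only:

```go
package main

import (
	"container/heap"
	"fmt"
)

// closureHeap mirrors the pattern of transactionsHeap: one slice-backed type,
// with the ordering supplied as a closure, so the same type can act as a
// min-heap or a max-heap depending on how "less" is defined.
type closureHeap struct {
	items []int
	less  func(i, j int) bool
}

func (h *closureHeap) Len() int           { return len(h.items) }
func (h *closureHeap) Less(i, j int) bool { return h.less(i, j) }
func (h *closureHeap) Swap(i, j int)      { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *closureHeap) Push(x interface{}) { h.items = append(h.items, x.(int)) }

func (h *closureHeap) Pop() interface{} {
	old := h.items
	n := len(old)
	item := old[n-1]
	h.items = old[:n-1]
	return item
}

func main() {
	// Max-heap over ints: "less" is inverted, in the same way newMaxTransactionsHeap
	// inverts the comparison relative to newMinTransactionsHeap.
	h := &closureHeap{}
	h.less = func(i, j int) bool { return h.items[i] > h.items[j] }

	for _, value := range []int{3, 1, 4, 1, 5} {
		heap.Push(h, value)
	}

	fmt.Println(heap.Pop(h)) // 5 (the "best" item always sits at the root)
}
```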
--- txcache/README.md | 4 +- txcache/crossTxCache.go | 2 +- txcache/diagnosis.go | 14 +++---- txcache/disabledCache.go | 2 +- txcache/eviction.go | 17 ++++---- txcache/selection.go | 22 +++++----- txcache/selection_test.go | 2 +- txcache/testutils_test.go | 6 +-- txcache/transactionsHeap.go | 65 ++++++++++++++++++++++++++++++ txcache/transactionsHeapItem.go | 7 ---- txcache/transactionsMaxHeap.go | 27 ------------- txcache/transactionsMinHeap.go | 27 ------------- txcache/txCache.go | 9 ++++- txcache/txListBySenderMap.go | 32 ++++----------- txcache/txListBySenderMap_test.go | 4 +- txcache/txListForSender.go | 29 +++++++++---- txcache/txListForSender_test.go | 4 +- txcache/wrappedTransaction.go | 27 +++++++------ txcache/wrappedTransaction_test.go | 22 +++++----- 19 files changed, 167 insertions(+), 155 deletions(-) create mode 100644 txcache/transactionsHeap.go delete mode 100644 txcache/transactionsHeapItem.go delete mode 100644 txcache/transactionsMaxHeap.go delete mode 100644 txcache/transactionsMinHeap.go diff --git a/txcache/README.md b/txcache/README.md index 52703600..0acd9187 100644 --- a/txcache/README.md +++ b/txcache/README.md @@ -69,14 +69,14 @@ That is, for simple native transfers (whether they hold a data payload or not), ### Paragraph 4 -Transaction **A** is considered more desirable (for the Network) than transaction **B** if **it has a higher PPU**. +Transaction **A** is considered more valuable (for the Network) than transaction **B** if **it has a higher PPU**. If two transactions have the same PPU, they are ordered using an arbitrary, but deterministic rule: the transaction with the higher [fvn32(transactionHash)](https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function) "wins" the comparison. Pseudo-code: ``` -func isTransactionMoreDesirableToNetwork(A, B): +func isTransactionMoreValuableForNetwork(A, B): if A.ppu > B.ppu: return true if A.ppu < B.ppu: diff --git a/txcache/crossTxCache.go b/txcache/crossTxCache.go index 9d89c18f..c40bcc51 100644 --- a/txcache/crossTxCache.go +++ b/txcache/crossTxCache.go @@ -116,7 +116,7 @@ func (cache *CrossTxCache) GetTransactionsPoolForSender(_ string) []*WrappedTran } // NotifyAccountNonce does nothing, only to respect the interface -func (cache *CrossTxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { +func (cache *CrossTxCache) NotifyAccountNonce(_ []byte, _ uint64) { } // IsInterfaceNil returns true if there is no value under the interface diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go index 34a768f1..7bd7f2da 100644 --- a/txcache/diagnosis.go +++ b/txcache/diagnosis.go @@ -71,21 +71,21 @@ func (cache *TxCache) diagnoseTransactions() { numToDisplay := core.MinInt(diagnosisMaxTransactionsToDisplay, len(transactions)) logDiagnoseTransactions.Trace("diagnoseTransactions", "numTransactions", len(transactions), "numToDisplay", numToDisplay) - logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJson(transactions[:numToDisplay], "diagnoseTransactions")) + logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJSON(transactions[:numToDisplay], "diagnoseTransactions")) } -// marshalTransactionsToNewlineDelimitedJson converts a list of transactions to a newline-delimited JSON string. +// marshalTransactionsToNewlineDelimitedJSON converts a list of transactions to a newline-delimited JSON string. // Note: each line is indexed, to improve readability. The index is easily removable for if separate analysis is needed. 
-func marshalTransactionsToNewlineDelimitedJson(transactions []*WrappedTransaction, linePrefix string) string { +func marshalTransactionsToNewlineDelimitedJSON(transactions []*WrappedTransaction, linePrefix string) string { builder := strings.Builder{} builder.WriteString("\n") for i, wrappedTx := range transactions { printedTx := convertWrappedTransactionToPrintedTransaction(wrappedTx) - printedTxJson, _ := json.Marshal(printedTx) + printedTxJSON, _ := json.Marshal(printedTx) builder.WriteString(fmt.Sprintf("%s#%d: ", linePrefix, i)) - builder.WriteString(string(printedTxJson)) + builder.WriteString(string(printedTxJSON)) builder.WriteString("\n") } @@ -104,7 +104,7 @@ func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction GasPrice: transaction.GetGasPrice(), GasLimit: transaction.GetGasLimit(), DataLength: len(transaction.GetData()), - PPU: wrappedTx.PricePerUnit, + PPU: wrappedTx.PricePerUnit.Load(), } } @@ -124,7 +124,7 @@ func displaySelectionOutcome(contextualLogger logger.Logger, linePrefix string, if len(transactions) > 0 { contextualLogger.Trace("displaySelectionOutcome - transactions (as newline-separated JSON):") - contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJson(transactions, linePrefix)) + contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJSON(transactions, linePrefix)) } else { contextualLogger.Trace("displaySelectionOutcome - transactions: none") } diff --git a/txcache/disabledCache.go b/txcache/disabledCache.go index 26cd114b..69d7362c 100644 --- a/txcache/disabledCache.go +++ b/txcache/disabledCache.go @@ -25,7 +25,7 @@ func (cache *DisabledCache) GetByTxHash(_ []byte) (*WrappedTransaction, bool) { return nil, false } -// SelectTransactionsWithBandwidth returns an empty slice +// SelectTransactions returns an empty slice func (cache *DisabledCache) SelectTransactions(uint64, int) ([]*WrappedTransaction, uint64) { return make([]*WrappedTransaction, 0), 0 } diff --git a/txcache/eviction.go b/txcache/eviction.go index 1e9db011..adb6bb2e 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -85,7 +85,7 @@ func (cache *TxCache) areThereTooManyTxs() bool { // Eviction tolerates concurrent transaction additions / removals. func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { senders := cache.getSenders() - bunches := make([]BunchOfTransactions, 0, len(senders)) + bunches := make([]bunchOfTransactions, 0, len(senders)) for _, sender := range senders { // Include transactions after gaps, as well (important), unlike when selecting transactions for processing. @@ -98,8 +98,8 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { // Heap is reused among passes. // Items popped from the heap are added to "transactionsToEvict" (slice is re-created in each pass). - transactionsHeap := make(TransactionsMinHeap, 0, len(bunches)) - heap.Init(&transactionsHeap) + transactionsHeap := newMinTransactionsHeap(len(bunches)) + heap.Init(transactionsHeap) // Initialize the heap with the first transaction of each bunch for i, bunch := range bunches { @@ -109,7 +109,7 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { } // Items will be reused (see below). Each sender gets one (and only one) item in the heap. 
- heap.Push(&transactionsHeap, &TransactionsHeapItem{ + heap.Push(transactionsHeap, &transactionsHeapItem{ senderIndex: i, transactionIndex: 0, transaction: bunch[0], @@ -117,13 +117,13 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { } for pass := 0; cache.isCapacityExceeded(); pass++ { - transactionsToEvict := make(BunchOfTransactions, 0, cache.config.NumItemsToPreemptivelyEvict) + transactionsToEvict := make(bunchOfTransactions, 0, cache.config.NumItemsToPreemptivelyEvict) transactionsToEvictHashes := make([][]byte, 0, cache.config.NumItemsToPreemptivelyEvict) // Select transactions (sorted). for transactionsHeap.Len() > 0 { // Always pick the "worst" transaction. - item := heap.Pop(&transactionsHeap).(*TransactionsHeapItem) + item := heap.Pop(transactionsHeap).(*transactionsHeapItem) if len(transactionsToEvict) >= int(cache.config.NumItemsToPreemptivelyEvict) { // We have enough transactions to evict in this pass. @@ -140,7 +140,7 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { if item.transactionIndex < len(bunches[item.senderIndex]) { // Item is reused (same originating sender), pushed back on the heap. item.transaction = bunches[item.senderIndex][item.transactionIndex] - heap.Push(&transactionsHeap, item) + heap.Push(transactionsHeap, item) } } @@ -149,7 +149,8 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { break } - // For each sender, find the "lowest" (in nonce) transaction to evict. + // For each sender, find the "lowest" (in nonce) transaction to evict, + // so that we can remove all transactions with higher or equal nonces (of a sender) in one go (see below). lowestToEvictBySender := make(map[string]uint64) for _, tx := range transactionsToEvict { diff --git a/txcache/selection.go b/txcache/selection.go index 08bc7c67..45456b7a 100644 --- a/txcache/selection.go +++ b/txcache/selection.go @@ -1,10 +1,12 @@ package txcache -import "container/heap" +import ( + "container/heap" +) -func (cache *TxCache) doSelectTransactions(gasRequested uint64, maxNum int) (BunchOfTransactions, uint64) { +func (cache *TxCache) doSelectTransactions(gasRequested uint64, maxNum int) (bunchOfTransactions, uint64) { senders := cache.getSenders() - bunches := make([]BunchOfTransactions, 0, len(senders)) + bunches := make([]bunchOfTransactions, 0, len(senders)) for _, sender := range senders { bunches = append(bunches, sender.getTxsWithoutGaps()) @@ -14,12 +16,12 @@ func (cache *TxCache) doSelectTransactions(gasRequested uint64, maxNum int) (Bun } // Selection tolerates concurrent transaction additions / removals. -func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested uint64, maxNum int) (BunchOfTransactions, uint64) { - selectedTransactions := make(BunchOfTransactions, 0, initialCapacityOfSelectionSlice) +func selectTransactionsFromBunches(bunches []bunchOfTransactions, gasRequested uint64, maxNum int) (bunchOfTransactions, uint64) { + selectedTransactions := make(bunchOfTransactions, 0, initialCapacityOfSelectionSlice) // Items popped from the heap are added to "selectedTransactions". 
- transactionsHeap := make(TransactionsMaxHeap, 0, len(bunches)) - heap.Init(&transactionsHeap) + transactionsHeap := newMaxTransactionsHeap(len(bunches)) + heap.Init(transactionsHeap) // Initialize the heap with the first transaction of each bunch for i, bunch := range bunches { @@ -29,7 +31,7 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u } // Items will be reused (see below). Each sender gets one (and only one) item in the heap. - heap.Push(&transactionsHeap, &TransactionsHeapItem{ + heap.Push(transactionsHeap, &transactionsHeapItem{ senderIndex: i, transactionIndex: 0, transaction: bunch[0], @@ -41,7 +43,7 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u // Select transactions (sorted). for transactionsHeap.Len() > 0 { // Always pick the best transaction. - item := heap.Pop(&transactionsHeap).(*TransactionsHeapItem) + item := heap.Pop(transactionsHeap).(*transactionsHeapItem) gasLimit := item.transaction.Tx.GetGasLimit() if accumulatedGas+gasLimit > gasRequested { @@ -61,7 +63,7 @@ func selectTransactionsFromBunches(bunches []BunchOfTransactions, gasRequested u if item.transactionIndex < len(bunches[item.senderIndex]) { // Item is reused (same originating sender), pushed back on the heap. item.transaction = bunches[item.senderIndex][item.transactionIndex] - heap.Push(&transactionsHeap, item) + heap.Push(transactionsHeap, item) } } diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 7c0755a5..5d358e26 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -158,7 +158,7 @@ func TestTxCache_SelectTransactions_WhenTransactionsAddedInReversedNonceOrder(t func TestTxCache_selectTransactionsFromBunches(t *testing.T) { t.Run("empty cache", func(t *testing.T) { - merged, accumulatedGas := selectTransactionsFromBunches([]BunchOfTransactions{}, 10_000_000_000, math.MaxInt) + merged, accumulatedGas := selectTransactionsFromBunches([]bunchOfTransactions{}, 10_000_000_000, math.MaxInt) require.Equal(t, 0, len(merged)) require.Equal(t, uint64(0), accumulatedGas) diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go index 711e3006..a2405be5 100644 --- a/txcache/testutils_test.go +++ b/txcache/testutils_test.go @@ -106,12 +106,12 @@ func addManyTransactionsWithUniformDistribution(cache *TxCache, nSenders int, nT } } -func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []BunchOfTransactions { - bunches := make([]BunchOfTransactions, 0, nSenders) +func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []bunchOfTransactions { + bunches := make([]bunchOfTransactions, 0, nSenders) txGasHandler := txcachemocks.NewTxGasHandlerMock() for senderTag := 0; senderTag < nSenders; senderTag++ { - bunch := make(BunchOfTransactions, 0, nTransactionsPerSender) + bunch := make(bunchOfTransactions, 0, nTransactionsPerSender) sender := createFakeSenderAddress(senderTag) for nonce := nTransactionsPerSender; nonce > 0; nonce-- { diff --git a/txcache/transactionsHeap.go b/txcache/transactionsHeap.go new file mode 100644 index 00000000..9693f10b --- /dev/null +++ b/txcache/transactionsHeap.go @@ -0,0 +1,65 @@ +package txcache + +type transactionsHeap struct { + items []*transactionsHeapItem + less func(i, j int) bool +} + +type transactionsHeapItem struct { + senderIndex int + transactionIndex int + transaction *WrappedTransaction +} + +func newMinTransactionsHeap(capacity int) *transactionsHeap { + h := 
transactionsHeap{ + items: make([]*transactionsHeapItem, 0, capacity), + } + + h.less = func(i, j int) bool { + return h.items[j].transaction.isTransactionMoreValuableForNetwork(h.items[i].transaction) + } + + return &h +} + +func newMaxTransactionsHeap(capacity int) *transactionsHeap { + h := transactionsHeap{ + items: make([]*transactionsHeapItem, 0, capacity), + } + + h.less = func(i, j int) bool { + return h.items[i].transaction.isTransactionMoreValuableForNetwork(h.items[j].transaction) + } + + return &h +} + +// Len returns the number of elements in the heap. +func (h *transactionsHeap) Len() int { return len(h.items) } + +// Less reports whether the element with index i should sort before the element with index j. +func (h *transactionsHeap) Less(i, j int) bool { + return h.less(i, j) +} + +// Swap swaps the elements with indexes i and j. +func (h *transactionsHeap) Swap(i, j int) { + h.items[i], h.items[j] = h.items[j], h.items[i] +} + +// Push pushes the element x onto the heap. +func (h *transactionsHeap) Push(x interface{}) { + h.items = append(h.items, x.(*transactionsHeapItem)) +} + +// Pop removes and returns the minimum element (according to "h.less") from the heap. +func (h *transactionsHeap) Pop() interface{} { + // Standard code when storing the heap in a slice: + // https://pkg.go.dev/container/heap + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} diff --git a/txcache/transactionsHeapItem.go b/txcache/transactionsHeapItem.go deleted file mode 100644 index c458115f..00000000 --- a/txcache/transactionsHeapItem.go +++ /dev/null @@ -1,7 +0,0 @@ -package txcache - -type TransactionsHeapItem struct { - senderIndex int - transactionIndex int - transaction *WrappedTransaction -} diff --git a/txcache/transactionsMaxHeap.go b/txcache/transactionsMaxHeap.go deleted file mode 100644 index d3ebefec..00000000 --- a/txcache/transactionsMaxHeap.go +++ /dev/null @@ -1,27 +0,0 @@ -package txcache - -type TransactionsMaxHeap []*TransactionsHeapItem - -func (maxHeap TransactionsMaxHeap) Len() int { return len(maxHeap) } - -func (maxHeap TransactionsMaxHeap) Less(i, j int) bool { - return maxHeap[i].transaction.isTransactionMoreDesirableToNetwork(maxHeap[j].transaction) -} - -func (maxHeap TransactionsMaxHeap) Swap(i, j int) { - maxHeap[i], maxHeap[j] = maxHeap[j], maxHeap[i] -} - -func (maxHeap *TransactionsMaxHeap) Push(x interface{}) { - *maxHeap = append(*maxHeap, x.(*TransactionsHeapItem)) -} - -func (maxHeap *TransactionsMaxHeap) Pop() interface{} { - // Standard code when storing the heap in a slice: - // https://pkg.go.dev/container/heap - old := *maxHeap - n := len(old) - item := old[n-1] - *maxHeap = old[0 : n-1] - return item -} diff --git a/txcache/transactionsMinHeap.go b/txcache/transactionsMinHeap.go deleted file mode 100644 index 6f70f79a..00000000 --- a/txcache/transactionsMinHeap.go +++ /dev/null @@ -1,27 +0,0 @@ -package txcache - -type TransactionsMinHeap []*TransactionsHeapItem - -func (minHeap TransactionsMinHeap) Len() int { return len(minHeap) } - -func (minHeap TransactionsMinHeap) Less(i, j int) bool { - return minHeap[j].transaction.isTransactionMoreDesirableToNetwork(minHeap[i].transaction) -} - -func (minHeap TransactionsMinHeap) Swap(i, j int) { - minHeap[i], minHeap[j] = minHeap[j], minHeap[i] -} - -func (minHeap *TransactionsMinHeap) Push(x interface{}) { - *minHeap = append(*minHeap, x.(*TransactionsHeapItem)) -} - -func (minHeap *TransactionsMinHeap) Pop() interface{} { - // Standard code when storing the heap in 
a slice: - // https://pkg.go.dev/container/heap - old := *minHeap - n := len(old) - item := old[n-1] - *minHeap = old[0 : n-1] - return item -} diff --git a/txcache/txCache.go b/txcache/txCache.go index 10723515..2b4acc67 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -19,6 +19,7 @@ type TxCache struct { txListBySender *txListBySenderMap txByHash *txByHashMap config ConfigSourceMe + txGasHandler TxGasHandler evictionMutex sync.Mutex isEvictionInProgress atomic.Flag mutTxOperation sync.Mutex @@ -43,9 +44,10 @@ func NewTxCache(config ConfigSourceMe, txGasHandler TxGasHandler) (*TxCache, err txCache := &TxCache{ name: config.Name, - txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj, txGasHandler), + txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj), txByHash: newTxByHashMap(numChunks), config: config, + txGasHandler: txGasHandler, } return txCache, nil @@ -60,6 +62,8 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { logAdd.Trace("AddTx", "tx", tx.TxHash, "nonce", tx.Tx.GetNonce(), "sender", tx.Tx.GetSndAddr()) + tx.precomputeFields(cache.txGasHandler) + if cache.config.EvictionEnabled { _ = cache.doEviction() } @@ -78,7 +82,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { } if len(evicted) > 0 { - logRemove.Debug("AddTx with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted)) + logRemove.Trace("AddTx with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } @@ -185,6 +189,7 @@ func (cache *TxCache) ForEachTransaction(function ForEachTransaction) { cache.txByHash.forEach(function) } +// getAllTransactions returns all transactions in the cache func (cache *TxCache) getAllTransactions() []*WrappedTransaction { transactions := make([]*WrappedTransaction, 0, cache.Len()) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index ac3592f9..4e981bf3 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -12,7 +12,6 @@ type txListBySenderMap struct { backingMap *maps.ConcurrentMap senderConstraints senderConstraints counter atomic.Counter - txGasHandler TxGasHandler mutex sync.Mutex } @@ -20,14 +19,12 @@ type txListBySenderMap struct { func newTxListBySenderMap( nChunksHint uint32, senderConstraints senderConstraints, - txGasHandler TxGasHandler, ) *txListBySenderMap { backingMap := maps.NewConcurrentMap(nChunksHint) return &txListBySenderMap{ backingMap: backingMap, senderConstraints: senderConstraints, - txGasHandler: txGasHandler, } } @@ -36,16 +33,8 @@ func newTxListBySenderMap( func (txMap *txListBySenderMap) addTxReturnEvicted(tx *WrappedTransaction) (bool, [][]byte) { sender := string(tx.Tx.GetSndAddr()) listForSender := txMap.getOrAddListForSender(sender) - tx.precomputeFields(txMap.txGasHandler) added, evictedHashes := listForSender.AddTx(tx) - - if listForSender.IsEmpty() { - // Generally speaking, a sender cannot become empty after upon applying sender-level constraints. - // However: - txMap.removeSender(sender) - } - return added, evictedHashes } @@ -94,17 +83,19 @@ func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { if !ok { // This happens when a sender whose transactions were selected for processing is removed from cache in the meantime. // When it comes to remove one if its transactions due to processing (commited / finalized block), they don't exist in cache anymore. 
- log.Debug("txListBySenderMap.removeTx detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) + log.Trace("txListBySenderMap.removeTx detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) return false } isFound := listForSender.RemoveTx(tx) + txMap.removeSenderIfEmpty(listForSender) + return isFound +} +func (txMap *txListBySenderMap) removeSenderIfEmpty(listForSender *txListForSender) { if listForSender.IsEmpty() { - txMap.removeSender(sender) + txMap.removeSender(listForSender.sender) } - - return isFound } // Important note: this doesn't remove the transactions from txCache.txByHash. That is the responsibility of the caller (of this function). @@ -140,11 +131,7 @@ func (txMap *txListBySenderMap) notifyAccountNonceReturnEvictedTransactions(acco } evictedTxHashes := listForSender.notifyAccountNonceReturnEvictedTransactions(nonce) - - if listForSender.IsEmpty() { - txMap.removeSender(sender) - } - + txMap.removeSenderIfEmpty(listForSender) return evictedTxHashes } @@ -158,10 +145,7 @@ func (txMap *txListBySenderMap) evictTransactionsWithHigherOrEqualNonces(account } listForSender.evictTransactionsWithHigherOrEqualNonces(nonce) - - if listForSender.IsEmpty() { - txMap.removeSender(sender) - } + txMap.removeSenderIfEmpty(listForSender) } func (txMap *txListBySenderMap) getSenders() []*txListForSender { diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index cb937e61..3d0ae10f 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -5,7 +5,6 @@ import ( "sync" "testing" - "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/stretchr/testify/require" ) @@ -114,9 +113,8 @@ func TestSendersMap_notifyAccountNonce(t *testing.T) { } func newSendersMapToTest() *txListBySenderMap { - txGasHandler := txcachemocks.NewTxGasHandlerMock() return newTxListBySenderMap(4, senderConstraints{ maxNumBytes: math.MaxUint32, maxNumTxs: math.MaxUint32, - }, txGasHandler) + }) } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 583dbdd4..c4a305ee 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -88,11 +88,19 @@ func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction) listForSender.totalBytes.Add(tx.Size) } -// This function should only be used in critical section (listForSender.mutex) +// This function should only be used in critical section (listForSender.mutex). +// When searching for the insertion place, we consider the following rules: +// - transactions are sorted by nonce in ascending order. +// - transactions with the same nonce are sorted by gas price in descending order. +// - transactions with the same nonce and gas price are sorted by hash in ascending order. +// - duplicates are not allowed. +// - "PPU" measurement is not relevant in this context. Competition among transactions of the same sender (and nonce) is based on gas price. func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTransaction) (*list.Element, error) { incomingNonce := incomingTx.Tx.GetNonce() incomingGasPrice := incomingTx.Tx.GetGasPrice() + // The loop iterates from the back to the front of the list. + // Starting from the back allows the function to quickly find the insertion point for transactions with higher nonces, which are more likely to be added. 
for element := listForSender.items.Back(); element != nil; element = element.Prev() { currentTx := element.Value.(*WrappedTransaction) currentTxNonce := currentTx.Tx.GetNonce() @@ -100,24 +108,29 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran if currentTxNonce == incomingNonce { if currentTxGasPrice > incomingGasPrice { - // The incoming transaction will be placed right after the existing one, which has same nonce but higher price. - // If the nonces are the same, but the incoming gas price is higher or equal, the search loop continues. + // The case of same nonce, lower gas price. + // We've found an insertion place: right after "element". return element, nil } + if currentTxGasPrice == incomingGasPrice { - // The incoming transaction will be placed right after the existing one, which has same nonce and the same price. - // (but different hash, because of some other fields like receiver, value or data) - // This will order out the transactions having the same nonce and gas price + // The case of same nonce, same gas price. comparison := bytes.Compare(currentTx.TxHash, incomingTx.TxHash) if comparison == 0 { - // The incoming transaction will be discarded + // The incoming transaction will be discarded, since it's already in the cache. return nil, common.ErrItemAlreadyInCache } if comparison < 0 { + // We've found an insertion place: right after "element". return element, nil } + + // We allow the search loop to continue, since the incoming transaction has a "higher hash". } + + // We allow the search loop to continue, since the incoming transaction has a higher gas price. + continue } if currentTxNonce < incomingNonce { @@ -125,6 +138,8 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran // thus the incoming transaction will be placed right after this one. return element, nil } + + // We allow the search loop to continue, since the incoming transaction has a higher nonce. } // The incoming transaction will be inserted at the head of the list. 
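For illustration, the ordering that findInsertionPlace maintains within a sender's list can be written as a comparator. A standalone sketch; the `txKey` type and the sample values are illustrative, not part of the patch:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// txKey is an illustrative stand-in for the fields the sender's list is ordered by.
type txKey struct {
	nonce    uint64
	gasPrice uint64
	hash     []byte
}

// less captures the rules listed above: ascending nonce, then descending gas price,
// then ascending hash (exact duplicates are rejected by findInsertionPlace itself).
func less(a, b txKey) bool {
	if a.nonce != b.nonce {
		return a.nonce < b.nonce
	}
	if a.gasPrice != b.gasPrice {
		return a.gasPrice > b.gasPrice
	}
	return bytes.Compare(a.hash, b.hash) < 0
}

func main() {
	txs := []txKey{
		{nonce: 7, gasPrice: 1_000_000_000, hash: []byte("b")},
		{nonce: 7, gasPrice: 1_500_000_000, hash: []byte("a")},
		{nonce: 5, gasPrice: 1_000_000_000, hash: []byte("c")},
	}

	sort.Slice(txs, func(i, j int) bool { return less(txs[i], txs[j]) })

	for _, tx := range txs {
		fmt.Println(tx.nonce, tx.gasPrice, string(tx.hash))
	}
	// Prints nonce 5 first; for nonce 7, the higher gas price comes first.
}
```

Competition among same-nonce transactions of one sender is thus decided by gas price (and hash as a tie-breaker), while the PPU only matters when comparing transactions across senders.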
diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 41cb76e0..9a167c7b 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -72,12 +72,12 @@ func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) - // Gives priority to higher gas - though undesirably to some extent, "tx3" is evicted + // Gives priority to higher gas - though undesirable to some extent, "tx3" is evicted _, evicted = list.AddTx(createTx([]byte("tx2++"), ".", 2).withGasPrice(1.5 * oneBillion)) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3"}, hashesAsStrings(evicted)) - // Though Undesirably to some extent, "tx3++"" is added, then evicted + // Though undesirable to some extent, "tx3++"" is added, then evicted _, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withGasPrice(1.5 * oneBillion)) require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings()) require.Equal(t, []string{"tx3++"}, hashesAsStrings(evicted)) diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index d3eee2b1..a8968717 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -1,10 +1,13 @@ package txcache import ( + "sync/atomic" + "github.com/multiversx/mx-chain-core-go/data" ) -type BunchOfTransactions []*WrappedTransaction +// bunchOfTransactions is a slice of WrappedTransaction pointers +type bunchOfTransactions []*WrappedTransaction // WrappedTransaction contains a transaction, its hash and extra information type WrappedTransaction struct { @@ -14,21 +17,21 @@ type WrappedTransaction struct { ReceiverShardID uint32 Size int64 - PricePerUnit uint64 - HashFnv32 uint32 + PricePerUnit atomic.Uint64 + HashFnv32 atomic.Uint32 } // precomputeFields computes (and caches) the (average) price per gas unit. -func (transaction *WrappedTransaction) precomputeFields(txGasHandler TxGasHandler) { - fee := txGasHandler.ComputeTxFee(transaction.Tx).Uint64() +func (wrappedTx *WrappedTransaction) precomputeFields(txGasHandler TxGasHandler) { + fee := txGasHandler.ComputeTxFee(wrappedTx.Tx).Uint64() - gasLimit := transaction.Tx.GetGasLimit() + gasLimit := wrappedTx.Tx.GetGasLimit() if gasLimit == 0 { return } - transaction.PricePerUnit = fee / gasLimit - transaction.HashFnv32 = fnv32(string(transaction.TxHash)) + wrappedTx.PricePerUnit.Store(fee / gasLimit) + wrappedTx.HashFnv32.Store(fnv32(string(wrappedTx.TxHash))) } // fnv32 implements https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function for 32 bits @@ -43,14 +46,14 @@ func fnv32(key string) uint32 { } // Equality is out of scope (not possible in our case). 
-func (transaction *WrappedTransaction) isTransactionMoreDesirableToNetwork(otherTransaction *WrappedTransaction) bool { +func (wrappedTx *WrappedTransaction) isTransactionMoreValuableForNetwork(otherTransaction *WrappedTransaction) bool { // First, compare by price per unit - ppu := transaction.PricePerUnit - ppuOther := otherTransaction.PricePerUnit + ppu := wrappedTx.PricePerUnit.Load() + ppuOther := otherTransaction.PricePerUnit.Load() if ppu != ppuOther { return ppu > ppuOther } // In the end, compare by hash number of transaction hash - return transaction.HashFnv32 > otherTransaction.HashFnv32 + return wrappedTx.HashFnv32.Load() > otherTransaction.HashFnv32.Load() } diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index 1970dd5e..67a13695 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -14,16 +14,16 @@ func TestWrappedTransaction_precomputeFields(t *testing.T) { tx := createTx([]byte("a"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) tx.precomputeFields(txGasHandler) - require.Equal(t, oneBillion, int(tx.PricePerUnit)) - require.Equal(t, 84696446, int(tx.HashFnv32)) + require.Equal(t, oneBillion, int(tx.PricePerUnit.Load())) + require.Equal(t, 84696446, int(tx.HashFnv32.Load())) }) t.Run("move balance gas limit and execution gas limit (1)", func(t *testing.T) { tx := createTx([]byte("b"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) tx.precomputeFields(txGasHandler) - require.Equal(t, 999_980_777, int(tx.PricePerUnit)) - require.Equal(t, 84696445, int(tx.HashFnv32)) + require.Equal(t, 999_980_777, int(tx.PricePerUnit.Load())) + require.Equal(t, 84696445, int(tx.HashFnv32.Load())) }) t.Run("move balance gas limit and execution gas limit (2)", func(t *testing.T) { @@ -33,12 +33,12 @@ func TestWrappedTransaction_precomputeFields(t *testing.T) { actualFee := 51500*oneBillion + (oneMilion-51500)*oneBillion/100 require.Equal(t, 60_985_000_000_000, actualFee) - require.Equal(t, actualFee/oneMilion, int(tx.PricePerUnit)) - require.Equal(t, 84696444, int(tx.HashFnv32)) + require.Equal(t, actualFee/oneMilion, int(tx.PricePerUnit.Load())) + require.Equal(t, 84696444, int(tx.HashFnv32.Load())) }) } -func TestWrappedTransaction_isTransactionMoreDesirableToNetwork(t *testing.T) { +func TestWrappedTransaction_isTransactionMoreValuableForNetwork(t *testing.T) { txGasHandler := txcachemocks.NewTxGasHandlerMock() t.Run("decide by price per unit", func(t *testing.T) { @@ -48,18 +48,18 @@ func TestWrappedTransaction_isTransactionMoreDesirableToNetwork(t *testing.T) { b := createTx([]byte("b-1"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) b.precomputeFields(txGasHandler) - require.True(t, a.isTransactionMoreDesirableToNetwork(b)) + require.True(t, a.isTransactionMoreValuableForNetwork(b)) }) t.Run("decide by transaction hash (set them up to have the same PPU)", func(t *testing.T) { a := createTx([]byte("a-7"), "a", 7) a.precomputeFields(txGasHandler) - require.Equal(t, 2191299170, int(a.HashFnv32)) + require.Equal(t, 2191299170, int(a.HashFnv32.Load())) b := createTx([]byte("b-7"), "b", 7) b.precomputeFields(txGasHandler) - require.Equal(t, 1654268265, int(b.HashFnv32)) + require.Equal(t, 1654268265, int(b.HashFnv32.Load())) - require.True(t, a.isTransactionMoreDesirableToNetwork(b)) + require.True(t, a.isTransactionMoreValuableForNetwork(b)) }) } From b45ce8236632e215271707ccfd8c6b538f859dc7 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Sun, 10 Nov 2024 19:42:07 +0200 Subject: [PATCH 116/124] In "getSequentialTxs", handle duplicates, as well. --- txcache/selection.go | 2 +- txcache/txListForSender.go | 32 +++++++++++++++++++++----------- txcache/txListForSender_test.go | 30 +++++++++++++++++++++++++----- 3 files changed, 47 insertions(+), 17 deletions(-) diff --git a/txcache/selection.go b/txcache/selection.go index 45456b7a..0d9739bd 100644 --- a/txcache/selection.go +++ b/txcache/selection.go @@ -9,7 +9,7 @@ func (cache *TxCache) doSelectTransactions(gasRequested uint64, maxNum int) (bun bunches := make([]bunchOfTransactions, 0, len(senders)) for _, sender := range senders { - bunches = append(bunches, sender.getTxsWithoutGaps()) + bunches = append(bunches, sender.getSequentialTxs()) } return selectTransactionsFromBunches(bunches, gasRequested, maxNum) diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index c4a305ee..728350bd 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -227,8 +227,9 @@ func (listForSender *txListForSender) getTxsReversed() []*WrappedTransaction { return result } -// getTxsWithoutGaps returns the transactions of the sender (gaps are handled, affected transactions are excluded) -func (listForSender *txListForSender) getTxsWithoutGaps() []*WrappedTransaction { +// getSequentialTxs returns the transactions of the sender, in the context of transactions selection. +// Thus, gaps and duplicates are handled (affected transactions are excluded). +func (listForSender *txListForSender) getSequentialTxs() []*WrappedTransaction { listForSender.mutex.RLock() defer listForSender.mutex.RUnlock() @@ -241,17 +242,26 @@ func (listForSender *txListForSender) getTxsWithoutGaps() []*WrappedTransaction for element := listForSender.items.Front(); element != nil; element = element.Next() { value := element.Value.(*WrappedTransaction) nonce := value.Tx.GetNonce() + isFirstTx := len(result) == 0 - // Detect initial gaps. - if len(result) == 0 && accountNonceKnown && accountNonce != nonce { - log.Trace("txListForSender.getTxsWithoutGaps, initial gap", "sender", listForSender.sender, "nonce", nonce, "accountNonce", accountNonce) - break - } + if isFirstTx { + // Handle initial gaps. + if accountNonceKnown && accountNonce != nonce { + log.Trace("txListForSender.getSequentialTxs, initial gap", "sender", listForSender.sender, "nonce", nonce, "accountNonce", accountNonce) + break + } + } else { + // Handle duplicates (only transactions with the highest gas price are included; see "findInsertionPlace"). + if nonce == previousNonce { + log.Trace("txListForSender.getSequentialTxs, duplicate", "sender", listForSender.sender, "nonce", nonce) + continue + } - // Detect middle gaps. - if len(result) > 0 && nonce != previousNonce+1 { - log.Trace("txListForSender.getTxsWithoutGaps, middle gap", "sender", listForSender.sender, "nonce", nonce, "previousNonce", previousNonce) - break + // Handle middle gaps. 
+ if nonce != previousNonce+1 { + log.Trace("txListForSender.getSequentialTxs, middle gap", "sender", listForSender.sender, "nonce", nonce, "previousNonce", previousNonce) + break + } } result = append(result, value) diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 9a167c7b..9fdee4d7 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -201,26 +201,46 @@ func TestListForSender_getTxs(t *testing.T) { // No transaction, no gap require.Len(t, list.getTxs(), 0) require.Len(t, list.getTxsReversed(), 0) - require.Len(t, list.getTxsWithoutGaps(), 0) + require.Len(t, list.getSequentialTxs(), 0) // One gap list.AddTx(createTx([]byte("tx-43"), ".", 43)) require.Len(t, list.getTxs(), 1) require.Len(t, list.getTxsReversed(), 1) - require.Len(t, list.getTxsWithoutGaps(), 0) + require.Len(t, list.getSequentialTxs(), 0) // Resolve gap list.AddTx(createTx([]byte("tx-42"), ".", 42)) require.Len(t, list.getTxs(), 2) require.Len(t, list.getTxsReversed(), 2) - require.Len(t, list.getTxsWithoutGaps(), 2) + require.Len(t, list.getSequentialTxs(), 2) require.Equal(t, []byte("tx-42"), list.getTxs()[0].TxHash) require.Equal(t, []byte("tx-43"), list.getTxs()[1].TxHash) - require.Equal(t, list.getTxs(), list.getTxsWithoutGaps()) + require.Equal(t, list.getTxs(), list.getSequentialTxs()) require.Equal(t, []byte("tx-43"), list.getTxsReversed()[0].TxHash) require.Equal(t, []byte("tx-42"), list.getTxsReversed()[1].TxHash) + + // With nonce duplicates + list.AddTx(createTx([]byte("tx-42++"), ".", 42).withGasPrice(1.1 * oneBillion)) + list.AddTx(createTx([]byte("tx-43++"), ".", 43).withGasPrice(1.1 * oneBillion)) + require.Len(t, list.getTxs(), 4) + require.Len(t, list.getTxsReversed(), 4) + require.Len(t, list.getSequentialTxs(), 2) + + require.Equal(t, []byte("tx-42++"), list.getSequentialTxs()[0].TxHash) + require.Equal(t, []byte("tx-43++"), list.getSequentialTxs()[1].TxHash) + + require.Equal(t, []byte("tx-42++"), list.getTxs()[0].TxHash) + require.Equal(t, []byte("tx-42"), list.getTxs()[1].TxHash) + require.Equal(t, []byte("tx-43++"), list.getTxs()[2].TxHash) + require.Equal(t, []byte("tx-43"), list.getTxs()[3].TxHash) + + require.Equal(t, []byte("tx-43"), list.getTxsReversed()[0].TxHash) + require.Equal(t, []byte("tx-43++"), list.getTxsReversed()[1].TxHash) + require.Equal(t, []byte("tx-42"), list.getTxsReversed()[2].TxHash) + require.Equal(t, []byte("tx-42++"), list.getTxsReversed()[3].TxHash) } func TestListForSender_DetectRaceConditions(t *testing.T) { @@ -233,7 +253,7 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { _ = list.IsEmpty() _ = list.getTxs() _ = list.getTxsReversed() - _ = list.getTxsWithoutGaps() + _ = list.getSequentialTxs() _ = list.countTxWithLock() _ = list.notifyAccountNonceReturnEvictedTransactions(42) _, _ = list.AddTx(createTx([]byte("test"), ".", 42)) From fe86b6841b96ef45441225084d90182dfd154e1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Mon, 11 Nov 2024 14:59:11 +0200 Subject: [PATCH 117/124] On nonce notifications, don't remove transactions. On RemoveTxByHash, remove those with lower or equal nonces, as well. 
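In other words, removing a transaction by hash now also clears that sender's transactions with lower nonces (they are presumed processed or stale). A minimal sketch of the intended behavior, written in the style of the package's own tests and relying on its existing helpers (`newUnconstrainedCacheToTest`, `createTx`); the test name is hypothetical and the snippet is illustrative, not part of this patch:

```go
func TestRemoveTxByHash_alsoRemovesLowerNonces_sketch(t *testing.T) {
	cache := newUnconstrainedCacheToTest()

	cache.AddTx(createTx([]byte("hash-1"), "alice", 1))
	cache.AddTx(createTx([]byte("hash-2"), "alice", 2))
	cache.AddTx(createTx([]byte("hash-3"), "alice", 3))

	// Removing the nonce-2 transaction also evicts the nonce-1 transaction,
	// since its nonce is lower than (or equal to) the removed one.
	_ = cache.RemoveTxByHash([]byte("hash-2"))

	require.False(t, cache.Has([]byte("hash-1")))
	require.False(t, cache.Has([]byte("hash-2")))
	require.True(t, cache.Has([]byte("hash-3")))
	require.Equal(t, uint64(1), cache.CountTx())
}
```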
--- txcache/txCache.go | 22 ++------- txcache/txCache_test.go | 50 +------------------ txcache/txListBySenderMap.go | 20 ++++---- txcache/txListBySenderMap_test.go | 12 ++--- txcache/txListForSender.go | 78 +++++++----------------------- txcache/txListForSender_test.go | 80 +++++++------------------------ 6 files changed, 54 insertions(+), 208 deletions(-) diff --git a/txcache/txCache.go b/txcache/txCache.go index 2b4acc67..140adbef 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -142,20 +142,12 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { return false } - foundInBySender := cache.txListBySender.removeTx(tx) - if !foundInBySender { - // This condition can arise often at high load & eviction, when two go-routines concur to remove the same transaction: - // - A = remove transactions upon commit / final - // - B = remove transactions due to high load (eviction) - // - // - A reaches "RemoveTxByHash()", then "cache.txByHash.removeTx()". - // - B reaches "cache.txByHash.RemoveTxsBulk()" - // - B reaches "cache.txListBySender.RemoveSendersBulk()" - // - A reaches "cache.txListBySender.removeTx()", but sender does not exist anymore - logRemove.Debug("RemoveTxByHash, but !foundInBySender", "tx", txHash) + evicted := cache.txListBySender.removeTxReturnEvicted(tx) + if len(evicted) > 0 { + cache.txByHash.RemoveTxsBulk(evicted) } - logRemove.Trace("RemoveTxByHash", "tx", txHash) + logRemove.Trace("RemoveTxByHash", "tx", txHash, "len(evicted)", len(evicted)) return true } @@ -285,11 +277,7 @@ func (cache *TxCache) UnRegisterHandler(string) { // NotifyAccountNonce should be called by external components (such as interceptors and transactions processor) // in order to inform the cache about initial nonce gap phenomena func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { - evicted := cache.txListBySender.notifyAccountNonceReturnEvictedTransactions(accountKey, nonce) - - if len(evicted) > 0 { - cache.txByHash.RemoveTxsBulk(evicted) - } + cache.txListBySender.notifyAccountNonce(accountKey, nonce) } // ImmunizeTxsAgainstEviction does nothing for this type of cache diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index 14ed0db2..ed14a6c4 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -13,7 +13,6 @@ import ( "github.com/multiversx/mx-chain-storage-go/common" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" "github.com/multiversx/mx-chain-storage-go/types" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -217,7 +216,7 @@ func Test_RemoveByTxHash_RemovesFromByHash_WhenMapsInconsistency(t *testing.T) { cache.AddTx(tx) // Cause an inconsistency between the two internal maps (theoretically possible in case of misbehaving eviction) - cache.txListBySender.removeTx(tx) + _ = cache.txListBySender.removeTxReturnEvicted(tx) _ = cache.RemoveTxByHash(txHash) require.Equal(t, 0, cache.txByHash.backingMap.Count()) @@ -508,7 +507,7 @@ func TestTxCache_TransactionIsAdded_EvenWhenInternalMapsAreInconsistent(t *testi func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *testing.T) { cache := newUnconstrainedCacheToTest() - // A lot of routines concur to add & remove THE FIRST transaction of a sender + // A lot of routines concur to add & remove a transaction for try := 0; try < 100; try++ { var wg sync.WaitGroup @@ -544,51 +543,6 @@ func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *t require.True(t, cache.Has([]byte("alice-x"))) 
require.Equal(t, []string{"alice-x"}, cache.getHashesForSender("alice")) } - - cache.Clear() - - // A lot of routines concur to add & remove subsequent transactions of a sender - cache.AddTx(createTx([]byte("alice-w"), "alice", 41)) - - for try := 0; try < 100; try++ { - var wg sync.WaitGroup - - for i := 0; i < 50; i++ { - wg.Add(1) - go func() { - cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) - _ = cache.RemoveTxByHash([]byte("alice-x")) - wg.Done() - }() - } - - wg.Wait() - - // In this case, there is the slight chance that: - // go A: add to map by hash - // go B: won't add in map by hash, already there - // go A: add to map by sender (existing sender/list) - // go A: remove from map by hash - // go A: remove from map by sender - // go B: add to map by sender (existing sender/list) - // go B: can't remove from map by hash, not found - // go B: won't remove from map by sender (sender unknown) - - // Therefore, Alice may have one or two transactions in her list. - require.Equal(t, 1, cache.txByHash.backingMap.Count()) - expectedTxsConsistent := []string{"alice-w"} - expectedTxsSlightlyInconsistent := []string{"alice-w", "alice-x"} - actualTxs := cache.getHashesForSender("alice") - require.True(t, assert.ObjectsAreEqual(expectedTxsConsistent, actualTxs) || assert.ObjectsAreEqual(expectedTxsSlightlyInconsistent, actualTxs)) - - // A further addition works: - cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) - require.True(t, cache.Has([]byte("alice-w"))) - require.True(t, cache.Has([]byte("alice-x"))) - require.Equal(t, []string{"alice-w", "alice-x"}, cache.getHashesForSender("alice")) - } - - cache.Clear() } func newUnconstrainedCacheToTest() *TxCache { diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 4e981bf3..72bfdec3 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -75,21 +75,21 @@ func (txMap *txListBySenderMap) addSender(sender string) *txListForSender { return listForSender } -// removeTx removes a transaction from the map -func (txMap *txListBySenderMap) removeTx(tx *WrappedTransaction) bool { +// removeTxReturnEvicted removes a transaction from the map +func (txMap *txListBySenderMap) removeTxReturnEvicted(tx *WrappedTransaction) [][]byte { sender := string(tx.Tx.GetSndAddr()) listForSender, ok := txMap.getListForSender(sender) if !ok { // This happens when a sender whose transactions were selected for processing is removed from cache in the meantime. // When it comes to remove one if its transactions due to processing (commited / finalized block), they don't exist in cache anymore. 
- log.Trace("txListBySenderMap.removeTx detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) - return false + log.Trace("txListBySenderMap.removeTxReturnEvicted detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) + return nil } - isFound := listForSender.RemoveTx(tx) + evicted := listForSender.evictTransactionsWithLowerOrEqualNonces(tx.Tx.GetNonce()) txMap.removeSenderIfEmpty(listForSender) - return isFound + return evicted } func (txMap *txListBySenderMap) removeSenderIfEmpty(listForSender *txListForSender) { @@ -123,16 +123,14 @@ func (txMap *txListBySenderMap) RemoveSendersBulk(senders []string) uint32 { return numRemoved } -func (txMap *txListBySenderMap) notifyAccountNonceReturnEvictedTransactions(accountKey []byte, nonce uint64) [][]byte { +func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint64) { sender := string(accountKey) listForSender, ok := txMap.getListForSender(sender) if !ok { - return nil + return } - evictedTxHashes := listForSender.notifyAccountNonceReturnEvictedTransactions(nonce) - txMap.removeSenderIfEmpty(listForSender) - return evictedTxHashes + listForSender.notifyAccountNonce(nonce) } // evictTransactionsWithHigherOrEqualNonces removes transactions with nonces higher or equal to the given nonce. diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index 3d0ae10f..3fda916b 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -33,16 +33,16 @@ func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T require.Equal(t, uint64(2), myMap.testGetListForSender("alice").countTx()) require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) - myMap.removeTx(txAlice1) + _ = myMap.removeTxReturnEvicted(txAlice1) require.Equal(t, int64(2), myMap.counter.Get()) require.Equal(t, uint64(1), myMap.testGetListForSender("alice").countTx()) require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) - myMap.removeTx(txAlice2) + _ = myMap.removeTxReturnEvicted(txAlice2) // All alice's transactions have been removed now require.Equal(t, int64(1), myMap.counter.Get()) - myMap.removeTx(txBob) + _ = myMap.removeTxReturnEvicted(txBob) // Also Bob has no more transactions require.Equal(t, int64(0), myMap.counter.Get()) } @@ -100,14 +100,14 @@ func TestSendersMap_notifyAccountNonce(t *testing.T) { myMap := newSendersMapToTest() // Discarded notification, since sender not added yet - myMap.notifyAccountNonceReturnEvictedTransactions([]byte("alice"), 42) + myMap.notifyAccountNonce([]byte("alice"), 42) - myMap.addTxReturnEvicted(createTx([]byte("tx-42"), "alice", 42)) + _, _ = myMap.addTxReturnEvicted(createTx([]byte("tx-42"), "alice", 42)) alice, _ := myMap.getListForSender("alice") require.Equal(t, uint64(0), alice.accountNonce.Get()) require.False(t, alice.accountNonceKnown.IsSet()) - myMap.notifyAccountNonceReturnEvictedTransactions([]byte("alice"), 42) + myMap.notifyAccountNonce([]byte("alice"), 42) require.Equal(t, uint64(42), alice.accountNonce.Get()) require.True(t, alice.accountNonceKnown.IsSet()) } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 728350bd..1767b277 100644 --- a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -146,52 +146,11 @@ func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTran return nil, nil } -// RemoveTx removes a transaction from the sender's list -func 
(listForSender *txListForSender) RemoveTx(tx *WrappedTransaction) bool { - // We don't allow concurrent interceptor goroutines to mutate a given sender's list - listForSender.mutex.Lock() - defer listForSender.mutex.Unlock() - - marker := listForSender.findListElementWithTx(tx) - isFound := marker != nil - if isFound { - listForSender.items.Remove(marker) - listForSender.onRemovedListElement(marker) - } - - return isFound -} - func (listForSender *txListForSender) onRemovedListElement(element *list.Element) { tx := element.Value.(*WrappedTransaction) listForSender.totalBytes.Subtract(tx.Size) } -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) findListElementWithTx(txToFind *WrappedTransaction) *list.Element { - txToFindHash := txToFind.TxHash - txToFindNonce := txToFind.Tx.GetNonce() - - for element := listForSender.items.Front(); element != nil; element = element.Next() { - value := element.Value.(*WrappedTransaction) - nonce := value.Tx.GetNonce() - - // Optimization: first, compare nonces, then hashes. - if nonce == txToFindNonce { - if bytes.Equal(value.TxHash, txToFindHash) { - return element - } - } - - // Optimization: stop search at this point, since the list is sorted by nonce - if nonce > txToFindNonce { - break - } - } - - return nil -} - // IsEmpty checks whether the list is empty func (listForSender *txListForSender) IsEmpty() bool { return listForSender.countTxWithLock() == 0 @@ -245,8 +204,14 @@ func (listForSender *txListForSender) getSequentialTxs() []*WrappedTransaction { isFirstTx := len(result) == 0 if isFirstTx { + // Handle lower nonces. + if accountNonce > nonce { + log.Trace("txListForSender.getSequentialTxs, lower nonce", "sender", listForSender.sender, "nonce", nonce, "accountNonce", accountNonce) + continue + } + // Handle initial gaps. - if accountNonceKnown && accountNonce != nonce { + if accountNonceKnown && accountNonce < nonce { log.Trace("txListForSender.getSequentialTxs, initial gap", "sender", listForSender.sender, "nonce", nonce, "accountNonce", accountNonce) break } @@ -282,36 +247,25 @@ func (listForSender *txListForSender) countTxWithLock() uint64 { return uint64(listForSender.items.Len()) } -// notifyAccountNonceReturnEvictedTransactions sets the known account nonce, removes the transactions with lower nonces, and returns their hashes -func (listForSender *txListForSender) notifyAccountNonceReturnEvictedTransactions(nonce uint64) [][]byte { - // Optimization: if nonce is the same, do nothing (good for heavy load). 
- if listForSender.accountNonce.Get() == nonce { - logRemove.Trace("notifyAccountNonceReturnEvictedTransactions, nonce is the same", "sender", listForSender.sender, "nonce", nonce) - return nil - } - - listForSender.mutex.Lock() - defer listForSender.mutex.Unlock() - +// notifyAccountNonce sets the known account nonce, removes the transactions with lower nonces, and returns their hashes +func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) { listForSender.accountNonce.Set(nonce) _ = listForSender.accountNonceKnown.SetReturningPrevious() - - evicted := listForSender.evictTransactionsWithLowerNoncesNoLockReturnEvicted(nonce) - - logRemove.Trace("notifyAccountNonceReturnEvictedTransactions, nonce changed", "sender", listForSender.sender, "nonce", nonce, "num evicted txs", len(evicted)) - - return evicted } -// This function should only be used in critical section (listForSender.mutex) -func (listForSender *txListForSender) evictTransactionsWithLowerNoncesNoLockReturnEvicted(givenNonce uint64) [][]byte { +// evictTransactionsWithLowerOrEqualNonces removes transactions with nonces lower or equal to the given nonce +func (listForSender *txListForSender) evictTransactionsWithLowerOrEqualNonces(targetNonce uint64) [][]byte { evictedTxHashes := make([][]byte, 0) + // We don't allow concurrent goroutines to mutate a given sender's list + listForSender.mutex.Lock() + defer listForSender.mutex.Unlock() + for element := listForSender.items.Front(); element != nil; { tx := element.Value.(*WrappedTransaction) txNonce := tx.Tx.GetNonce() - if txNonce >= givenNonce { + if txNonce > targetNonce { break } diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 9fdee4d7..3f8b075a 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -107,74 +107,19 @@ func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) { require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) } -func TestListForSender_findTx(t *testing.T) { - list := newUnconstrainedListToTest() - - txA := createTx([]byte("A"), ".", 41) - txANewer := createTx([]byte("ANewer"), ".", 41) - txB := createTx([]byte("B"), ".", 42) - txD := createTx([]byte("none"), ".", 43) - list.AddTx(txA) - list.AddTx(txANewer) - list.AddTx(txB) - - elementWithA := list.findListElementWithTx(txA) - elementWithANewer := list.findListElementWithTx(txANewer) - elementWithB := list.findListElementWithTx(txB) - noElementWithD := list.findListElementWithTx(txD) - - require.NotNil(t, elementWithA) - require.NotNil(t, elementWithANewer) - require.NotNil(t, elementWithB) - - require.Equal(t, txA, elementWithA.Value.(*WrappedTransaction)) - require.Equal(t, txANewer, elementWithANewer.Value.(*WrappedTransaction)) - require.Equal(t, txB, elementWithB.Value.(*WrappedTransaction)) - require.Nil(t, noElementWithD) -} - -func TestListForSender_findTx_CoverNonceComparisonOptimization(t *testing.T) { - list := newUnconstrainedListToTest() - - list.AddTx(createTx([]byte("A"), ".", 42)) - - // Find one with a lower nonce, not added to cache - noElement := list.findListElementWithTx(createTx(nil, ".", 41)) - require.Nil(t, noElement) -} - -func TestListForSender_RemoveTransaction(t *testing.T) { - list := newUnconstrainedListToTest() - tx := createTx([]byte("a"), ".", 1) - - list.AddTx(tx) - require.Equal(t, 1, list.items.Len()) - - list.RemoveTx(tx) - require.Equal(t, 0, list.items.Len()) -} - -func TestListForSender_RemoveTransaction_NoPanicWhenTxMissing(t *testing.T) { - list := 
newUnconstrainedListToTest() - tx := createTx([]byte(""), ".", 1) - - list.RemoveTx(tx) - require.Equal(t, 0, list.items.Len()) -} - func TestListForSender_NotifyAccountNonce(t *testing.T) { list := newUnconstrainedListToTest() require.Equal(t, uint64(0), list.accountNonce.Get()) require.False(t, list.accountNonceKnown.IsSet()) - list.notifyAccountNonceReturnEvictedTransactions(42) + list.notifyAccountNonce(42) require.Equal(t, uint64(42), list.accountNonce.Get()) require.True(t, list.accountNonceKnown.IsSet()) } -func TestListForSender_evictTransactionsWithLowerNoncesNoLock(t *testing.T) { +func TestListForSender_evictTransactionsWithLowerOrEqualNonces(t *testing.T) { list := newUnconstrainedListToTest() list.AddTx(createTx([]byte("tx-42"), ".", 42)) @@ -184,19 +129,19 @@ func TestListForSender_evictTransactionsWithLowerNoncesNoLock(t *testing.T) { require.Equal(t, 4, list.items.Len()) - list.evictTransactionsWithLowerNoncesNoLockReturnEvicted(43) - require.Equal(t, 3, list.items.Len()) - - list.evictTransactionsWithLowerNoncesNoLockReturnEvicted(44) + _ = list.evictTransactionsWithLowerOrEqualNonces(43) require.Equal(t, 2, list.items.Len()) - list.evictTransactionsWithLowerNoncesNoLockReturnEvicted(99) + _ = list.evictTransactionsWithLowerOrEqualNonces(44) + require.Equal(t, 1, list.items.Len()) + + _ = list.evictTransactionsWithLowerOrEqualNonces(99) require.Equal(t, 0, list.items.Len()) } func TestListForSender_getTxs(t *testing.T) { list := newUnconstrainedListToTest() - list.notifyAccountNonceReturnEvictedTransactions(42) + list.notifyAccountNonce(42) // No transaction, no gap require.Len(t, list.getTxs(), 0) @@ -241,6 +186,13 @@ func TestListForSender_getTxs(t *testing.T) { require.Equal(t, []byte("tx-43++"), list.getTxsReversed()[1].TxHash) require.Equal(t, []byte("tx-42"), list.getTxsReversed()[2].TxHash) require.Equal(t, []byte("tx-42++"), list.getTxsReversed()[3].TxHash) + + // With lower nonces + list.notifyAccountNonce(43) + require.Len(t, list.getTxs(), 4) + require.Len(t, list.getTxsReversed(), 4) + require.Len(t, list.getSequentialTxs(), 1) + require.Equal(t, []byte("tx-43++"), list.getSequentialTxs()[0].TxHash) } func TestListForSender_DetectRaceConditions(t *testing.T) { @@ -255,7 +207,7 @@ func TestListForSender_DetectRaceConditions(t *testing.T) { _ = list.getTxsReversed() _ = list.getSequentialTxs() _ = list.countTxWithLock() - _ = list.notifyAccountNonceReturnEvictedTransactions(42) + list.notifyAccountNonce(42) _, _ = list.AddTx(createTx([]byte("test"), ".", 42)) wg.Done() From 44d940a011156a3ed97635a529b4c6cafe6ea672 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 12 Nov 2024 09:09:38 +0200 Subject: [PATCH 118/124] Fix handling of lower nonces (act only when nonce is known). Refactor, rename, add tests. --- txcache/eviction.go | 2 +- txcache/txCache.go | 8 +- txcache/txCache_test.go | 13 ++- txcache/txListBySenderMap.go | 12 +-- txcache/txListBySenderMap_test.go | 8 +- txcache/txListForSender.go | 14 +++- txcache/txListForSender_test.go | 134 ++++++++++++++++++------------ 7 files changed, 116 insertions(+), 75 deletions(-) diff --git a/txcache/eviction.go b/txcache/eviction.go index adb6bb2e..d82da786 100644 --- a/txcache/eviction.go +++ b/txcache/eviction.go @@ -160,7 +160,7 @@ func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { // Remove those transactions from "txListBySender". 
for sender, nonce := range lowestToEvictBySender { - cache.txListBySender.evictTransactionsWithHigherOrEqualNonces([]byte(sender), nonce) + cache.txListBySender.removeTransactionsWithHigherOrEqualNonce([]byte(sender), nonce) } // Remove those transactions from "txByHash". diff --git a/txcache/txCache.go b/txcache/txCache.go index 140adbef..2c494831 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -131,18 +131,18 @@ func (cache *TxCache) getSenders() []*txListForSender { return cache.txListBySender.getSenders() } -// RemoveTxByHash removes tx by hash +// RemoveTxByHash removes transactions with nonces lower or equal to the given transaction's nonce func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { cache.mutTxOperation.Lock() defer cache.mutTxOperation.Unlock() tx, foundInByHash := cache.txByHash.removeTx(string(txHash)) if !foundInByHash { - // Transaction might have been removed in the meantime (e.g. due to NotifyAccountNonce). + // Transaction might have been removed in the meantime. return false } - evicted := cache.txListBySender.removeTxReturnEvicted(tx) + evicted := cache.txListBySender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx) if len(evicted) > 0 { cache.txByHash.RemoveTxsBulk(evicted) } @@ -277,6 +277,8 @@ func (cache *TxCache) UnRegisterHandler(string) { // NotifyAccountNonce should be called by external components (such as interceptors and transactions processor) // in order to inform the cache about initial nonce gap phenomena func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { + log.Trace("TxCache.NotifyAccountNonce", "account", accountKey, "nonce", nonce) + cache.txListBySender.notifyAccountNonce(accountKey, nonce) } diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index ed14a6c4..e9038543 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -152,7 +152,12 @@ func Test_RemoveByTxHash(t *testing.T) { removed := cache.RemoveTxByHash([]byte("hash-1")) require.True(t, removed) - cache.Remove([]byte("hash-2")) + + removed = cache.RemoveTxByHash([]byte("hash-2")) + require.True(t, removed) + + removed = cache.RemoveTxByHash([]byte("hash-3")) + require.False(t, removed) foundTx, ok := cache.GetByTxHash([]byte("hash-1")) require.False(t, ok) @@ -161,6 +166,8 @@ func Test_RemoveByTxHash(t *testing.T) { foundTx, ok = cache.GetByTxHash([]byte("hash-2")) require.False(t, ok) require.Nil(t, foundTx) + + require.Equal(t, uint64(0), cache.CountTx()) } func Test_CountTx_And_Len(t *testing.T) { @@ -216,7 +223,7 @@ func Test_RemoveByTxHash_RemovesFromByHash_WhenMapsInconsistency(t *testing.T) { cache.AddTx(tx) // Cause an inconsistency between the two internal maps (theoretically possible in case of misbehaving eviction) - _ = cache.txListBySender.removeTxReturnEvicted(tx) + _ = cache.txListBySender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx) _ = cache.RemoveTxByHash(txHash) require.Equal(t, 0, cache.txByHash.backingMap.Count()) @@ -281,7 +288,7 @@ func Test_GetTransactionsPoolForSender(t *testing.T) { txs = cache.GetTransactionsPoolForSender(txSender2) require.Equal(t, wrappedTxs2, txs) - cache.RemoveTxByHash(txHashes2[0]) + _ = cache.RemoveTxByHash(txHashes2[0]) expectedTxs := wrappedTxs2[1:] txs = cache.GetTransactionsPoolForSender(txSender2) require.Equal(t, expectedTxs, txs) diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index 72bfdec3..a1afbf10 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -75,8 +75,8 @@ func (txMap 
*txListBySenderMap) addSender(sender string) *txListForSender { return listForSender } -// removeTxReturnEvicted removes a transaction from the map -func (txMap *txListBySenderMap) removeTxReturnEvicted(tx *WrappedTransaction) [][]byte { +// removeTransactionsWithLowerOrEqualNonceReturnHashes removes transactions with nonces lower or equal to the given transaction's nonce. +func (txMap *txListBySenderMap) removeTransactionsWithLowerOrEqualNonceReturnHashes(tx *WrappedTransaction) [][]byte { sender := string(tx.Tx.GetSndAddr()) listForSender, ok := txMap.getListForSender(sender) @@ -87,7 +87,7 @@ func (txMap *txListBySenderMap) removeTxReturnEvicted(tx *WrappedTransaction) [] return nil } - evicted := listForSender.evictTransactionsWithLowerOrEqualNonces(tx.Tx.GetNonce()) + evicted := listForSender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx.Tx.GetNonce()) txMap.removeSenderIfEmpty(listForSender) return evicted } @@ -133,16 +133,16 @@ func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint listForSender.notifyAccountNonce(nonce) } -// evictTransactionsWithHigherOrEqualNonces removes transactions with nonces higher or equal to the given nonce. +// removeTransactionsWithHigherOrEqualNonce removes transactions with nonces higher or equal to the given nonce. // Useful for the eviction flow. -func (txMap *txListBySenderMap) evictTransactionsWithHigherOrEqualNonces(accountKey []byte, nonce uint64) { +func (txMap *txListBySenderMap) removeTransactionsWithHigherOrEqualNonce(accountKey []byte, nonce uint64) { sender := string(accountKey) listForSender, ok := txMap.getListForSender(sender) if !ok { return } - listForSender.evictTransactionsWithHigherOrEqualNonces(nonce) + listForSender.removeTransactionsWithHigherOrEqualNonce(nonce) txMap.removeSenderIfEmpty(listForSender) } diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go index 3fda916b..083925fb 100644 --- a/txcache/txListBySenderMap_test.go +++ b/txcache/txListBySenderMap_test.go @@ -19,7 +19,7 @@ func TestSendersMap_AddTx_IncrementsCounter(t *testing.T) { require.Equal(t, int64(2), myMap.counter.Get()) } -func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T) { +func TestSendersMap_removeTransactionsWithLowerOrEqualNonceReturnHashes_alsoRemovesSenderWhenNoTransactionLeft(t *testing.T) { myMap := newSendersMapToTest() txAlice1 := createTx([]byte("a1"), "alice", 1) @@ -33,16 +33,16 @@ func TestSendersMap_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T require.Equal(t, uint64(2), myMap.testGetListForSender("alice").countTx()) require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) - _ = myMap.removeTxReturnEvicted(txAlice1) + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txAlice1) require.Equal(t, int64(2), myMap.counter.Get()) require.Equal(t, uint64(1), myMap.testGetListForSender("alice").countTx()) require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) - _ = myMap.removeTxReturnEvicted(txAlice2) + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txAlice2) // All alice's transactions have been removed now require.Equal(t, int64(1), myMap.counter.Get()) - _ = myMap.removeTxReturnEvicted(txBob) + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txBob) // Also Bob has no more transactions require.Equal(t, int64(0), myMap.counter.Get()) } diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go index 1767b277..b6b43e12 100644 --- 
a/txcache/txListForSender.go +++ b/txcache/txListForSender.go @@ -205,7 +205,7 @@ func (listForSender *txListForSender) getSequentialTxs() []*WrappedTransaction { if isFirstTx { // Handle lower nonces. - if accountNonce > nonce { + if accountNonceKnown && accountNonce > nonce { log.Trace("txListForSender.getSequentialTxs, lower nonce", "sender", listForSender.sender, "nonce", nonce, "accountNonce", accountNonce) continue } @@ -253,8 +253,14 @@ func (listForSender *txListForSender) notifyAccountNonce(nonce uint64) { _ = listForSender.accountNonceKnown.SetReturningPrevious() } -// evictTransactionsWithLowerOrEqualNonces removes transactions with nonces lower or equal to the given nonce -func (listForSender *txListForSender) evictTransactionsWithLowerOrEqualNonces(targetNonce uint64) [][]byte { +// forgetAccountNonce resets the known account nonce +func (listForSender *txListForSender) forgetAccountNonce() { + listForSender.accountNonce.Set(0) + listForSender.accountNonceKnown.Reset() +} + +// removeTransactionsWithLowerOrEqualNonceReturnHashes removes transactions with nonces lower or equal to the given nonce +func (listForSender *txListForSender) removeTransactionsWithLowerOrEqualNonceReturnHashes(targetNonce uint64) [][]byte { evictedTxHashes := make([][]byte, 0) // We don't allow concurrent goroutines to mutate a given sender's list @@ -281,7 +287,7 @@ func (listForSender *txListForSender) evictTransactionsWithLowerOrEqualNonces(ta return evictedTxHashes } -func (listForSender *txListForSender) evictTransactionsWithHigherOrEqualNonces(givenNonce uint64) { +func (listForSender *txListForSender) removeTransactionsWithHigherOrEqualNonce(givenNonce uint64) { listForSender.mutex.Lock() defer listForSender.mutex.Unlock() diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go index 3f8b075a..0b8892db 100644 --- a/txcache/txListForSender_test.go +++ b/txcache/txListForSender_test.go @@ -119,7 +119,7 @@ func TestListForSender_NotifyAccountNonce(t *testing.T) { require.True(t, list.accountNonceKnown.IsSet()) } -func TestListForSender_evictTransactionsWithLowerOrEqualNonces(t *testing.T) { +func TestListForSender_removeTransactionsWithLowerOrEqualNonceReturnHashes(t *testing.T) { list := newUnconstrainedListToTest() list.AddTx(createTx([]byte("tx-42"), ".", 42)) @@ -129,70 +129,96 @@ func TestListForSender_evictTransactionsWithLowerOrEqualNonces(t *testing.T) { require.Equal(t, 4, list.items.Len()) - _ = list.evictTransactionsWithLowerOrEqualNonces(43) + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(43) require.Equal(t, 2, list.items.Len()) - _ = list.evictTransactionsWithLowerOrEqualNonces(44) + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(44) require.Equal(t, 1, list.items.Len()) - _ = list.evictTransactionsWithLowerOrEqualNonces(99) + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(99) require.Equal(t, 0, list.items.Len()) } func TestListForSender_getTxs(t *testing.T) { - list := newUnconstrainedListToTest() - list.notifyAccountNonce(42) + t.Run("no transactions", func(t *testing.T) { + list := newUnconstrainedListToTest() + list.notifyAccountNonce(42) - // No transaction, no gap - require.Len(t, list.getTxs(), 0) - require.Len(t, list.getTxsReversed(), 0) - require.Len(t, list.getSequentialTxs(), 0) + require.Len(t, list.getTxs(), 0) + require.Len(t, list.getTxsReversed(), 0) + require.Len(t, list.getSequentialTxs(), 0) + }) - // One gap - list.AddTx(createTx([]byte("tx-43"), ".", 43)) - require.Len(t, list.getTxs(), 1) - 
require.Len(t, list.getTxsReversed(), 1) - require.Len(t, list.getSequentialTxs(), 0) + t.Run("one transaction, one gap", func(t *testing.T) { + list := newUnconstrainedListToTest() + list.notifyAccountNonce(42) - // Resolve gap - list.AddTx(createTx([]byte("tx-42"), ".", 42)) - require.Len(t, list.getTxs(), 2) - require.Len(t, list.getTxsReversed(), 2) - require.Len(t, list.getSequentialTxs(), 2) - - require.Equal(t, []byte("tx-42"), list.getTxs()[0].TxHash) - require.Equal(t, []byte("tx-43"), list.getTxs()[1].TxHash) - require.Equal(t, list.getTxs(), list.getSequentialTxs()) - - require.Equal(t, []byte("tx-43"), list.getTxsReversed()[0].TxHash) - require.Equal(t, []byte("tx-42"), list.getTxsReversed()[1].TxHash) - - // With nonce duplicates - list.AddTx(createTx([]byte("tx-42++"), ".", 42).withGasPrice(1.1 * oneBillion)) - list.AddTx(createTx([]byte("tx-43++"), ".", 43).withGasPrice(1.1 * oneBillion)) - require.Len(t, list.getTxs(), 4) - require.Len(t, list.getTxsReversed(), 4) - require.Len(t, list.getSequentialTxs(), 2) - - require.Equal(t, []byte("tx-42++"), list.getSequentialTxs()[0].TxHash) - require.Equal(t, []byte("tx-43++"), list.getSequentialTxs()[1].TxHash) - - require.Equal(t, []byte("tx-42++"), list.getTxs()[0].TxHash) - require.Equal(t, []byte("tx-42"), list.getTxs()[1].TxHash) - require.Equal(t, []byte("tx-43++"), list.getTxs()[2].TxHash) - require.Equal(t, []byte("tx-43"), list.getTxs()[3].TxHash) - - require.Equal(t, []byte("tx-43"), list.getTxsReversed()[0].TxHash) - require.Equal(t, []byte("tx-43++"), list.getTxsReversed()[1].TxHash) - require.Equal(t, []byte("tx-42"), list.getTxsReversed()[2].TxHash) - require.Equal(t, []byte("tx-42++"), list.getTxsReversed()[3].TxHash) - - // With lower nonces - list.notifyAccountNonce(43) - require.Len(t, list.getTxs(), 4) - require.Len(t, list.getTxsReversed(), 4) - require.Len(t, list.getSequentialTxs(), 1) - require.Equal(t, []byte("tx-43++"), list.getSequentialTxs()[0].TxHash) + // Gap + list.AddTx(createTx([]byte("tx-43"), ".", 43)) + require.Len(t, list.getTxs(), 1) + require.Len(t, list.getTxsReversed(), 1) + require.Len(t, list.getSequentialTxs(), 0) + + // Resolve gap + list.AddTx(createTx([]byte("tx-42"), ".", 42)) + require.Len(t, list.getTxs(), 2) + require.Len(t, list.getTxsReversed(), 2) + require.Len(t, list.getSequentialTxs(), 2) + + require.Equal(t, []byte("tx-42"), list.getTxs()[0].TxHash) + require.Equal(t, []byte("tx-43"), list.getTxs()[1].TxHash) + require.Equal(t, list.getTxs(), list.getSequentialTxs()) + + require.Equal(t, []byte("tx-43"), list.getTxsReversed()[0].TxHash) + require.Equal(t, []byte("tx-42"), list.getTxsReversed()[1].TxHash) + }) + + t.Run("with nonce duplicates", func(t *testing.T) { + list := newUnconstrainedListToTest() + list.notifyAccountNonce(42) + + list.AddTx(createTx([]byte("tx-42"), ".", 42)) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) + + list.AddTx(createTx([]byte("tx-42++"), ".", 42).withGasPrice(1.1 * oneBillion)) + list.AddTx(createTx([]byte("tx-43++"), ".", 43).withGasPrice(1.1 * oneBillion)) + + require.Len(t, list.getTxs(), 4) + require.Len(t, list.getTxsReversed(), 4) + require.Len(t, list.getSequentialTxs(), 2) + + require.Equal(t, []byte("tx-42++"), list.getSequentialTxs()[0].TxHash) + require.Equal(t, []byte("tx-43++"), list.getSequentialTxs()[1].TxHash) + + require.Equal(t, []byte("tx-42++"), list.getTxs()[0].TxHash) + require.Equal(t, []byte("tx-42"), list.getTxs()[1].TxHash) + require.Equal(t, []byte("tx-43++"), list.getTxs()[2].TxHash) + require.Equal(t, 
[]byte("tx-43"), list.getTxs()[3].TxHash) + + require.Equal(t, []byte("tx-43"), list.getTxsReversed()[0].TxHash) + require.Equal(t, []byte("tx-43++"), list.getTxsReversed()[1].TxHash) + require.Equal(t, []byte("tx-42"), list.getTxsReversed()[2].TxHash) + require.Equal(t, []byte("tx-42++"), list.getTxsReversed()[3].TxHash) + }) + + t.Run("with lower nonces", func(t *testing.T) { + list := newUnconstrainedListToTest() + list.notifyAccountNonce(43) + + list.AddTx(createTx([]byte("tx-42"), ".", 42)) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) + + require.Len(t, list.getTxs(), 2) + require.Len(t, list.getTxsReversed(), 2) + require.Len(t, list.getSequentialTxs(), 1) + require.Equal(t, []byte("tx-43"), list.getSequentialTxs()[0].TxHash) + + list.forgetAccountNonce() + + require.Len(t, list.getTxs(), 2) + require.Len(t, list.getTxsReversed(), 2) + require.Len(t, list.getSequentialTxs(), 2) + }) } func TestListForSender_DetectRaceConditions(t *testing.T) { From 53c5bd8d04cb3c3860ba24904dce5aeebb4a47ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Tue, 12 Nov 2024 16:17:12 +0200 Subject: [PATCH 119/124] Implement ForgetAllAccountNonces(). --- txcache/crossTxCache.go | 5 +++ txcache/disabledCache.go | 4 +++ txcache/txCache.go | 20 +++++++---- txcache/txCache_test.go | 65 ++++++++++++++++++++++++++++++++++++ txcache/txListBySenderMap.go | 7 ++++ 5 files changed, 95 insertions(+), 6 deletions(-) diff --git a/txcache/crossTxCache.go b/txcache/crossTxCache.go index c40bcc51..6749630e 100644 --- a/txcache/crossTxCache.go +++ b/txcache/crossTxCache.go @@ -119,6 +119,11 @@ func (cache *CrossTxCache) GetTransactionsPoolForSender(_ string) []*WrappedTran func (cache *CrossTxCache) NotifyAccountNonce(_ []byte, _ uint64) { } +// ForgetAllAccountNonces does nothing, only to respect the interface +func (cache *CrossTxCache) ForgetAllAccountNonces() { + log.Error("CrossTxCache.ForgetAllAccountNonces is not implemented") +} + // IsInterfaceNil returns true if there is no value under the interface func (cache *CrossTxCache) IsInterfaceNil() bool { return cache == nil diff --git a/txcache/disabledCache.go b/txcache/disabledCache.go index 69d7362c..805b3164 100644 --- a/txcache/disabledCache.go +++ b/txcache/disabledCache.go @@ -109,6 +109,10 @@ func (cache *DisabledCache) UnRegisterHandler(string) { func (cache *DisabledCache) NotifyAccountNonce(_ []byte, _ uint64) { } +// ForgetAllAccountNonces does nothing +func (cache *DisabledCache) ForgetAllAccountNonces() { +} + // ImmunizeTxsAgainstEviction does nothing func (cache *DisabledCache) ImmunizeTxsAgainstEviction(_ [][]byte) { } diff --git a/txcache/txCache.go b/txcache/txCache.go index 2c494831..92c60c0c 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -60,7 +60,7 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { return false, false } - logAdd.Trace("AddTx", "tx", tx.TxHash, "nonce", tx.Tx.GetNonce(), "sender", tx.Tx.GetSndAddr()) + logAdd.Trace("TxCache.AddTx", "tx", tx.TxHash, "nonce", tx.Tx.GetNonce(), "sender", tx.Tx.GetSndAddr()) tx.precomputeFields(cache.txGasHandler) @@ -78,11 +78,11 @@ func (cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) { // - B won't add to "txByHash" (duplicate) // - B adds to "txListBySender" // - A won't add to "txListBySender" (duplicate) - logAdd.Debug("AddTx: slight inconsistency detected:", "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) + logAdd.Debug("TxCache.AddTx: slight 
inconsistency detected:", "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender) } if len(evicted) > 0 { - logRemove.Trace("AddTx with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted)) + logRemove.Trace("TxCache.AddTx with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted)) cache.txByHash.RemoveTxsBulk(evicted) } @@ -104,7 +104,7 @@ func (cache *TxCache) SelectTransactions(gasRequested uint64, maxNum int) ([]*Wr stopWatch.Start("selection") logSelect.Debug( - "doSelectTransactions: begin", + "TxCache.SelectTransactions: begin", "num bytes", cache.NumBytes(), "num txs", cache.CountTx(), "num senders", cache.CountSenders(), @@ -115,7 +115,7 @@ func (cache *TxCache) SelectTransactions(gasRequested uint64, maxNum int) ([]*Wr stopWatch.Stop("selection") logSelect.Debug( - "doSelectTransactions: end", + "TxCache.SelectTransactions: end", "duration", stopWatch.GetMeasurement("selection"), "num txs selected", len(transactions), "gas", accumulatedGas, @@ -147,7 +147,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { cache.txByHash.RemoveTxsBulk(evicted) } - logRemove.Trace("RemoveTxByHash", "tx", txHash, "len(evicted)", len(evicted)) + logRemove.Trace("TxCache.RemoveTxByHash", "tx", txHash, "len(evicted)", len(evicted)) return true } @@ -282,6 +282,14 @@ func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { cache.txListBySender.notifyAccountNonce(accountKey, nonce) } +// ForgetAllAccountNonces clears all known account nonces. +// Generally speaking, should be called when a block is reverted. +func (cache *TxCache) ForgetAllAccountNonces() { + log.Debug("TxCache.ForgetAllAccountNonces", "name", cache.name) + + cache.txListBySender.forgetAllAccountNonces() +} + // ImmunizeTxsAgainstEviction does nothing for this type of cache func (cache *TxCache) ImmunizeTxsAgainstEviction(_ [][]byte) { } diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go index e9038543..085a85c4 100644 --- a/txcache/txCache_test.go +++ b/txcache/txCache_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-storage-go/common" "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" @@ -552,6 +553,70 @@ func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *t } } +func TestTxCache_ForgetAllAccountNonces(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 300001, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + } + + txGasHandler := txcachemocks.NewTxGasHandlerMock() + + sw := core.NewStopWatch() + + t.Run("numSenders = 100000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 100_000, 1) + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + cache.ForgetAllAccountNonces() + sw.Stop(t.Name()) + + cache.txListBySender.backingMap.IterCb(func(key string, item interface{}) { + require.False(t, item.(*txListForSender).accountNonceKnown.IsSet()) + }) + }) + + t.Run("numSenders = 300000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, txGasHandler) + require.Nil(t, err) + 
+ addManyTransactionsWithUniformDistribution(cache, 300_000, 1) + require.Equal(t, 300000, int(cache.CountTx())) + + sw.Start(t.Name()) + cache.ForgetAllAccountNonces() + sw.Stop(t.Name()) + + cache.txListBySender.backingMap.IterCb(func(key string, item interface{}) { + require.False(t, item.(*txListForSender).accountNonceKnown.IsSet()) + }) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.004712s (TestTxCache_ForgetAllAccountNonces/numSenders_=_100000,_numTransactions_=_1) + // 0.015129s (TestTxCache_ForgetAllAccountNonces/numSenders_=_300000,_numTransactions_=_1) +} + func newUnconstrainedCacheToTest() *TxCache { txGasHandler := txcachemocks.NewTxGasHandlerMock() cache, err := NewTxCache(ConfigSourceMe{ diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go index a1afbf10..1aa36108 100644 --- a/txcache/txListBySenderMap.go +++ b/txcache/txListBySenderMap.go @@ -133,6 +133,13 @@ func (txMap *txListBySenderMap) notifyAccountNonce(accountKey []byte, nonce uint listForSender.notifyAccountNonce(nonce) } +func (txMap *txListBySenderMap) forgetAllAccountNonces() { + txMap.backingMap.IterCb(func(key string, item interface{}) { + listForSender := item.(*txListForSender) + listForSender.forgetAccountNonce() + }) +} + // removeTransactionsWithHigherOrEqualNonce removes transactions with nonces higher or equal to the given nonce. // Useful for the eviction flow. func (txMap *txListBySenderMap) removeTransactionsWithHigherOrEqualNonce(accountKey []byte, nonce uint64) { From b91be710b2b37085d09a4a18143ac8e571a875b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 13 Nov 2024 09:43:56 +0200 Subject: [PATCH 120/124] Describe selection flow in readme. --- txcache/README.md | 125 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 97 insertions(+), 28 deletions(-) diff --git a/txcache/README.md b/txcache/README.md index 0acd9187..24ccdb35 100644 --- a/txcache/README.md +++ b/txcache/README.md @@ -40,6 +40,14 @@ dataCost = network.minGasLimit + len(data) * network.gasPerDataByte executionCost = gasLimit - dataCost ``` +Network parameters (as of November of 2024): + +``` +gasPriceModifier = 0.01 +minGasLimit = 50_000 +gasPerDataByte = 1_500 +``` + #### Examples (a) A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_000_000_000`: @@ -65,13 +73,27 @@ ppu = 60_500_000_000_000 / 60_500 = 1_000_000_000 atoms That is, for simple native transfers (whether they hold a data payload or not), the PPU is equal to the gas price. -(d) ... +(d) A contract call with `gasLimit = 75_000_000` and `gasPrice = 1_000_000_000`, with a data payload of `42` bytes: -### Paragraph 4 +``` +initiallyPaidFee = 861_870_000_000_000 atoms +ppu = 11_491_600 atoms +``` -Transaction **A** is considered more valuable (for the Network) than transaction **B** if **it has a higher PPU**. +(e) Similar to (d), but with `gasPrice = 2_000_000_000`: + +``` +initiallyPaidFee = 1_723_740_000_000_000 atoms +ppu = 22_983_200 atoms +``` + +That is, for contract calls, the PPU is not equal to the gas price, but much lower, due to the contract call _cost subsidy_. A higer gas price will result in a higher PPU. 
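+
+A worked breakdown of example **(d)**, assuming the fee combines as `dataCost * gasPrice + executionCost * gasPrice * gasPriceModifier` (this assumption reproduces the figures above):
+
+```
+dataCost         = 50_000 + 42 * 1_500 = 113_000
+executionCost    = 75_000_000 - 113_000 = 74_887_000
+initiallyPaidFee = 113_000 * 1_000_000_000 + 74_887_000 * 1_000_000_000 * 0.01 = 861_870_000_000_000 atoms
+ppu              = 861_870_000_000_000 / 75_000_000 = 11_491_600 atoms
+```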
+ +### Paragraph 3 -If two transactions have the same PPU, they are ordered using an arbitrary, but deterministic rule: the transaction with the higher [fvn32(transactionHash)](https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function) "wins" the comparison. +Transaction **A** is considered **more valuable (for the Network)** than transaction **B** if **it has a higher PPU**. + +If two transactions have the same PPU, they are ordered using an arbitrary, but deterministic rule: the transaction with the higher [fnv32(transactionHash)](https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function) "wins" the comparison. Pseudo-code: @@ -81,33 +103,80 @@ func isTransactionMoreValuableForNetwork(A, B): return true if A.ppu < B.ppu: return false - return fvn32(A.hash) > fvn32(B.hash) + return fnv32(A.hash) > fnv32(B.hash) ``` -### Paragraph 3 - -The mempool selects transactions as follows: - - before starting the selection loop, get a snapshot of the senders, in an arbitrary order. - - in the selection loop, do as many _passes_ as needed to satisfy `gasRequested` (see **Paragraph 1**). - - within a _pass_, ... - - if `gasRequested` is satisfied, stop the _selection loop_ early. - - if `maxNum` is satisfied, stop the _selection loop_ early. - ### Paragraph 4 -Within a _selection pass_, a batch of transactions from a sender is selected as follows: - - ..., attempt to **detect an initial nonces gap** (if enough information is available, that is, if the current account nonce is known - see section **Account nonce notifications**). - - if a nonces gap is detected, ... Subsequent passes of the selection loop (within the same selection session) will skip this sender. The sender will be re-considered in a future selection session. - -#### Initial gaps and middle gaps - -### Account nonce notifications +The mempool selects transactions as follows (pseudo-code): -### Transactions addition - -### Transactions removal - -### Transactions eviction - -### Monitoring and diagnostics +``` +func selectTransactions(gasRequested, maxNum): + // Setup phase + senders := list of all current senders in the mempool, in an arbitrary order + bunchesOfTransactions := sourced from senders; nonces-gap-free, duplicates-free, nicely sorted by nonce + + // Holds selected transactions + selectedTransactions := empty + + // Holds not-yet-selected transactions, ordered by PPU + competitionHeap := empty + + for each bunch in bunchesOfTransactions: + competitionHeap.push(next available transaction from bunch) + + // Selection loop + while competitionHeap is not empty: + mostValuableTransaction := competitionHeap.pop() + + // Check if adding the next transaction exceeds limits + if selectedTransactions.totalGasLimit + mostValuableTransaction.gasLimit > gasRequested: + break + if selectedTransactions.length + 1 > maxNum: + break + + selectedTransactions.append(mostValuableTransaction) + + nextTransaction := next available transaction from the bunch of mostValuableTransaction + if nextTransaction exists: + competitionHeap.push(nextTransaction) + + return selectedTransactions +``` +Thus, the mempool selects transactions using an efficient and value-driven algorithm that ensures the most valuable transactions (in terms of PPU) are prioritized while maintaining correct nonce sequencing per sender. The selection process is as follows: + +**Setup phase:** + + - **Snapshot of senders:** + - Before starting the selection loop, obtain a snapshot of all current senders in the mempool in an arbitrary order. 
+ + - **Organize transactions into bunches:** + - For each sender, collect all their pending transactions and organize them into a "bunch." + - Each bunch is: + - **Nonce-gap-free:** There are no missing nonces between transactions. + - **Duplicates-free:** No duplicate transactions are included. + - **Sorted by nonce:** Transactions are ordered in ascending order based on their nonce values. + + - **Prepare the heap:** + - Extract the first transaction (lowest nonce) from each sender's bunch. + - Place these transactions onto a max heap, which is ordered based on the transaction's PPU. + +**Selection loop:** + + - **Iterative selection:** + - Continue the loop until either the total gas of selected transactions meets or exceeds `gasRequested`, or the number of selected transactions reaches `maxNum`. + - In each iteration: + - **Select the most valuable transaction:** + - Pop the transaction with the highest PPU from the heap. + - Append this transaction to the list of `selectedTransactions`. + - **Update the Sender's Bunch:** + - If the sender of the selected transaction has more transactions in their bunch: + - Take the next transaction (next higher nonce) from the bunch. + - Push this transaction onto the heap to compete in subsequent iterations. + - This process ensures that at each step, the most valuable transaction across all senders is selected while maintaining proper nonce order for each sender. + + - **Early Termination:** + - The selection loop can terminate early if either of the following conditions is satisfied before all transactions are processed: + - The accumulated gas of selected transactions meets or exceeds `gasRequested`. + - The number of selected transactions reaches `maxNum`. From 10f1c3665fa594146a3f6b0aedee557dc3e254e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 13 Nov 2024 09:48:34 +0200 Subject: [PATCH 121/124] Typos. --- txcache/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/txcache/README.md b/txcache/README.md index 24ccdb35..83937543 100644 --- a/txcache/README.md +++ b/txcache/README.md @@ -170,13 +170,13 @@ Thus, the mempool selects transactions using an efficient and value-driven algor - **Select the most valuable transaction:** - Pop the transaction with the highest PPU from the heap. - Append this transaction to the list of `selectedTransactions`. - - **Update the Sender's Bunch:** + - **Update the sender's bunch:** - If the sender of the selected transaction has more transactions in their bunch: - Take the next transaction (next higher nonce) from the bunch. - Push this transaction onto the heap to compete in subsequent iterations. - This process ensures that at each step, the most valuable transaction across all senders is selected while maintaining proper nonce order for each sender. - - **Early Termination:** + - **Early termination:** - The selection loop can terminate early if either of the following conditions is satisfied before all transactions are processed: - The accumulated gas of selected transactions meets or exceeds `gasRequested`. - The number of selected transactions reaches `maxNum`. From 8cd2eb02208e42738e089fb7ef53e566e60885e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 13 Nov 2024 09:57:09 +0200 Subject: [PATCH 122/124] Update readme. 
--- txcache/README.md | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/txcache/README.md b/txcache/README.md index 83937543..6330b033 100644 --- a/txcache/README.md +++ b/txcache/README.md @@ -50,21 +50,21 @@ gasPerDataByte = 1_500 #### Examples -(a) A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_000_000_000`: +**(a)** A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_000_000_000`: ``` initiallyPaidFee = 50_000_000_000 atoms ppu = 1_000_000_000 atoms ``` -(b) A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_500_000_000`: +**(b)** A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_500_000_000`: ``` initiallyPaidFee = gasLimit * gasPrice = 75_000_000_000 atoms ppu = 75_000_000_000 / 50_000 = 1_500_000_000 atoms ``` -(c) A simple native transfer with a data payload of 7 bytes, with `gasLimit = 50_000 + 7 * 1500` and `gasPrice = 1_000_000_000`: +**(c)** A simple native transfer with a data payload of 7 bytes, with `gasLimit = 50_000 + 7 * 1500` and `gasPrice = 1_000_000_000`: ``` initiallyPaidFee = 60_500_000_000_000 atoms @@ -73,21 +73,21 @@ ppu = 60_500_000_000_000 / 60_500 = 1_000_000_000 atoms That is, for simple native transfers (whether they hold a data payload or not), the PPU is equal to the gas price. -(d) A contract call with `gasLimit = 75_000_000` and `gasPrice = 1_000_000_000`, with a data payload of `42` bytes: +**(d)** A contract call with `gasLimit = 75_000_000` and `gasPrice = 1_000_000_000`, with a data payload of `42` bytes: ``` initiallyPaidFee = 861_870_000_000_000 atoms ppu = 11_491_600 atoms ``` -(e) Similar to (d), but with `gasPrice = 2_000_000_000`: +**(e)** Similar to **(d)**, but with `gasPrice = 2_000_000_000`: ``` initiallyPaidFee = 1_723_740_000_000_000 atoms ppu = 22_983_200 atoms ``` -That is, for contract calls, the PPU is not equal to the gas price, but much lower, due to the contract call _cost subsidy_. A higer gas price will result in a higher PPU. +That is, for contract calls, the PPU is not equal to the gas price, but much lower, due to the contract call _cost subsidy_. **A higher gas price will result in a higher PPU.** ### Paragraph 3 @@ -180,3 +180,8 @@ Thus, the mempool selects transactions using an efficient and value-driven algor - The selection loop can terminate early if either of the following conditions is satisfied before all transactions are processed: - The accumulated gas of selected transactions meets or exceeds `gasRequested`. - The number of selected transactions reaches `maxNum`. + + +### Paragraph 5 + +On the node's side, the selected transactions are shuffled using a deterministic algorithm. This shuffling ensures that the transaction order remains unpredictable to the proposer, effectively preventing _front-running attacks_. Therefore, being selected first by the mempool does not guarantee that a transaction will be included first in the block. Additionally, selection by the mempool does not ensure inclusion in the very next block, as the proposer has the final authority on which transactions to include, based on **the remaining space available** in the block. From fa41478ce26f3915916ea6c6d6cebf6e33ddf50b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Wed, 13 Nov 2024 17:32:50 +0200 Subject: [PATCH 123/124] Fix after review. 
--- txcache/crossTxCache.go | 1 - txcache/txCache.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/txcache/crossTxCache.go b/txcache/crossTxCache.go index 6749630e..e39723e4 100644 --- a/txcache/crossTxCache.go +++ b/txcache/crossTxCache.go @@ -121,7 +121,6 @@ func (cache *CrossTxCache) NotifyAccountNonce(_ []byte, _ uint64) { // ForgetAllAccountNonces does nothing, only to respect the interface func (cache *CrossTxCache) ForgetAllAccountNonces() { - log.Error("CrossTxCache.ForgetAllAccountNonces is not implemented") } // IsInterfaceNil returns true if there is no value under the interface diff --git a/txcache/txCache.go b/txcache/txCache.go index 92c60c0c..fe6f37f3 100644 --- a/txcache/txCache.go +++ b/txcache/txCache.go @@ -283,7 +283,7 @@ func (cache *TxCache) NotifyAccountNonce(accountKey []byte, nonce uint64) { } // ForgetAllAccountNonces clears all known account nonces. -// Generally speaking, should be called when a block is reverted. +// Should be called when a block is reverted. func (cache *TxCache) ForgetAllAccountNonces() { log.Debug("TxCache.ForgetAllAccountNonces", "name", cache.name) From 68835d19f19e8515d85762c4391a15bf4c28049b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrei=20B=C4=83ncioiu?= Date: Thu, 14 Nov 2024 10:38:30 +0200 Subject: [PATCH 124/124] Fix after review, drop logic around fnv32. --- txcache/README.md | 4 ++-- txcache/selection_test.go | 29 +++++++---------------------- txcache/wrappedTransaction.go | 18 +++--------------- txcache/wrappedTransaction_test.go | 6 ------ 4 files changed, 12 insertions(+), 45 deletions(-) diff --git a/txcache/README.md b/txcache/README.md index 6330b033..ec9fb7ba 100644 --- a/txcache/README.md +++ b/txcache/README.md @@ -93,7 +93,7 @@ That is, for contract calls, the PPU is not equal to the gas price, but much low Transaction **A** is considered **more valuable (for the Network)** than transaction **B** if **it has a higher PPU**. -If two transactions have the same PPU, they are ordered using an arbitrary, but deterministic rule: the transaction with the higher [fnv32(transactionHash)](https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function) "wins" the comparison. +If two transactions have the same PPU, they are ordered using an arbitrary, but deterministic rule: the transaction with the "lower" transaction hash "wins" the comparison. 
Pseudo-code: @@ -103,7 +103,7 @@ func isTransactionMoreValuableForNetwork(A, B): return true if A.ppu < B.ppu: return false - return fnv32(A.hash) > fnv32(B.hash) + return A.hash < B.hash ``` ### Paragraph 4 diff --git a/txcache/selection_test.go b/txcache/selection_test.go index 5d358e26..fb8f45c4 100644 --- a/txcache/selection_test.go +++ b/txcache/selection_test.go @@ -15,28 +15,13 @@ func TestTxCache_SelectTransactions_Dummy(t *testing.T) { cache := newUnconstrainedCacheToTest() cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) - require.Equal(t, 3193030061, int(fnv32("hash-alice-4"))) - cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) - require.Equal(t, 3193030058, int(fnv32("hash-alice-3"))) - cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) - require.Equal(t, 3193030059, int(fnv32("hash-alice-2"))) - cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) - require.Equal(t, 3193030056, int(fnv32("hash-alice-1"))) - cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) - require.Equal(t, 187766579, int(fnv32("hash-bob-7"))) - cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) - require.Equal(t, 187766578, int(fnv32("hash-bob-6"))) - cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) - require.Equal(t, 187766577, int(fnv32("hash-bob-5"))) - cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) - require.Equal(t, 3082288595, int(fnv32("hash-carol-1"))) selected, accumulatedGas := cache.SelectTransactions(math.MaxUint64, math.MaxInt) require.Len(t, selected, 8) @@ -47,10 +32,10 @@ func TestTxCache_SelectTransactions_Dummy(t *testing.T) { require.Equal(t, "hash-alice-2", string(selected[1].TxHash)) require.Equal(t, "hash-alice-3", string(selected[2].TxHash)) require.Equal(t, "hash-alice-4", string(selected[3].TxHash)) - require.Equal(t, "hash-carol-1", string(selected[4].TxHash)) - require.Equal(t, "hash-bob-5", string(selected[5].TxHash)) - require.Equal(t, "hash-bob-6", string(selected[6].TxHash)) - require.Equal(t, "hash-bob-7", string(selected[7].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[4].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[5].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[6].TxHash)) + require.Equal(t, "hash-carol-1", string(selected[7].TxHash)) }) t.Run("alice > carol > bob", func(t *testing.T) { @@ -89,9 +74,9 @@ func TestTxCache_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { require.Equal(t, 750000, int(accumulatedGas)) // Check order - require.Equal(t, "hash-carol-1", string(selected[0].TxHash)) - require.Equal(t, "hash-bob-5", string(selected[1].TxHash)) - require.Equal(t, "hash-bob-6", string(selected[2].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[0].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[1].TxHash)) + require.Equal(t, "hash-carol-1", string(selected[2].TxHash)) require.Equal(t, "hash-alice-1", string(selected[3].TxHash)) require.Equal(t, "hash-bob-7", string(selected[4].TxHash)) }) diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go index a8968717..d5d652fb 100644 --- a/txcache/wrappedTransaction.go +++ b/txcache/wrappedTransaction.go @@ -1,6 +1,7 @@ package txcache import ( + "bytes" "sync/atomic" "github.com/multiversx/mx-chain-core-go/data" @@ -18,7 +19,6 @@ type WrappedTransaction struct { Size int64 PricePerUnit atomic.Uint64 - HashFnv32 atomic.Uint32 } // precomputeFields computes (and caches) the (average) price per gas unit. 
@@ -31,18 +31,6 @@ func (wrappedTx *WrappedTransaction) precomputeFields(txGasHandler TxGasHandler) } wrappedTx.PricePerUnit.Store(fee / gasLimit) - wrappedTx.HashFnv32.Store(fnv32(string(wrappedTx.TxHash))) -} - -// fnv32 implements https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function for 32 bits -func fnv32(key string) uint32 { - hash := uint32(2166136261) - const prime32 = uint32(16777619) - for i := 0; i < len(key); i++ { - hash *= prime32 - hash ^= uint32(key[i]) - } - return hash } // Equality is out of scope (not possible in our case). @@ -54,6 +42,6 @@ func (wrappedTx *WrappedTransaction) isTransactionMoreValuableForNetwork(otherTr return ppu > ppuOther } - // In the end, compare by hash number of transaction hash - return wrappedTx.HashFnv32.Load() > otherTransaction.HashFnv32.Load() + // In the end, compare by transaction hash + return bytes.Compare(wrappedTx.TxHash, otherTransaction.TxHash) < 0 } diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go index 67a13695..b24dbc3f 100644 --- a/txcache/wrappedTransaction_test.go +++ b/txcache/wrappedTransaction_test.go @@ -15,7 +15,6 @@ func TestWrappedTransaction_precomputeFields(t *testing.T) { tx.precomputeFields(txGasHandler) require.Equal(t, oneBillion, int(tx.PricePerUnit.Load())) - require.Equal(t, 84696446, int(tx.HashFnv32.Load())) }) t.Run("move balance gas limit and execution gas limit (1)", func(t *testing.T) { @@ -23,7 +22,6 @@ func TestWrappedTransaction_precomputeFields(t *testing.T) { tx.precomputeFields(txGasHandler) require.Equal(t, 999_980_777, int(tx.PricePerUnit.Load())) - require.Equal(t, 84696445, int(tx.HashFnv32.Load())) }) t.Run("move balance gas limit and execution gas limit (2)", func(t *testing.T) { @@ -32,9 +30,7 @@ func TestWrappedTransaction_precomputeFields(t *testing.T) { actualFee := 51500*oneBillion + (oneMilion-51500)*oneBillion/100 require.Equal(t, 60_985_000_000_000, actualFee) - require.Equal(t, actualFee/oneMilion, int(tx.PricePerUnit.Load())) - require.Equal(t, 84696444, int(tx.HashFnv32.Load())) }) } @@ -54,11 +50,9 @@ func TestWrappedTransaction_isTransactionMoreValuableForNetwork(t *testing.T) { t.Run("decide by transaction hash (set them up to have the same PPU)", func(t *testing.T) { a := createTx([]byte("a-7"), "a", 7) a.precomputeFields(txGasHandler) - require.Equal(t, 2191299170, int(a.HashFnv32.Load())) b := createTx([]byte("b-7"), "b", 7) b.precomputeFields(txGasHandler) - require.Equal(t, 1654268265, int(b.HashFnv32.Load())) require.True(t, a.isTransactionMoreValuableForNetwork(b)) })
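
To tie the above together: the heap-based selection flow described in the README, combined with the tie-breaking rule introduced in this patch (equal PPU, then the "lower" transaction hash wins), can be sketched in Go roughly as follows. This is an illustrative reconstruction under simplified assumptions, not the cache's actual `SelectTransactions` implementation; the `tx`, `bunch`, `heapItem` and `selectTransactions` names are hypothetical.

```go
package main

import (
	"bytes"
	"container/heap"
	"fmt"
)

// tx is a simplified stand-in for a wrapped transaction.
type tx struct {
	hash     []byte
	gasLimit uint64
	ppu      uint64
}

// bunch holds one sender's transactions: nonce-gap-free, duplicates-free, sorted by nonce.
type bunch []tx

// heapItem tracks the next not-yet-selected transaction of one bunch.
type heapItem struct {
	txs  bunch
	next int
}

// txHeap is a max-heap ordered by PPU, with ties broken by the "lower" hash.
type txHeap []heapItem

func (h txHeap) Len() int      { return len(h) }
func (h txHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h txHeap) Less(i, j int) bool {
	a, b := h[i].txs[h[i].next], h[j].txs[h[j].next]
	if a.ppu != b.ppu {
		return a.ppu > b.ppu // higher PPU is more valuable for the network
	}
	return bytes.Compare(a.hash, b.hash) < 0 // tie-break: lower hash wins
}
func (h *txHeap) Push(x any) { *h = append(*h, x.(heapItem)) }
func (h *txHeap) Pop() any {
	old := *h
	item := old[len(old)-1]
	*h = old[:len(old)-1]
	return item
}

// selectTransactions mirrors the README pseudo-code: pop the most valuable candidate,
// then push the same sender's next transaction, preserving per-sender nonce order.
func selectTransactions(bunches []bunch, gasRequested uint64, maxNum int) []tx {
	h := &txHeap{}
	for _, b := range bunches {
		if len(b) > 0 {
			*h = append(*h, heapItem{txs: b})
		}
	}
	heap.Init(h)

	selected := make([]tx, 0, maxNum)
	accumulatedGas := uint64(0)

	for h.Len() > 0 {
		item := heap.Pop(h).(heapItem)
		candidate := item.txs[item.next]

		if accumulatedGas+candidate.gasLimit > gasRequested || len(selected) >= maxNum {
			break
		}

		selected = append(selected, candidate)
		accumulatedGas += candidate.gasLimit

		if item.next+1 < len(item.txs) {
			item.next++
			heap.Push(h, item)
		}
	}

	return selected
}

func main() {
	alice := bunch{
		{hash: []byte("a-1"), gasLimit: 50_000, ppu: 1_000_000_000},
		{hash: []byte("a-2"), gasLimit: 50_000, ppu: 1_000_000_000},
	}
	bob := bunch{
		{hash: []byte("b-1"), gasLimit: 50_000, ppu: 1_500_000_000},
	}

	for _, selectedTx := range selectTransactions([]bunch{alice, bob}, 200_000, 10) {
		fmt.Println(string(selectedTx.hash))
	}
}
```

With the two senders above, the sketch prints `b-1`, `a-1`, `a-2`: the higher-PPU transaction wins the competition, while each sender's own transactions keep their nonce order.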