From 0489ccf2f0e856a28e6220139190b0d2181d8f51 Mon Sep 17 00:00:00 2001 From: amirylm Date: Thu, 7 Mar 2024 18:55:02 +0200 Subject: [PATCH 01/58] block window utilities --- .../evmregistry/v21/logprovider/window.go | 39 +++++ .../v21/logprovider/window_test.go | 157 ++++++++++++++++++ 2 files changed, 196 insertions(+) create mode 100644 core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go create mode 100644 core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go new file mode 100644 index 00000000000..8042fe5cb05 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go @@ -0,0 +1,39 @@ +package logprovider + +import ( + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" +) + +// BlockWindow returns the start and end block for the given window. +func BlockWindow(block int64, blockRate int) (start int64, end int64) { + windowSize := int64(blockRate) + if windowSize == 0 { + return block, block + } + start = block - (block % windowSize) + end = block + (windowSize - (block % windowSize) - 1) + return +} + +// LogSorter sorts the logs based on block number, tx hash and log index. +// returns true if b should come before a. +func LogSorter(a, b logpoller.Log) bool { + return LogComparator(a, b) > 0 +} + +// LogComparator compares the logs based on block number, tx hash and log index. 
+// +// Returns: +// +// -1 if a < b +// 0 if a == b +// +1 if a > b +func LogComparator(a, b logpoller.Log) int { + if b.BlockNumber != a.BlockNumber { + return int(a.BlockNumber - b.BlockNumber) + } + if txDiff := a.TxHash.Big().Cmp(b.TxHash.Big()); txDiff != 0 { + return txDiff + } + return int(a.LogIndex - b.LogIndex) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go new file mode 100644 index 00000000000..4a1dadfc410 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go @@ -0,0 +1,157 @@ +package logprovider + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" +) + +func TestBlockWindow(t *testing.T) { + tests := []struct { + name string + block int64 + blockRate int + wantStart int64 + wantEnd int64 + }{ + { + name: "block 0, blockRate 1", + block: 0, + blockRate: 1, + wantStart: 0, + wantEnd: 0, + }, + { + name: "block 0, blockRate 4", + block: 0, + blockRate: 4, + wantStart: 0, + wantEnd: 3, + }, + { + name: "block 81, blockRate 4", + block: 81, + blockRate: 4, + wantStart: 80, + wantEnd: 83, + }, + { + name: "block 83, blockRate 4", + block: 83, + blockRate: 4, + wantStart: 80, + wantEnd: 83, + }, + { + name: "block 84, blockRate 4", + block: 84, + blockRate: 4, + wantStart: 84, + wantEnd: 87, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + start, end := BlockWindow(tc.block, tc.blockRate) + require.Equal(t, tc.wantStart, start) + require.Equal(t, tc.wantEnd, end) + }) + } +} + +func TestLogComparatorSorter(t *testing.T) { + tests := []struct { + name string + a logpoller.Log + b logpoller.Log + wantCmp int + wantSort bool + }{ + { + name: "a == b", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + 
LogIndex: 1, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + wantCmp: 0, + wantSort: false, + }, + { + name: "a < b: block number", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + b: logpoller.Log{ + BlockNumber: 2, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + wantCmp: -1, + wantSort: false, + }, + { + name: "a < b: log index", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + wantCmp: -1, + wantSort: false, + }, + { + name: "a > b: block number", + a: logpoller.Log{ + BlockNumber: 3, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + b: logpoller.Log{ + BlockNumber: 2, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + wantCmp: 1, + wantSort: true, + }, + { + name: "a > b: log index", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 3, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + wantCmp: 1, + wantSort: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.wantCmp, LogComparator(tc.a, tc.b)) + require.Equal(t, tc.wantSort, LogSorter(tc.a, tc.b)) + }) + } +} From fe14f84afb27a0613277d3b78a694c1a5a381706 Mon Sep 17 00:00:00 2001 From: amirylm Date: Fri, 8 Mar 2024 17:04:42 +0200 Subject: [PATCH 02/58] new buffer (wip) --- .../evmregistry/v21/logprovider/buffer_v2.go | 324 ++++++++++++++++++ .../v21/logprovider/buffer_v2_test.go | 295 ++++++++++++++++ 2 files changed, 619 insertions(+) create mode 100644 core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go create mode 100644 core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go diff --git 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go new file mode 100644 index 00000000000..f9c6e5cab46 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go @@ -0,0 +1,324 @@ +package logprovider + +import ( + "encoding/hex" + "math/big" + "sort" + "sync" + "sync/atomic" + + ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics" +) + +type BufferedLog struct { + ID *big.Int + Log logpoller.Log +} + +type LogBuffer interface { + // Enqueue adds logs to the buffer and might also drop logs if the limit for the + // given upkeep was exceeded. Returns the number of logs that were added and number of logs that were dropped. + Enqueue(id *big.Int, logs ...logpoller.Log) (added int, dropped int) + // Dequeue pulls logs from the buffer that are within the given block window, + // with a maximum number of logs per upkeep and a total maximum number of logs to return. + // It also accepts a function to select upkeeps. + // Returns logs (associated to upkeeps) and the number of remaining + // logs in that window for the involved upkeeps. + Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) + // Size returns the total number of logs in the buffer. + Size() int + // SetConfig sets the buffer size and the maximum number of logs to keep for each upkeep. 
+ SetConfig(lookback, maxUpkeepLogs int) +} + +func DefaultUpkeepSelector(id *big.Int) bool { + return true +} + +type logBuffer struct { + lggr logger.Logger + // max number of logs to keep in the buffer for each upkeep per block + maxUpkeepLogs *atomic.Int32 + // number of blocks to keep in the buffer + bufferSize *atomic.Int32 + // last block number seen by the buffer + lastBlockSeen *atomic.Int64 + // map of upkeep id to its buffer + upkeepBuffers map[string]*upkeepLogBuffer + lock sync.RWMutex +} + +func NewLogBuffer(lggr logger.Logger, size, upkeepLogLimit int) LogBuffer { + s := &atomic.Int32{} + s.Add(int32(size)) + l := &atomic.Int32{} + l.Add(int32(upkeepLogLimit)) + return &logBuffer{ + lggr: lggr.Named("KeepersRegistry.LogEventBufferV2"), + maxUpkeepLogs: l, + bufferSize: s, + lastBlockSeen: &atomic.Int64{}, + upkeepBuffers: make(map[string]*upkeepLogBuffer), + } +} + +func (b *logBuffer) SetConfig(lookback, logLimitHigh int) { + b.lock.Lock() + defer b.lock.Unlock() + + b.bufferSize.Store(int32(lookback)) + b.maxUpkeepLogs.Store(int32(logLimitHigh)) + + for _, ub := range b.upkeepBuffers { + ub.setConfig(logLimitHigh) + } +} + +func (b *logBuffer) Size() int { + b.lock.RLock() + defer b.lock.RUnlock() + + size := 0 + for _, ub := range b.upkeepBuffers { + size += ub.size() + } + + return size +} + +func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { + buf, ok := b.getUpkeepBuffer(uid) + if !ok || buf == nil { + buf = newUpkeepLogBuffer(b.lggr, uid, int(b.maxUpkeepLogs.Load()*b.bufferSize.Load())) + b.setUpkeepBuffer(uid, buf) + } + lastBlockSeen := latestBlockNumber(logs...) + if b.lastBlockSeen.Load() < lastBlockSeen { + b.lastBlockSeen.Store(lastBlockSeen) + } + offsetBlock := b.lastBlockSeen.Load() - int64(b.bufferSize.Load()) + if offsetBlock <= 0 { + offsetBlock = 1 + } + added, dropped := buf.enqueue(offsetBlock, logs...) 
+ + return added, dropped +} + +func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { + b.lock.RLock() + defer b.lock.RUnlock() + + start, end := BlockWindow(block, blockRate) + result, remaining := b.tryDequeue(start, end, upkeepLimit, maxResults, upkeepSelector) + for len(result) < maxResults && remaining > 0 { + nextResults, nextRemaining := b.tryDequeue(start, end, upkeepLimit, maxResults-len(result), upkeepSelector) + result = append(result, nextResults...) + remaining = nextRemaining + } + + return result, remaining +} + +func (b *logBuffer) tryDequeue(start, end int64, minUpkeepLogs, capacity int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { + var result []BufferedLog + var remainingLogs int + for _, buf := range b.upkeepBuffers { + if !upkeepSelector(buf.id) { + continue + } + if capacity == 0 { + // if there is no more capacity for results, just count the remaining logs + remainingLogs += buf.size() + continue + } + if minUpkeepLogs > capacity { + // if there are more logs to fetch than the capacity, fetch the minimum + minUpkeepLogs = capacity + } + logs, remaining := buf.dequeue(start, end, minUpkeepLogs) + for _, l := range logs { + result = append(result, BufferedLog{ID: buf.id, Log: l}) + capacity-- + } + remainingLogs += remaining + } + return result, remainingLogs +} + +func (b *logBuffer) getUpkeepBuffer(uid *big.Int) (*upkeepLogBuffer, bool) { + b.lock.RLock() + defer b.lock.RUnlock() + + ub, ok := b.upkeepBuffers[uid.String()] + return ub, ok +} + +func (b *logBuffer) setUpkeepBuffer(uid *big.Int, buf *upkeepLogBuffer) { + b.lock.Lock() + defer b.lock.Unlock() + + b.upkeepBuffers[uid.String()] = buf +} + +type upkeepLogBuffer struct { + lggr logger.Logger + + id *big.Int + maxLogs *atomic.Int32 + + q []logpoller.Log + visited map[string]int64 + lock sync.RWMutex +} + +func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, maxLogs int) 
*upkeepLogBuffer { + limit := &atomic.Int32{} + limit.Add(int32(maxLogs)) + return &upkeepLogBuffer{ + lggr: lggr.With("id", id.String()), + id: id, + maxLogs: limit, + q: make([]logpoller.Log, 0, maxLogs), + visited: make(map[string]int64), + } +} + +func (ub *upkeepLogBuffer) setConfig(maxLogs int) { + ub.maxLogs.Store(int32(maxLogs)) +} + +func (ub *upkeepLogBuffer) size() int { + ub.lock.RLock() + defer ub.lock.RUnlock() + + return len(ub.q) +} + +func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log, int) { + ub.lock.Lock() + defer ub.lock.Unlock() + + if len(ub.q) == 0 { + return nil, 0 + } + + var results []logpoller.Log + var remaining int + updatedLogs := make([]logpoller.Log, 0, ub.maxLogs.Load()) + for _, l := range ub.q { + if l.BlockNumber >= start && l.BlockNumber <= end { + if len(results) < limit { + results = append(results, l) + continue + } + remaining++ + } + updatedLogs = append(updatedLogs, l) + } + + if len(results) > 0 { + ub.q = updatedLogs + } + + prommetrics.AutomationLogsInLogBuffer.Sub(float64(len(results))) + + return results, remaining +} + +func (ub *upkeepLogBuffer) enqueue(offsetBlock int64, logsToAdd ...logpoller.Log) (int, int) { + ub.lock.Lock() + defer ub.lock.Unlock() + + logs := ub.q + var added int + for _, log := range logsToAdd { + if log.BlockNumber < offsetBlock { + ub.lggr.Debugw("Skipping log from old block", "offsetBlock", offsetBlock, "logBlock", log.BlockNumber) + continue + } + logid := logID(log) + if _, ok := ub.visited[logid]; ok { + ub.lggr.Debugw("Skipping known log", "offsetBlock", offsetBlock, "logBlock", log.BlockNumber) + continue + } + added++ + if len(logs) == 0 { + logs = append(logs, log) + } else { + i, _ := sort.Find(len(logs), func(i int) int { + return LogComparator(log, logs[i]) + }) + if i == len(logs) { + logs = append(logs, log) + } else { + logs = append(logs[:i], append([]logpoller.Log{log}, logs[i:]...)...) 
+ } + } + ub.visited[logid] = log.BlockNumber + } + ub.q = logs + + prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) + + return added, ub.clean(offsetBlock) +} + +func (ub *upkeepLogBuffer) clean(offsetBlock int64) int { + maxLogs := int(ub.maxLogs.Load()) + + // sort.SliceStable(updated, func(i, j int) bool { + // return LogSorter(updated[i], updated[j]) + // }) + updated := make([]logpoller.Log, 0, maxLogs) + var dropped int + for _, l := range ub.q { + if l.BlockNumber > offsetBlock { + if len(updated) > maxLogs-1 { + prommetrics.AutomationLogsInLogBuffer.Dec() + // TODO: check if we should clean visited as well + ub.lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", l.BlockNumber, + "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), "maxLogs", maxLogs) + dropped++ + } else { + updated = append(updated, l) + } + } else { + prommetrics.AutomationLogsInLogBuffer.Dec() + // old logs are ignored and removed from visited + logid := logID(l) + delete(ub.visited, logid) + } + } + ub.q = updated + + for lid, block := range ub.visited { + if block <= offsetBlock { + delete(ub.visited, lid) + } + } + + return dropped +} + +func logID(l logpoller.Log) string { + ext := ocr2keepers.LogTriggerExtension{ + Index: uint32(l.LogIndex), + } + copy(ext.TxHash[:], l.TxHash[:]) + copy(ext.BlockHash[:], l.BlockHash[:]) + return hex.EncodeToString(ext.LogIdentifier()) +} + +func latestBlockNumber(logs ...logpoller.Log) int64 { + var latest int64 + for _, l := range logs { + if l.BlockNumber > latest { + latest = l.BlockNumber + } + } + return latest +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go new file mode 100644 index 00000000000..d1419b304d2 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go @@ -0,0 +1,295 @@ 
+package logprovider + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestLogEventBufferV2_Clean(t *testing.T) { + t.Run("empty", func(t *testing.T) { + buf := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), 10) + + buf.clean(10) + }) + + t.Run("happy path", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 10) + + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 0}, + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 1}, + ) + + upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) + require.True(t, ok) + require.Equal(t, 4, upkeepBuf.size()) + require.Equal(t, 0, upkeepBuf.clean(10)) + require.Equal(t, 2, upkeepBuf.size()) + }) +} + +func TestLogEventBufferV2_EnqueueDequeue(t *testing.T) { + t.Run("dequeue empty", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 10) + + results, remaining := buf.Dequeue(int64(1), 20, 1, 10, DefaultUpkeepSelector) + require.Equal(t, 0, len(results)) + require.Equal(t, 0, remaining) + }) + + t.Run("enqueue", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 10) + + added, dropped := buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + require.Equal(t, 2, added) + require.Equal(t, 0, dropped) + upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) + require.True(t, ok) + require.Equal(t, 2, upkeepBuf.size()) + }) + + t.Run("enqueue 
dequeue", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 10) + + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.Enqueue(big.NewInt(2), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + ) + results, remaining := buf.Dequeue(int64(1), 20, 1, 2, DefaultUpkeepSelector) + require.Equal(t, 2, len(results)) + require.Equal(t, 2, remaining) + require.True(t, results[0].ID.Cmp(results[1].ID) != 0) + results, remaining = buf.Dequeue(int64(1), 20, 1, 2, DefaultUpkeepSelector) + require.Equal(t, 2, len(results)) + require.Equal(t, 0, remaining) + }) + + // t.Run("enqueue logs overflow", func(t *testing.T) { + // buf := NewLogBuffer(logger.TestLogger(t), 2) + + // require.Equal(t, 2, buf.Enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + // )) + // upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) + // require.True(t, ok) + // require.Equal(t, 2, upkeepBuf.len()) + // }) + + // t.Run("enqueue dequeue with dynamic limits", func(t *testing.T) { + // buf := NewLogBuffer(logger.TestLogger(t), 2) + + // require.Equal(t, 3, buf.Enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + // )) + // results := buf.Dequeue(int64(1), int64(20), 1, 2) + // require.Equal(t, 2, len(results)) + // buf.SetConfig(10, 3) + // require.Equal(t, 4, buf.Enqueue(big.NewInt(1), + // 
logpoller.Log{BlockNumber: 15, TxHash: common.HexToHash("0x21"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 15, TxHash: common.HexToHash("0x21"), LogIndex: 1}, + // logpoller.Log{BlockNumber: 15, TxHash: common.HexToHash("0x21"), LogIndex: 2}, + // logpoller.Log{BlockNumber: 15, TxHash: common.HexToHash("0x21"), LogIndex: 3}, + // )) + + // results = buf.Dequeue(int64(1), int64(20), 1, 4) + // require.Equal(t, 3, len(results)) + + // for _, r := range results { + // require.Equal(t, int64(15), r.Log.BlockNumber) + // } + // }) + + // t.Run("enqueue logs overflow with dynamic limits", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 2, 10, 2) + + // require.Equal(t, 2, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 3}, + // )) + // buf.SetLimits(10, 3) + // require.Equal(t, 3, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 1}, + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 2}, + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 3}, + // )) + + // buf.lock.Lock() + // defer buf.lock.Unlock() + // require.Equal(t, 2, len(buf.blocks[0].logs)) + // }) + + // t.Run("enqueue block overflow", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 3, 2, 10) + + // require.Equal(t, 5, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 3, TxHash: 
common.HexToHash("0x3"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 1}, + // )) + // buf.lock.Lock() + // require.Equal(t, 2, len(buf.blocks[0].logs)) + // buf.lock.Unlock() + // }) + + // t.Run("enqueue upkeep block overflow", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 10, 10, 2) + + // require.Equal(t, 2, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 3}, + // )) + // buf.lock.Lock() + // require.Equal(t, 2, len(buf.blocks[0].logs)) + // buf.lock.Unlock() + // }) + + // t.Run("peek range after dequeue", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) + + // require.Equal(t, buf.enqueue(big.NewInt(10), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 11}, + // ), 2) + // require.Equal(t, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + // ), 2) + // results := buf.peekRange(int64(1), int64(2)) + // require.Equal(t, 2, len(results)) + // verifyBlockNumbers(t, results, 1, 2) + // removed := buf.dequeueRange(int64(1), int64(2), 2, 10) + // require.Equal(t, 2, len(removed)) + // results = buf.peekRange(int64(1), int64(2)) + // require.Equal(t, 0, len(results)) + // }) + + // t.Run("enqueue peek and dequeue", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 4, 10, 10) + + // require.Equal(t, 
buf.enqueue(big.NewInt(10), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 11}, + // ), 2) + // require.Equal(t, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + // ), 2) + // results := buf.peek(8) + // require.Equal(t, 4, len(results)) + // verifyBlockNumbers(t, results, 1, 2, 3, 3) + // removed := buf.dequeueRange(1, 3, 5, 5) + // require.Equal(t, 4, len(removed)) + // buf.lock.Lock() + // require.Equal(t, 0, len(buf.blocks[0].logs)) + // require.Equal(t, int64(2), buf.blocks[1].blockNumber) + // require.Equal(t, 1, len(buf.blocks[1].visited)) + // buf.lock.Unlock() + // }) + + // t.Run("enqueue and peek range circular", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) + + // require.Equal(t, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + // ), 3) + // require.Equal(t, buf.enqueue(big.NewInt(10), + // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 11}, + // ), 2) + + // results := buf.peekRange(int64(1), int64(1)) + // require.Equal(t, 0, len(results)) + + // results = buf.peekRange(int64(3), int64(5)) + // require.Equal(t, 3, len(results)) + // verifyBlockNumbers(t, results, 3, 4, 4) + // }) + + // t.Run("doesnt enqueue old blocks", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) + + // require.Equal(t, buf.enqueue(big.NewInt(10), + // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 
10}, + // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 11}, + // ), 2) + // require.Equal(t, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + // ), 2) + // results := buf.peekRange(int64(1), int64(5)) + // fmt.Println(results) + // verifyBlockNumbers(t, results, 2, 3, 4, 4) + // }) + + // t.Run("dequeue with limits returns latest block logs", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) + // require.Equal(t, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x5"), LogIndex: 0}, + // ), 5) + + // logs := buf.dequeueRange(1, 5, 2, 10) + // require.Equal(t, 2, len(logs)) + // require.Equal(t, int64(5), logs[0].log.BlockNumber) + // require.Equal(t, int64(4), logs[1].log.BlockNumber) + + // require.Equal(t, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 1}, + // logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x5"), LogIndex: 1}, + // ), 2) + + // logs = buf.dequeueRange(1, 5, 3, 2) + // require.Equal(t, 2, len(logs)) + // }) + + // t.Run("dequeue doesn't return same logs again", func(t *testing.T) { + // buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) + // require.Equal(t, buf.enqueue(big.NewInt(1), + // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), 
LogIndex: 0}, + // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + // ), 3) + + // logs := buf.dequeueRange(3, 3, 2, 10) + // fmt.Println(logs) + // require.Equal(t, 1, len(logs)) + + // logs = buf.dequeueRange(3, 3, 2, 10) + // fmt.Println(logs) + // require.Equal(t, 0, len(logs)) + // }) +} From 994a68857fe318f9ab6cfeccf4de979aef79d231 Mon Sep 17 00:00:00 2001 From: amirylm Date: Fri, 8 Mar 2024 17:05:14 +0200 Subject: [PATCH 03/58] integrate log buffer in the provider (wip) --- .../evmregistry/v21/logprovider/factory.go | 2 + .../evmregistry/v21/logprovider/provider.go | 69 +++++++++++++------ 2 files changed, 51 insertions(+), 20 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 2b48fec2b37..5c1d04f82f0 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -37,6 +37,8 @@ type LogTriggersOptions struct { BlockLimitBurst int // Finality depth is the number of blocks to wait before considering a block final. 
FinalityDepth int64 + + BufferVersion string } func NewOptions(finalityDepth int64) LogTriggersOptions { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index e06593a9109..e98989aa12b 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -99,6 +99,7 @@ type logEventProvider struct { filterStore UpkeepFilterStore buffer *logEventBuffer + bufferV2 LogBuffer opts LogTriggersOptions @@ -111,6 +112,7 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, packer LogDa lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), + bufferV2: NewLogBuffer(lggr, int(opts.LookbackBlocks), defaultFastExecLogsHigh), poller: poller, opts: opts, filterStore: filterStore, @@ -164,33 +166,55 @@ func (p *logEventProvider) GetLatestPayloads(ctx context.Context) ([]ocr2keepers return nil, fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } prommetrics.AutomationLogProviderLatestBlock.Set(float64(latest.BlockNumber)) - start := latest.BlockNumber - p.opts.LookbackBlocks + payloads := p.getPayloadsFromBuffer(latest.BlockNumber) + + return payloads, nil +} + +func (p *logEventProvider) createPayload(id *big.Int, log logpoller.Log) (ocr2keepers.UpkeepPayload, error) { + trig := logToTrigger(log) + checkData, err := p.packer.PackLogData(log) + if err != nil { + p.lggr.Warnw("failed to pack log data", "err", err, "log", log, "id", id) + return ocr2keepers.UpkeepPayload{}, err + } + payload, err := core.NewUpkeepPayload(id, trig, checkData) + if err != nil { + p.lggr.Warnw("failed to create upkeep payload", "err", err, "id", id, "trigger", trig, "checkData", checkData) + return ocr2keepers.UpkeepPayload{}, err + } + 
return payload, nil +} + +func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keepers.UpkeepPayload { + var payloads []ocr2keepers.UpkeepPayload + + start := latestBlock - p.opts.LookbackBlocks if start <= 0 { start = 1 } - logs := p.buffer.dequeueRange(start, latest.BlockNumber, AllowedLogsPerUpkeep, MaxPayloads) - // p.lggr.Debugw("got latest logs from buffer", "latest", latest, "diff", diff, "logs", len(logs)) - - var payloads []ocr2keepers.UpkeepPayload - for _, l := range logs { - log := l.log - trig := logToTrigger(log) - checkData, err := p.packer.PackLogData(log) - if err != nil { - p.lggr.Warnw("failed to pack log data", "err", err, "log", log) - continue + switch p.opts.BufferVersion { + case "v2": + blockRate, upkeepLimit, maxResults := 4, 10, MaxPayloads // TODO: use config + logs, _ := p.bufferV2.Dequeue(start, blockRate, upkeepLimit, maxResults, DefaultUpkeepSelector) + for _, l := range logs { + payload, err := p.createPayload(l.ID, l.Log) + if err == nil { + payloads = append(payloads, payload) + } } - payload, err := core.NewUpkeepPayload(l.upkeepID, trig, checkData) - if err != nil { - p.lggr.Warnw("failed to create upkeep payload", "err", err, "id", l.upkeepID, "trigger", trig, "checkData", checkData) - continue + default: + logs := p.buffer.dequeueRange(start, latestBlock, AllowedLogsPerUpkeep, MaxPayloads) + for _, l := range logs { + payload, err := p.createPayload(l.upkeepID, l.log) + if err == nil { + payloads = append(payloads, payload) + } } - - payloads = append(payloads, payload) } - return payloads, nil + return payloads } // ReadLogs fetches the logs for the given upkeeps. @@ -400,7 +424,12 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ filter.blockLimiter.SetBurst(p.opts.BlockLimitBurst) } - p.buffer.enqueue(filter.upkeepID, filteredLogs...) + switch p.opts.BufferVersion { + case "v2": + p.bufferV2.Enqueue(filter.upkeepID, filteredLogs...) 
+ default: + p.buffer.enqueue(filter.upkeepID, filteredLogs...) + } // Update the lastPollBlock for filter in slice this is then // updated into filter store in updateFiltersLastPoll From c976a89c477c64665e1714a80cfff7550bb974b2 Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 11 Mar 2024 14:22:16 +0200 Subject: [PATCH 04/58] review fixes: - comments - renaming - new(atomic.Intxx) - sizeOfWindow(end,start) --- .../evmregistry/v21/logprovider/buffer_v2.go | 70 ++++++++++++++----- .../evmregistry/v21/logprovider/recoverer.go | 4 +- 2 files changed, 56 insertions(+), 18 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go index f9c6e5cab46..18696cd41ae 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go @@ -52,15 +52,15 @@ type logBuffer struct { } func NewLogBuffer(lggr logger.Logger, size, upkeepLogLimit int) LogBuffer { - s := &atomic.Int32{} + s := new(atomic.Int32) s.Add(int32(size)) - l := &atomic.Int32{} + l := new(atomic.Int32) l.Add(int32(upkeepLogLimit)) return &logBuffer{ lggr: lggr.Named("KeepersRegistry.LogEventBufferV2"), maxUpkeepLogs: l, bufferSize: s, - lastBlockSeen: &atomic.Int64{}, + lastBlockSeen: new(atomic.Int64), upkeepBuffers: make(map[string]*upkeepLogBuffer), } } @@ -89,6 +89,9 @@ func (b *logBuffer) Size() int { return size } +// Enqueue adds logs to the buffer and might also drop logs if the limit for the +// given upkeep was exceeded. It will create a new buffer if it does not exist. +// Returns the number of logs that were added and number of logs that were dropped. 
func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { buf, ok := b.getUpkeepBuffer(uid) if !ok || buf == nil { @@ -103,17 +106,19 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { if offsetBlock <= 0 { offsetBlock = 1 } - added, dropped := buf.enqueue(offsetBlock, logs...) - - return added, dropped + return buf.enqueue(offsetBlock, logs...) } +// Dequeue greedly pulls logs from the buffers. +// Returns logs and the number of remaining logs in the buffer. func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { b.lock.RLock() defer b.lock.RUnlock() start, end := BlockWindow(block, blockRate) result, remaining := b.tryDequeue(start, end, upkeepLimit, maxResults, upkeepSelector) + // if there are still logs to pull, try to dequeue again + // TODO: check if we should limit the number of iterations for len(result) < maxResults && remaining > 0 { nextResults, nextRemaining := b.tryDequeue(start, end, upkeepLimit, maxResults-len(result), upkeepSelector) result = append(result, nextResults...) @@ -123,16 +128,20 @@ func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, return result, remaining } +// tryDequeue pulls logs from the buffers, according to the given selector, in block range [start,end] +// with minimum number of results per upkeep and the total capacity for results. +// Returns logs and the number of remaining logs in the buffer. 
func (b *logBuffer) tryDequeue(start, end int64, minUpkeepLogs, capacity int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { var result []BufferedLog var remainingLogs int for _, buf := range b.upkeepBuffers { if !upkeepSelector(buf.id) { + // if the upkeep is not selected, skip it continue } if capacity == 0 { // if there is no more capacity for results, just count the remaining logs - remainingLogs += buf.size() + remainingLogs += buf.sizeOfWindow(start, end) continue } if minUpkeepLogs > capacity { @@ -176,7 +185,7 @@ type upkeepLogBuffer struct { } func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, maxLogs int) *upkeepLogBuffer { - limit := &atomic.Int32{} + limit := new(atomic.Int32) limit.Add(int32(maxLogs)) return &upkeepLogBuffer{ lggr: lggr.With("id", id.String()), @@ -191,6 +200,7 @@ func (ub *upkeepLogBuffer) setConfig(maxLogs int) { ub.maxLogs.Store(int32(maxLogs)) } +// size returns the total number of logs in the buffer. func (ub *upkeepLogBuffer) size() int { ub.lock.RLock() defer ub.lock.RUnlock() @@ -198,6 +208,22 @@ func (ub *upkeepLogBuffer) size() int { return len(ub.q) } +// size returns the total number of logs in the buffer. +func (ub *upkeepLogBuffer) sizeOfWindow(start, end int64) int { + ub.lock.RLock() + defer ub.lock.RUnlock() + + size := 0 + for _, l := range ub.q { + if l.BlockNumber >= start && l.BlockNumber <= end { + size++ + } + } + return size +} + +// dequeue pulls logs from the buffer that are within the given block range, +// with a limit of logs to pull. Returns logs and the number of remaining logs in the buffer. 
func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log, int) { ub.lock.Lock() defer ub.lock.Unlock() @@ -229,26 +255,33 @@ func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log return results, remaining } -func (ub *upkeepLogBuffer) enqueue(offsetBlock int64, logsToAdd ...logpoller.Log) (int, int) { +// enqueue adds logs to the buffer and might also drop logs if the limit for the +// given upkeep was exceeded. Additionally, it will drop logs that are older than blockThreshold. +// Returns the number of logs that were added and number of logs that were dropped. +func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller.Log) (int, int) { ub.lock.Lock() defer ub.lock.Unlock() logs := ub.q var added int for _, log := range logsToAdd { - if log.BlockNumber < offsetBlock { - ub.lggr.Debugw("Skipping log from old block", "offsetBlock", offsetBlock, "logBlock", log.BlockNumber) + if log.BlockNumber < blockThreshold { + ub.lggr.Debugw("Skipping log from old block", "offsetBlock", blockThreshold, "logBlock", log.BlockNumber) continue } logid := logID(log) if _, ok := ub.visited[logid]; ok { - ub.lggr.Debugw("Skipping known log", "offsetBlock", offsetBlock, "logBlock", log.BlockNumber) + ub.lggr.Debugw("Skipping known log", "offsetBlock", blockThreshold, "logBlock", log.BlockNumber) continue } added++ if len(logs) == 0 { + // if the buffer is empty, just add the log logs = append(logs, log) } else { + // otherwise, find the right index to insert the log + // to keep the buffer sorted + // TODO: check what is better: 1. maintain sorted slice; 2. 
sort once at the end i, _ := sort.Find(len(logs), func(i int) int { return LogComparator(log, logs[i]) }) @@ -264,10 +297,12 @@ func (ub *upkeepLogBuffer) enqueue(offsetBlock int64, logsToAdd ...logpoller.Log prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) - return added, ub.clean(offsetBlock) + return added, ub.clean(blockThreshold) } -func (ub *upkeepLogBuffer) clean(offsetBlock int64) int { +// clean removes logs that are older than blockThreshold and drops logs if the limit for the +// given upkeep was exceeded. Returns the number of logs that were dropped. +func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { maxLogs := int(ub.maxLogs.Load()) // sort.SliceStable(updated, func(i, j int) bool { @@ -276,7 +311,7 @@ func (ub *upkeepLogBuffer) clean(offsetBlock int64) int { updated := make([]logpoller.Log, 0, maxLogs) var dropped int for _, l := range ub.q { - if l.BlockNumber > offsetBlock { + if l.BlockNumber > blockThreshold { if len(updated) > maxLogs-1 { prommetrics.AutomationLogsInLogBuffer.Dec() // TODO: check if we should clean visited as well @@ -296,7 +331,7 @@ func (ub *upkeepLogBuffer) clean(offsetBlock int64) int { ub.q = updated for lid, block := range ub.visited { - if block <= offsetBlock { + if block <= blockThreshold { delete(ub.visited, lid) } } @@ -304,6 +339,8 @@ func (ub *upkeepLogBuffer) clean(offsetBlock int64) int { return dropped } +// logID returns a unique identifier for a log, which is an hex string +// of ocr2keepers.LogTriggerExtension.LogIdentifier() func logID(l logpoller.Log) string { ext := ocr2keepers.LogTriggerExtension{ Index: uint32(l.LogIndex), @@ -313,6 +350,7 @@ func logID(l logpoller.Log) string { return hex.EncodeToString(ext.LogIdentifier()) } +// latestBlockNumber returns the latest block number from the given logs func latestBlockNumber(logs ...logpoller.Log) int64 { var latest int64 for _, l := range logs { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go index 2eef5db17d9..a2a8fd1c961 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go @@ -101,8 +101,8 @@ func NewLogRecoverer(lggr logger.Logger, poller logpoller.LogPoller, client clie threadCtrl: utils.NewThreadControl(), - blockTime: &atomic.Int64{}, - lookbackBlocks: &atomic.Int64{}, + blockTime: new(atomic.Int64), + lookbackBlocks: new(atomic.Int64), interval: opts.ReadInterval * 5, pending: make([]ocr2keepers.UpkeepPayload, 0), From 255a53c88c35dde119f1444fe0ca9a74c5c23763 Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 11 Mar 2024 15:38:39 +0200 Subject: [PATCH 05/58] test fix --- .../evmregistry/v21/logprovider/buffer_v2.go | 6 +++--- .../evmregistry/v21/logprovider/buffer_v2_test.go | 13 ++++++++++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go index 18696cd41ae..9eada7d93c2 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go @@ -312,14 +312,14 @@ func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { var dropped int for _, l := range ub.q { if l.BlockNumber > blockThreshold { - if len(updated) > maxLogs-1 { + if len(updated) < maxLogs { + updated = append(updated, l) + } else { prommetrics.AutomationLogsInLogBuffer.Dec() // TODO: check if we should clean visited as well ub.lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", l.BlockNumber, "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), "maxLogs", maxLogs) dropped++ - } else { - updated = append(updated, l) } } else { 
prommetrics.AutomationLogsInLogBuffer.Dec() diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go index d1419b304d2..ac6a8b83548 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go @@ -26,14 +26,21 @@ func TestLogEventBufferV2_Clean(t *testing.T) { logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, ) buf.Enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 0}, - logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 1}, + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x111"), LogIndex: 0}, + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x111"), LogIndex: 1}, ) upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) require.True(t, ok) require.Equal(t, 4, upkeepBuf.size()) - require.Equal(t, 0, upkeepBuf.clean(10)) + + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 0}, + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 1}, + ) + + require.Equal(t, 4, upkeepBuf.size()) + require.Equal(t, 0, upkeepBuf.clean(12)) require.Equal(t, 2, upkeepBuf.size()) }) } From b70c09e1abecb0df844964f89a6a40d9557d780e Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 11 Mar 2024 17:10:48 +0200 Subject: [PATCH 06/58] clear unused function --- .../evmregistry/v21/logprovider/buffer_v2.go | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go index 9eada7d93c2..fc09d4f1b7f 100644 --- 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go @@ -28,8 +28,6 @@ type LogBuffer interface { // Returns logs (associated to upkeeps) and the number of remaining // logs in that window for the involved upkeeps. Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) - // Size returns the total number of logs in the buffer. - Size() int // SetConfig sets the buffer size and the maximum number of logs to keep for each upkeep. SetConfig(lookback, maxUpkeepLogs int) } @@ -77,18 +75,6 @@ func (b *logBuffer) SetConfig(lookback, logLimitHigh int) { } } -func (b *logBuffer) Size() int { - b.lock.RLock() - defer b.lock.RUnlock() - - size := 0 - for _, ub := range b.upkeepBuffers { - size += ub.size() - } - - return size -} - // Enqueue adds logs to the buffer and might also drop logs if the limit for the // given upkeep was exceeded. It will create a new buffer if it does not exist. // Returns the number of logs that were added and number of logs that were dropped. 
From c6a1ff1f10fc318dbb71daaa8b2ce152bbf031fd Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 11 Mar 2024 20:13:36 +0200 Subject: [PATCH 07/58] provider integration tests for multiple versions of the buffer --- .../evmregistry/v21/logprovider/buffer_v2.go | 12 +- .../v21/logprovider/integration_test.go | 236 ++++++++++-------- .../evmregistry/v21/logprovider/provider.go | 13 +- 3 files changed, 147 insertions(+), 114 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go index fc09d4f1b7f..6f0075da2a3 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go @@ -88,11 +88,11 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { if b.lastBlockSeen.Load() < lastBlockSeen { b.lastBlockSeen.Store(lastBlockSeen) } - offsetBlock := b.lastBlockSeen.Load() - int64(b.bufferSize.Load()) - if offsetBlock <= 0 { - offsetBlock = 1 + blockThreshold := b.lastBlockSeen.Load() - int64(b.bufferSize.Load()) + if blockThreshold <= 0 { + blockThreshold = 1 } - return buf.enqueue(offsetBlock, logs...) + return buf.enqueue(blockThreshold, logs...) } // Dequeue greedly pulls logs from the buffers. @@ -252,12 +252,12 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. 
var added int for _, log := range logsToAdd { if log.BlockNumber < blockThreshold { - ub.lggr.Debugw("Skipping log from old block", "offsetBlock", blockThreshold, "logBlock", log.BlockNumber) + ub.lggr.Debugw("Skipping log from old block", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber) continue } logid := logID(log) if _, ok := ub.visited[logid]; ok { - ub.lggr.Debugw("Skipping known log", "offsetBlock", blockThreshold, "logBlock", log.BlockNumber) + ub.lggr.Debugw("Skipping known log", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber) continue } added++ diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index aa8a5c97d70..038ca460c5a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -39,90 +39,105 @@ import ( ) func TestIntegration_LogEventProvider(t *testing.T) { - ctx, cancel := context.WithCancel(testutils.Context(t)) - defer cancel() + tests := []struct { + name string + version string + }{ + {"default version", ""}, + {"v2", "v2"}, + } - backend, stopMining, accounts := setupBackend(t) - defer stopMining() - carrol := accounts[2] + for _, tc := range tests { + bufferVersion := tc.version + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() - db := setupDB(t) - defer db.Close() + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] - opts := logprovider.NewOptions(200) - opts.ReadInterval = time.Second / 2 - lp, ethClient := setupDependencies(t, db, backend) - filterStore := logprovider.NewUpkeepFilterStore() - provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) - logProvider := provider.(logprovider.LogEventProviderTest) + db := 
setupDB(t) + defer db.Close() - n := 10 + opts := logprovider.NewOptions(200) + opts.ReadInterval = time.Second / 2 + opts.BufferVersion = bufferVersion - backend.Commit() - lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) + logProvider := provider.(logprovider.LogEventProviderTest) - ids, addrs, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) - lp.PollAndSaveLogs(ctx, int64(n)) + n := 10 - go func() { - if err := logProvider.Start(ctx); err != nil { - t.Logf("error starting log provider: %s", err) - t.Fail() - } - }() - defer logProvider.Close() + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - logsRounds := 10 + ids, addrs, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) + lp.PollAndSaveLogs(ctx, int64(n)) - poll := pollFn(ctx, t, lp, ethClient) + go func() { + if err := logProvider.Start(ctx); err != nil { + t.Logf("error starting log provider: %s", err) + t.Fail() + } + }() + defer logProvider.Close() - triggerEvents(ctx, t, backend, carrol, logsRounds, poll, contracts...) + logsRounds := 10 - poll(backend.Commit()) + poll := pollFn(ctx, t, lp, ethClient) - waitLogPoller(ctx, t, backend, lp, ethClient) + triggerEvents(ctx, t, backend, carrol, logsRounds, poll, contracts...) 
- waitLogProvider(ctx, t, logProvider, 3) + poll(backend.Commit()) - allPayloads := collectPayloads(ctx, t, logProvider, n, 5) - require.GreaterOrEqual(t, len(allPayloads), n, - "failed to get logs after restart") + waitLogPoller(ctx, t, backend, lp, ethClient) - t.Run("Restart", func(t *testing.T) { - t.Log("restarting log provider") - // assuming that our service was closed and restarted, - // we should be able to backfill old logs and fetch new ones - filterStore := logprovider.NewUpkeepFilterStore() - logProvider2 := logprovider.NewLogProvider(logger.TestLogger(t), lp, logprovider.NewLogEventsPacker(), filterStore, opts) + waitLogProvider(ctx, t, logProvider, 3) - poll(backend.Commit()) - go func() { - if err2 := logProvider2.Start(ctx); err2 != nil { - t.Logf("error starting log provider: %s", err2) - t.Fail() - } - }() - defer logProvider2.Close() - - // re-register filters - for i, id := range ids { - err := logProvider2.RegisterFilter(ctx, logprovider.FilterOptions{ - UpkeepID: id, - TriggerConfig: newPlainLogTriggerConfig(addrs[i]), - // using block number at which the upkeep was registered, - // before we emitted any logs - UpdateBlock: uint64(n), - }) - require.NoError(t, err) - } + allPayloads := collectPayloads(ctx, t, logProvider, n, 5) + require.GreaterOrEqual(t, len(allPayloads), n, + "failed to get logs after restart") - waitLogProvider(ctx, t, logProvider2, 2) + t.Run("Restart", func(t *testing.T) { + t.Log("restarting log provider") + // assuming that our service was closed and restarted, + // we should be able to backfill old logs and fetch new ones + filterStore := logprovider.NewUpkeepFilterStore() + logProvider2 := logprovider.NewLogProvider(logger.TestLogger(t), lp, logprovider.NewLogEventsPacker(), filterStore, opts) - t.Log("getting logs after restart") - logsAfterRestart := collectPayloads(ctx, t, logProvider2, n, 5) - require.GreaterOrEqual(t, len(logsAfterRestart), n, - "failed to get logs after restart") - }) + poll(backend.Commit()) + 
go func() { + if err2 := logProvider2.Start(ctx); err2 != nil { + t.Logf("error starting log provider: %s", err2) + t.Fail() + } + }() + defer logProvider2.Close() + + // re-register filters + for i, id := range ids { + err := logProvider2.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: newPlainLogTriggerConfig(addrs[i]), + // using block number at which the upkeep was registered, + // before we emitted any logs + UpdateBlock: uint64(n), + }) + require.NoError(t, err) + } + + waitLogProvider(ctx, t, logProvider2, 2) + + t.Log("getting logs after restart") + logsAfterRestart := collectPayloads(ctx, t, logProvider2, n, 5) + require.GreaterOrEqual(t, len(logsAfterRestart), n, + "failed to get logs after restart") + }) + }) + } } func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { @@ -200,53 +215,68 @@ func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { } func TestIntegration_LogEventProvider_Backfill(t *testing.T) { - ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) - defer cancel() + tests := []struct { + name string + bufferVersion string + }{ + {"default version", ""}, + {"v2", "v2"}, + } - backend, stopMining, accounts := setupBackend(t) - defer stopMining() - carrol := accounts[2] + for _, tc := range tests { + bufferVersion := tc.bufferVersion + t.Run(tc.name, func(t *testing.T) { - db := setupDB(t) - defer db.Close() + ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) + defer cancel() - opts := logprovider.NewOptions(200) - opts.ReadInterval = time.Second / 4 - lp, ethClient := setupDependencies(t, db, backend) - filterStore := logprovider.NewUpkeepFilterStore() - provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) - logProvider := provider.(logprovider.LogEventProviderTest) + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] - n := 10 + db := setupDB(t) + defer db.Close() - 
backend.Commit() - lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - _, _, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) + opts := logprovider.NewOptions(200) + opts.ReadInterval = time.Second / 4 + opts.BufferVersion = bufferVersion + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) + logProvider := provider.(logprovider.LogEventProviderTest) - poll := pollFn(ctx, t, lp, ethClient) + n := 10 - rounds := 8 - for i := 0; i < rounds; i++ { - poll(backend.Commit()) - triggerEvents(ctx, t, backend, carrol, n, poll, contracts...) - poll(backend.Commit()) - } + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + _, _, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) - waitLogPoller(ctx, t, backend, lp, ethClient) + poll := pollFn(ctx, t, lp, ethClient) - // starting the log provider should backfill logs - go func() { - if startErr := logProvider.Start(ctx); startErr != nil { - t.Logf("error starting log provider: %s", startErr) - t.Fail() - } - }() - defer logProvider.Close() + rounds := 8 + for i := 0; i < rounds; i++ { + poll(backend.Commit()) + triggerEvents(ctx, t, backend, carrol, n, poll, contracts...) 
+ poll(backend.Commit()) + } - waitLogProvider(ctx, t, logProvider, 3) + waitLogPoller(ctx, t, backend, lp, ethClient) - allPayloads := collectPayloads(ctx, t, logProvider, n, 5) - require.GreaterOrEqual(t, len(allPayloads), len(contracts), "failed to backfill logs") + // starting the log provider should backfill logs + go func() { + if startErr := logProvider.Start(ctx); startErr != nil { + t.Logf("error starting log provider: %s", startErr) + t.Fail() + } + }() + defer logProvider.Close() + + waitLogProvider(ctx, t, logProvider, 3) + + allPayloads := collectPayloads(ctx, t, logProvider, n*rounds, 5) + require.GreaterOrEqual(t, len(allPayloads), len(contracts), "failed to backfill logs") + }) + } } func TestIntegration_LogEventProvider_RateLimit(t *testing.T) { @@ -535,7 +565,7 @@ func collectPayloads(ctx context.Context, t *testing.T, logProvider logprovider. for ctx.Err() == nil && len(allPayloads) < n && rounds > 0 { logs, err := logProvider.GetLatestPayloads(ctx) require.NoError(t, err) - require.LessOrEqual(t, len(logs), logprovider.AllowedLogsPerUpkeep, "failed to get all logs") + require.LessOrEqual(t, len(logs), n, "failed to get all logs") allPayloads = append(allPayloads, logs...) 
rounds-- } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index e98989aa12b..942434547c9 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -197,12 +197,15 @@ func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keeper switch p.opts.BufferVersion { case "v2": blockRate, upkeepLimit, maxResults := 4, 10, MaxPayloads // TODO: use config - logs, _ := p.bufferV2.Dequeue(start, blockRate, upkeepLimit, maxResults, DefaultUpkeepSelector) - for _, l := range logs { - payload, err := p.createPayload(l.ID, l.Log) - if err == nil { - payloads = append(payloads, payload) + for len(payloads) < MaxPayloads && start < latestBlock { + logs, _ := p.bufferV2.Dequeue(start, blockRate, upkeepLimit, maxResults-len(payloads), DefaultUpkeepSelector) + for _, l := range logs { + payload, err := p.createPayload(l.ID, l.Log) + if err == nil { + payloads = append(payloads, payload) + } } + start += int64(blockRate) } default: logs := p.buffer.dequeueRange(start, latestBlock, AllowedLogsPerUpkeep, MaxPayloads) From 856c7d62644037cce8c041382b95c36ddbd7d2dc Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 12 Mar 2024 13:55:26 +0200 Subject: [PATCH 08/58] rename to v1 --- .../evmregistry/v21/logprovider/{buffer_v2.go => buffer_v1.go} | 0 .../v21/logprovider/{buffer_v2_test.go => buffer_v1_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/{buffer_v2.go => buffer_v1.go} (100%) rename core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/{buffer_v2_test.go => buffer_v1_test.go} (100%) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go similarity index 100% rename from core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2.go rename to core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go similarity index 100% rename from core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v2_test.go rename to core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go From 6e10f71687838fc075128682e4b3f8825a8bd1bb Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 12 Mar 2024 19:20:35 +0200 Subject: [PATCH 09/58] fix v1 and add logs --- .../evmregistry/v21/logprovider/buffer_v1.go | 8 ++++++-- .../evmregistry/v21/logprovider/integration_test.go | 4 ++-- .../evmregistry/v21/logprovider/provider.go | 13 ++++++++++--- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 6f0075da2a3..dbd131af919 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -55,7 +55,7 @@ func NewLogBuffer(lggr logger.Logger, size, upkeepLogLimit int) LogBuffer { l := new(atomic.Int32) l.Add(int32(upkeepLogLimit)) return &logBuffer{ - lggr: lggr.Named("KeepersRegistry.LogEventBufferV2"), + lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), maxUpkeepLogs: l, bufferSize: s, lastBlockSeen: new(atomic.Int64), @@ -236,6 +236,7 @@ func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log ub.q = updatedLogs } + ub.lggr.Debugf("Dequeued %d logs, remaining %d", len(results), 
remaining) prommetrics.AutomationLogsInLogBuffer.Sub(float64(len(results))) return results, remaining @@ -280,10 +281,12 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. ub.visited[logid] = log.BlockNumber } ub.q = logs + dropped := ub.clean(blockThreshold) + ub.lggr.Debugf("Enqueued %d logs, dropped %d", added, dropped) prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) - return added, ub.clean(blockThreshold) + return added, dropped } // clean removes logs that are older than blockThreshold and drops logs if the limit for the @@ -310,6 +313,7 @@ func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { } else { prommetrics.AutomationLogsInLogBuffer.Dec() // old logs are ignored and removed from visited + ub.lggr.Debugw("Dropping old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) logid := logID(l) delete(ub.visited, logid) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 038ca460c5a..c1c153a0997 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -44,7 +44,7 @@ func TestIntegration_LogEventProvider(t *testing.T) { version string }{ {"default version", ""}, - {"v2", "v2"}, + {"v1", "v1"}, } for _, tc := range tests { @@ -220,7 +220,7 @@ func TestIntegration_LogEventProvider_Backfill(t *testing.T) { bufferVersion string }{ {"default version", ""}, - {"v2", "v2"}, + {"v1", "v1"}, } for _, tc := range tests { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 942434547c9..b7ca44fc3b8 100644 --- 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -168,6 +168,10 @@ func (p *logEventProvider) GetLatestPayloads(ctx context.Context) ([]ocr2keepers prommetrics.AutomationLogProviderLatestBlock.Set(float64(latest.BlockNumber)) payloads := p.getPayloadsFromBuffer(latest.BlockNumber) + if len(payloads) > 0 { + p.lggr.Debugw("Fetched payloads from buffer xxx", "latestBlock", latest.BlockNumber, "payloads", len(payloads)) + } + return payloads, nil } @@ -195,10 +199,13 @@ func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keeper } switch p.opts.BufferVersion { - case "v2": + case "v1": blockRate, upkeepLimit, maxResults := 4, 10, MaxPayloads // TODO: use config - for len(payloads) < MaxPayloads && start < latestBlock { + for len(payloads) < maxResults && start < latestBlock { logs, _ := p.bufferV2.Dequeue(start, blockRate, upkeepLimit, maxResults-len(payloads), DefaultUpkeepSelector) + if len(logs) > 0 { + p.lggr.Debugw("Dequeued logs xxx", "start", start, "latestBlock", latestBlock, "logs", len(logs)) + } for _, l := range logs { payload, err := p.createPayload(l.ID, l.Log) if err == nil { @@ -428,7 +435,7 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ } switch p.opts.BufferVersion { - case "v2": + case "v1": p.bufferV2.Enqueue(filter.upkeepID, filteredLogs...) default: p.buffer.enqueue(filter.upkeepID, filteredLogs...) 
From 1e6b8524ff05827fe9b020bd8e9ec8de2a3f4627 Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 12 Mar 2024 19:21:14 +0200 Subject: [PATCH 10/58] temp enablement of v1 buffer --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 5c1d04f82f0..1d5ed590603 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -17,6 +17,9 @@ func New(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateS filterStore := NewUpkeepFilterStore() packer := NewLogEventsPacker() opts := NewOptions(int64(finalityDepth)) + if len(opts.BufferVersion) == 0 { // TODO: remove once config is ready + opts.BufferVersion = "v1" + } provider := NewLogProvider(lggr, poller, packer, filterStore, opts) recoverer := NewLogRecoverer(lggr, poller, c, stateStore, packer, filterStore, opts) From e8d22661bb7568679786b2bae6081af4d9a78980 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 13 Mar 2024 10:47:14 +0200 Subject: [PATCH 11/58] added unit tests --- .../evmregistry/v21/logprovider/buffer_v1.go | 5 ++- .../v21/logprovider/buffer_v1_test.go | 34 +++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index dbd131af919..bd7d2b66570 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -283,7 +283,7 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. 
ub.q = logs dropped := ub.clean(blockThreshold) - ub.lggr.Debugf("Enqueued %d logs, dropped %d", added, dropped) + ub.lggr.Debugf("Enqueued %d logs, dropped %d with blockThreshold %d", added, dropped, blockThreshold) prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) return added, dropped @@ -318,6 +318,9 @@ func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { delete(ub.visited, logid) } } + + ub.lggr.Debugw("Cleaned logs", "dropped", dropped, "blockThreshold", blockThreshold, "len updated", len(updated), "len ub.q", len(ub.q), "maxLogs", maxLogs) + ub.q = updated for lid, block := range ub.visited { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index ac6a8b83548..1b63a3ece45 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -68,6 +68,40 @@ func TestLogEventBufferV2_EnqueueDequeue(t *testing.T) { require.Equal(t, 2, upkeepBuf.size()) }) + t.Run("enqueue upkeeps limits", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 3, 2) + + added, dropped := buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 9, TxHash: common.HexToHash("0x9"), LogIndex: 0}, + logpoller.Log{BlockNumber: 9, TxHash: common.HexToHash("0x9"), LogIndex: 1}, + logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x10"), LogIndex: 0}, + logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x10"), LogIndex: 1}, + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 1}, + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 2}, + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 3}, + ) + require.Equal(t, 7, added) + require.Equal(t, 1, dropped) + upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) + 
require.True(t, ok) + require.Equal(t, 6, upkeepBuf.size()) + }) + + t.Run("enqueue out of block range", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 5, 4) + + added, dropped := buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x10"), LogIndex: 0}, + logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x10"), LogIndex: 1}, + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 1}, + ) + require.Equal(t, 2, added) + require.Equal(t, 0, dropped) + upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) + require.True(t, ok) + require.Equal(t, 2, upkeepBuf.size()) + }) + t.Run("enqueue dequeue", func(t *testing.T) { buf := NewLogBuffer(logger.TestLogger(t), 10, 10) From 5163a627450aa7225b2030cccdc12901f4afb14c Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 13 Mar 2024 15:11:24 +0200 Subject: [PATCH 12/58] fix slice alloc --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index bd7d2b66570..7f9e64f2b0a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -220,7 +220,7 @@ func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log var results []logpoller.Log var remaining int - updatedLogs := make([]logpoller.Log, 0, ub.maxLogs.Load()) + updatedLogs := make([]logpoller.Log, 0) for _, l := range ub.q { if l.BlockNumber >= start && l.BlockNumber <= end { if len(results) < limit { From 476e3e9112388793d09dac7f5d93e35644327b56 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 13 Mar 2024 16:17:31 +0200 Subject: [PATCH 13/58] avoid alloc with cap --- 
.../ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 7f9e64f2b0a..e1cbf3bc2c5 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -281,7 +281,11 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. ub.visited[logid] = log.BlockNumber } ub.q = logs - dropped := ub.clean(blockThreshold) + + var dropped int + if added > 0 { + dropped = ub.clean(blockThreshold) + } ub.lggr.Debugf("Enqueued %d logs, dropped %d with blockThreshold %d", added, dropped, blockThreshold) prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) @@ -297,7 +301,7 @@ func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { // sort.SliceStable(updated, func(i, j int) bool { // return LogSorter(updated[i], updated[j]) // }) - updated := make([]logpoller.Log, 0, maxLogs) + updated := make([]logpoller.Log, 0) var dropped int for _, l := range ub.q { if l.BlockNumber > blockThreshold { From 418db923f3b01ff50719812fbf260082fd3d6248 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 13 Mar 2024 16:48:39 +0200 Subject: [PATCH 14/58] tweak limit values --- .../evmregistry/v21/logprovider/buffer_v1.go | 4 ++++ .../ocr2keeper/evmregistry/v21/logprovider/provider.go | 10 +++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index e1cbf3bc2c5..ba747d3efe1 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ 
-13,6 +13,10 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics" ) +const ( + defaultLogLimitHigh = 10 +) + type BufferedLog struct { ID *big.Int Log logpoller.Log diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index b7ca44fc3b8..eaefdee2250 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -99,7 +99,7 @@ type logEventProvider struct { filterStore UpkeepFilterStore buffer *logEventBuffer - bufferV2 LogBuffer + bufferV1 LogBuffer opts LogTriggersOptions @@ -112,7 +112,7 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, packer LogDa lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), - bufferV2: NewLogBuffer(lggr, int(opts.LookbackBlocks), defaultFastExecLogsHigh), + bufferV1: NewLogBuffer(lggr, int(opts.LookbackBlocks), defaultLogLimitHigh), poller: poller, opts: opts, filterStore: filterStore, @@ -200,9 +200,9 @@ func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keeper switch p.opts.BufferVersion { case "v1": - blockRate, upkeepLimit, maxResults := 4, 10, MaxPayloads // TODO: use config + blockRate, upkeepLowLimit, maxResults := 4, 6, MaxPayloads // TODO: use config for len(payloads) < maxResults && start < latestBlock { - logs, _ := p.bufferV2.Dequeue(start, blockRate, upkeepLimit, maxResults-len(payloads), DefaultUpkeepSelector) + logs, _ := p.bufferV1.Dequeue(start, blockRate, upkeepLowLimit, maxResults-len(payloads), DefaultUpkeepSelector) if len(logs) > 0 { p.lggr.Debugw("Dequeued logs xxx", "start", start, "latestBlock", latestBlock, "logs", len(logs)) } @@ -436,7 +436,7 @@ 
func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ switch p.opts.BufferVersion { case "v1": - p.bufferV2.Enqueue(filter.upkeepID, filteredLogs...) + p.bufferV1.Enqueue(filter.upkeepID, filteredLogs...) default: p.buffer.enqueue(filter.upkeepID, filteredLogs...) } From 39f97669455c475a7fa93ba239688f8de3e0a2f8 Mon Sep 17 00:00:00 2001 From: amirylm Date: Thu, 14 Mar 2024 11:54:45 +0200 Subject: [PATCH 15/58] config and handle remaining --- .../evmregistry/v21/logprovider/factory.go | 17 +++++++++++++++++ .../evmregistry/v21/logprovider/provider.go | 17 +++++++++++------ 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 1d5ed590603..2a5f4f11aad 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -41,7 +41,15 @@ type LogTriggersOptions struct { // Finality depth is the number of blocks to wait before considering a block final. 
FinalityDepth int64 + // v1 config + BufferVersion string + + LogLimitLow int32 + + LogLimitHigh int32 + + BlockRate int64 } func NewOptions(finalityDepth int64) LogTriggersOptions { @@ -72,4 +80,13 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) { if o.FinalityDepth == 0 { o.FinalityDepth = finalityDepth } + if o.BlockRate == 0 { + o.BlockRate = 1 + } + if o.LogLimitLow == 0 { + o.LogLimitLow = 5 + } + if o.LogLimitHigh == 0 { + o.LogLimitHigh = o.LogLimitLow * 2 + } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index eaefdee2250..9650003316a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -112,7 +112,7 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, packer LogDa lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), - bufferV1: NewLogBuffer(lggr, int(opts.LookbackBlocks), defaultLogLimitHigh), + bufferV1: NewLogBuffer(lggr, int(opts.LookbackBlocks), int(opts.LogLimitHigh)), poller: poller, opts: opts, filterStore: filterStore, @@ -169,7 +169,7 @@ func (p *logEventProvider) GetLatestPayloads(ctx context.Context) ([]ocr2keepers payloads := p.getPayloadsFromBuffer(latest.BlockNumber) if len(payloads) > 0 { - p.lggr.Debugw("Fetched payloads from buffer xxx", "latestBlock", latest.BlockNumber, "payloads", len(payloads)) + p.lggr.Debugw("Fetched payloads from buffer", "latestBlock", latest.BlockNumber, "payloads", len(payloads)) } return payloads, nil @@ -200,11 +200,11 @@ func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keeper switch p.opts.BufferVersion { case "v1": - blockRate, upkeepLowLimit, maxResults := 4, 6, MaxPayloads // 
TODO: use config - for len(payloads) < maxResults && start < latestBlock { - logs, _ := p.bufferV1.Dequeue(start, blockRate, upkeepLowLimit, maxResults-len(payloads), DefaultUpkeepSelector) + blockRate, logLimitLow, maxResults := int(p.opts.BlockRate), int(p.opts.LogLimitLow), MaxPayloads + for len(payloads) < maxResults && start <= latestBlock { + logs, remaining := p.bufferV1.Dequeue(start, blockRate, logLimitLow, maxResults-len(payloads), DefaultUpkeepSelector) if len(logs) > 0 { - p.lggr.Debugw("Dequeued logs xxx", "start", start, "latestBlock", latestBlock, "logs", len(logs)) + p.lggr.Debugw("Dequeued logs", "start", start, "latestBlock", latestBlock, "logs", len(logs)) } for _, l := range logs { payload, err := p.createPayload(l.ID, l.Log) @@ -212,6 +212,11 @@ func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keeper payloads = append(payloads, payload) } } + if remaining > 0 { + p.lggr.Debugw("Remaining logs", "start", start, "latestBlock", latestBlock, "remaining", remaining) + // TODO: handle remaining logs in a better way than consuming the entire window + continue + } start += int64(blockRate) } default: From 7dfb7b94f841cf6d09cc77894da6e09f4677ad57 Mon Sep 17 00:00:00 2001 From: amirylm Date: Thu, 14 Mar 2024 13:25:41 +0200 Subject: [PATCH 16/58] trying different config --- .../ocr2keeper/evmregistry/v21/logprovider/factory.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 2a5f4f11aad..6944152afd4 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -81,12 +81,12 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) { o.FinalityDepth = finalityDepth } if o.BlockRate == 0 { - o.BlockRate = 1 + o.BlockRate = 2 } if 
o.LogLimitLow == 0 { - o.LogLimitLow = 5 + o.LogLimitLow = 2 } if o.LogLimitHigh == 0 { - o.LogLimitHigh = o.LogLimitLow * 2 + o.LogLimitHigh = 5 // o.LogLimitHigh * 2 } } From cd92d92aa6f9105ee4433d917746708b983c56de Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 19 Mar 2024 13:58:13 +0200 Subject: [PATCH 17/58] fix comparator and block window --- .../ocr2keeper/evmregistry/v21/logprovider/window.go | 10 +++++----- .../evmregistry/v21/logprovider/window_test.go | 7 +++++++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go index 8042fe5cb05..ba6a154df2e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go @@ -11,7 +11,7 @@ func BlockWindow(block int64, blockRate int) (start int64, end int64) { return block, block } start = block - (block % windowSize) - end = block + (windowSize - (block % windowSize) - 1) + end = start + windowSize - 1 return } @@ -29,11 +29,11 @@ func LogSorter(a, b logpoller.Log) bool { // 0 if a == b // +1 if a > b func LogComparator(a, b logpoller.Log) int { - if b.BlockNumber != a.BlockNumber { + if a.BlockNumber != b.BlockNumber { return int(a.BlockNumber - b.BlockNumber) } - if txDiff := a.TxHash.Big().Cmp(b.TxHash.Big()); txDiff != 0 { - return txDiff - } + // if txDiff := a.TxHash.Big().Cmp(b.TxHash.Big()); txDiff != 0 { + // return txDiff + // } return int(a.LogIndex - b.LogIndex) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go index 4a1dadfc410..bd63e74c42c 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go @@ -24,6 +24,13 @@ func TestBlockWindow(t *testing.T) { wantStart: 0, wantEnd: 0, }, + { + name: "block 81, blockRate 1", + block: 81, + blockRate: 1, + wantStart: 81, + wantEnd: 81, + }, { name: "block 0, blockRate 4", block: 0, From 55dd03656877b8a28b5fac124dea837bf9c670c6 Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 19 Mar 2024 13:59:39 +0200 Subject: [PATCH 18/58] fix redundant import --- .../ocr2keeper/evmregistry/v21/logprovider/integration_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index c1c153a0997..e3454b6b84c 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -23,7 +23,6 @@ import ( ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" @@ -702,7 +701,7 @@ func setupDependencies(t *testing.T, db *sqlx.DB, backend *backends.SimulatedBac return lp, ethClient } -func setup(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateStore evmregistry21.UpkeepStateReader, filterStore logprovider.UpkeepFilterStore, opts *logprovider.LogTriggersOptions) (logprovider.LogEventProvider, logprovider.LogRecoverer) { +func setup(lggr logger.Logger, poller logpoller.LogPoller, c evmclient.Client, stateStore 
evmregistry21.UpkeepStateReader, filterStore logprovider.UpkeepFilterStore, opts *logprovider.LogTriggersOptions) (logprovider.LogEventProvider, logprovider.LogRecoverer) { packer := logprovider.NewLogEventsPacker() if opts == nil { o := logprovider.NewOptions(200) From 1f578db8566cf5c05a5d51a30c7a3b1251adaa98 Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 19 Mar 2024 17:21:45 +0200 Subject: [PATCH 19/58] sort once instead of maintaining sorted sliced --- .../evmregistry/v21/logprovider/buffer_v1.go | 38 +++++++++---------- .../v21/logprovider/buffer_v1_test.go | 4 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index ba747d3efe1..c9c4363745e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -266,22 +266,22 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. continue } added++ - if len(logs) == 0 { - // if the buffer is empty, just add the log - logs = append(logs, log) - } else { - // otherwise, find the right index to insert the log - // to keep the buffer sorted - // TODO: check what is better: 1. maintain sorted slice; 2. sort once at the end - i, _ := sort.Find(len(logs), func(i int) int { - return LogComparator(log, logs[i]) - }) - if i == len(logs) { - logs = append(logs, log) - } else { - logs = append(logs[:i], append([]logpoller.Log{log}, logs[i:]...)...) - } - } + // if len(logs) == 0 { + // if the buffer is empty, just add the log + logs = append(logs, log) + // } else { + // // otherwise, find the right index to insert the log + // // to keep the buffer sorted + // // TODO: check what is better: 1. maintain sorted slice; 2. 
sort once at the end + // i, _ := sort.Find(len(logs), func(i int) int { + // return LogComparator(log, logs[i]) + // }) + // if i == len(logs) { + // logs = append(logs, log) + // } else { + // logs = append(logs[:i], append([]logpoller.Log{log}, logs[i:]...)...) + // } + // } ub.visited[logid] = log.BlockNumber } ub.q = logs @@ -302,9 +302,9 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { maxLogs := int(ub.maxLogs.Load()) - // sort.SliceStable(updated, func(i, j int) bool { - // return LogSorter(updated[i], updated[j]) - // }) + sort.SliceStable(ub.q, func(i, j int) bool { + return LogSorter(ub.q[i], ub.q[j]) + }) updated := make([]logpoller.Log, 0) var dropped int for _, l := range ub.q { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index 1b63a3ece45..0327f29d5cb 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -11,7 +11,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" ) -func TestLogEventBufferV2_Clean(t *testing.T) { +func TestLogEventBufferV1_Clean(t *testing.T) { t.Run("empty", func(t *testing.T) { buf := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), 10) @@ -45,7 +45,7 @@ func TestLogEventBufferV2_Clean(t *testing.T) { }) } -func TestLogEventBufferV2_EnqueueDequeue(t *testing.T) { +func TestLogEventBufferV1_EnqueueDequeue(t *testing.T) { t.Run("dequeue empty", func(t *testing.T) { buf := NewLogBuffer(logger.TestLogger(t), 10, 10) From 867336cdb4482901c65e0f0fe93d50b71c3990b8 Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 19 Mar 2024 17:54:33 +0200 Subject: [PATCH 20/58] align config --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go | 4 ++-- 
1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 6944152afd4..dbceb9c0f49 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -84,9 +84,9 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) { o.BlockRate = 2 } if o.LogLimitLow == 0 { - o.LogLimitLow = 2 + o.LogLimitLow = 4 } if o.LogLimitHigh == 0 { - o.LogLimitHigh = 5 // o.LogLimitHigh * 2 + o.LogLimitHigh = o.LogLimitHigh * 2 } } From f5306906d80c449b602677c0f6125908fb3ce1e3 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 20 Mar 2024 09:40:57 +0200 Subject: [PATCH 21/58] fix tests and set config --- .../evmregistry/v21/logprovider/factory.go | 2 +- .../v21/logprovider/integration_test.go | 27 ++++++++++++------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index dbceb9c0f49..b3ec9a3d3bd 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -87,6 +87,6 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) { o.LogLimitLow = 4 } if o.LogLimitHigh == 0 { - o.LogLimitHigh = o.LogLimitHigh * 2 + o.LogLimitHigh = o.LogLimitHigh * 10 } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index e3454b6b84c..1618f695ccf 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -39,15 +39,16 @@ import ( func TestIntegration_LogEventProvider(t *testing.T) { tests := []struct { - name string - version string + name string + version string + limitLow, limitHigh int32 }{ - {"default version", ""}, - {"v1", "v1"}, + {"default version", "", 10, 100}, + {"v1", "v1", 10, 100}, } for _, tc := range tests { - bufferVersion := tc.version + bufferVersion, limitLow, limitHigh := tc.version, tc.limitLow, tc.limitHigh t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithCancel(testutils.Context(t)) defer cancel() @@ -62,6 +63,8 @@ func TestIntegration_LogEventProvider(t *testing.T) { opts := logprovider.NewOptions(200) opts.ReadInterval = time.Second / 2 opts.BufferVersion = bufferVersion + opts.LogLimitLow = limitLow + opts.LogLimitHigh = limitHigh lp, ethClient := setupDependencies(t, db, backend) filterStore := logprovider.NewUpkeepFilterStore() @@ -215,15 +218,16 @@ func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { func TestIntegration_LogEventProvider_Backfill(t *testing.T) { tests := []struct { - name string - bufferVersion string + name string + bufferVersion string + limitLow, limitHigh int32 }{ - {"default version", ""}, - {"v1", "v1"}, + {"default version", "", 10, 100}, + {"v1", "v1", 10, 100}, } for _, tc := range tests { - bufferVersion := tc.bufferVersion + bufferVersion, limitLow, limitHigh := tc.bufferVersion, tc.limitLow, tc.limitHigh t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) @@ -239,6 +243,9 @@ func TestIntegration_LogEventProvider_Backfill(t *testing.T) { opts := logprovider.NewOptions(200) opts.ReadInterval = time.Second / 4 opts.BufferVersion = bufferVersion + opts.LogLimitLow = limitLow + opts.LogLimitHigh = limitHigh + lp, ethClient := setupDependencies(t, db, backend) filterStore := logprovider.NewUpkeepFilterStore() provider, _ := 
setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) From f72baf0ae557f4880aececdeb572b66b0c669ea6 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 20 Mar 2024 12:31:27 +0200 Subject: [PATCH 22/58] fixes & logs --- .../evmregistry/v21/logprovider/buffer_v1.go | 72 ++++++++++--------- .../evmregistry/v21/logprovider/factory.go | 2 +- .../evmregistry/v21/logprovider/provider.go | 2 +- 3 files changed, 40 insertions(+), 36 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index c9c4363745e..3f49eb9f4dd 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -43,7 +43,7 @@ func DefaultUpkeepSelector(id *big.Int) bool { type logBuffer struct { lggr logger.Logger // max number of logs to keep in the buffer for each upkeep per block - maxUpkeepLogs *atomic.Int32 + logLimitHigh *atomic.Int32 // number of blocks to keep in the buffer bufferSize *atomic.Int32 // last block number seen by the buffer @@ -53,15 +53,15 @@ type logBuffer struct { lock sync.RWMutex } -func NewLogBuffer(lggr logger.Logger, size, upkeepLogLimit int) LogBuffer { - s := new(atomic.Int32) - s.Add(int32(size)) - l := new(atomic.Int32) - l.Add(int32(upkeepLogLimit)) +func NewLogBuffer(lggr logger.Logger, lookback, logLimitHigh uint) LogBuffer { + bufferSize := new(atomic.Int32) + bufferSize.Add(int32(lookback)) + limitHigh := new(atomic.Int32) + limitHigh.Add(int32(logLimitHigh)) return &logBuffer{ lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), - maxUpkeepLogs: l, - bufferSize: s, + logLimitHigh: limitHigh, + bufferSize: bufferSize, lastBlockSeen: new(atomic.Int64), upkeepBuffers: make(map[string]*upkeepLogBuffer), } @@ -72,10 +72,11 @@ func (b *logBuffer) SetConfig(lookback, logLimitHigh int) { defer b.lock.Unlock() 
b.bufferSize.Store(int32(lookback)) - b.maxUpkeepLogs.Store(int32(logLimitHigh)) + b.logLimitHigh.Store(int32(logLimitHigh)) + cap := uint(logLimitHigh * lookback) for _, ub := range b.upkeepBuffers { - ub.setConfig(logLimitHigh) + ub.setConfig(cap) } } @@ -85,17 +86,18 @@ func (b *logBuffer) SetConfig(lookback, logLimitHigh int) { func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { buf, ok := b.getUpkeepBuffer(uid) if !ok || buf == nil { - buf = newUpkeepLogBuffer(b.lggr, uid, int(b.maxUpkeepLogs.Load()*b.bufferSize.Load())) + buf = newUpkeepLogBuffer(b.lggr, uid, int(b.logLimitHigh.Load()*b.bufferSize.Load())) b.setUpkeepBuffer(uid, buf) } - lastBlockSeen := latestBlockNumber(logs...) - if b.lastBlockSeen.Load() < lastBlockSeen { - b.lastBlockSeen.Store(lastBlockSeen) + latestBlock := latestBlockNumber(logs...) + if b.lastBlockSeen.Load() < latestBlock { + b.lastBlockSeen.Store(latestBlock) } blockThreshold := b.lastBlockSeen.Load() - int64(b.bufferSize.Load()) if blockThreshold <= 0 { blockThreshold = 1 } + buf.lggr.Debugw("Enqueuing logs", "blockThreshold", blockThreshold, "logsLatestBlock", latestBlock, "lastBlockSeen", b.lastBlockSeen.Load(), "logs", len(logs), "upkeepID", uid.String(), "upkeepBufferSize", buf.size(), "upkeepBufferCap", buf.cap.Load()) return buf.enqueue(blockThreshold, logs...) 
} @@ -166,28 +168,28 @@ func (b *logBuffer) setUpkeepBuffer(uid *big.Int, buf *upkeepLogBuffer) { type upkeepLogBuffer struct { lggr logger.Logger - id *big.Int - maxLogs *atomic.Int32 + id *big.Int + cap *atomic.Int32 q []logpoller.Log visited map[string]int64 lock sync.RWMutex } -func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, maxLogs int) *upkeepLogBuffer { - limit := new(atomic.Int32) - limit.Add(int32(maxLogs)) +func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, capacity int) *upkeepLogBuffer { + cap := new(atomic.Int32) + cap.Add(int32(capacity)) return &upkeepLogBuffer{ - lggr: lggr.With("id", id.String()), + lggr: lggr.With("upkeepID", id.String()), id: id, - maxLogs: limit, - q: make([]logpoller.Log, 0, maxLogs), + cap: cap, + q: make([]logpoller.Log, 0, capacity), visited: make(map[string]int64), } } -func (ub *upkeepLogBuffer) setConfig(maxLogs int) { - ub.maxLogs.Store(int32(maxLogs)) +func (ub *upkeepLogBuffer) setConfig(capacity uint) { + ub.cap.Store(int32(capacity)) } // size returns the total number of logs in the buffer. @@ -238,9 +240,9 @@ func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log if len(results) > 0 { ub.q = updatedLogs + ub.lggr.Debugw("Dequeued logs", "start", start, "end", end, "limit", limit, "results", len(results), "remaining", remaining) } - ub.lggr.Debugf("Dequeued %d logs, remaining %d", len(results), remaining) prommetrics.AutomationLogsInLogBuffer.Sub(float64(len(results))) return results, remaining @@ -257,12 +259,12 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. 
var added int for _, log := range logsToAdd { if log.BlockNumber < blockThreshold { - ub.lggr.Debugw("Skipping log from old block", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber) + ub.lggr.Debugw("Skipping log from old block", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) continue } logid := logID(log) if _, ok := ub.visited[logid]; ok { - ub.lggr.Debugw("Skipping known log", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber) + ub.lggr.Debugw("Skipping known log", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) continue } added++ @@ -289,9 +291,9 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. var dropped int if added > 0 { dropped = ub.clean(blockThreshold) + ub.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(ub.q), "maxLogs", ub.cap.Load(), "visited size", len(ub.visited)) } - ub.lggr.Debugf("Enqueued %d logs, dropped %d with blockThreshold %d", added, dropped, blockThreshold) prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) return added, dropped @@ -300,15 +302,15 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. // clean removes logs that are older than blockThreshold and drops logs if the limit for the // given upkeep was exceeded. Returns the number of logs that were dropped. 
func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { - maxLogs := int(ub.maxLogs.Load()) + maxLogs := int(ub.cap.Load()) sort.SliceStable(ub.q, func(i, j int) bool { return LogSorter(ub.q[i], ub.q[j]) }) updated := make([]logpoller.Log, 0) - var dropped int + var dropped, expired int for _, l := range ub.q { - if l.BlockNumber > blockThreshold { + if l.BlockNumber >= blockThreshold { if len(updated) < maxLogs { updated = append(updated, l) } else { @@ -321,14 +323,16 @@ func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { } else { prommetrics.AutomationLogsInLogBuffer.Dec() // old logs are ignored and removed from visited - ub.lggr.Debugw("Dropping old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) + ub.lggr.Debugw("Expiring old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) logid := logID(l) delete(ub.visited, logid) + expired++ } } - ub.lggr.Debugw("Cleaned logs", "dropped", dropped, "blockThreshold", blockThreshold, "len updated", len(updated), "len ub.q", len(ub.q), "maxLogs", maxLogs) - + if dropped > 0 || expired > 0 { + ub.lggr.Debugw("Cleaned logs", "dropped", dropped, "expired", expired, "blockThreshold", blockThreshold, "len updated", len(updated), "len ub.q", len(ub.q), "maxLogs", maxLogs) + } ub.q = updated for lid, block := range ub.visited { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index b3ec9a3d3bd..90bf871471e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -87,6 +87,6 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) { o.LogLimitLow = 4 } if o.LogLimitHigh == 0 { - o.LogLimitHigh = o.LogLimitHigh * 10 + o.LogLimitHigh = o.LogLimitLow * 10 } } diff --git 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 9650003316a..688e5aab028 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -112,7 +112,7 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, packer LogDa lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), - bufferV1: NewLogBuffer(lggr, int(opts.LookbackBlocks), int(opts.LogLimitHigh)), + bufferV1: NewLogBuffer(lggr, uint(opts.LookbackBlocks), uint(opts.LogLimitHigh)), poller: poller, opts: opts, filterStore: filterStore, From 6791ac7146f91dd645a7b46f30a7bb1369b8599e Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 20 Mar 2024 15:44:32 +0200 Subject: [PATCH 23/58] custom cla with optmized hook --- core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 15a721dc380..70019d344c1 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -19,7 +19,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 github.com/pelletier/go-toml/v2 v2.1.1 github.com/shopspring/decimal v1.3.1 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 diff --git a/core/scripts/go.sum 
b/core/scripts/go.sum index 4be489ecedc..c579484c9a7 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1172,8 +1172,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 h1:xkejUBZhcBpBrTSfxc91Iwzadrb6SXw8ks69bHIQ9Ww= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429/go.mod h1:wJmVvDf4XSjsahWtfUq3wvIAYEAuhr7oxmxYnEL/LGQ= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 h1:tbuA89sf5N60BmBNEbV0g2OhvmEQRohN3QQM+qea+mc= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/go.mod b/go.mod index cf20ecf9ca7..4e0b8521664 100644 --- a/go.mod +++ b/go.mod @@ -68,7 +68,7 @@ require ( github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 github.com/smartcontractkit/chain-selectors v1.0.10 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 github.com/smartcontractkit/chainlink-common 
v0.1.7-0.20240306173252-5cbf83ca3a69 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 diff --git a/go.sum b/go.sum index 733b0e4b2d8..ea1796ede9d 100644 --- a/go.sum +++ b/go.sum @@ -1167,8 +1167,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 h1:xkejUBZhcBpBrTSfxc91Iwzadrb6SXw8ks69bHIQ9Ww= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429/go.mod h1:wJmVvDf4XSjsahWtfUq3wvIAYEAuhr7oxmxYnEL/LGQ= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 h1:tbuA89sf5N60BmBNEbV0g2OhvmEQRohN3QQM+qea+mc= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 30241cdc28d..b69024ec15c 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -22,7 +22,7 @@ require ( github.com/scylladb/go-reflectx v1.0.1 github.com/segmentio/ksuid v1.0.4 github.com/slack-go/slack v0.12.2 - 
github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 github.com/smartcontractkit/chainlink-testing-framework v1.25.1 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 7d1c024a761..b18b87907a1 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1514,8 +1514,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 h1:xkejUBZhcBpBrTSfxc91Iwzadrb6SXw8ks69bHIQ9Ww= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429/go.mod h1:wJmVvDf4XSjsahWtfUq3wvIAYEAuhr7oxmxYnEL/LGQ= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 h1:tbuA89sf5N60BmBNEbV0g2OhvmEQRohN3QQM+qea+mc= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= From 61c0109ec441bb4ed5686ebc42a82b24e72cccc6 Mon 
Sep 17 00:00:00 2001 From: amirylm Date: Wed, 20 Mar 2024 15:55:31 +0200 Subject: [PATCH 24/58] lint --- .../evmregistry/v21/logprovider/buffer_v1.go | 34 ++++++++----------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 3f49eb9f4dd..aebaaa9e51f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -13,10 +13,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics" ) -const ( - defaultLogLimitHigh = 10 -) - type BufferedLog struct { ID *big.Int Log logpoller.Log @@ -74,9 +70,9 @@ func (b *logBuffer) SetConfig(lookback, logLimitHigh int) { b.bufferSize.Store(int32(lookback)) b.logLimitHigh.Store(int32(logLimitHigh)) - cap := uint(logLimitHigh * lookback) + c := uint(logLimitHigh * lookback) for _, ub := range b.upkeepBuffers { - ub.setConfig(cap) + ub.setConfig(c) } } @@ -97,7 +93,7 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { if blockThreshold <= 0 { blockThreshold = 1 } - buf.lggr.Debugw("Enqueuing logs", "blockThreshold", blockThreshold, "logsLatestBlock", latestBlock, "lastBlockSeen", b.lastBlockSeen.Load(), "logs", len(logs), "upkeepID", uid.String(), "upkeepBufferSize", buf.size(), "upkeepBufferCap", buf.cap.Load()) + buf.lggr.Debugw("Enqueuing logs", "blockThreshold", blockThreshold, "logsLatestBlock", latestBlock, "lastBlockSeen", b.lastBlockSeen.Load(), "logs", len(logs), "upkeepID", uid.String(), "upkeepBufferSize", buf.size(), "upkeepBufferCap", buf.capacity.Load()) return buf.enqueue(blockThreshold, logs...) 
} @@ -168,8 +164,8 @@ func (b *logBuffer) setUpkeepBuffer(uid *big.Int, buf *upkeepLogBuffer) { type upkeepLogBuffer struct { lggr logger.Logger - id *big.Int - cap *atomic.Int32 + id *big.Int + capacity *atomic.Int32 q []logpoller.Log visited map[string]int64 @@ -177,19 +173,19 @@ type upkeepLogBuffer struct { } func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, capacity int) *upkeepLogBuffer { - cap := new(atomic.Int32) - cap.Add(int32(capacity)) + c := new(atomic.Int32) + c.Add(int32(capacity)) return &upkeepLogBuffer{ - lggr: lggr.With("upkeepID", id.String()), - id: id, - cap: cap, - q: make([]logpoller.Log, 0, capacity), - visited: make(map[string]int64), + lggr: lggr.With("upkeepID", id.String()), + id: id, + capacity: c, + q: make([]logpoller.Log, 0, capacity), + visited: make(map[string]int64), } } func (ub *upkeepLogBuffer) setConfig(capacity uint) { - ub.cap.Store(int32(capacity)) + ub.capacity.Store(int32(capacity)) } // size returns the total number of logs in the buffer. @@ -291,7 +287,7 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. var dropped int if added > 0 { dropped = ub.clean(blockThreshold) - ub.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(ub.q), "maxLogs", ub.cap.Load(), "visited size", len(ub.visited)) + ub.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(ub.q), "maxLogs", ub.capacity.Load(), "visited size", len(ub.visited)) } prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) @@ -302,7 +298,7 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. // clean removes logs that are older than blockThreshold and drops logs if the limit for the // given upkeep was exceeded. Returns the number of logs that were dropped. 
func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { - maxLogs := int(ub.cap.Load()) + maxLogs := int(ub.capacity.Load()) sort.SliceStable(ub.q, func(i, j int) bool { return LogSorter(ub.q[i], ub.q[j]) From 875efb1823f18b53e43c8377b910f0291d4a2d42 Mon Sep 17 00:00:00 2001 From: amirylm Date: Thu, 21 Mar 2024 17:19:25 +0200 Subject: [PATCH 25/58] use defer in tests --- core/services/ocr2/plugins/ocr2keeper/integration_21_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go index 4aa9b0cb7dc..9e37b695553 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go @@ -373,9 +373,8 @@ func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) { }() listener, done := listenPerformed(t, backend, registry, feeds.UpkeepsIds(), int64(1)) + defer done() g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) - - done() } func TestIntegration_KeeperPluginLogUpkeep_ErrHandler(t *testing.T) { @@ -475,8 +474,8 @@ func TestIntegration_KeeperPluginLogUpkeep_ErrHandler(t *testing.T) { } listener, done := listenPerformed(t, backend, registry, idsToCheck, startBlock) + defer done() g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) - done() } func startMercuryServer(t *testing.T, mercuryServer *mercury.SimulatedMercuryServer, responder func(i int) (int, []byte)) { From 02db52d9dab03bed40b0c60e94af07231075e4a1 Mon Sep 17 00:00:00 2001 From: amirylm Date: Fri, 22 Mar 2024 17:03:49 +0200 Subject: [PATCH 26/58] update cla version --- core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 ++-- 6 files changed, 9 insertions(+), 9 
deletions(-) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 70019d344c1..696d87d17f1 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -19,7 +19,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 github.com/pelletier/go-toml/v2 v2.1.1 github.com/shopspring/decimal v1.3.1 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index c579484c9a7..0e0d474589b 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1172,8 +1172,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 h1:tbuA89sf5N60BmBNEbV0g2OhvmEQRohN3QQM+qea+mc= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= 
github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/go.mod b/go.mod index 4e0b8521664..3615155301f 100644 --- a/go.mod +++ b/go.mod @@ -68,7 +68,7 @@ require ( github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 github.com/smartcontractkit/chain-selectors v1.0.10 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 diff --git a/go.sum b/go.sum index ea1796ede9d..2fdc55c3d26 100644 --- a/go.sum +++ b/go.sum @@ -1167,8 +1167,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 h1:tbuA89sf5N60BmBNEbV0g2OhvmEQRohN3QQM+qea+mc= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation 
v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index b69024ec15c..7a8b7e56af1 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -22,7 +22,7 @@ require ( github.com/scylladb/go-reflectx v1.0.1 github.com/segmentio/ksuid v1.0.4 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 github.com/smartcontractkit/chainlink-testing-framework v1.25.1 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index b18b87907a1..573e56b2365 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1514,8 +1514,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240320134224-f9a7aa27ffa7 h1:tbuA89sf5N60BmBNEbV0g2OhvmEQRohN3QQM+qea+mc= -github.com/smartcontractkit/chainlink-automation 
v1.0.2-0.20240320134224-f9a7aa27ffa7/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= From 4f3bf3ef92b18af9695d3a1fd1f2fd67985b0e47 Mon Sep 17 00:00:00 2001 From: Fergal Gribben Date: Fri, 22 Mar 2024 15:14:35 +0000 Subject: [PATCH 27/58] Go mod tidy --- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 01ec14c9839..34d77005ef9 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -14,7 +14,7 @@ require ( github.com/pelletier/go-toml/v2 v2.1.1 github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 github.com/smartcontractkit/chainlink-testing-framework v1.25.1 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index ac58e4c2b48..afe71ef91aa 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1497,8 +1497,8 @@ 
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429 h1:xkejUBZhcBpBrTSfxc91Iwzadrb6SXw8ks69bHIQ9Ww= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240118014648-1ab6a88c9429/go.mod h1:wJmVvDf4XSjsahWtfUq3wvIAYEAuhr7oxmxYnEL/LGQ= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= From 7c600dc260c00a746c6593d453971a2697794a0c Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 26 Mar 2024 11:06:44 +0200 Subject: [PATCH 28/58] alignments - buffer config - remove redundant logic in buffer.Dequeue - remove commented code - added logs - renaming - comments - remove old rate limiter --- .../evmregistry/v21/logprovider/buffer_v1.go | 296 ++++++++--------- .../v21/logprovider/buffer_v1_test.go | 306 +++--------------- .../evmregistry/v21/logprovider/factory.go | 26 +- .../evmregistry/v21/logprovider/filter.go | 5 - .../v21/logprovider/integration_test.go | 238 +------------- 
.../evmregistry/v21/logprovider/provider.go | 23 +- .../v21/logprovider/provider_life_cycle.go | 4 +- .../v21/logprovider/provider_test.go | 8 +- 8 files changed, 224 insertions(+), 682 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index aebaaa9e51f..22954b0aa29 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -2,6 +2,7 @@ package logprovider import ( "encoding/hex" + "math" "math/big" "sort" "sync" @@ -29,71 +30,90 @@ type LogBuffer interface { // logs in that window for the involved upkeeps. Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) // SetConfig sets the buffer size and the maximum number of logs to keep for each upkeep. - SetConfig(lookback, maxUpkeepLogs int) + SetConfig(lookback, blockRate, logLimit uint32) } func DefaultUpkeepSelector(id *big.Int) bool { return true } +type logBufferOptions struct { + // max number of logs to keep in the buffer for each upkeep per window + logLimitHigh *atomic.Uint32 + // number of blocks to keep in the buffer + bufferSize *atomic.Uint32 + // blockRate is the number of blocks per window + blockRate *atomic.Uint32 +} + +func newLogBufferOptions(lookback, blockRate, logLimit uint32) *logBufferOptions { + opts := &logBufferOptions{ + logLimitHigh: new(atomic.Uint32), + bufferSize: new(atomic.Uint32), + blockRate: new(atomic.Uint32), + } + opts.override(lookback, blockRate, logLimit) + + return opts +} + +func (o *logBufferOptions) override(lookback, blockRate, logLimit uint32) { + o.logLimitHigh.Store(logLimit * 10) + o.bufferSize.Store(lookback) + o.blockRate.Store(blockRate) +} + +func (o *logBufferOptions) windows() uint { + blockRate := o.blockRate.Load() + if blockRate == 0 { + 
return 1 + } + return uint(math.Ceil(float64(o.bufferSize.Load()) / float64(blockRate))) +} + type logBuffer struct { lggr logger.Logger - // max number of logs to keep in the buffer for each upkeep per block - logLimitHigh *atomic.Int32 - // number of blocks to keep in the buffer - bufferSize *atomic.Int32 + opts *logBufferOptions // last block number seen by the buffer lastBlockSeen *atomic.Int64 - // map of upkeep id to its buffer - upkeepBuffers map[string]*upkeepLogBuffer - lock sync.RWMutex + // map of upkeep id to its queue + queues map[string]*upkeepLogQueue + lock sync.RWMutex } -func NewLogBuffer(lggr logger.Logger, lookback, logLimitHigh uint) LogBuffer { - bufferSize := new(atomic.Int32) - bufferSize.Add(int32(lookback)) - limitHigh := new(atomic.Int32) - limitHigh.Add(int32(logLimitHigh)) +func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint) LogBuffer { return &logBuffer{ lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), - logLimitHigh: limitHigh, - bufferSize: bufferSize, + opts: newLogBufferOptions(uint32(lookback), uint32(blockRate), uint32(logLimit)), lastBlockSeen: new(atomic.Int64), - upkeepBuffers: make(map[string]*upkeepLogBuffer), + queues: make(map[string]*upkeepLogQueue), } } -func (b *logBuffer) SetConfig(lookback, logLimitHigh int) { +func (b *logBuffer) SetConfig(lookback, blockRate, logLimit uint32) { b.lock.Lock() defer b.lock.Unlock() - b.bufferSize.Store(int32(lookback)) - b.logLimitHigh.Store(int32(logLimitHigh)) - - c := uint(logLimitHigh * lookback) - for _, ub := range b.upkeepBuffers { - ub.setConfig(c) - } + b.opts.override(lookback, blockRate, logLimit) } // Enqueue adds logs to the buffer and might also drop logs if the limit for the // given upkeep was exceeded. It will create a new buffer if it does not exist. // Returns the number of logs that were added and number of logs that were dropped. 
func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { - buf, ok := b.getUpkeepBuffer(uid) + buf, ok := b.getUpkeepQueue(uid) if !ok || buf == nil { - buf = newUpkeepLogBuffer(b.lggr, uid, int(b.logLimitHigh.Load()*b.bufferSize.Load())) - b.setUpkeepBuffer(uid, buf) + buf = newUpkeepLogBuffer(b.lggr, uid, b.opts) + b.setUpkeepQueue(uid, buf) } latestBlock := latestBlockNumber(logs...) if b.lastBlockSeen.Load() < latestBlock { b.lastBlockSeen.Store(latestBlock) } - blockThreshold := b.lastBlockSeen.Load() - int64(b.bufferSize.Load()) + blockThreshold := b.lastBlockSeen.Load() - int64(b.opts.bufferSize.Load()) if blockThreshold <= 0 { blockThreshold = 1 } - buf.lggr.Debugw("Enqueuing logs", "blockThreshold", blockThreshold, "logsLatestBlock", latestBlock, "lastBlockSeen", b.lastBlockSeen.Load(), "logs", len(logs), "upkeepID", uid.String(), "upkeepBufferSize", buf.size(), "upkeepBufferCap", buf.capacity.Load()) return buf.enqueue(blockThreshold, logs...) } @@ -104,41 +124,33 @@ func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, defer b.lock.RUnlock() start, end := BlockWindow(block, blockRate) - result, remaining := b.tryDequeue(start, end, upkeepLimit, maxResults, upkeepSelector) - // if there are still logs to pull, try to dequeue again - // TODO: check if we should limit the number of iterations - for len(result) < maxResults && remaining > 0 { - nextResults, nextRemaining := b.tryDequeue(start, end, upkeepLimit, maxResults-len(result), upkeepSelector) - result = append(result, nextResults...) - remaining = nextRemaining - } - - return result, remaining + return b.dequeue(start, end, upkeepLimit, maxResults, upkeepSelector) } -// tryDequeue pulls logs from the buffers, according to the given selector, in block range [start,end] -// with minimum number of results per upkeep and the total capacity for results. -// Returns logs and the number of remaining logs in the buffer. 
-func (b *logBuffer) tryDequeue(start, end int64, minUpkeepLogs, capacity int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { +// dequeue pulls logs from the buffers, depending on the given selector (upkeepSelector), +// in block range [start,end] with minimum number of results per upkeep (upkeepLimit) +// and the maximum number of results (capacity). +// Returns logs and the number of remaining logs in the buffer for the given range and selector. +func (b *logBuffer) dequeue(start, end int64, upkeepLimit, capacity int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { var result []BufferedLog var remainingLogs int - for _, buf := range b.upkeepBuffers { - if !upkeepSelector(buf.id) { + for _, q := range b.queues { + if !upkeepSelector(q.id) { // if the upkeep is not selected, skip it continue } if capacity == 0 { // if there is no more capacity for results, just count the remaining logs - remainingLogs += buf.sizeOfWindow(start, end) + remainingLogs += q.sizeOfRange(start, end) continue } - if minUpkeepLogs > capacity { - // if there are more logs to fetch than the capacity, fetch the minimum - minUpkeepLogs = capacity + if upkeepLimit > capacity { + // adjust limit if it is higher than the actual capacity + upkeepLimit = capacity } - logs, remaining := buf.dequeue(start, end, minUpkeepLogs) + logs, remaining := q.dequeue(start, end, upkeepLimit) for _, l := range logs { - result = append(result, BufferedLog{ID: buf.id, Log: l}) + result = append(result, BufferedLog{ID: q.id, Log: l}) capacity-- } remainingLogs += remaining @@ -146,63 +158,52 @@ func (b *logBuffer) tryDequeue(start, end int64, minUpkeepLogs, capacity int, up return result, remainingLogs } -func (b *logBuffer) getUpkeepBuffer(uid *big.Int) (*upkeepLogBuffer, bool) { +func (b *logBuffer) getUpkeepQueue(uid *big.Int) (*upkeepLogQueue, bool) { b.lock.RLock() defer b.lock.RUnlock() - ub, ok := b.upkeepBuffers[uid.String()] + ub, ok := b.queues[uid.String()] return ub, ok } -func 
(b *logBuffer) setUpkeepBuffer(uid *big.Int, buf *upkeepLogBuffer) { +func (b *logBuffer) setUpkeepQueue(uid *big.Int, buf *upkeepLogQueue) { b.lock.Lock() defer b.lock.Unlock() - b.upkeepBuffers[uid.String()] = buf + b.queues[uid.String()] = buf } -type upkeepLogBuffer struct { +// upkeepLogQueue is a priority queue for logs associated to a specific upkeep. +// It keeps track of the logs that were already visited and the capacity of the queue. +type upkeepLogQueue struct { lggr logger.Logger - id *big.Int - capacity *atomic.Int32 + id *big.Int + opts *logBufferOptions - q []logpoller.Log + logs []logpoller.Log visited map[string]int64 lock sync.RWMutex } -func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, capacity int) *upkeepLogBuffer { - c := new(atomic.Int32) - c.Add(int32(capacity)) - return &upkeepLogBuffer{ - lggr: lggr.With("upkeepID", id.String()), - id: id, - capacity: c, - q: make([]logpoller.Log, 0, capacity), - visited: make(map[string]int64), +func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, opts *logBufferOptions) *upkeepLogQueue { + logsCapacity := uint(opts.logLimitHigh.Load()) * opts.windows() + return &upkeepLogQueue{ + lggr: lggr.With("upkeepID", id.String()), + id: id, + opts: opts, + logs: make([]logpoller.Log, 0, logsCapacity), + visited: make(map[string]int64), } } -func (ub *upkeepLogBuffer) setConfig(capacity uint) { - ub.capacity.Store(int32(capacity)) -} - -// size returns the total number of logs in the buffer. -func (ub *upkeepLogBuffer) size() int { - ub.lock.RLock() - defer ub.lock.RUnlock() - - return len(ub.q) -} - -// size returns the total number of logs in the buffer. -func (ub *upkeepLogBuffer) sizeOfWindow(start, end int64) int { - ub.lock.RLock() - defer ub.lock.RUnlock() +// sizeOfRange returns the number of logs in the buffer that are within the given block range. 
+func (q *upkeepLogQueue) sizeOfRange(start, end int64) int { + q.lock.RLock() + defer q.lock.RUnlock() size := 0 - for _, l := range ub.q { + for _, l := range q.logs { if l.BlockNumber >= start && l.BlockNumber <= end { size++ } @@ -212,18 +213,18 @@ func (ub *upkeepLogBuffer) sizeOfWindow(start, end int64) int { // dequeue pulls logs from the buffer that are within the given block range, // with a limit of logs to pull. Returns logs and the number of remaining logs in the buffer. -func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log, int) { - ub.lock.Lock() - defer ub.lock.Unlock() +func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, int) { + q.lock.Lock() + defer q.lock.Unlock() - if len(ub.q) == 0 { + if len(q.logs) == 0 { return nil, 0 } var results []logpoller.Log var remaining int updatedLogs := make([]logpoller.Log, 0) - for _, l := range ub.q { + for _, l := range q.logs { if l.BlockNumber >= start && l.BlockNumber <= end { if len(results) < limit { results = append(results, l) @@ -235,8 +236,8 @@ func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log } if len(results) > 0 { - ub.q = updatedLogs - ub.lggr.Debugw("Dequeued logs", "start", start, "end", end, "limit", limit, "results", len(results), "remaining", remaining) + q.logs = updatedLogs + q.lggr.Debugw("Dequeued logs", "start", start, "end", end, "limit", limit, "results", len(results), "remaining", remaining) } prommetrics.AutomationLogsInLogBuffer.Sub(float64(len(results))) @@ -247,47 +248,32 @@ func (ub *upkeepLogBuffer) dequeue(start, end int64, limit int) ([]logpoller.Log // enqueue adds logs to the buffer and might also drop logs if the limit for the // given upkeep was exceeded. Additionally, it will drop logs that are older than blockThreshold. // Returns the number of logs that were added and number of logs that were dropped. 
-func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller.Log) (int, int) { - ub.lock.Lock() - defer ub.lock.Unlock() +func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Log) (int, int) { + q.lock.Lock() + defer q.lock.Unlock() - logs := ub.q + logs := q.logs var added int for _, log := range logsToAdd { if log.BlockNumber < blockThreshold { - ub.lggr.Debugw("Skipping log from old block", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) + // q.lggr.Debugw("Skipping log from old block", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) continue } logid := logID(log) - if _, ok := ub.visited[logid]; ok { - ub.lggr.Debugw("Skipping known log", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) + if _, ok := q.visited[logid]; ok { + // q.lggr.Debugw("Skipping known log", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) continue } added++ - // if len(logs) == 0 { - // if the buffer is empty, just add the log logs = append(logs, log) - // } else { - // // otherwise, find the right index to insert the log - // // to keep the buffer sorted - // // TODO: check what is better: 1. maintain sorted slice; 2. sort once at the end - // i, _ := sort.Find(len(logs), func(i int) int { - // return LogComparator(log, logs[i]) - // }) - // if i == len(logs) { - // logs = append(logs, log) - // } else { - // logs = append(logs[:i], append([]logpoller.Log{log}, logs[i:]...)...) 
- // } - // } - ub.visited[logid] = log.BlockNumber + q.visited[logid] = log.BlockNumber } - ub.q = logs + q.logs = logs var dropped int if added > 0 { - dropped = ub.clean(blockThreshold) - ub.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(ub.q), "maxLogs", ub.capacity.Load(), "visited size", len(ub.visited)) + dropped = q.clean(blockThreshold) + q.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(q.logs), "visited size", len(q.visited)) } prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) @@ -297,47 +283,67 @@ func (ub *upkeepLogBuffer) enqueue(blockThreshold int64, logsToAdd ...logpoller. // clean removes logs that are older than blockThreshold and drops logs if the limit for the // given upkeep was exceeded. Returns the number of logs that were dropped. -func (ub *upkeepLogBuffer) clean(blockThreshold int64) int { - maxLogs := int(ub.capacity.Load()) - - sort.SliceStable(ub.q, func(i, j int) bool { - return LogSorter(ub.q[i], ub.q[j]) +func (q *upkeepLogQueue) clean(blockThreshold int64) int { + blockRate := int(q.opts.blockRate.Load()) + maxLogsPerWindow := int(q.opts.logLimitHigh.Load()) + + // sort logs by block number, tx hash and log index + // to keep the q sorted and to ensure that logs can be + // grouped by block windows for the cleanup + sort.SliceStable(q.logs, func(i, j int) bool { + return LogSorter(q.logs[i], q.logs[j]) }) + // cleanup logs that are older than blockThreshold + // and drop logs if the window/s limit for the given upkeep was exceeded updated := make([]logpoller.Log, 0) - var dropped, expired int - for _, l := range ub.q { - if l.BlockNumber >= blockThreshold { - if len(updated) < maxLogs { - updated = append(updated, l) - } else { - prommetrics.AutomationLogsInLogBuffer.Dec() - // TODO: check if we should clean visited as well - ub.lggr.Debugw("Reached log buffer limits, dropping log", 
"blockNumber", l.BlockNumber, - "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), "maxLogs", maxLogs) - dropped++ - } - } else { + var dropped, expired, currentWindowCapacity int + var currentWindowStart int64 + for _, l := range q.logs { + if blockThreshold > l.BlockNumber { // old log, removed prommetrics.AutomationLogsInLogBuffer.Dec() - // old logs are ignored and removed from visited - ub.lggr.Debugw("Expiring old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) + // q.lggr.Debugw("Expiring old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) logid := logID(l) - delete(ub.visited, logid) + delete(q.visited, logid) expired++ + continue + } + start, _ := BlockWindow(l.BlockNumber, blockRate) + if start != currentWindowStart { + // new window, reset capacity + currentWindowStart = start + currentWindowCapacity = 0 } + currentWindowCapacity++ + // if capacity has been reached, drop the log + if currentWindowCapacity > maxLogsPerWindow { + prommetrics.AutomationLogsInLogBuffer.Dec() + // TODO: check if we should clean visited as well, so it will be possible to add the log again + q.lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", l.BlockNumber, + "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), + "currentWindowStart", currentWindowStart, "currentWindowCapacity", currentWindowCapacity, + "maxLogsPerWindow", maxLogsPerWindow, "blockRate", blockRate) + dropped++ + continue + } + updated = append(updated, l) } if dropped > 0 || expired > 0 { - ub.lggr.Debugw("Cleaned logs", "dropped", dropped, "expired", expired, "blockThreshold", blockThreshold, "len updated", len(updated), "len ub.q", len(ub.q), "maxLogs", maxLogs) + q.lggr.Debugw("Cleaned logs", "dropped", dropped, "expired", expired, "blockThreshold", blockThreshold, "len updated", len(updated), "len before", 
len(q.logs)) } - ub.q = updated + q.logs = updated - for lid, block := range ub.visited { + q.cleanVisited(blockThreshold) + + return dropped +} + +func (q *upkeepLogQueue) cleanVisited(blockThreshold int64) { + for lid, block := range q.visited { if block <= blockThreshold { - delete(ub.visited, lid) + delete(q.visited, lid) } } - - return dropped } // logID returns a unique identifier for a log, which is an hex string diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index 0327f29d5cb..d3a50df4c85 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -11,43 +11,9 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" ) -func TestLogEventBufferV1_Clean(t *testing.T) { - t.Run("empty", func(t *testing.T) { - buf := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), 10) - - buf.clean(10) - }) - - t.Run("happy path", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 10) - - buf.Enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - ) - buf.Enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x111"), LogIndex: 0}, - logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x111"), LogIndex: 1}, - ) - - upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) - require.True(t, ok) - require.Equal(t, 4, upkeepBuf.size()) - - buf.Enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 0}, - logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 1}, - ) - - require.Equal(t, 4, upkeepBuf.size()) - require.Equal(t, 0, upkeepBuf.clean(12)) - 
require.Equal(t, 2, upkeepBuf.size()) - }) -} - -func TestLogEventBufferV1_EnqueueDequeue(t *testing.T) { +func TestLogEventBufferV1(t *testing.T) { t.Run("dequeue empty", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 10) + buf := NewLogBuffer(logger.TestLogger(t), 10, 1, 1) results, remaining := buf.Dequeue(int64(1), 20, 1, 10, DefaultUpkeepSelector) require.Equal(t, 0, len(results)) @@ -55,7 +21,7 @@ func TestLogEventBufferV1_EnqueueDequeue(t *testing.T) { }) t.Run("enqueue", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 10) + buf := NewLogBuffer(logger.TestLogger(t), 10, 1, 1) added, dropped := buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -63,32 +29,36 @@ func TestLogEventBufferV1_EnqueueDequeue(t *testing.T) { ) require.Equal(t, 2, added) require.Equal(t, 0, dropped) - upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) + q, ok := buf.(*logBuffer).getUpkeepQueue(big.NewInt(1)) require.True(t, ok) - require.Equal(t, 2, upkeepBuf.size()) + require.Equal(t, 2, q.sizeOfRange(1, 18)) }) t.Run("enqueue upkeeps limits", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 3, 2) + buf := NewLogBuffer(logger.TestLogger(t), 10, 1, 1) + limit := 2 + buf.(*logBuffer).opts.logLimitHigh.Store(uint32(limit)) added, dropped := buf.Enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 9, TxHash: common.HexToHash("0x9"), LogIndex: 0}, - logpoller.Log{BlockNumber: 9, TxHash: common.HexToHash("0x9"), LogIndex: 1}, + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x19"), LogIndex: 0}, + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x19"), LogIndex: 1}, logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x10"), LogIndex: 0}, logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x10"), LogIndex: 1}, logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 1}, logpoller.Log{BlockNumber: 11, TxHash: 
common.HexToHash("0x11"), LogIndex: 2}, logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 3}, ) + totalLimit := limit * 3 // 3 block windows require.Equal(t, 7, added) - require.Equal(t, 1, dropped) - upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) + require.Equal(t, 7-totalLimit, dropped) + q, ok := buf.(*logBuffer).getUpkeepQueue(big.NewInt(1)) require.True(t, ok) - require.Equal(t, 6, upkeepBuf.size()) + require.Equal(t, totalLimit, + q.sizeOfRange(1, 18)) }) t.Run("enqueue out of block range", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 5, 4) + buf := NewLogBuffer(logger.TestLogger(t), 5, 1, 1) added, dropped := buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x10"), LogIndex: 0}, @@ -97,13 +67,13 @@ func TestLogEventBufferV1_EnqueueDequeue(t *testing.T) { ) require.Equal(t, 2, added) require.Equal(t, 0, dropped) - upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) + q, ok := buf.(*logBuffer).getUpkeepQueue(big.NewInt(1)) require.True(t, ok) - require.Equal(t, 2, upkeepBuf.size()) + require.Equal(t, 2, q.sizeOfRange(1, 12)) }) - t.Run("enqueue dequeue", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 10) + t.Run("happy path", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) buf.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -113,224 +83,46 @@ func TestLogEventBufferV1_EnqueueDequeue(t *testing.T) { logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 2}, ) - results, remaining := buf.Dequeue(int64(1), 20, 1, 2, DefaultUpkeepSelector) + results, remaining := buf.Dequeue(int64(1), 10, 1, 2, DefaultUpkeepSelector) require.Equal(t, 2, len(results)) require.Equal(t, 2, remaining) require.True(t, results[0].ID.Cmp(results[1].ID) != 0) - results, remaining = 
buf.Dequeue(int64(1), 20, 1, 2, DefaultUpkeepSelector) + results, remaining = buf.Dequeue(int64(1), 10, 1, 2, DefaultUpkeepSelector) require.Equal(t, 2, len(results)) require.Equal(t, 0, remaining) }) +} - // t.Run("enqueue logs overflow", func(t *testing.T) { - // buf := NewLogBuffer(logger.TestLogger(t), 2) - - // require.Equal(t, 2, buf.Enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - // )) - // upkeepBuf, ok := buf.(*logBuffer).getUpkeepBuffer(big.NewInt(1)) - // require.True(t, ok) - // require.Equal(t, 2, upkeepBuf.len()) - // }) - - // t.Run("enqueue dequeue with dynamic limits", func(t *testing.T) { - // buf := NewLogBuffer(logger.TestLogger(t), 2) - - // require.Equal(t, 3, buf.Enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - // )) - // results := buf.Dequeue(int64(1), int64(20), 1, 2) - // require.Equal(t, 2, len(results)) - // buf.SetConfig(10, 3) - // require.Equal(t, 4, buf.Enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 15, TxHash: common.HexToHash("0x21"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 15, TxHash: common.HexToHash("0x21"), LogIndex: 1}, - // logpoller.Log{BlockNumber: 15, TxHash: common.HexToHash("0x21"), LogIndex: 2}, - // logpoller.Log{BlockNumber: 15, TxHash: common.HexToHash("0x21"), LogIndex: 3}, - // )) - - // results = buf.Dequeue(int64(1), int64(20), 1, 4) - // require.Equal(t, 3, len(results)) - - // for _, r := range results { - // require.Equal(t, int64(15), r.Log.BlockNumber) - // } - // }) - - // t.Run("enqueue logs overflow with dynamic limits", func(t *testing.T) { - // buf := 
newLogEventBuffer(logger.TestLogger(t), 2, 10, 2) - - // require.Equal(t, 2, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 3}, - // )) - // buf.SetLimits(10, 3) - // require.Equal(t, 3, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 1}, - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 2}, - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 3}, - // )) - - // buf.lock.Lock() - // defer buf.lock.Unlock() - // require.Equal(t, 2, len(buf.blocks[0].logs)) - // }) - - // t.Run("enqueue block overflow", func(t *testing.T) { - // buf := newLogEventBuffer(logger.TestLogger(t), 3, 2, 10) - - // require.Equal(t, 5, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 1}, - // )) - // buf.lock.Lock() - // require.Equal(t, 2, len(buf.blocks[0].logs)) - // buf.lock.Unlock() - // }) - - // t.Run("enqueue upkeep block overflow", func(t *testing.T) { - // buf := newLogEventBuffer(logger.TestLogger(t), 10, 10, 2) - - // require.Equal(t, 2, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), 
LogIndex: 1}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 3}, - // )) - // buf.lock.Lock() - // require.Equal(t, 2, len(buf.blocks[0].logs)) - // buf.lock.Unlock() - // }) - - // t.Run("peek range after dequeue", func(t *testing.T) { - // buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) - - // require.Equal(t, buf.enqueue(big.NewInt(10), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 10}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 11}, - // ), 2) - // require.Equal(t, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - // ), 2) - // results := buf.peekRange(int64(1), int64(2)) - // require.Equal(t, 2, len(results)) - // verifyBlockNumbers(t, results, 1, 2) - // removed := buf.dequeueRange(int64(1), int64(2), 2, 10) - // require.Equal(t, 2, len(removed)) - // results = buf.peekRange(int64(1), int64(2)) - // require.Equal(t, 0, len(results)) - // }) - - // t.Run("enqueue peek and dequeue", func(t *testing.T) { - // buf := newLogEventBuffer(logger.TestLogger(t), 4, 10, 10) - - // require.Equal(t, buf.enqueue(big.NewInt(10), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 10}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 11}, - // ), 2) - // require.Equal(t, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - // ), 2) - // results := buf.peek(8) - // require.Equal(t, 4, len(results)) - // verifyBlockNumbers(t, results, 1, 2, 3, 3) - // removed := buf.dequeueRange(1, 3, 5, 5) - // require.Equal(t, 4, len(removed)) - // buf.lock.Lock() - // 
require.Equal(t, 0, len(buf.blocks[0].logs)) - // require.Equal(t, int64(2), buf.blocks[1].blockNumber) - // require.Equal(t, 1, len(buf.blocks[1].visited)) - // buf.lock.Unlock() - // }) - - // t.Run("enqueue and peek range circular", func(t *testing.T) { - // buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) - - // require.Equal(t, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - // ), 3) - // require.Equal(t, buf.enqueue(big.NewInt(10), - // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 10}, - // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 11}, - // ), 2) - - // results := buf.peekRange(int64(1), int64(1)) - // require.Equal(t, 0, len(results)) - - // results = buf.peekRange(int64(3), int64(5)) - // require.Equal(t, 3, len(results)) - // verifyBlockNumbers(t, results, 3, 4, 4) - // }) - - // t.Run("doesnt enqueue old blocks", func(t *testing.T) { - // buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) - - // require.Equal(t, buf.enqueue(big.NewInt(10), - // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 10}, - // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 11}, - // ), 2) - // require.Equal(t, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - // ), 2) - // results := buf.peekRange(int64(1), int64(5)) - // fmt.Println(results) - // verifyBlockNumbers(t, results, 2, 3, 4, 4) - // }) - - // t.Run("dequeue with limits returns latest block logs", func(t *testing.T) { - // buf := 
newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) - // require.Equal(t, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x5"), LogIndex: 0}, - // ), 5) +func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { + t.Run("empty", func(t *testing.T) { + q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) - // logs := buf.dequeueRange(1, 5, 2, 10) - // require.Equal(t, 2, len(logs)) - // require.Equal(t, int64(5), logs[0].log.BlockNumber) - // require.Equal(t, int64(4), logs[1].log.BlockNumber) + q.clean(10) + }) - // require.Equal(t, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 1}, - // logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x5"), LogIndex: 1}, - // ), 2) + t.Run("happy path", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 5, 1) - // logs = buf.dequeueRange(1, 5, 3, 2) - // require.Equal(t, 2, len(logs)) - // }) + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x111"), LogIndex: 0}, + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x111"), LogIndex: 1}, + ) - // t.Run("dequeue doesn't return same logs again", func(t *testing.T) { - // buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) - // require.Equal(t, buf.enqueue(big.NewInt(1), - // logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - // 
logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - // logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - // ), 3) + q, ok := buf.(*logBuffer).getUpkeepQueue(big.NewInt(1)) + require.True(t, ok) + require.Equal(t, 4, q.sizeOfRange(1, 11)) - // logs := buf.dequeueRange(3, 3, 2, 10) - // fmt.Println(logs) - // require.Equal(t, 1, len(logs)) + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 0}, + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 1}, + ) - // logs = buf.dequeueRange(3, 3, 2, 10) - // fmt.Println(logs) - // require.Equal(t, 0, len(logs)) - // }) + require.Equal(t, 4, q.sizeOfRange(1, 18)) + require.Equal(t, 0, q.clean(12)) + require.Equal(t, 2, q.sizeOfRange(1, 18)) + }) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 90bf871471e..039e01b3cd7 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -3,8 +3,6 @@ package logprovider import ( "time" - "golang.org/x/time/rate" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/logger" @@ -34,22 +32,15 @@ type LogTriggersOptions struct { LookbackBlocks int64 // ReadInterval is the interval to fetch logs in the background. ReadInterval time.Duration - // BlockRateLimit is the rate limit on the range of blocks the we fetch logs for. - BlockRateLimit rate.Limit - // blockLimitBurst is the burst upper limit on the range of blocks the we fetch logs for. - BlockLimitBurst int // Finality depth is the number of blocks to wait before considering a block final. 
FinalityDepth int64 // v1 config - BufferVersion string - LogLimitLow int32 - - LogLimitHigh int32 + LogLimit uint32 - BlockRate int64 + BlockRate uint32 } func NewOptions(finalityDepth int64) LogTriggersOptions { @@ -71,22 +62,13 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) { if o.ReadInterval == 0 { o.ReadInterval = time.Second } - if o.BlockLimitBurst == 0 { - o.BlockLimitBurst = int(o.LookbackBlocks) - } - if o.BlockRateLimit == 0 { - o.BlockRateLimit = rate.Every(o.ReadInterval) - } if o.FinalityDepth == 0 { o.FinalityDepth = finalityDepth } if o.BlockRate == 0 { o.BlockRate = 2 } - if o.LogLimitLow == 0 { - o.LogLimitLow = 4 - } - if o.LogLimitHigh == 0 { - o.LogLimitHigh = o.LogLimitLow * 10 + if o.LogLimit == 0 { + o.LogLimit = 4 } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go index 44780cbc4b1..c0f204aa57b 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go @@ -5,7 +5,6 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" - "golang.org/x/time/rate" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" ) @@ -21,9 +20,6 @@ type upkeepFilter struct { // lastPollBlock is the last block number the logs were fetched for this upkeep // used by log event provider. lastPollBlock int64 - // blockLimiter is used to limit the number of blocks to fetch logs for an upkeep. - // used by log event provider. - blockLimiter *rate.Limiter // lastRePollBlock is the last block number the logs were recovered for this upkeep // used by log recoverer. 
lastRePollBlock int64 @@ -42,7 +38,6 @@ func (f upkeepFilter) Clone() upkeepFilter { configUpdateBlock: f.configUpdateBlock, lastPollBlock: f.lastPollBlock, lastRePollBlock: f.lastRePollBlock, - blockLimiter: f.blockLimiter, } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 1618f695ccf..5d67b23d87f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -2,7 +2,6 @@ package logprovider_test import ( "context" - "errors" "math/big" "testing" "time" @@ -15,10 +14,8 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - "golang.org/x/time/rate" ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" @@ -39,16 +36,16 @@ import ( func TestIntegration_LogEventProvider(t *testing.T) { tests := []struct { - name string - version string - limitLow, limitHigh int32 + name string + version string + logLimit uint32 }{ - {"default version", "", 10, 100}, - {"v1", "v1", 10, 100}, + {"default version", "", 10}, + {"v1", "v1", 10}, } for _, tc := range tests { - bufferVersion, limitLow, limitHigh := tc.version, tc.limitLow, tc.limitHigh + bufferVersion, logLimit := tc.version, tc.logLimit t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithCancel(testutils.Context(t)) defer cancel() @@ -63,8 +60,7 @@ func TestIntegration_LogEventProvider(t *testing.T) { opts := logprovider.NewOptions(200) opts.ReadInterval = time.Second / 2 opts.BufferVersion = bufferVersion - opts.LogLimitLow = limitLow - opts.LogLimitHigh = limitHigh + opts.LogLimit = logLimit lp, ethClient := setupDependencies(t, db, backend) 
filterStore := logprovider.NewUpkeepFilterStore() @@ -218,16 +214,16 @@ func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { func TestIntegration_LogEventProvider_Backfill(t *testing.T) { tests := []struct { - name string - bufferVersion string - limitLow, limitHigh int32 + name string + bufferVersion string + logLimit uint32 }{ - {"default version", "", 10, 100}, - {"v1", "v1", 10, 100}, + {"default version", "", 10}, + {"v1", "v1", 10}, } for _, tc := range tests { - bufferVersion, limitLow, limitHigh := tc.bufferVersion, tc.limitLow, tc.limitHigh + bufferVersion, limitLow := tc.bufferVersion, tc.logLimit t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) @@ -243,8 +239,7 @@ func TestIntegration_LogEventProvider_Backfill(t *testing.T) { opts := logprovider.NewOptions(200) opts.ReadInterval = time.Second / 4 opts.BufferVersion = bufferVersion - opts.LogLimitLow = limitLow - opts.LogLimitHigh = limitHigh + opts.LogLimit = limitLow lp, ethClient := setupDependencies(t, db, backend) filterStore := logprovider.NewUpkeepFilterStore() @@ -285,211 +280,6 @@ func TestIntegration_LogEventProvider_Backfill(t *testing.T) { } } -func TestIntegration_LogEventProvider_RateLimit(t *testing.T) { - setupTest := func( - t *testing.T, - opts *logprovider.LogTriggersOptions, - ) ( - context.Context, - *backends.SimulatedBackend, - func(blockHash common.Hash), - logprovider.LogEventProviderTest, - []*big.Int, - func(), - ) { - ctx, cancel := context.WithCancel(testutils.Context(t)) - backend, stopMining, accounts := setupBackend(t) - userContractAccount := accounts[2] - db := setupDB(t) - - deferFunc := func() { - cancel() - stopMining() - _ = db.Close() - } - lp, ethClient := setupDependencies(t, db, backend) - filterStore := logprovider.NewUpkeepFilterStore() - provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, opts) - logProvider := provider.(logprovider.LogEventProviderTest) - 
backend.Commit() - lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - - rounds := 5 - numberOfUserContracts := 10 - poll := pollFn(ctx, t, lp, ethClient) - - // deployUpkeepCounter creates 'n' blocks and 'n' contracts - ids, _, contracts := deployUpkeepCounter( - ctx, - t, - numberOfUserContracts, - ethClient, - backend, - userContractAccount, - logProvider) - - // have log poller save logs for current blocks - lp.PollAndSaveLogs(ctx, int64(numberOfUserContracts)) - - for i := 0; i < rounds; i++ { - triggerEvents( - ctx, - t, - backend, - userContractAccount, - numberOfUserContracts, - poll, - contracts...) - - for dummyBlocks := 0; dummyBlocks < numberOfUserContracts; dummyBlocks++ { - _ = backend.Commit() - } - - poll(backend.Commit()) - } - - { - // total block history at this point should be 566 - var minimumBlockCount int64 = 500 - latestBlock, _ := lp.LatestBlock() - - assert.GreaterOrEqual(t, latestBlock.BlockNumber, minimumBlockCount, "to ensure the integrety of the test, the minimum block count before the test should be %d but got %d", minimumBlockCount, latestBlock) - } - - require.NoError(t, logProvider.ReadLogs(ctx, ids...)) - - return ctx, backend, poll, logProvider, ids, deferFunc - } - - // polling for logs at approximately the same rate as a chain produces - // blocks should not encounter rate limits - t.Run("should allow constant polls within the rate and burst limit", func(t *testing.T) { - ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ - LookbackBlocks: 200, - // BlockRateLimit is set low to ensure the test does not exceed the - // rate limit - BlockRateLimit: rate.Every(50 * time.Millisecond), - // BlockLimitBurst is just set to a non-zero value - BlockLimitBurst: 5, - }) - - defer deferFunc() - - // set the wait time between reads higher than the rate limit - readWait := 50 * time.Millisecond - timer := time.NewTimer(readWait) - - for i := 0; i < 4; i++ { - <-timer.C - - // 
advance 1 block for every read - poll(backend.Commit()) - - err := logProvider.ReadLogs(ctx, ids...) - if err != nil { - assert.False(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") - } - - timer.Reset(readWait) - } - - poll(backend.Commit()) - - _, err := logProvider.GetLatestPayloads(ctx) - - require.NoError(t, err) - }) - - t.Run("should produce a rate limit error for over burst limit", func(t *testing.T) { - ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ - LookbackBlocks: 200, - // BlockRateLimit is set low to ensure the test does not exceed the - // rate limit - BlockRateLimit: rate.Every(50 * time.Millisecond), - // BlockLimitBurst is just set to a non-zero value - BlockLimitBurst: 5, - }) - - defer deferFunc() - - // set the wait time between reads higher than the rate limit - readWait := 50 * time.Millisecond - timer := time.NewTimer(readWait) - - for i := 0; i < 4; i++ { - <-timer.C - - // advance 4 blocks for every read - for x := 0; x < 4; x++ { - poll(backend.Commit()) - } - - err := logProvider.ReadLogs(ctx, ids...) 
- if err != nil { - assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") - } - - timer.Reset(readWait) - } - - poll(backend.Commit()) - - _, err := logProvider.GetLatestPayloads(ctx) - - require.NoError(t, err) - }) - - t.Run("should allow polling after lookback number of blocks have passed", func(t *testing.T) { - ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ - // BlockRateLimit is set low to ensure the test does not exceed the - // rate limit - BlockRateLimit: rate.Every(50 * time.Millisecond), - // BlockLimitBurst is set low to ensure the test exceeds the burst limit - BlockLimitBurst: 5, - // LogBlocksLookback is set low to reduce the number of blocks required - // to reset the block limiter to maxBurst - LookbackBlocks: 50, - }) - - defer deferFunc() - - // simulate a burst in unpolled blocks - for i := 0; i < 20; i++ { - _ = backend.Commit() - } - - poll(backend.Commit()) - - // all entries should error at this point because there are too many - // blocks to processes - err := logProvider.ReadLogs(ctx, ids...) - if err != nil { - assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") - } - - // progress the chain by the same number of blocks as the lookback limit - // to trigger the usage of maxBurst - for i := 0; i < 50; i++ { - _ = backend.Commit() - } - - poll(backend.Commit()) - - // all entries should reset to the maxBurst because they are beyond - // the log lookback - err = logProvider.ReadLogs(ctx, ids...) 
- if err != nil { - assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") - } - - poll(backend.Commit()) - - _, err = logProvider.GetLatestPayloads(ctx) - - require.NoError(t, err) - }) -} - func TestIntegration_LogRecoverer_Backfill(t *testing.T) { ctx := testutils.Context(t) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 688e5aab028..dd4c00f633d 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -112,7 +112,7 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, packer LogDa lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), - bufferV1: NewLogBuffer(lggr, uint(opts.LookbackBlocks), uint(opts.LogLimitHigh)), + bufferV1: NewLogBuffer(lggr, uint(opts.LookbackBlocks), uint(opts.BlockRate), uint(opts.LogLimit)), poller: poller, opts: opts, filterStore: filterStore, @@ -200,7 +200,7 @@ func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keeper switch p.opts.BufferVersion { case "v1": - blockRate, logLimitLow, maxResults := int(p.opts.BlockRate), int(p.opts.LogLimitLow), MaxPayloads + blockRate, logLimitLow, maxResults := int(p.opts.BlockRate), int(p.opts.LogLimit), MaxPayloads for len(payloads) < maxResults && start <= latestBlock { logs, remaining := p.bufferV1.Dequeue(start, blockRate, logLimitLow, maxResults-len(payloads), DefaultUpkeepSelector) if len(logs) > 0 { @@ -393,8 +393,6 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ // special case of a new blockchain (e.g. 
simulated chain) lookbackBlocks = latest - 1 } - // maxBurst will be used to increase the burst limit to allow a long range scan - maxBurst := int(lookbackBlocks + 1) for i, filter := range filters { if len(filter.addr) == 0 { @@ -404,13 +402,6 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ // range should not exceed [lookbackBlocks, latest] if start < latest-lookbackBlocks { start = latest - lookbackBlocks - filter.blockLimiter.SetBurst(maxBurst) - } - - resv := filter.blockLimiter.ReserveN(time.Now(), int(latest-start)) - if !resv.OK() { - merr = errors.Join(merr, fmt.Errorf("%w: %s", ErrBlockLimitExceeded, filter.upkeepID.String())) - continue } // adding a buffer to check for reorged logs. start = start - reorgBuffer @@ -421,8 +412,6 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ // query logs based on contract address, event sig, and blocks logs, err := p.poller.LogsWithSigs(start, latest, []common.Hash{filter.topics[0]}, common.BytesToAddress(filter.addr), pg.WithParentCtx(ctx)) if err != nil { - // cancel limit reservation as we failed to get logs - resv.Cancel() if ctx.Err() != nil { // exit if the context was canceled return merr @@ -432,20 +421,12 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ } filteredLogs := filter.Select(logs...) - // if this limiter's burst was set to the max -> - // reset it and cancel the reservation to allow further processing - if filter.blockLimiter.Burst() == maxBurst { - resv.Cancel() - filter.blockLimiter.SetBurst(p.opts.BlockLimitBurst) - } - switch p.opts.BufferVersion { case "v1": p.bufferV1.Enqueue(filter.upkeepID, filteredLogs...) default: p.buffer.enqueue(filter.upkeepID, filteredLogs...) 
} - // Update the lastPollBlock for filter in slice this is then // updated into filter store in updateFiltersLastPoll filters[i].lastPollBlock = latest diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go index 69a4872351d..897bca5923f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go @@ -9,7 +9,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "golang.org/x/time/rate" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/services/pg" @@ -85,8 +84,7 @@ func (p *logEventProvider) RegisterFilter(ctx context.Context, opts FilterOption filter = *currentFilter } else { // new filter filter = upkeepFilter{ - upkeepID: upkeepID, - blockLimiter: rate.NewLimiter(p.opts.BlockRateLimit, p.opts.BlockLimitBurst), + upkeepID: upkeepID, } } filter.lastPollBlock = 0 diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index 464b9aa3ba6..51b71de24d5 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "golang.org/x/time/rate" ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" @@ -310,10 +309,9 @@ func newEntry(p *logEventProvider, i int, args ...string) (LogTriggerConfig, upk topics := make([]common.Hash, len(filter.EventSigs)) copy(topics, filter.EventSigs) f := upkeepFilter{ - upkeepID: uid, - addr: 
filter.Addresses[0].Bytes(), - topics: topics, - blockLimiter: rate.NewLimiter(p.opts.BlockRateLimit, p.opts.BlockLimitBurst), + upkeepID: uid, + addr: filter.Addresses[0].Bytes(), + topics: topics, } return cfg, f } From 1b40e2072398ccd0d20c8d6db53bdaecb823f58b Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 26 Mar 2024 14:13:15 +0200 Subject: [PATCH 29/58] tests --- .../evmregistry/v21/logprovider/buffer_v1.go | 9 +- .../v21/logprovider/buffer_v1_test.go | 351 ++++++++++++++---- 2 files changed, 285 insertions(+), 75 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 22954b0aa29..7825e1b793a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -139,9 +139,14 @@ func (b *logBuffer) dequeue(start, end int64, upkeepLimit, capacity int, upkeepS // if the upkeep is not selected, skip it continue } + logsInRange := q.sizeOfRange(start, end) + if logsInRange == 0 { + // if there are no logs in the range, skip the upkeep + continue + } if capacity == 0 { // if there is no more capacity for results, just count the remaining logs - remainingLogs += q.sizeOfRange(start, end) + remainingLogs += logsInRange continue } if upkeepLimit > capacity { @@ -173,6 +178,8 @@ func (b *logBuffer) setUpkeepQueue(uid *big.Int, buf *upkeepLogQueue) { b.queues[uid.String()] = buf } +// TODO: separate files + // upkeepLogQueue is a priority queue for logs associated to a specific upkeep. // It keeps track of the logs that were already visited and the capacity of the queue. 
type upkeepLogQueue struct { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index d3a50df4c85..8ccd6279539 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -12,85 +12,243 @@ import ( ) func TestLogEventBufferV1(t *testing.T) { - t.Run("dequeue empty", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 1, 1) + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) - results, remaining := buf.Dequeue(int64(1), 20, 1, 10, DefaultUpkeepSelector) - require.Equal(t, 0, len(results)) - require.Equal(t, 0, remaining) - }) - - t.Run("enqueue", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 1, 1) - - added, dropped := buf.Enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - ) - require.Equal(t, 2, added) - require.Equal(t, 0, dropped) - q, ok := buf.(*logBuffer).getUpkeepQueue(big.NewInt(1)) - require.True(t, ok) - require.Equal(t, 2, q.sizeOfRange(1, 18)) - }) - - t.Run("enqueue upkeeps limits", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 1, 1) - limit := 2 - buf.(*logBuffer).opts.logLimitHigh.Store(uint32(limit)) - - added, dropped := buf.Enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x19"), LogIndex: 0}, - logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x19"), LogIndex: 1}, - logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x10"), LogIndex: 0}, - logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x10"), LogIndex: 1}, - logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 1}, - logpoller.Log{BlockNumber: 11, TxHash: 
common.HexToHash("0x11"), LogIndex: 2}, - logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 3}, - ) - totalLimit := limit * 3 // 3 block windows - require.Equal(t, 7, added) - require.Equal(t, 7-totalLimit, dropped) - q, ok := buf.(*logBuffer).getUpkeepQueue(big.NewInt(1)) - require.True(t, ok) - require.Equal(t, totalLimit, - q.sizeOfRange(1, 18)) - }) + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.Enqueue(big.NewInt(2), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + ) + results, remaining := buf.Dequeue(int64(1), 10, 1, 2, DefaultUpkeepSelector) + require.Equal(t, 2, len(results)) + require.Equal(t, 2, remaining) + require.True(t, results[0].ID.Cmp(results[1].ID) != 0) + results, remaining = buf.Dequeue(int64(1), 10, 1, 2, DefaultUpkeepSelector) + require.Equal(t, 2, len(results)) + require.Equal(t, 0, remaining) +} - t.Run("enqueue out of block range", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 5, 1, 1) +func TestLogEventBufferV1_Dequeue(t *testing.T) { + tests := []struct { + name string + logsInBuffer map[*big.Int][]logpoller.Log + args dequeueArgs + lookback int + results []logpoller.Log + remaining int + }{ + { + name: "empty", + logsInBuffer: map[*big.Int][]logpoller.Log{}, + args: newDequeueArgs(10, 1, 1, 10, nil), + lookback: 20, + results: []logpoller.Log{}, + }, + { + name: "happy path", + logsInBuffer: map[*big.Int][]logpoller.Log{ + big.NewInt(1): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 0}, + {BlockNumber: 14, TxHash: common.HexToHash("0x15"), LogIndex: 1}, + }, + }, + args: newDequeueArgs(10, 5, 3, 10, nil), + lookback: 20, + results: []logpoller.Log{ + {}, {}, + }, + }, + { + name: "with upkeep limits", + logsInBuffer: 
map[*big.Int][]logpoller.Log{ + big.NewInt(1): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 1}, + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 0}, + {BlockNumber: 13, TxHash: common.HexToHash("0x13"), LogIndex: 0}, + {BlockNumber: 13, TxHash: common.HexToHash("0x13"), LogIndex: 1}, + {BlockNumber: 14, TxHash: common.HexToHash("0x14"), LogIndex: 1}, + {BlockNumber: 14, TxHash: common.HexToHash("0x14"), LogIndex: 2}, + }, + big.NewInt(2): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 11}, + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 10}, + {BlockNumber: 13, TxHash: common.HexToHash("0x13"), LogIndex: 10}, + {BlockNumber: 13, TxHash: common.HexToHash("0x13"), LogIndex: 11}, + {BlockNumber: 14, TxHash: common.HexToHash("0x14"), LogIndex: 11}, + {BlockNumber: 14, TxHash: common.HexToHash("0x14"), LogIndex: 12}, + }, + }, + args: newDequeueArgs(10, 5, 2, 10, nil), + lookback: 20, + results: []logpoller.Log{ + {}, {}, {}, {}, + }, + remaining: 8, + }, + { + name: "with max results", + logsInBuffer: map[*big.Int][]logpoller.Log{ + big.NewInt(1): append(createDummyLogSequence(2, 0, 12, common.HexToHash("0x12")), createDummyLogSequence(2, 0, 13, common.HexToHash("0x13"))...), + big.NewInt(2): append(createDummyLogSequence(2, 10, 12, common.HexToHash("0x12")), createDummyLogSequence(2, 10, 13, common.HexToHash("0x13"))...), + }, + args: newDequeueArgs(10, 5, 3, 4, nil), + lookback: 20, + results: []logpoller.Log{ + {}, {}, {}, {}, + }, + remaining: 4, + }, + { + name: "with upkeep selector", + logsInBuffer: map[*big.Int][]logpoller.Log{ + big.NewInt(1): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 0}, + {BlockNumber: 14, TxHash: common.HexToHash("0x15"), LogIndex: 1}, + }, + }, + args: newDequeueArgs(10, 5, 5, 10, func(id *big.Int) bool { return false }), + lookback: 20, + results: []logpoller.Log{}, + }, + } - added, dropped := buf.Enqueue(big.NewInt(1), - 
logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x10"), LogIndex: 0}, - logpoller.Log{BlockNumber: 10, TxHash: common.HexToHash("0x10"), LogIndex: 1}, - logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x11"), LogIndex: 1}, - ) - require.Equal(t, 2, added) - require.Equal(t, 0, dropped) - q, ok := buf.(*logBuffer).getUpkeepQueue(big.NewInt(1)) - require.True(t, ok) - require.Equal(t, 2, q.sizeOfRange(1, 12)) - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), uint(tc.lookback), uint(tc.args.blockRate), uint(tc.args.upkeepLimit)) + for id, logs := range tc.logsInBuffer { + added, dropped := buf.Enqueue(id, logs...) + require.Equal(t, len(logs), added+dropped) + } + results, remaining := buf.Dequeue(tc.args.block, tc.args.blockRate, tc.args.upkeepLimit, tc.args.maxResults, tc.args.upkeepSelector) + require.Equal(t, len(tc.results), len(results)) + require.Equal(t, tc.remaining, remaining) + }) + } +} - t.Run("happy path", func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) +func TestLogEventBufferV1_Enqueue(t *testing.T) { + tests := []struct { + name string + logsToAdd map[*big.Int][]logpoller.Log + added, dropped map[string]int + sizeOfRange map[*big.Int]int + rangeStart, rangeEnd int64 + lookback, blockRate, upkeepLimit uint + }{ + { + name: "empty", + logsToAdd: map[*big.Int][]logpoller.Log{}, + added: map[string]int{}, + dropped: map[string]int{}, + sizeOfRange: map[*big.Int]int{}, + rangeStart: 0, + rangeEnd: 10, + blockRate: 1, + upkeepLimit: 1, + lookback: 20, + }, + { + name: "happy path", + logsToAdd: map[*big.Int][]logpoller.Log{ + big.NewInt(1): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 0}, + {BlockNumber: 14, TxHash: common.HexToHash("0x15"), LogIndex: 1}, + }, + big.NewInt(2): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 11}, + }, + }, + added: map[string]int{ + big.NewInt(1).String(): 2, + 
big.NewInt(2).String(): 1, + }, + dropped: map[string]int{ + big.NewInt(1).String(): 0, + big.NewInt(2).String(): 0, + }, + sizeOfRange: map[*big.Int]int{ + big.NewInt(1): 2, + big.NewInt(2): 1, + }, + rangeStart: 10, + rangeEnd: 20, + blockRate: 5, + upkeepLimit: 1, + lookback: 20, + }, + { + name: "above limits", + logsToAdd: map[*big.Int][]logpoller.Log{ + big.NewInt(1): createDummyLogSequence(11, 0, 12, common.HexToHash("0x12")), + big.NewInt(2): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 11}, + }, + }, + added: map[string]int{ + big.NewInt(1).String(): 11, + big.NewInt(2).String(): 1, + }, + dropped: map[string]int{ + big.NewInt(1).String(): 1, + big.NewInt(2).String(): 0, + }, + sizeOfRange: map[*big.Int]int{ + big.NewInt(1): 10, + big.NewInt(2): 1, + }, + rangeStart: 10, + rangeEnd: 20, + blockRate: 10, + upkeepLimit: 1, + lookback: 20, + }, + { + name: "out of block range", + logsToAdd: map[*big.Int][]logpoller.Log{ + big.NewInt(1): append(createDummyLogSequence(2, 0, 1, common.HexToHash("0x1")), createDummyLogSequence(2, 0, 100, common.HexToHash("0x1"))...), + }, + added: map[string]int{ + big.NewInt(1).String(): 2, + }, + dropped: map[string]int{ + big.NewInt(1).String(): 0, + }, + sizeOfRange: map[*big.Int]int{ + big.NewInt(1): 2, + }, + rangeStart: 1, + rangeEnd: 101, + blockRate: 10, + upkeepLimit: 10, + lookback: 20, + }, + } - buf.Enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - ) - buf.Enqueue(big.NewInt(2), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - ) - results, remaining := buf.Dequeue(int64(1), 10, 1, 2, DefaultUpkeepSelector) - require.Equal(t, 2, len(results)) - require.Equal(t, 2, remaining) - require.True(t, results[0].ID.Cmp(results[1].ID) != 0) - results, remaining = 
buf.Dequeue(int64(1), 10, 1, 2, DefaultUpkeepSelector) - require.Equal(t, 2, len(results)) - require.Equal(t, 0, remaining) - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), tc.lookback, tc.blockRate, tc.upkeepLimit) + for id, logs := range tc.logsToAdd { + added, dropped := buf.Enqueue(id, logs...) + sid := id.String() + if _, ok := tc.added[sid]; !ok { + tc.added[sid] = 0 + } + if _, ok := tc.dropped[sid]; !ok { + tc.dropped[sid] = 0 + } + require.Equal(t, tc.added[sid], added) + require.Equal(t, tc.dropped[sid], dropped) + } + for id, size := range tc.sizeOfRange { + q, ok := buf.(*logBuffer).getUpkeepQueue(id) + require.True(t, ok) + require.Equal(t, size, q.sizeOfRange(tc.rangeStart, tc.rangeEnd)) + } + }) + } } func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { @@ -126,3 +284,48 @@ func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { require.Equal(t, 2, q.sizeOfRange(1, 18)) }) } + +type dequeueArgs struct { + block int64 + blockRate int + upkeepLimit int + maxResults int + upkeepSelector func(id *big.Int) bool +} + +func newDequeueArgs(block int64, blockRate int, upkeepLimit int, maxResults int, upkeepSelector func(id *big.Int) bool) dequeueArgs { + args := dequeueArgs{ + block: block, + blockRate: blockRate, + upkeepLimit: upkeepLimit, + maxResults: maxResults, + upkeepSelector: upkeepSelector, + } + + if upkeepSelector == nil { + args.upkeepSelector = DefaultUpkeepSelector + } + if blockRate == 0 { + args.blockRate = 1 + } + if maxResults == 0 { + args.maxResults = 10 + } + if upkeepLimit == 0 { + args.upkeepLimit = 1 + } + + return args +} + +func createDummyLogSequence(n, startIndex int, block int64, tx common.Hash) []logpoller.Log { + logs := make([]logpoller.Log, n) + for i := 0; i < n; i++ { + logs[i] = logpoller.Log{ + BlockNumber: block, + TxHash: tx, + LogIndex: int64(i + startIndex), + } + } + return logs +} From 60e86aae3f5f4325c2e39a7d65b457c54f37f711 Mon Sep 17 
00:00:00 2001 From: amirylm Date: Tue, 26 Mar 2024 15:34:32 +0200 Subject: [PATCH 30/58] small fixes in int test --- .../services/ocr2/plugins/ocr2keeper/integration_21_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go index 9e37b695553..8f5fbc99f9a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go @@ -255,8 +255,8 @@ func TestIntegration_KeeperPluginLogUpkeep(t *testing.T) { t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks) listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits) + defer done() g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) - done() }) } @@ -421,6 +421,10 @@ func TestIntegration_KeeperPluginLogUpkeep_ErrHandler(t *testing.T) { http.StatusUnauthorized, http.StatusBadRequest, http.StatusInternalServerError, + http.StatusNotFound, + http.StatusNotFound, + http.StatusNotFound, + http.StatusUnauthorized, } startMercuryServer(t, mercuryServer, func(i int) (int, []byte) { var resp int From 0f4199177ac58cc197364fe55f11cd8796b31da7 Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 1 Apr 2024 12:53:55 +0300 Subject: [PATCH 31/58] handle large number of upkeeps (up to max_results) TODO: handle the case the number is larger than max_results --- .../evmregistry/v21/logprovider/buffer_v1.go | 43 +++++++++++--- .../evmregistry/v21/logprovider/provider.go | 57 +++++++++++++++++-- 2 files changed, 87 insertions(+), 13 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 7825e1b793a..54084cb10e6 100644 --- 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -31,6 +31,10 @@ type LogBuffer interface { Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) // SetConfig sets the buffer size and the maximum number of logs to keep for each upkeep. SetConfig(lookback, blockRate, logLimit uint32) + // NumOfUpkeeps returns the number of upkeeps that are being tracked by the buffer. + NumOfUpkeeps() int + // SyncFilters removes upkeeps that are not in the filter store. + SyncFilters(filterStore UpkeepFilterStore) error } func DefaultUpkeepSelector(id *big.Int) bool { @@ -66,7 +70,7 @@ func (o *logBufferOptions) override(lookback, blockRate, logLimit uint32) { func (o *logBufferOptions) windows() uint { blockRate := o.blockRate.Load() if blockRate == 0 { - return 1 + blockRate = 1 } return uint(math.Ceil(float64(o.bufferSize.Load()) / float64(blockRate))) } @@ -90,13 +94,6 @@ func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint) LogBuf } } -func (b *logBuffer) SetConfig(lookback, blockRate, logLimit uint32) { - b.lock.Lock() - defer b.lock.Unlock() - - b.opts.override(lookback, blockRate, logLimit) -} - // Enqueue adds logs to the buffer and might also drop logs if the limit for the // given upkeep was exceeded. It will create a new buffer if it does not exist. // Returns the number of logs that were added and number of logs that were dropped. 
@@ -163,6 +160,36 @@ func (b *logBuffer) dequeue(start, end int64, upkeepLimit, capacity int, upkeepS return result, remainingLogs } +func (b *logBuffer) SetConfig(lookback, blockRate, logLimit uint32) { + b.lock.Lock() + defer b.lock.Unlock() + + b.opts.override(lookback, blockRate, logLimit) +} + +func (b *logBuffer) NumOfUpkeeps() int { + b.lock.RLock() + defer b.lock.RUnlock() + + return len(b.queues) +} + +func (b *logBuffer) SyncFilters(filterStore UpkeepFilterStore) error { + b.lock.Lock() + defer b.lock.Unlock() + + for upkeepID := range b.queues { + uid := new(big.Int) + _, ok := uid.SetString(upkeepID, 10) + if ok && !filterStore.Has(uid) { + // remove upkeep that is not in the filter store + delete(b.queues, upkeepID) + } + } + + return nil +} + func (b *logBuffer) getUpkeepQueue(uid *big.Int) (*upkeepLogQueue, bool) { b.lock.RLock() defer b.lock.RUnlock() diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index dd4c00f633d..02948aec185 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -47,6 +47,8 @@ var ( // reorgBuffer is the number of blocks to add as a buffer to the block range when reading logs. reorgBuffer = int64(32) readerThreads = 4 + + bufferSyncInterval = 10 * time.Minute ) // LogTriggerConfig is an alias for log trigger config. @@ -145,6 +147,27 @@ func (p *logEventProvider) Start(context.Context) error { }) }) + p.threadCtrl.Go(func(ctx context.Context) { + // sync filters with buffer periodically, + // to ensure that inactive upkeeps won't waste capacity. 
+ ticker := time.NewTicker(bufferSyncInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if p.bufferV1 != nil { + err := p.bufferV1.SyncFilters(p.filterStore) + if err != nil { + p.lggr.Warnw("failed to sync filters", "err", err) + } + } + case <-ctx.Done(): + return + } + } + }) + return nil }) } @@ -166,7 +189,7 @@ func (p *logEventProvider) GetLatestPayloads(ctx context.Context) ([]ocr2keepers return nil, fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } prommetrics.AutomationLogProviderLatestBlock.Set(float64(latest.BlockNumber)) - payloads := p.getPayloadsFromBuffer(latest.BlockNumber) + payloads := p.getLogsFromBuffer(latest.BlockNumber) if len(payloads) > 0 { p.lggr.Debugw("Fetched payloads from buffer", "latestBlock", latest.BlockNumber, "payloads", len(payloads)) @@ -190,17 +213,41 @@ func (p *logEventProvider) createPayload(id *big.Int, log logpoller.Log) (ocr2ke payload, nil } -func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keepers.UpkeepPayload { +// getBufferDequeueArgs returns the arguments for the buffer to dequeue logs. +// It adjusts the log limit low based on the number of upkeeps to ensure that more upkeeps get slots in the result set. +func (p *logEventProvider) getBufferDequeueArgs() (blockRate, logLimitLow, maxResults, numOfUpkeeps int) { + blockRate, logLimitLow, maxResults, numOfUpkeeps = int(p.opts.BlockRate), int(p.opts.LogLimit), MaxPayloads, p.bufferV1.NumOfUpkeeps() + // in case we have more upkeeps than the max results, we reduce the log limit low + // so that more upkeeps will get slots in the result set. + for numOfUpkeeps > maxResults/logLimitLow { + if logLimitLow == 1 { + // Log limit low can't go less than 1. + // If some upkeeps are not getting slots in the result set, they are supposed to be picked up + // in the next iteration if the range is still applicable. + // TODO: alerts to notify the system is at full capacity. 
+ // TODO: handle this case properly by distributing available slots across upkeeps to avoid + // starvation when log volume is high. + p.lggr.Warnw("The system is at full capacity", "maxResults", maxResults, "numOfUpkeeps", numOfUpkeeps, "logLimitLow", logLimitLow) + break + } + p.lggr.Debugw("Too many upkeeps, reducing the log limit low", "maxResults", maxResults, "numOfUpkeeps", numOfUpkeeps, "logLimitLow_before", logLimitLow) + logLimitLow-- + } + return +} + +func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.UpkeepPayload { var payloads []ocr2keepers.UpkeepPayload start := latestBlock - p.opts.LookbackBlocks - if start <= 0 { + if start <= 0 { // edge case when the chain is new (e.g. tests) start = 1 } switch p.opts.BufferVersion { case "v1": - blockRate, logLimitLow, maxResults := int(p.opts.BlockRate), int(p.opts.LogLimit), MaxPayloads + // in v1, we use a greedy approach - we keep dequeuing logs until we reach the max results or cover the entire range. + blockRate, logLimitLow, maxResults, _ := p.getBufferDequeueArgs() for len(payloads) < maxResults && start <= latestBlock { logs, remaining := p.bufferV1.Dequeue(start, blockRate, logLimitLow, maxResults-len(payloads), DefaultUpkeepSelector) if len(logs) > 0 { @@ -214,7 +261,7 @@ func (p *logEventProvider) getPayloadsFromBuffer(latestBlock int64) []ocr2keeper } if remaining > 0 { p.lggr.Debugw("Remaining logs", "start", start, "latestBlock", latestBlock, "remaining", remaining) - // TODO: handle remaining logs in a better way than consuming the entire window + // TODO: handle remaining logs in a better way than consuming the entire window, e.g. 
do not repeat more than x times continue } start += int64(blockRate) From e3c58e579fefadef29e863019488d33b52fab3a2 Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 1 Apr 2024 15:32:24 +0300 Subject: [PATCH 32/58] update cla version --- core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 ++-- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 ++-- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index dd174dd5c26..9ee1d8bd907 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -20,7 +20,7 @@ require ( github.com/pelletier/go-toml/v2 v2.1.1 github.com/prometheus/client_golang v1.17.0 github.com/shopspring/decimal v1.3.1 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index b6a352db851..89a7ae18a3c 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1185,8 +1185,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod 
h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b h1:zQEsSYaMY4+svBOBr1ZlufKCwb1R4/5QPLKDje0xpeI= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/go.mod b/go.mod index 6a1c4aba9e7..28d969ef7c0 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( github.com/shirou/gopsutil/v3 v3.23.11 github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/chain-selectors v1.0.10 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 diff --git a/go.sum b/go.sum index 6df35923ad7..5fcf84c3c27 100644 --- a/go.sum +++ b/go.sum @@ -1180,8 +1180,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 
h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b h1:zQEsSYaMY4+svBOBr1ZlufKCwb1R4/5QPLKDje0xpeI= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 5100a3df8c6..dec6e78d346 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -23,7 +23,7 @@ require ( github.com/scylladb/go-reflectx v1.0.1 github.com/segmentio/ksuid v1.0.4 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b github.com/smartcontractkit/chainlink-testing-framework v1.27.8 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 94821e863c9..98448d96ae2 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1523,8 +1523,8 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartcontractkit/chain-selectors v1.0.10 
h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b h1:zQEsSYaMY4+svBOBr1ZlufKCwb1R4/5QPLKDje0xpeI= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 22f0034e364..5e32726fb38 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -15,7 +15,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b github.com/smartcontractkit/chainlink-testing-framework v1.27.8 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index bd50a119473..b404419c88c 100644 --- a/integration-tests/load/go.sum +++ 
b/integration-tests/load/go.sum @@ -1506,8 +1506,8 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b h1:zQEsSYaMY4+svBOBr1ZlufKCwb1R4/5QPLKDje0xpeI= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= From cc013c70097b19f3e3ea18471152a1cfa9826c6e Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 1 Apr 2024 19:57:23 +0300 Subject: [PATCH 33/58] tests --- .../v21/logprovider/buffer_v1_test.go | 80 +++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index 8ccd6279539..c728e37eb8c 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -31,6 +31,25 @@ func TestLogEventBufferV1(t *testing.T) { require.Equal(t, 0, remaining) } +func TestLogEventBufferV1_SyncFilters(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) + + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.Enqueue(big.NewInt(2), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + ) + filterStore := NewUpkeepFilterStore() + filterStore.AddActiveUpkeeps(upkeepFilter{upkeepID: big.NewInt(1)}) + + require.Equal(t, 2, buf.NumOfUpkeeps()) + require.NoError(t, buf.SyncFilters(filterStore)) + require.Equal(t, 1, buf.NumOfUpkeeps()) +} + func TestLogEventBufferV1_Dequeue(t *testing.T) { tests := []struct { name string @@ -251,6 +270,64 @@ func TestLogEventBufferV1_Enqueue(t *testing.T) { } } +func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { + t.Run("enqueue dequeue", func(t *testing.T) { + q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) + require.Equal(t, 0, dropped) + require.Equal(t, 1, added) + require.Equal(t, 1, q.sizeOfRange(1, 20)) + logs, remaining := q.dequeue(19, 21, 10) + require.Equal(t, 1, len(logs)) + require.Equal(t, 0, remaining) + }) + + t.Run("enqueue with limits", func(t *testing.T) { + q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + added, dropped := q.enqueue(10, + createDummyLogSequence(15, 0, 20, common.HexToHash("0x20"))..., + ) + require.Equal(t, 5, dropped) + require.Equal(t, 15, added) + }) + + t.Run("dequeue with limits", func(t *testing.T) { + q := 
newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 3)) + + added, dropped := q.enqueue(10, + logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + ) + require.Equal(t, 0, dropped) + require.Equal(t, 3, added) + + logs, remaining := q.dequeue(19, 21, 2) + require.Equal(t, 2, len(logs)) + require.Equal(t, 1, remaining) + }) +} + +func TestLogEventBufferV1_UpkeepQueue_sizeOfRange(t *testing.T) { + t.Run("empty", func(t *testing.T) { + q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + require.Equal(t, 0, q.sizeOfRange(1, 10)) + }) + + t.Run("happy path", func(t *testing.T) { + q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) + require.Equal(t, 0, dropped) + require.Equal(t, 1, added) + require.Equal(t, 0, q.sizeOfRange(1, 10)) + require.Equal(t, 1, q.sizeOfRange(1, 20)) + }) +} + func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { t.Run("empty", func(t *testing.T) { q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) @@ -282,6 +359,9 @@ func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { require.Equal(t, 4, q.sizeOfRange(1, 18)) require.Equal(t, 0, q.clean(12)) require.Equal(t, 2, q.sizeOfRange(1, 18)) + q.lock.Lock() + defer q.lock.Unlock() + require.Equal(t, 2, len(q.visited)) }) } From 765deddc187dd952d5b49719ed7830b7a7634b2e Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 1 Apr 2024 20:09:03 +0300 Subject: [PATCH 34/58] fix logs ordering function to treat tx hash if log index is equal --- .../evmregistry/v21/logprovider/window.go | 12 ++++---- .../v21/logprovider/window_test.go | 30 
+++++++++++++++++++ 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go index ba6a154df2e..8596afccef1 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go @@ -21,7 +21,8 @@ func LogSorter(a, b logpoller.Log) bool { return LogComparator(a, b) > 0 } -// LogComparator compares the logs based on block number, tx hash and log index. +// LogComparator compares the logs based on block number, log index. +// tx hash is also checked in case the log index is not unique within a block. // // Returns: // @@ -32,8 +33,9 @@ func LogComparator(a, b logpoller.Log) int { if a.BlockNumber != b.BlockNumber { return int(a.BlockNumber - b.BlockNumber) } - // if txDiff := a.TxHash.Big().Cmp(b.TxHash.Big()); txDiff != 0 { - // return txDiff - // } - return int(a.LogIndex - b.LogIndex) + logIndexDiff := a.LogIndex - b.LogIndex + if logIndexDiff == 0 { + return a.TxHash.Big().Cmp(b.TxHash.Big()) + } + return int(logIndexDiff) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go index bd63e74c42c..3021ff3e6a1 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go @@ -153,6 +153,36 @@ func TestLogComparatorSorter(t *testing.T) { wantCmp: 1, wantSort: true, }, + { + name: "a > b: tx hash", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x21"), + LogIndex: 2, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + wantCmp: 1, + wantSort: true, + }, + { + name: "a < b: tx hash", + a: logpoller.Log{ + 
BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x4"), + LogIndex: 2, + }, + wantCmp: -1, + wantSort: false, + }, } for _, tc := range tests { From 8464e3c561adf180ba19ab490d551cacc6bb0239 Mon Sep 17 00:00:00 2001 From: Amir Y Date: Mon, 1 Apr 2024 22:35:02 +0300 Subject: [PATCH 35/58] Feature flag for new log buffer (#12653) * feature flag useBufferV1 * changesets * align enum --- .changeset/pretty-experts-unite.md | 5 + core/services/ocr2/delegate.go | 9 + .../ocr2/plugins/ocr2keeper/config.go | 2 + .../evmregistry/v21/logprovider/factory.go | 13 +- .../v21/logprovider/integration_test.go | 34 +- .../evmregistry/v21/logprovider/provider.go | 18 +- .../plugins/ocr2keeper/integration_21_test.go | 550 ++++++++++-------- 7 files changed, 369 insertions(+), 262 deletions(-) create mode 100644 .changeset/pretty-experts-unite.md diff --git a/.changeset/pretty-experts-unite.md b/.changeset/pretty-experts-unite.md new file mode 100644 index 00000000000..f21c35f50a4 --- /dev/null +++ b/.changeset/pretty-experts-unite.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Added a feature flag for using log buffer v1 diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 38297d96bc7..9a7f22b4c8e 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -59,6 +59,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21" ocr2keeper21core "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" ocr2vrfconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/config" ocr2coordinator 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/coordinator" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin" @@ -1310,6 +1311,14 @@ func (d *Delegate) newServicesOCR2Keepers21( return nil, errors.New("could not coerce PluginProvider to AutomationProvider") } + + // TODO: remove once we remove v0 + if useBufferV1 := cfg.UseBufferV1 != nil && *cfg.UseBufferV1; useBufferV1 { + logProviderFeatures, ok := keeperProvider.LogEventProvider().(logprovider.LogEventProviderFeatures) + if ok { + logProviderFeatures.WithBufferVersion("v1") + } + } + services, err := ocr2keeper.EVMDependencies21(kb) if err != nil { return nil, errors.Wrap(err, "could not build dependencies for ocr2 keepers") diff --git a/core/services/ocr2/plugins/ocr2keeper/config.go b/core/services/ocr2/plugins/ocr2keeper/config.go index ec56f9c6993..3ec4c66ecac 100644 --- a/core/services/ocr2/plugins/ocr2keeper/config.go +++ b/core/services/ocr2/plugins/ocr2keeper/config.go @@ -60,6 +60,8 @@ type PluginConfig struct { ContractVersion string `json:"contractVersion"` // CaptureAutomationCustomTelemetry is a bool flag to toggle Custom Telemetry Service CaptureAutomationCustomTelemetry *bool `json:"captureAutomationCustomTelemetry,omitempty"` + // UseBufferV1 is a bool flag to toggle the new (v1) log buffer implementation + UseBufferV1 *bool `json:"useBufferV1,omitempty"` } func ValidatePluginConfig(cfg PluginConfig) error { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 039e01b3cd7..0b6aea0ed45 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -15,9 +15,6 @@ func New(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateS filterStore := NewUpkeepFilterStore() packer :=
NewLogEventsPacker() opts := NewOptions(int64(finalityDepth)) - if len(opts.BufferVersion) == 0 { // TODO: remove once config is ready - opts.BufferVersion = "v1" - } provider := NewLogProvider(lggr, poller, packer, filterStore, opts) recoverer := NewLogRecoverer(lggr, poller, c, stateStore, packer, filterStore, opts) @@ -36,13 +33,21 @@ type LogTriggersOptions struct { FinalityDepth int64 // v1 config - BufferVersion string + BufferVersion BufferVersion LogLimit uint32 BlockRate uint32 } +type BufferVersion string + +const ( + BufferVersionNone BufferVersion = "" + BufferVersionV0 BufferVersion = "v0" + BufferVersionV1 BufferVersion = "v1" +) + func NewOptions(finalityDepth int64) LogTriggersOptions { opts := new(LogTriggersOptions) opts.Defaults(finalityDepth) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 21eef4912a3..57e9184e034 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -34,16 +34,24 @@ import ( func TestIntegration_LogEventProvider(t *testing.T) { tests := []struct { - name string - version string - logLimit uint32 + name string + bufferVersion logprovider.BufferVersion + logLimit uint32 }{ - {"default version", "", 10}, - {"v1", "v1", 10}, + { + name: "default version", + bufferVersion: logprovider.BufferVersionV0, + logLimit: 10, + }, + { + name: "v1", + bufferVersion: logprovider.BufferVersionV1, + logLimit: 10, + }, } for _, tc := range tests { - bufferVersion, logLimit := tc.version, tc.logLimit + bufferVersion, logLimit := tc.bufferVersion, tc.logLimit t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithCancel(testutils.Context(t)) defer cancel() @@ -213,11 +221,19 @@ func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { func 
TestIntegration_LogEventProvider_Backfill(t *testing.T) { tests := []struct { name string - bufferVersion string + bufferVersion logprovider.BufferVersion logLimit uint32 }{ - {"default version", "", 10}, - {"v1", "v1", 10}, + { + name: "default version", + bufferVersion: logprovider.BufferVersionV0, + logLimit: 10, + }, + { + name: "v1", + bufferVersion: logprovider.BufferVersionV1, + logLimit: 10, + }, } for _, tc := range tests { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 254c5533a9c..eb685c3a119 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -81,8 +81,13 @@ type LogEventProviderTest interface { CurrentPartitionIdx() uint64 } +type LogEventProviderFeatures interface { + WithBufferVersion(v BufferVersion) +} + var _ LogEventProvider = &logEventProvider{} var _ LogEventProviderTest = &logEventProvider{} +var _ LogEventProviderFeatures = &logEventProvider{} // logEventProvider manages log filters for upkeeps and enables to read the log events. type logEventProvider struct { @@ -120,6 +125,15 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, packer LogDa } } +func (p *logEventProvider) WithBufferVersion(v BufferVersion) { + p.lock.Lock() + defer p.lock.Unlock() + + p.lggr.Debugw("with buffer version", "version", v) + + p.opts.BufferVersion = v +} + func (p *logEventProvider) Start(context.Context) error { return p.StartOnce(LogProviderServiceName, func() error { @@ -244,7 +258,7 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up } switch p.opts.BufferVersion { - case "v1": + case BufferVersionV1: // in v1, we use a greedy approach - we keep dequeuing logs until we reach the max results or cover the entire range. 
blockRate, logLimitLow, maxResults, _ := p.getBufferDequeueArgs() for len(payloads) < maxResults && start <= latestBlock { @@ -468,7 +482,7 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ filteredLogs := filter.Select(logs...) switch p.opts.BufferVersion { - case "v1": + case BufferVersionV1: p.bufferV1.Enqueue(filter.upkeepID, filteredLogs...) default: p.buffer.enqueue(filter.upkeepID, filteredLogs...) diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go index 8f5fbc99f9a..5f8a89ac22f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go @@ -54,6 +54,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" @@ -118,7 +119,7 @@ func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) { require.NoError(t, err) registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) - setupNodes(t, nodeKeys, registry, backend, steve) + setupNodes(t, nodeKeys, registry, backend, steve, false) <-time.After(time.Second * 5) @@ -172,314 +173,368 @@ func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) { } func TestIntegration_KeeperPluginLogUpkeep(t *testing.T) { - g := gomega.NewWithT(t) - - // setup blockchain - sergey := testutils.MustNewSimTransactor(t) // owns all 
the link - steve := testutils.MustNewSimTransactor(t) // registry owner - carrol := testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - sergey.From: {Balance: assets.Ether(10000).ToInt()}, - steve.From: {Balance: assets.Ether(10000).ToInt()}, - carrol.From: {Balance: assets.Ether(10000).ToInt()}, - } - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + tests := []struct { + name string + logBufferVersion logprovider.BufferVersion + }{ + { + name: "default", + logBufferVersion: logprovider.BufferVersionV0, + }, + { + name: "v1", + logBufferVersion: logprovider.BufferVersionV1, + }, } - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewWithT(t) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(10000).ToInt()}, + steve.From: {Balance: assets.Ether(10000).ToInt()}, + carrol.From: {Balance: assets.Ether(10000).ToInt()}, + } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - // Deploy registry - linkAddr, _, linkToken, err := 
link_token_interface.DeployLinkToken(sergey, backend) - require.NoError(t, err) - gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() - registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) - setupNodes(t, nodeKeys, registry, backend, steve) - upkeeps := 1 + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) - _, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1)))) - require.NoError(t, err) + registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + setupNodes(t, nodeKeys, registry, backend, steve, tc.logBufferVersion == logprovider.BufferVersionV1) + upkeeps := 1 - backend.Commit() + _, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1)))) + require.NoError(t, err) - ids, addrs, contracts := deployUpkeeps(t, backend, carrol, steve, linkToken, registry, upkeeps) - require.Equal(t, upkeeps, len(ids)) - require.Equal(t, len(ids), len(contracts)) - require.Equal(t, 
len(ids), len(addrs)) + backend.Commit() - backend.Commit() + ids, addrs, contracts := deployUpkeeps(t, backend, carrol, steve, linkToken, registry, upkeeps) + require.Equal(t, upkeeps, len(ids)) + require.Equal(t, len(ids), len(contracts)) + require.Equal(t, len(ids), len(addrs)) - emits := 1 - go emitEvents(testutils.Context(t), t, emits, contracts, carrol, func() { - backend.Commit() - }) - - listener, done := listenPerformed(t, backend, registry, ids, int64(1)) - g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) - done() + backend.Commit() - t.Run("recover logs", func(t *testing.T) { - addr, contract := addrs[0], contracts[0] - upkeepID := registerUpkeep(t, registry, addr, carrol, steve, backend) - backend.Commit() - t.Logf("Registered new upkeep %s for address %s", upkeepID.String(), addr.String()) - // Emit 100 logs in a burst - recoverEmits := 100 - i := 0 - emitEvents(testutils.Context(t), t, 100, []*log_upkeep_counter_wrapper.LogUpkeepCounter{contract}, carrol, func() { - i++ - if i%(recoverEmits/4) == 0 { + emits := 1 + go emitEvents(testutils.Context(t), t, emits, contracts, carrol, func() { backend.Commit() - time.Sleep(time.Millisecond * 250) // otherwise we get "invalid transaction nonce" errors - } - }) + }) - beforeDummyBlocks := backend.Blockchain().CurrentBlock().Number.Uint64() + listener, done := listenPerformed(t, backend, registry, ids, int64(1)) + g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) + done() - // Mine enough blocks to ensure these logs don't fall into log provider range - dummyBlocks := 500 - for i := 0; i < dummyBlocks; i++ { - backend.Commit() - time.Sleep(time.Millisecond * 10) - } + t.Run("recover logs", func(t *testing.T) { + addr, contract := addrs[0], contracts[0] + upkeepID := registerUpkeep(t, registry, addr, carrol, steve, backend) + backend.Commit() + t.Logf("Registered new upkeep %s for address %s", upkeepID.String(), 
addr.String()) + // Emit 100 logs in a burst + recoverEmits := 100 + i := 0 + emitEvents(testutils.Context(t), t, 100, []*log_upkeep_counter_wrapper.LogUpkeepCounter{contract}, carrol, func() { + i++ + if i%(recoverEmits/4) == 0 { + backend.Commit() + time.Sleep(time.Millisecond * 250) // otherwise we get "invalid transaction nonce" errors + } + }) - t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks) + beforeDummyBlocks := backend.Blockchain().CurrentBlock().Number.Uint64() - listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits) - defer done() - g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) - }) -} + // Mine enough blocks to ensure these logs don't fall into log provider range + dummyBlocks := 500 + for i := 0; i < dummyBlocks; i++ { + backend.Commit() + time.Sleep(time.Millisecond * 10) + } -func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) { - g := gomega.NewWithT(t) + t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks) - // setup blockchain - linkOwner := testutils.MustNewSimTransactor(t) // owns all the link - registryOwner := testutils.MustNewSimTransactor(t) // registry owner - upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, - registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, - upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, + listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) + }) + }) } +} - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - 
genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} +func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) { + tests := []struct { + name string + logBufferVersion logprovider.BufferVersion + }{ + { + name: "default", + logBufferVersion: logprovider.BufferVersionV0, + }, + { + name: "v1", + logBufferVersion: logprovider.BufferVersionV1, + }, } - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewWithT(t) + + // setup blockchain + linkOwner := testutils.MustNewSimTransactor(t) // owns all the link + registryOwner := testutils.MustNewSimTransactor(t) // registry owner + upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, + registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, + upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, + } - // Deploy registry - linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) - require.NoError(t, err) + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we 
cannot access old blocks on simulated blockchain + defer stopMining() - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) + require.NoError(t, err) - registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) - _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) - const upkeepCount = 10 - const mercuryFailCount = upkeepCount * 3 * 2 + registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) - // testing with the mercury server involves mocking responses. currently, - // there is not a way to connect a mercury call to an upkeep id (though we - // could add custom headers) so the test must be fairly basic and just - // count calls before switching to successes - var ( - mu sync.Mutex - count int - ) + _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner, tc.logBufferVersion == logprovider.BufferVersionV1) - mercuryServer.RegisterHandler(func(w http.ResponseWriter, r *http.Request) { - mu.Lock() - defer mu.Unlock() + const upkeepCount = 10 + const mercuryFailCount = upkeepCount * 3 * 2 - count++ + // testing with the mercury server involves mocking responses. 
currently, + // there is not a way to connect a mercury call to an upkeep id (though we + // could add custom headers) so the test must be fairly basic and just + // count calls before switching to successes + var ( + mu sync.Mutex + count int + ) - _ = r.ParseForm() + mercuryServer.RegisterHandler(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() - t.Logf("MercuryHTTPServe:RequestURI: %s", r.RequestURI) + count++ - for key, value := range r.Form { - t.Logf("MercuryHTTPServe:FormValue: key: %s; value: %s;", key, value) - } + _ = r.ParseForm() - // the streams lookup retries against the remote server 3 times before - // returning a result as retryable. - // the simulation here should force the streams lookup process to return - // retryable 2 times. - // the total count of failures should be (upkeepCount * 3 * tryCount) - if count <= mercuryFailCount { - w.WriteHeader(http.StatusNotFound) + t.Logf("MercuryHTTPServe:RequestURI: %s", r.RequestURI) - return - } + for key, value := range r.Form { + t.Logf("MercuryHTTPServe:FormValue: key: %s; value: %s;", key, value) + } - // start sending success messages - output := 
`{"chainlinkBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}` + // the streams lookup retries against the remote server 3 times before + // returning a result as retryable. + // the simulation here should force the streams lookup process to return + // retryable 2 times. 
+ // the total count of failures should be (upkeepCount * 3 * tryCount) + if count <= mercuryFailCount { + w.WriteHeader(http.StatusNotFound) - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(output)) - }) + return + } - defer mercuryServer.Stop() + // start sending success messages + output := `{"chainlinkBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}` - _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) - require.NoError(t, err) + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(output)) + }) - backend.Commit() + defer mercuryServer.Stop() 
- feeds, err := newFeedLookupUpkeepController(backend, registryOwner) - require.NoError(t, err, "no error expected from creating a feed lookup controller") + _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) + require.NoError(t, err) - // deploy multiple upkeeps that listen to a log emitter and need to be - // performed for each log event - _ = feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, func(int) bool { - return false - }) - _ = feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken) - _ = feeds.EnableMercury(t, backend, registry, registryOwner) - _ = feeds.VerifyEnv(t, backend, registry, registryOwner) + backend.Commit() - // start emitting events in a separate go-routine - // feed lookup relies on a single contract event log to perform multiple - // listener contracts - go func() { - // only 1 event is necessary to make all 10 upkeeps eligible - _ = feeds.EmitEvents(t, backend, 1, func() { - // pause per emit for expected block production time - time.Sleep(3 * time.Second) + feeds, err := newFeedLookupUpkeepController(backend, registryOwner) + require.NoError(t, err, "no error expected from creating a feed lookup controller") + + // deploy multiple upkeeps that listen to a log emitter and need to be + // performed for each log event + _ = feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, func(int) bool { + return false + }) + _ = feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken) + _ = feeds.EnableMercury(t, backend, registry, registryOwner) + _ = feeds.VerifyEnv(t, backend, registry, registryOwner) + + // start emitting events in a separate go-routine + // feed lookup relies on a single contract event log to perform multiple + // listener contracts + go func() { + // only 1 event is necessary to make all 10 upkeeps eligible + _ = feeds.EmitEvents(t, backend, 1, func() { + // pause per emit for expected block production time + 
time.Sleep(3 * time.Second) + }) + }() + + listener, done := listenPerformed(t, backend, registry, feeds.UpkeepsIds(), int64(1)) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) }) - }() - - listener, done := listenPerformed(t, backend, registry, feeds.UpkeepsIds(), int64(1)) - defer done() - g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) + } } func TestIntegration_KeeperPluginLogUpkeep_ErrHandler(t *testing.T) { - g := gomega.NewWithT(t) - - // setup blockchain - linkOwner := testutils.MustNewSimTransactor(t) // owns all the link - registryOwner := testutils.MustNewSimTransactor(t) // registry owner - upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, - registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, - upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, + tests := []struct { + name string + logBufferVersion logprovider.BufferVersion + }{ + { + name: "default", + logBufferVersion: logprovider.BufferVersionV0, + }, + { + name: "v1", + logBufferVersion: logprovider.BufferVersionV1, + }, } - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} - } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewWithT(t) + + // setup blockchain + linkOwner := testutils.MustNewSimTransactor(t) // owns all the link + registryOwner := testutils.MustNewSimTransactor(t) // registry owner + upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, + 
registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, + upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, + } - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - // Deploy registry - linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) - require.NoError(t, err) + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() - gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) + require.NoError(t, err) - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) - registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) + 
require.NoError(t, err) - _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner) + registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) - upkeepCount := 10 + _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner, tc.logBufferVersion == logprovider.BufferVersionV1) - errResponses := []int{ - http.StatusUnauthorized, - http.StatusBadRequest, - http.StatusInternalServerError, - http.StatusNotFound, - http.StatusNotFound, - http.StatusNotFound, - http.StatusUnauthorized, - } - startMercuryServer(t, mercuryServer, func(i int) (int, []byte) { - var resp int - if i < len(errResponses) { - resp = errResponses[i] - } - if resp == 0 { - resp = http.StatusNotFound - } - return resp, nil - }) - defer mercuryServer.Stop() + upkeepCount := 10 - _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) - require.NoError(t, err) + errResponses := []int{ + http.StatusUnauthorized, + http.StatusBadRequest, + http.StatusInternalServerError, + http.StatusNotFound, + http.StatusNotFound, + http.StatusNotFound, + http.StatusUnauthorized, + } + startMercuryServer(t, mercuryServer, func(i int) (int, []byte) { + var resp int + if i < len(errResponses) { + resp = errResponses[i] + } + if resp == 0 { + resp = http.StatusNotFound + } + return resp, nil + }) + defer mercuryServer.Stop() - backend.Commit() + _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) + require.NoError(t, err) - feeds, err := newFeedLookupUpkeepController(backend, registryOwner) - require.NoError(t, err, "no error expected from creating a feed lookup controller") + backend.Commit() - // deploy multiple upkeeps that listen to a log emitter and need to be - // performed for each log event - checkResultsProvider := func(i int) bool { - return i%2 == 1 - } - require.NoError(t, feeds.DeployUpkeeps(t, 
backend, upkeepOwner, upkeepCount, checkResultsProvider)) - require.NoError(t, feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken)) - require.NoError(t, feeds.EnableMercury(t, backend, registry, registryOwner)) - require.NoError(t, feeds.VerifyEnv(t, backend, registry, registryOwner)) - - startBlock := backend.Blockchain().CurrentBlock().Number.Int64() - // start emitting events in a separate go-routine - // feed lookup relies on a single contract event log to perform multiple - // listener contracts - go func() { - // only 1 event is necessary to make all 10 upkeeps eligible - _ = feeds.EmitEvents(t, backend, 1, func() { - // pause per emit for expected block production time - time.Sleep(3 * time.Second) - }) - }() + feeds, err := newFeedLookupUpkeepController(backend, registryOwner) + require.NoError(t, err, "no error expected from creating a feed lookup controller") - go makeDummyBlocks(t, backend, 3*time.Second, 1000) + // deploy multiple upkeeps that listen to a log emitter and need to be + // performed for each log event + checkResultsProvider := func(i int) bool { + return i%2 == 1 + } + require.NoError(t, feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, checkResultsProvider)) + require.NoError(t, feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken)) + require.NoError(t, feeds.EnableMercury(t, backend, registry, registryOwner)) + require.NoError(t, feeds.VerifyEnv(t, backend, registry, registryOwner)) + + startBlock := backend.Blockchain().CurrentBlock().Number.Int64() + // start emitting events in a separate go-routine + // feed lookup relies on a single contract event log to perform multiple + // listener contracts + go func() { + // only 1 event is necessary to make all 10 upkeeps eligible + _ = feeds.EmitEvents(t, backend, 1, func() { + // pause per emit for expected block production time + time.Sleep(3 * time.Second) + }) + }() + + go makeDummyBlocks(t, backend, 3*time.Second, 1000) + + idsToCheck := 
make([]*big.Int, 0) + for i, uid := range feeds.UpkeepsIds() { + if checkResultsProvider(i) { + idsToCheck = append(idsToCheck, uid) + } + } - idsToCheck := make([]*big.Int, 0) - for i, uid := range feeds.UpkeepsIds() { - if checkResultsProvider(i) { - idsToCheck = append(idsToCheck, uid) - } + listener, done := listenPerformed(t, backend, registry, idsToCheck, startBlock) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) + }) } - - listener, done := listenPerformed(t, backend, registry, idsToCheck, startBlock) - defer done() - g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) } func startMercuryServer(t *testing.T, mercuryServer *mercury.SimulatedMercuryServer, responder func(i int) (int, []byte)) { @@ -589,7 +644,7 @@ func listenPerformed(t *testing.T, backend *backends.SimulatedBackend, registry return listenPerformedN(t, backend, registry, ids, startBlock, 0) } -func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IKeeperRegistryMaster, backend *backends.SimulatedBackend, usr *bind.TransactOpts) ([]Node, *mercury.SimulatedMercuryServer) { +func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IKeeperRegistryMaster, backend *backends.SimulatedBackend, usr *bind.TransactOpts, useBufferV1 bool) ([]Node, *mercury.SimulatedMercuryServer) { lggr := logger.TestLogger(t) mServer := mercury.NewSimulatedMercuryServer() mServer.Start() @@ -663,7 +718,8 @@ func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IK cacheEvictionInterval = "1s" mercuryCredentialName = "%s" contractVersion = "v2.1" - `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName)) + useBufferV1 = %v + `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, 
fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName, useBufferV1)) } // Setup config on contract From d3a9b59a9fa9e4e86db100854d8fc5766414784e Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 1 Apr 2024 22:53:00 +0300 Subject: [PATCH 36/58] changesets --- .changeset/pretty-experts-unite.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.changeset/pretty-experts-unite.md b/.changeset/pretty-experts-unite.md index f21c35f50a4..4a1f903d439 100644 --- a/.changeset/pretty-experts-unite.md +++ b/.changeset/pretty-experts-unite.md @@ -2,4 +2,6 @@ "chainlink": patch --- -Added a feature flag for using log buffer v1 +Added log buffer v1 with improved performance, stability and control over scaling parameters. + +Added a feature flag for using log buffer v1. From a9027862578a5608718e504b9f017fc73b4e4015 Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 1 Apr 2024 22:53:32 +0300 Subject: [PATCH 37/58] Revert "update cla version" This reverts commit e3c58e579fefadef29e863019488d33b52fab3a2. 
--- core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 ++-- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 ++-- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 9ee1d8bd907..dd174dd5c26 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -20,7 +20,7 @@ require ( github.com/pelletier/go-toml/v2 v2.1.1 github.com/prometheus/client_golang v1.17.0 github.com/shopspring/decimal v1.3.1 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 89a7ae18a3c..b6a352db851 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1185,8 +1185,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= +github.com/smartcontractkit/chainlink-automation 
v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b h1:zQEsSYaMY4+svBOBr1ZlufKCwb1R4/5QPLKDje0xpeI= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/go.mod b/go.mod index 28d969ef7c0..6a1c4aba9e7 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( github.com/shirou/gopsutil/v3 v3.23.11 github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/chain-selectors v1.0.10 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 diff --git a/go.sum b/go.sum index 5fcf84c3c27..6df35923ad7 100644 --- a/go.sum +++ b/go.sum @@ -1180,8 +1180,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= +github.com/smartcontractkit/chainlink-automation 
v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b h1:zQEsSYaMY4+svBOBr1ZlufKCwb1R4/5QPLKDje0xpeI= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index dec6e78d346..5100a3df8c6 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -23,7 +23,7 @@ require ( github.com/scylladb/go-reflectx v1.0.1 github.com/segmentio/ksuid v1.0.4 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b github.com/smartcontractkit/chainlink-testing-framework v1.27.8 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 98448d96ae2..94821e863c9 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1523,8 +1523,8 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 
h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b h1:zQEsSYaMY4+svBOBr1ZlufKCwb1R4/5QPLKDje0xpeI= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 5e32726fb38..22f0034e364 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -15,7 +15,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b github.com/smartcontractkit/chainlink-testing-framework v1.27.8 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index b404419c88c..bd50a119473 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1506,8 +1506,8 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= 
github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b h1:zQEsSYaMY4+svBOBr1ZlufKCwb1R4/5QPLKDje0xpeI= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240329141134-fed25857a09b/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= From 8946762b5f0dae0d7f4e32a54e1c91a4b486a990 Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 1 Apr 2024 23:20:38 +0300 Subject: [PATCH 38/58] align buffer config --- .../evmregistry/v21/logprovider/factory.go | 32 ++++++++++++-- .../v21/logprovider/integration_test.go | 6 +-- .../evmregistry/v21/logprovider/provider.go | 43 +++++-------------- .../logprovider/provider_life_cycle_test.go | 6 +-- .../v21/logprovider/provider_test.go | 8 ++-- .../v21/logprovider/recoverer_test.go | 6 +-- 6 files changed, 51 insertions(+), 50 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 5d0e7854d23..5b818b492d4 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -15,7 +15,7 @@ import ( func New(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateStore core.UpkeepStateReader, finalityDepth uint32, chainID *big.Int) (LogEventProvider, LogRecoverer) { filterStore := NewUpkeepFilterStore() packer := NewLogEventsPacker() - opts := NewOptions(int64(finalityDepth)) + opts := NewOptions(int64(finalityDepth), chainID) provider := NewLogProvider(lggr, poller, chainID, packer, filterStore, opts) recoverer := NewLogRecoverer(lggr, poller, c, stateStore, packer, filterStore, opts) @@ -25,6 +25,7 @@ func New(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateS // LogTriggersOptions holds the options for the log trigger components. type LogTriggersOptions struct { + chainID *big.Int // LookbackBlocks is the number of blocks the provider will look back for logs. // The recoverer will scan for logs up to this depth. // NOTE: MUST be set to a greater-or-equal to the chain's finality depth. 
@@ -50,8 +51,9 @@ const ( BufferVersionV1 BufferVersion = "v1" ) -func NewOptions(finalityDepth int64) LogTriggersOptions { +func NewOptions(finalityDepth int64, chainID *big.Int) LogTriggersOptions { opts := new(LogTriggersOptions) + opts.chainID = chainID opts.Defaults(finalityDepth) return *opts } @@ -73,9 +75,31 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) { o.FinalityDepth = finalityDepth } if o.BlockRate == 0 { - o.BlockRate = 2 + o.BlockRate = o.defaultBlockRate() } if o.LogLimit == 0 { - o.LogLimit = 4 + o.LogLimit = o.defaultLogLimit() + } +} + +func (o *LogTriggersOptions) defaultBlockRate() uint32 { + switch o.chainID.Int64() { + case 42161, 421613, 421614: // Arbitrum + return 4 + default: + return 1 + } +} + +func (o *LogTriggersOptions) defaultLogLimit() uint32 { + switch o.chainID.Int64() { + case 42161, 421613, 421614: // Arbitrum + return 1 + case 1, 4, 5, 42, 11155111: // Eth + return 20 + case 10, 420, 56, 97, 137, 80001, 43113, 43114, 8453, 84531: // Optimism, BSC, Polygon, Avax, Base + return 5 + default: + return 2 } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 578de5b3049..627ad7f744f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -63,7 +63,7 @@ func TestIntegration_LogEventProvider(t *testing.T) { db := setupDB(t) defer db.Close() - opts := logprovider.NewOptions(200) + opts := logprovider.NewOptions(200, big.NewInt(1)) opts.ReadInterval = time.Second / 2 opts.BufferVersion = bufferVersion opts.LogLimit = logLimit @@ -250,7 +250,7 @@ func TestIntegration_LogEventProvider_Backfill(t *testing.T) { db := setupDB(t) defer db.Close() - opts := logprovider.NewOptions(200) + opts := logprovider.NewOptions(200, big.NewInt(1)) 
opts.ReadInterval = time.Second / 4 opts.BufferVersion = bufferVersion opts.LogLimit = limitLow @@ -515,7 +515,7 @@ func setupDependencies(t *testing.T, db *sqlx.DB, backend *backends.SimulatedBac func setup(lggr logger.Logger, poller logpoller.LogPoller, c evmclient.Client, stateStore evmregistry21.UpkeepStateReader, filterStore logprovider.UpkeepFilterStore, opts *logprovider.LogTriggersOptions) (logprovider.LogEventProvider, logprovider.LogRecoverer) { packer := logprovider.NewLogEventsPacker() if opts == nil { - o := logprovider.NewOptions(200) + o := logprovider.NewOptions(200, big.NewInt(1)) opts = &o } provider := logprovider.NewLogProvider(lggr, poller, big.NewInt(1), packer, filterStore, *opts) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 0a900475cf7..ef44e186477 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -115,13 +115,6 @@ type logEventProvider struct { } func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big.Int, packer LogDataPacker, filterStore UpkeepFilterStore, opts LogTriggersOptions) *logEventProvider { - defaultBlockRate := defaultBlockRateForChain(chainID) - defaultLogLimit := defaultLogLimitForChain(chainID) - - // TODO apply these to the log buffer later - _ = defaultBlockRate - _ = defaultLogLimit - return &logEventProvider{ threadCtrl: utils.NewThreadControl(), lggr: lggr.Named("KeepersRegistry.LogEventProvider"), @@ -140,16 +133,22 @@ func (p *logEventProvider) SetConfig(cfg ocr2keepers.LogEventProviderConfig) { logLimit := cfg.LogLimit if blockRate == 0 { - blockRate = defaultBlockRateForChain(p.chainID) + blockRate = p.opts.defaultBlockRate() } if logLimit == 0 { - logLimit = defaultLogLimitForChain(p.chainID) + logLimit = p.opts.defaultLogLimit() } 
p.lggr.With("where", "setConfig").Infow("setting config ", "bockRate", blockRate, "logLimit", logLimit) - // TODO set block rate and log limit on the buffer - //p.buffer.SetConfig(blockRate, logLimit) + atomic.StoreUint32(&p.opts.BlockRate, blockRate) + atomic.StoreUint32(&p.opts.LogLimit, logLimit) + + switch p.opts.BufferVersion { + case BufferVersionV1: + p.bufferV1.SetConfig(uint32(p.opts.LookbackBlocks), uint32(blockRate), uint32(logLimit)) + default: + } } func (p *logEventProvider) WithBufferVersion(v BufferVersion) { @@ -521,25 +520,3 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ return merr } - -func defaultBlockRateForChain(chainID *big.Int) uint32 { - switch chainID.Int64() { - case 42161, 421613, 421614: // Arbitrum - return 4 - default: - return 1 - } -} - -func defaultLogLimitForChain(chainID *big.Int) uint32 { - switch chainID.Int64() { - case 42161, 421613, 421614: // Arbitrum - return 1 - case 1, 4, 5, 42, 11155111: // Eth - return 20 - case 10, 420, 56, 97, 137, 80001, 43113, 43114, 8453, 84531: // Optimism, BSC, Polygon, Avax, Base - return 5 - default: - return 1 - } -} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go index 96a397827be..26e989c7466 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go @@ -100,7 +100,7 @@ func TestLogEventProvider_LifeCycle(t *testing.T) { }, } - p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -152,7 
+152,7 @@ func TestEventLogProvider_RefreshActiveUpkeeps(t *testing.T) { mp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{}, nil) mp.On("ReplayAsync", mock.Anything).Return(nil) - p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) require.NoError(t, p.RegisterFilter(ctx, FilterOptions{ UpkeepID: core.GenUpkeepID(types.LogTrigger, "1111").BigInt(), @@ -231,7 +231,7 @@ func TestLogEventProvider_ValidateLogTriggerConfig(t *testing.T) { }, } - p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { err := p.validateLogTriggerConfig(tc.cfg) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index 9b03c43eda6..57da895403e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -21,7 +21,7 @@ import ( ) func TestLogEventProvider_GetFilters(t *testing.T) { - p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) _, f := newEntry(p, 1) p.filterStore.AddActiveUpkeeps(f) @@ -63,7 +63,7 @@ func TestLogEventProvider_GetFilters(t *testing.T) { } func TestLogEventProvider_UpdateEntriesLastPoll(t *testing.T) { - p := 
NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) n := 10 @@ -176,7 +176,7 @@ func TestLogEventProvider_ScheduleReadJobs(t *testing.T) { ctx := testutils.Context(t) readInterval := 10 * time.Millisecond - opts := NewOptions(200) + opts := NewOptions(200, big.NewInt(1)) opts.ReadInterval = readInterval p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), opts) @@ -254,7 +254,7 @@ func TestLogEventProvider_ReadLogs(t *testing.T) { }, nil) filterStore := NewUpkeepFilterStore() - p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, filterStore, NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, filterStore, NewOptions(200, big.NewInt(1))) var ids []*big.Int for i := 0; i < 10; i++ { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go index 54338207190..65a05b2537e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go @@ -34,7 +34,7 @@ func TestLogRecoverer_GetRecoverables(t *testing.T) { ctx := testutils.Context(t) lp := &lpmocks.LogPoller{} lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: 100}, nil) - r := NewLogRecoverer(logger.TestLogger(t), lp, nil, nil, nil, nil, NewOptions(200)) + r := NewLogRecoverer(logger.TestLogger(t), lp, nil, nil, nil, nil, NewOptions(200, big.NewInt(1))) tests := []struct { name string @@ -1152,7 +1152,7 @@ func TestLogRecoverer_pending(t *testing.T) { maxPendingPayloadsPerUpkeep = origMaxPendingPayloadsPerUpkeep }() - r := 
NewLogRecoverer(logger.TestLogger(t), nil, nil, nil, nil, nil, NewOptions(200)) + r := NewLogRecoverer(logger.TestLogger(t), nil, nil, nil, nil, nil, NewOptions(200, big.NewInt(1))) r.lock.Lock() r.pending = tc.exist for i, p := range tc.new { @@ -1233,7 +1233,7 @@ func setupTestRecoverer(t *testing.T, interval time.Duration, lookbackBlocks int lp := new(lpmocks.LogPoller) statesReader := new(mocks.UpkeepStateReader) filterStore := NewUpkeepFilterStore() - opts := NewOptions(lookbackBlocks) + opts := NewOptions(lookbackBlocks, big.NewInt(1)) opts.ReadInterval = interval / 5 opts.LookbackBlocks = lookbackBlocks recoverer := NewLogRecoverer(logger.TestLogger(t), lp, nil, statesReader, &mockedPacker{}, filterStore, opts) From 822ded94027c28efeee483e9d9b52064027a80ca Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 1 Apr 2024 23:20:59 +0300 Subject: [PATCH 39/58] align buffer metrics (added label expired) --- .../ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go | 9 +++++---- .../ocr2keeper/evmregistry/v21/prommetrics/metrics.go | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 54084cb10e6..8116185729d 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -274,7 +274,7 @@ func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, q.lggr.Debugw("Dequeued logs", "start", start, "end", end, "limit", limit, "results", len(results), "remaining", remaining) } - prommetrics.AutomationLogsInLogBuffer.Sub(float64(len(results))) + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionEgress).Add(float64(len(results))) return results, remaining } @@ -310,7 +310,8 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, 
logsToAdd ...logpoller.Lo q.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(q.logs), "visited size", len(q.visited)) } - prommetrics.AutomationLogsInLogBuffer.Add(float64(added)) + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionIngress).Add(float64(added)) + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionDropped).Add(float64(dropped)) return added, dropped } @@ -334,7 +335,7 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { var currentWindowStart int64 for _, l := range q.logs { if blockThreshold > l.BlockNumber { // old log, removed - prommetrics.AutomationLogsInLogBuffer.Dec() + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionExpired).Inc() // q.lggr.Debugw("Expiring old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) logid := logID(l) delete(q.visited, logid) @@ -350,7 +351,7 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { currentWindowCapacity++ // if capacity has been reached, drop the log if currentWindowCapacity > maxLogsPerWindow { - prommetrics.AutomationLogsInLogBuffer.Dec() + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionDropped).Inc() // TODO: check if we should clean visited as well, so it will be possible to add the log again q.lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", l.BlockNumber, "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics/metrics.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics/metrics.go index 0925ce1c153..604f7edda16 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics/metrics.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics/metrics.go @@ -13,6 +13,7 @@ const ( LogBufferFlowDirectionIngress = "ingress" LogBufferFlowDirectionEgress = "egress" LogBufferFlowDirectionDropped = "dropped" + LogBufferFlowDirectionExpired = "expired" ) // Automation metrics From 367b5e35f2f4832b7b7b7fafb3b36872405319ad Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 2 Apr 2024 00:14:42 +0300 Subject: [PATCH 40/58] update cla version (temp) --- core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 ++-- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 ++-- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index f03d994435b..3ba18a6b0ba 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -20,7 +20,7 @@ require ( github.com/pelletier/go-toml/v2 v2.1.1 github.com/prometheus/client_golang v1.17.0 github.com/shopspring/decimal v1.3.1 - github.com/smartcontractkit/chainlink-automation v1.0.2 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index e8c743af696..b5fcbafe3a1 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1185,8 +1185,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= 
-github.com/smartcontractkit/chainlink-automation v1.0.2 h1:xsfyuswL15q2YBGQT3qn2SBz6fnSKiSW7XZ8IZQLpnI= -github.com/smartcontractkit/chainlink-automation v1.0.2/go.mod h1:RjboV0Qd7YP+To+OrzHGXaxUxoSONveCoAK2TQ1INLU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf h1:yW8rTFycozLVnXRyOgZWGktnmzoFLxSWh1xPJXsp7vg= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/go.mod b/go.mod index e11fd1e5e09..933b3cedda2 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( github.com/shirou/gopsutil/v3 v3.23.11 github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/chain-selectors v1.0.10 - github.com/smartcontractkit/chainlink-automation v1.0.2 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 diff --git a/go.sum b/go.sum index 538e8cfa4d6..b0664b612dc 100644 --- a/go.sum +++ b/go.sum @@ -1180,8 +1180,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod 
h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2 h1:xsfyuswL15q2YBGQT3qn2SBz6fnSKiSW7XZ8IZQLpnI= -github.com/smartcontractkit/chainlink-automation v1.0.2/go.mod h1:RjboV0Qd7YP+To+OrzHGXaxUxoSONveCoAK2TQ1INLU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf h1:yW8rTFycozLVnXRyOgZWGktnmzoFLxSWh1xPJXsp7vg= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 0196b1d467d..14e921a7cf0 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -23,7 +23,7 @@ require ( github.com/scylladb/go-reflectx v1.0.1 github.com/segmentio/ksuid v1.0.4 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf github.com/smartcontractkit/chainlink-testing-framework v1.27.8 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 4346c44681f..ab91d806a42 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1523,8 +1523,8 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= 
github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2 h1:xsfyuswL15q2YBGQT3qn2SBz6fnSKiSW7XZ8IZQLpnI= -github.com/smartcontractkit/chainlink-automation v1.0.2/go.mod h1:RjboV0Qd7YP+To+OrzHGXaxUxoSONveCoAK2TQ1INLU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf h1:yW8rTFycozLVnXRyOgZWGktnmzoFLxSWh1xPJXsp7vg= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 9c3b6895b9f..beb829cf6b8 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -15,7 +15,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2 + github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf github.com/smartcontractkit/chainlink-testing-framework v1.27.8 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 0486f52b427..20bc812b31d 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1506,8 +1506,8 @@ 
github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2 h1:xsfyuswL15q2YBGQT3qn2SBz6fnSKiSW7XZ8IZQLpnI= -github.com/smartcontractkit/chainlink-automation v1.0.2/go.mod h1:RjboV0Qd7YP+To+OrzHGXaxUxoSONveCoAK2TQ1INLU= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= +github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf h1:yW8rTFycozLVnXRyOgZWGktnmzoFLxSWh1xPJXsp7vg= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= From e702562ecaedab93a20ebc0c3b07569980c4608d Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 2 Apr 2024 00:17:16 +0300 Subject: [PATCH 41/58] lint --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index ef44e186477..cc65bf76dae 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -146,7 +146,7 @@ func (p *logEventProvider) SetConfig(cfg 
ocr2keepers.LogEventProviderConfig) { switch p.opts.BufferVersion { case BufferVersionV1: - p.bufferV1.SetConfig(uint32(p.opts.LookbackBlocks), uint32(blockRate), uint32(logLimit)) + p.bufferV1.SetConfig(uint32(p.opts.LookbackBlocks), blockRate, logLimit) default: } } From 7b097e54857e82deb03ba1e5119d70e292d2ae53 Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 2 Apr 2024 10:13:08 +0300 Subject: [PATCH 42/58] set feature flag in load test (temp) --- integration-tests/actions/automationv2/actions.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/integration-tests/actions/automationv2/actions.go b/integration-tests/actions/automationv2/actions.go index 33caf6fbc0f..5985348e458 100644 --- a/integration-tests/actions/automationv2/actions.go +++ b/integration-tests/actions/automationv2/actions.go @@ -371,6 +371,7 @@ func (a *AutomationTest) AddBootstrapJob() error { } func (a *AutomationTest) AddAutomationJobs() error { + useBufferV1 := "true" // TODO: load from config var contractVersion string if a.RegistrySettings.RegistryVersion == ethereum.RegistryVersion_2_2 { contractVersion = "v2.1+" @@ -378,9 +379,17 @@ func (a *AutomationTest) AddAutomationJobs() error { contractVersion = "v2.1" } else if a.RegistrySettings.RegistryVersion == ethereum.RegistryVersion_2_0 { contractVersion = "v2.0" + useBufferV1 = "" } else { return fmt.Errorf("v2.0, v2.1, and v2.2 are the only supported versions") } + pluginCfg := map[string]interface{}{ + "mercuryCredentialName": "\"" + a.MercuryCredentialName + "\"", + "contractVersion": "\"" + contractVersion + "\"", + } + if len(useBufferV1) > 0 { + pluginCfg["useBufferV1"] = useBufferV1 + } for i := 1; i < len(a.ChainlinkNodes); i++ { autoOCR2JobSpec := client.OCR2TaskJobSpec{ Name: "automation-" + contractVersion + "-" + a.Registry.Address(), @@ -392,10 +401,7 @@ func (a *AutomationTest) AddAutomationJobs() error { RelayConfig: map[string]interface{}{ "chainID": int(a.ChainClient.GetChainID().Int64()), 
}, - PluginConfig: map[string]interface{}{ - "mercuryCredentialName": "\"" + a.MercuryCredentialName + "\"", - "contractVersion": "\"" + contractVersion + "\"", - }, + PluginConfig: pluginCfg, ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), TransmitterID: null.StringFrom(a.NodeDetails[i].TransmitterAddresses[a.TransmitterKeyIndex]), P2PV2Bootstrappers: pq.StringArray{a.DefaultP2Pv2Bootstrapper}, From 09e530df173bf46f04ca5af8521854e97f0c6a0a Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 2 Apr 2024 13:02:19 +0300 Subject: [PATCH 43/58] update cla version --- core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 ++-- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 ++-- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 3ba18a6b0ba..2102ed0aea3 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -20,7 +20,7 @@ require ( github.com/pelletier/go-toml/v2 v2.1.1 github.com/prometheus/client_golang v1.17.0 github.com/shopspring/decimal v1.3.1 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 + github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index b5fcbafe3a1..f41fa6c56b7 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1185,8 +1185,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.10 
h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= +github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21 h1:f+TtSCPm2DMuhGRui7YM+oSkCVHVSQYxPtqsilO5XxA= +github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21/go.mod h1:RjboV0Qd7YP+To+OrzHGXaxUxoSONveCoAK2TQ1INLU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf h1:yW8rTFycozLVnXRyOgZWGktnmzoFLxSWh1xPJXsp7vg= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/go.mod b/go.mod index 933b3cedda2..0c1d87a3a56 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( github.com/shirou/gopsutil/v3 v3.23.11 github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/chain-selectors v1.0.10 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 + github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 diff --git a/go.sum b/go.sum index b0664b612dc..3c75eefc18a 100644 --- a/go.sum +++ b/go.sum @@ -1180,8 +1180,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ 
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= +github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21 h1:f+TtSCPm2DMuhGRui7YM+oSkCVHVSQYxPtqsilO5XxA= +github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21/go.mod h1:RjboV0Qd7YP+To+OrzHGXaxUxoSONveCoAK2TQ1INLU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf h1:yW8rTFycozLVnXRyOgZWGktnmzoFLxSWh1xPJXsp7vg= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 14e921a7cf0..3e53991269f 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -23,7 +23,7 @@ require ( github.com/scylladb/go-reflectx v1.0.1 github.com/segmentio/ksuid v1.0.4 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 + github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf github.com/smartcontractkit/chainlink-testing-framework v1.27.8 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 diff --git a/integration-tests/go.sum 
b/integration-tests/go.sum index ab91d806a42..b6bcba36da8 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1523,8 +1523,8 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= +github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21 h1:f+TtSCPm2DMuhGRui7YM+oSkCVHVSQYxPtqsilO5XxA= +github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21/go.mod h1:RjboV0Qd7YP+To+OrzHGXaxUxoSONveCoAK2TQ1INLU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf h1:yW8rTFycozLVnXRyOgZWGktnmzoFLxSWh1xPJXsp7vg= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index beb829cf6b8..a80a5d95987 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -15,7 +15,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 - github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 + github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21 
github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf github.com/smartcontractkit/chainlink-testing-framework v1.27.8 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 20bc812b31d..682118412cd 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1506,8 +1506,8 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512 h1:sFmF5uXphwPkD8CwaelZwX5cGUkFKSZxfelf07p4IiM= -github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240322131631-4526ce38a512/go.mod h1:Me7tcN3Gt/PdY43ujOdU4nAxmJGwFErBP9ud2hUtE14= +github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21 h1:f+TtSCPm2DMuhGRui7YM+oSkCVHVSQYxPtqsilO5XxA= +github.com/smartcontractkit/chainlink-automation v1.0.3-0.20240402080500-559b08d27d21/go.mod h1:RjboV0Qd7YP+To+OrzHGXaxUxoSONveCoAK2TQ1INLU= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf h1:yW8rTFycozLVnXRyOgZWGktnmzoFLxSWh1xPJXsp7vg= github.com/smartcontractkit/chainlink-common v0.1.7-0.20240401172519-4bfc659b80bf/go.mod h1:kstYjAGqBswdZpl7YkSPeXBDVwaY1VaR6tUMPWl8ykA= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= From 8f8d96043e9f676fd725a1b7a1e977daff5a1c3e Mon Sep 17 00:00:00 2001 From: amirylm Date: Tue, 2 Apr 2024 16:51:51 +0300 Subject: [PATCH 44/58] types alignment --- .../evmregistry/v21/logprovider/buffer_v1.go | 33 +++------- 
.../v21/logprovider/buffer_v1_test.go | 61 +++++++++++++++++++ .../v21/logprovider/{window.go => log.go} | 37 +++++++---- .../{window_test.go => log_test.go} | 61 ------------------- 4 files changed, 97 insertions(+), 95 deletions(-) rename core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/{window.go => log.go} (52%) rename core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/{window_test.go => log_test.go} (70%) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 8116185729d..6f170b5c0ee 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -1,14 +1,12 @@ package logprovider import ( - "encoding/hex" "math" "math/big" "sort" "sync" "sync/atomic" - ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics" @@ -120,7 +118,7 @@ func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, b.lock.RLock() defer b.lock.RUnlock() - start, end := BlockWindow(block, blockRate) + start, end := getBlockWindow(block, blockRate) return b.dequeue(start, end, upkeepLimit, maxResults, upkeepSelector) } @@ -342,7 +340,7 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { expired++ continue } - start, _ := BlockWindow(l.BlockNumber, blockRate) + start, _ := getBlockWindow(l.BlockNumber, blockRate) if start != currentWindowStart { // new window, reset capacity currentWindowStart = start @@ -381,24 +379,13 @@ func (q *upkeepLogQueue) cleanVisited(blockThreshold int64) { } } -// logID returns a unique identifier for a log, which 
is an hex string -// of ocr2keepers.LogTriggerExtension.LogIdentifier() -func logID(l logpoller.Log) string { - ext := ocr2keepers.LogTriggerExtension{ - Index: uint32(l.LogIndex), +// getBlockWindow returns the start and end block of the window for the given block. +func getBlockWindow(block int64, blockRate int) (start int64, end int64) { + windowSize := int64(blockRate) + if windowSize == 0 { + return block, block } - copy(ext.TxHash[:], l.TxHash[:]) - copy(ext.BlockHash[:], l.BlockHash[:]) - return hex.EncodeToString(ext.LogIdentifier()) -} - -// latestBlockNumber returns the latest block number from the given logs -func latestBlockNumber(logs ...logpoller.Log) int64 { - var latest int64 - for _, l := range logs { - if l.BlockNumber > latest { - latest = l.BlockNumber - } - } - return latest + start = block - (block % windowSize) + end = start + windowSize - 1 + return } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index c728e37eb8c..48ad9ea4549 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -365,6 +365,67 @@ func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { }) } +func TestLogEventBufferV1_BlockWindow(t *testing.T) { + tests := []struct { + name string + block int64 + blockRate int + wantStart int64 + wantEnd int64 + }{ + { + name: "block 0, blockRate 1", + block: 0, + blockRate: 1, + wantStart: 0, + wantEnd: 0, + }, + { + name: "block 81, blockRate 1", + block: 81, + blockRate: 1, + wantStart: 81, + wantEnd: 81, + }, + { + name: "block 0, blockRate 4", + block: 0, + blockRate: 4, + wantStart: 0, + wantEnd: 3, + }, + { + name: "block 81, blockRate 4", + block: 81, + blockRate: 4, + wantStart: 80, + wantEnd: 83, + }, + { + name: "block 83, blockRate 4", + block: 83, + 
blockRate: 4, + wantStart: 80, + wantEnd: 83, + }, + { + name: "block 84, blockRate 4", + block: 84, + blockRate: 4, + wantStart: 84, + wantEnd: 87, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + start, end := getBlockWindow(tc.block, tc.blockRate) + require.Equal(t, tc.wantStart, start) + require.Equal(t, tc.wantEnd, end) + }) + } +} + type dequeueArgs struct { block int64 blockRate int diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go similarity index 52% rename from core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go rename to core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go index 8596afccef1..2b193b1b4bc 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go @@ -1,20 +1,13 @@ package logprovider import ( + "encoding/hex" + + ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" ) -// BlockWindow returns the start and end block for the given window. -func BlockWindow(block int64, blockRate int) (start int64, end int64) { - windowSize := int64(blockRate) - if windowSize == 0 { - return block, block - } - start = block - (block % windowSize) - end = start + windowSize - 1 - return -} - // LogSorter sorts the logs based on block number, tx hash and log index. // returns true if b should come before a. 
func LogSorter(a, b logpoller.Log) bool { @@ -39,3 +32,25 @@ func LogComparator(a, b logpoller.Log) int { } return int(logIndexDiff) } + +// logID returns a unique identifier for a log, which is an hex string +// of ocr2keepers.LogTriggerExtension.LogIdentifier() +func logID(l logpoller.Log) string { + ext := ocr2keepers.LogTriggerExtension{ + Index: uint32(l.LogIndex), + } + copy(ext.TxHash[:], l.TxHash[:]) + copy(ext.BlockHash[:], l.BlockHash[:]) + return hex.EncodeToString(ext.LogIdentifier()) +} + +// latestBlockNumber returns the latest block number from the given logs +func latestBlockNumber(logs ...logpoller.Log) int64 { + var latest int64 + for _, l := range logs { + if l.BlockNumber > latest { + latest = l.BlockNumber + } + } + return latest +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go similarity index 70% rename from core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go rename to core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go index 3021ff3e6a1..9a2d2760a4f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/window_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go @@ -9,67 +9,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" ) -func TestBlockWindow(t *testing.T) { - tests := []struct { - name string - block int64 - blockRate int - wantStart int64 - wantEnd int64 - }{ - { - name: "block 0, blockRate 1", - block: 0, - blockRate: 1, - wantStart: 0, - wantEnd: 0, - }, - { - name: "block 81, blockRate 1", - block: 81, - blockRate: 1, - wantStart: 81, - wantEnd: 81, - }, - { - name: "block 0, blockRate 4", - block: 0, - blockRate: 4, - wantStart: 0, - wantEnd: 3, - }, - { - name: "block 81, blockRate 4", - block: 81, - blockRate: 4, - wantStart: 80, - wantEnd: 83, - }, - { - 
name: "block 83, blockRate 4", - block: 83, - blockRate: 4, - wantStart: 80, - wantEnd: 83, - }, - { - name: "block 84, blockRate 4", - block: 84, - blockRate: 4, - wantStart: 84, - wantEnd: 87, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - start, end := BlockWindow(tc.block, tc.blockRate) - require.Equal(t, tc.wantStart, start) - require.Equal(t, tc.wantEnd, end) - }) - } -} - func TestLogComparatorSorter(t *testing.T) { tests := []struct { name string From 180ac44f2aa04eb05c25d837956195bdc32e0d7e Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 3 Apr 2024 12:58:58 +0300 Subject: [PATCH 45/58] Revert "set feature flag in load test (temp)" This reverts commit 7b097e54857e82deb03ba1e5119d70e292d2ae53. --- integration-tests/actions/automationv2/actions.go | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/integration-tests/actions/automationv2/actions.go b/integration-tests/actions/automationv2/actions.go index 5985348e458..33caf6fbc0f 100644 --- a/integration-tests/actions/automationv2/actions.go +++ b/integration-tests/actions/automationv2/actions.go @@ -371,7 +371,6 @@ func (a *AutomationTest) AddBootstrapJob() error { } func (a *AutomationTest) AddAutomationJobs() error { - useBufferV1 := "true" // TODO: load from config var contractVersion string if a.RegistrySettings.RegistryVersion == ethereum.RegistryVersion_2_2 { contractVersion = "v2.1+" @@ -379,17 +378,9 @@ func (a *AutomationTest) AddAutomationJobs() error { contractVersion = "v2.1" } else if a.RegistrySettings.RegistryVersion == ethereum.RegistryVersion_2_0 { contractVersion = "v2.0" - useBufferV1 = "" } else { return fmt.Errorf("v2.0, v2.1, and v2.2 are the only supported versions") } - pluginCfg := map[string]interface{}{ - "mercuryCredentialName": "\"" + a.MercuryCredentialName + "\"", - "contractVersion": "\"" + contractVersion + "\"", - } - if len(useBufferV1) > 0 { - pluginCfg["useBufferV1"] = useBufferV1 - } for i := 1; i < 
len(a.ChainlinkNodes); i++ { autoOCR2JobSpec := client.OCR2TaskJobSpec{ Name: "automation-" + contractVersion + "-" + a.Registry.Address(), @@ -401,7 +392,10 @@ func (a *AutomationTest) AddAutomationJobs() error { RelayConfig: map[string]interface{}{ "chainID": int(a.ChainClient.GetChainID().Int64()), }, - PluginConfig: pluginCfg, + PluginConfig: map[string]interface{}{ + "mercuryCredentialName": "\"" + a.MercuryCredentialName + "\"", + "contractVersion": "\"" + contractVersion + "\"", + }, ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), TransmitterID: null.StringFrom(a.NodeDetails[i].TransmitterAddresses[a.TransmitterKeyIndex]), P2PV2Bootstrappers: pq.StringArray{a.DefaultP2Pv2Bootstrapper}, From 28ccabe66b71898d3afc07cc9e82e48b56574066 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 3 Apr 2024 13:50:29 +0300 Subject: [PATCH 46/58] comment --- core/services/ocr2/plugins/ocr2keeper/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/config.go b/core/services/ocr2/plugins/ocr2keeper/config.go index 3ec4c66ecac..7aba35201a3 100644 --- a/core/services/ocr2/plugins/ocr2keeper/config.go +++ b/core/services/ocr2/plugins/ocr2keeper/config.go @@ -60,7 +60,7 @@ type PluginConfig struct { ContractVersion string `json:"contractVersion"` // CaptureAutomationCustomTelemetry is a bool flag to toggle Custom Telemetry Service CaptureAutomationCustomTelemetry *bool `json:"captureAutomationCustomTelemetry,omitempty"` - // CaptureAutomationCustomTelemetry is a bool flag to toggle Custom Telemetry Service + // UseBufferV1 is a bool flag to toggle the new log buffer implementation UseBufferV1 *bool `json:"useBufferV1,omitempty"` } From 9fd4f6d14b08be36066320a33e4f87af03c24af0 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 3 Apr 2024 13:50:48 +0300 Subject: [PATCH 47/58] remove wrong assertion --- .../ocr2keeper/evmregistry/v21/logprovider/integration_test.go | 3 +-- 1 file changed, 1 
insertion(+), 2 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 627ad7f744f..6af28015ed8 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -101,7 +101,7 @@ func TestIntegration_LogEventProvider(t *testing.T) { waitLogProvider(ctx, t, logProvider, 3) - allPayloads := collectPayloads(ctx, t, logProvider, n, 5) + allPayloads := collectPayloads(ctx, t, logProvider, n, logsRounds/2) require.GreaterOrEqual(t, len(allPayloads), n, "failed to get logs after restart") @@ -375,7 +375,6 @@ func collectPayloads(ctx context.Context, t *testing.T, logProvider logprovider. for ctx.Err() == nil && len(allPayloads) < n && rounds > 0 { logs, err := logProvider.GetLatestPayloads(ctx) require.NoError(t, err) - require.LessOrEqual(t, len(logs), n, "failed to get all logs") allPayloads = append(allPayloads, logs...) 
rounds-- } From fee42b455476f56d3e2d9fbcf48ce88f73c963d5 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 3 Apr 2024 15:30:16 +0300 Subject: [PATCH 48/58] alignments: - log states in buffer - renaming of vars - types --- .../evmregistry/v21/logprovider/buffer_v1.go | 125 +++++++++++------- .../v21/logprovider/buffer_v1_test.go | 6 +- .../evmregistry/v21/logprovider/provider.go | 2 +- 3 files changed, 83 insertions(+), 50 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 6f170b5c0ee..a9b0a1408c0 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -40,19 +40,19 @@ func DefaultUpkeepSelector(id *big.Int) bool { } type logBufferOptions struct { - // max number of logs to keep in the buffer for each upkeep per window - logLimitHigh *atomic.Uint32 // number of blocks to keep in the buffer - bufferSize *atomic.Uint32 + lookback *atomic.Uint32 // blockRate is the number of blocks per window blockRate *atomic.Uint32 + // max number of logs to keep in the buffer for each upkeep per window (LogLimit*10) + windowLimit *atomic.Uint32 } func newLogBufferOptions(lookback, blockRate, logLimit uint32) *logBufferOptions { opts := &logBufferOptions{ - logLimitHigh: new(atomic.Uint32), - bufferSize: new(atomic.Uint32), - blockRate: new(atomic.Uint32), + windowLimit: new(atomic.Uint32), + lookback: new(atomic.Uint32), + blockRate: new(atomic.Uint32), } opts.override(lookback, blockRate, logLimit) @@ -60,17 +60,17 @@ func newLogBufferOptions(lookback, blockRate, logLimit uint32) *logBufferOptions } func (o *logBufferOptions) override(lookback, blockRate, logLimit uint32) { - o.logLimitHigh.Store(logLimit * 10) - o.bufferSize.Store(lookback) + o.windowLimit.Store(logLimit * 10) + o.lookback.Store(lookback) 
o.blockRate.Store(blockRate) } -func (o *logBufferOptions) windows() uint { +func (o *logBufferOptions) windows() int { blockRate := o.blockRate.Load() if blockRate == 0 { blockRate = 1 } - return uint(math.Ceil(float64(o.bufferSize.Load()) / float64(blockRate))) + return int(math.Ceil(float64(o.lookback.Load()) / float64(blockRate))) } type logBuffer struct { @@ -83,7 +83,7 @@ type logBuffer struct { lock sync.RWMutex } -func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint) LogBuffer { +func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32) LogBuffer { return &logBuffer{ lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), opts: newLogBufferOptions(uint32(lookback), uint32(blockRate), uint32(logLimit)), @@ -105,7 +105,7 @@ func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { if b.lastBlockSeen.Load() < latestBlock { b.lastBlockSeen.Store(latestBlock) } - blockThreshold := b.lastBlockSeen.Load() - int64(b.opts.bufferSize.Load()) + blockThreshold := b.lastBlockSeen.Load() - int64(b.opts.lookback.Load()) if blockThreshold <= 0 { blockThreshold = 1 } @@ -126,6 +126,7 @@ func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, // in block range [start,end] with minimum number of results per upkeep (upkeepLimit) // and the maximum number of results (capacity). // Returns logs and the number of remaining logs in the buffer for the given range and selector. +// NOTE: this method is not thread safe and should be called within a lock. 
func (b *logBuffer) dequeue(start, end int64, upkeepLimit, capacity int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { var result []BufferedLog var remainingLogs int @@ -205,6 +206,20 @@ func (b *logBuffer) setUpkeepQueue(uid *big.Int, buf *upkeepLogQueue) { // TODO: separate files +type logTriggerState uint8 + +const ( + logTriggerStateUnknown logTriggerState = iota + logTriggerStateSeen + logTriggerStateVisited + logTriggerStateDropped +) + +type logTriggerStateEntry struct { + state logTriggerState + block int64 +} + // upkeepLogQueue is a priority queue for logs associated to a specific upkeep. // It keeps track of the logs that were already visited and the capacity of the queue. type upkeepLogQueue struct { @@ -213,19 +228,22 @@ type upkeepLogQueue struct { id *big.Int opts *logBufferOptions - logs []logpoller.Log - visited map[string]int64 - lock sync.RWMutex + // logs is the buffer of logs for the upkeep + logs []logpoller.Log + // states keeps track of the state of the logs that are known to the queue + // and the block number they were seen at + states map[string]logTriggerStateEntry + lock sync.RWMutex } func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, opts *logBufferOptions) *upkeepLogQueue { - logsCapacity := uint(opts.logLimitHigh.Load()) * opts.windows() + maxLogs := int(opts.windowLimit.Load()) * opts.windows() // limit per window * windows return &upkeepLogQueue{ - lggr: lggr.With("upkeepID", id.String()), - id: id, - opts: opts, - logs: make([]logpoller.Log, 0, logsCapacity), - visited: make(map[string]int64), + lggr: lggr.With("upkeepID", id.String()), + id: id, + opts: opts, + logs: make([]logpoller.Log, 0, maxLogs), + states: make(map[string]logTriggerStateEntry), } } @@ -260,6 +278,11 @@ func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, if l.BlockNumber >= start && l.BlockNumber <= end { if len(results) < limit { results = append(results, l) + lid := logID(l) + if s, ok := q.states[lid]; ok 
{ + s.state = logTriggerStateVisited + q.states[lid] = s + } continue } remaining++ @@ -291,21 +314,22 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Lo // q.lggr.Debugw("Skipping log from old block", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) continue } - logid := logID(log) - if _, ok := q.visited[logid]; ok { + lid := logID(log) + if _, ok := q.states[lid]; ok { // q.lggr.Debugw("Skipping known log", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) continue } + q.states[lid] = logTriggerStateEntry{state: logTriggerStateSeen, block: log.BlockNumber} added++ logs = append(logs, log) - q.visited[logid] = log.BlockNumber } q.logs = logs var dropped int if added > 0 { + q.orderLogs() dropped = q.clean(blockThreshold) - q.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(q.logs), "visited size", len(q.visited)) + q.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(q.logs), "visited size", len(q.states)) } prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionIngress).Add(float64(added)) @@ -314,29 +338,33 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Lo return added, dropped } -// clean removes logs that are older than blockThreshold and drops logs if the limit for the -// given upkeep was exceeded. Returns the number of logs that were dropped. -func (q *upkeepLogQueue) clean(blockThreshold int64) int { - blockRate := int(q.opts.blockRate.Load()) - maxLogsPerWindow := int(q.opts.logLimitHigh.Load()) - +// orderLogs sorts the logs in the buffer. +// NOTE: this method is not thread safe and should be called within a lock. 
+func (q *upkeepLogQueue) orderLogs() { // sort logs by block number, tx hash and log index // to keep the q sorted and to ensure that logs can be // grouped by block windows for the cleanup sort.SliceStable(q.logs, func(i, j int) bool { return LogSorter(q.logs[i], q.logs[j]) }) - // cleanup logs that are older than blockThreshold - // and drop logs if the window/s limit for the given upkeep was exceeded +} + +// clean removes logs that are older than blockThreshold and drops logs if the limit for the +// given upkeep was exceeded. Returns the number of logs that were dropped. +// NOTE: this method is not thread safe and should be called within a lock. +func (q *upkeepLogQueue) clean(blockThreshold int64) int { + var dropped, expired int + blockRate := int(q.opts.blockRate.Load()) + windowLimit := int(q.opts.windowLimit.Load()) updated := make([]logpoller.Log, 0) - var dropped, expired, currentWindowCapacity int - var currentWindowStart int64 + // helper variables to keep track of the current window capacity + currentWindowCapacity, currentWindowStart := 0, int64(0) for _, l := range q.logs { if blockThreshold > l.BlockNumber { // old log, removed prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionExpired).Inc() // q.lggr.Debugw("Expiring old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) logid := logID(l) - delete(q.visited, logid) + delete(q.states, logid) expired++ continue } @@ -348,14 +376,18 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { } currentWindowCapacity++ // if capacity has been reached, drop the log - if currentWindowCapacity > maxLogsPerWindow { + if currentWindowCapacity > windowLimit { + lid := logID(l) + if s, ok := q.states[lid]; ok { + s.state = logTriggerStateDropped + q.states[lid] = s + } + dropped++ prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionDropped).Inc() - // TODO: check if we should clean visited as 
well, so it will be possible to add the log again q.lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", l.BlockNumber, "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), "currentWindowStart", currentWindowStart, "currentWindowCapacity", currentWindowCapacity, - "maxLogsPerWindow", maxLogsPerWindow, "blockRate", blockRate) - dropped++ + "maxLogsPerWindow", windowLimit, "blockRate", blockRate) continue } updated = append(updated, l) @@ -363,18 +395,19 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { if dropped > 0 || expired > 0 { q.lggr.Debugw("Cleaned logs", "dropped", dropped, "expired", expired, "blockThreshold", blockThreshold, "len updated", len(updated), "len before", len(q.logs)) + q.logs = updated } - q.logs = updated - q.cleanVisited(blockThreshold) + q.cleanStates(blockThreshold) return dropped } -func (q *upkeepLogQueue) cleanVisited(blockThreshold int64) { - for lid, block := range q.visited { - if block <= blockThreshold { - delete(q.visited, lid) +// NOTE: this method is not thread safe and should be called within a lock. 
+func (q *upkeepLogQueue) cleanStates(blockThreshold int64) { + for lid, s := range q.states { + if s.block <= blockThreshold { + delete(q.states, lid) } } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index 48ad9ea4549..0029e17f9f3 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -136,7 +136,7 @@ func TestLogEventBufferV1_Dequeue(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - buf := NewLogBuffer(logger.TestLogger(t), uint(tc.lookback), uint(tc.args.blockRate), uint(tc.args.upkeepLimit)) + buf := NewLogBuffer(logger.TestLogger(t), uint32(tc.lookback), uint32(tc.args.blockRate), uint32(tc.args.upkeepLimit)) for id, logs := range tc.logsInBuffer { added, dropped := buf.Enqueue(id, logs...) 
require.Equal(t, len(logs), added+dropped) @@ -155,7 +155,7 @@ func TestLogEventBufferV1_Enqueue(t *testing.T) { added, dropped map[string]int sizeOfRange map[*big.Int]int rangeStart, rangeEnd int64 - lookback, blockRate, upkeepLimit uint + lookback, blockRate, upkeepLimit uint32 }{ { name: "empty", @@ -361,7 +361,7 @@ func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { require.Equal(t, 2, q.sizeOfRange(1, 18)) q.lock.Lock() defer q.lock.Unlock() - require.Equal(t, 2, len(q.visited)) + require.Equal(t, 2, len(q.states)) }) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index cc65bf76dae..f71b6af51f6 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -120,7 +120,7 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), - bufferV1: NewLogBuffer(lggr, uint(opts.LookbackBlocks), uint(opts.BlockRate), uint(opts.LogLimit)), + bufferV1: NewLogBuffer(lggr, uint32(opts.LookbackBlocks), opts.BlockRate, opts.LogLimit), poller: poller, opts: opts, filterStore: filterStore, From 7627eba206ecc030a0a0aabce966cc6211acefcc Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 3 Apr 2024 15:44:04 +0300 Subject: [PATCH 49/58] lint --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index a9b0a1408c0..ca31b5f0a20 100644 --- 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -86,7 +86,7 @@ type logBuffer struct { func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32) LogBuffer { return &logBuffer{ lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), - opts: newLogBufferOptions(uint32(lookback), uint32(blockRate), uint32(logLimit)), + opts: newLogBufferOptions(lookback, blockRate, logLimit), lastBlockSeen: new(atomic.Int64), queues: make(map[string]*upkeepLogQueue), } From 6922352b15e5dd300e94ca31741fe2819acf113e Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 3 Apr 2024 16:35:38 +0300 Subject: [PATCH 50/58] review --- .../evmregistry/v21/logprovider/factory.go | 7 ++++--- .../v21/logprovider/integration_test.go | 12 ++++++------ .../evmregistry/v21/logprovider/provider.go | 9 +++++---- .../plugins/ocr2keeper/integration_21_test.go | 18 +++++++++--------- 4 files changed, 24 insertions(+), 22 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 5b818b492d4..f0d039069d7 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -43,12 +43,13 @@ type LogTriggersOptions struct { BlockRate uint32 } +// BufferVersion is the version of the log buffer. 
+// TODO: remove once we have a single version type BufferVersion string const ( - BufferVersionNone BufferVersion = "" - BufferVersionV0 BufferVersion = "v0" - BufferVersionV1 BufferVersion = "v1" + BufferVersionDefault BufferVersion = "" + BufferVersionV1 BufferVersion = "v1" ) func NewOptions(finalityDepth int64, chainID *big.Int) LogTriggersOptions { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 6af28015ed8..8108f1a3466 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -39,12 +39,12 @@ func TestIntegration_LogEventProvider(t *testing.T) { logLimit uint32 }{ { - name: "default version", - bufferVersion: logprovider.BufferVersionV0, + name: "default buffer", + bufferVersion: logprovider.BufferVersionDefault, logLimit: 10, }, { - name: "v1", + name: "buffer v1", bufferVersion: logprovider.BufferVersionV1, logLimit: 10, }, @@ -225,12 +225,12 @@ func TestIntegration_LogEventProvider_Backfill(t *testing.T) { logLimit uint32 }{ { - name: "default version", - bufferVersion: logprovider.BufferVersionV0, + name: "default buffer", + bufferVersion: logprovider.BufferVersionDefault, logLimit: 10, }, { - name: "v1", + name: "buffer v1", bufferVersion: logprovider.BufferVersionV1, logLimit: 10, }, diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index f71b6af51f6..8d80507efc0 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -48,6 +48,8 @@ var ( readerThreads = 4 bufferSyncInterval = 10 * time.Minute + // logLimitMinimum is how low the log limit 
can go. + logLimitMinimum = 1 ) // LogTriggerConfig is an alias for log trigger config. @@ -196,8 +198,7 @@ func (p *logEventProvider) Start(context.Context) error { select { case <-ticker.C: if p.bufferV1 != nil { - err := p.bufferV1.SyncFilters(p.filterStore) - if err != nil { + if err := p.bufferV1.SyncFilters(p.filterStore); err != nil { p.lggr.Warnw("failed to sync filters", "err", err) } } @@ -259,8 +260,8 @@ func (p *logEventProvider) getBufferDequeueArgs() (blockRate, logLimitLow, maxRe // in case we have more upkeeps than the max results, we reduce the log limit low // so that more upkeeps will get slots in the result set. for numOfUpkeeps > maxResults/logLimitLow { - if logLimitLow == 1 { - // Log limit low can't go less than 1. + if logLimitLow == logLimitMinimum { + // Log limit low can't go less than logLimitMinimum (1). // If some upkeeps are not getting slots in the result set, they supposed to be picked up // in the next iteration if the range is still applicable. // TODO: alerts to notify the system is at full capacity. 
diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go index 5f8a89ac22f..288e7e74fdb 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go @@ -178,11 +178,11 @@ func TestIntegration_KeeperPluginLogUpkeep(t *testing.T) { logBufferVersion logprovider.BufferVersion }{ { - name: "default", - logBufferVersion: logprovider.BufferVersionV0, + name: "default buffer", + logBufferVersion: logprovider.BufferVersionDefault, }, { - name: "v1", + name: "buffer v1", logBufferVersion: logprovider.BufferVersionV1, }, } @@ -285,11 +285,11 @@ func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) { logBufferVersion logprovider.BufferVersion }{ { - name: "default", - logBufferVersion: logprovider.BufferVersionV0, + name: "default buffer", + logBufferVersion: logprovider.BufferVersionDefault, }, { - name: "v1", + name: "buffer v1", logBufferVersion: logprovider.BufferVersionV1, }, } @@ -420,11 +420,11 @@ func TestIntegration_KeeperPluginLogUpkeep_ErrHandler(t *testing.T) { logBufferVersion logprovider.BufferVersion }{ { - name: "default", - logBufferVersion: logprovider.BufferVersionV0, + name: "default buffer", + logBufferVersion: logprovider.BufferVersionDefault, }, { - name: "v1", + name: "buffer v1", logBufferVersion: logprovider.BufferVersionV1, }, } From cda0a3f16e4a01febf098b19dd955cb582a2ada9 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 3 Apr 2024 16:39:50 +0300 Subject: [PATCH 51/58] review: - result 1 or -1 --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/log.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go index 2b193b1b4bc..fc780e98786 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go 
+++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go @@ -30,7 +30,10 @@ func LogComparator(a, b logpoller.Log) int { if logIndexDiff == 0 { return a.TxHash.Big().Cmp(b.TxHash.Big()) } - return int(logIndexDiff) + if logIndexDiff > 0 { + return 1 + } + return -1 } // logID returns a unique identifier for a log, which is an hex string From f534b424ce7f5592220828ec81a33af8bb024a72 Mon Sep 17 00:00:00 2001 From: amirylm Date: Wed, 3 Apr 2024 19:52:01 +0300 Subject: [PATCH 52/58] normalize compare result --- .../evmregistry/v21/logprovider/log.go | 24 +++++++++++++------ .../evmregistry/v21/logprovider/log_test.go | 4 ++-- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go index fc780e98786..9156e341688 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go @@ -23,17 +23,27 @@ func LogSorter(a, b logpoller.Log) bool { // 0 if a == b // +1 if a > b func LogComparator(a, b logpoller.Log) int { - if a.BlockNumber != b.BlockNumber { - return int(a.BlockNumber - b.BlockNumber) + blockDiff := int(a.BlockNumber - b.BlockNumber) + if blockDiff != 0 { + return normalizeCompareResult(blockDiff) } - logIndexDiff := a.LogIndex - b.LogIndex - if logIndexDiff == 0 { - return a.TxHash.Big().Cmp(b.TxHash.Big()) + logIndexDiff := int(a.LogIndex - b.LogIndex) + if logIndexDiff != 0 { + return normalizeCompareResult(logIndexDiff) } - if logIndexDiff > 0 { + return a.TxHash.Big().Cmp(b.TxHash.Big()) +} + +// normalizeCompareResult normalizes the result of a comparison to -1, 0, 1 +func normalizeCompareResult(res int) int { + switch { + case res < 0: + return -1 + case res > 0: return 1 + default: + return 0 } - return -1 } // logID returns a unique identifier for a log, which is an hex string diff 
--git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go index 9a2d2760a4f..9ee8e98a996 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go @@ -40,7 +40,7 @@ func TestLogComparatorSorter(t *testing.T) { LogIndex: 1, }, b: logpoller.Log{ - BlockNumber: 2, + BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 1, }, @@ -82,7 +82,7 @@ func TestLogComparatorSorter(t *testing.T) { a: logpoller.Log{ BlockNumber: 1, TxHash: common.HexToHash("0x1"), - LogIndex: 3, + LogIndex: 4, }, b: logpoller.Log{ BlockNumber: 1, From ee80dcab7f5f15c87ae51b54f6f5cb084cec4802 Mon Sep 17 00:00:00 2001 From: amirylm Date: Thu, 4 Apr 2024 20:51:09 +0300 Subject: [PATCH 53/58] linked TODOs --- core/services/ocr2/delegate.go | 2 +- core/services/ocr2/plugins/ocr2keeper/config.go | 1 + .../ocr2keeper/evmregistry/v21/logprovider/factory.go | 8 ++++---- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index dc68c235976..d5073086481 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -1315,7 +1315,7 @@ func (d *Delegate) newServicesOCR2Keepers21( return nil, errors.New("could not coerce PluginProvider to AutomationProvider") } - // TODO: remove once we remove v0 + // TODO: (AUTO-9355) remove once we remove v0 if useBufferV1 := cfg.UseBufferV1 != nil && *cfg.UseBufferV1; useBufferV1 { logProviderFeatures, ok := keeperProvider.LogEventProvider().(logprovider.LogEventProviderFeatures) if ok { diff --git a/core/services/ocr2/plugins/ocr2keeper/config.go b/core/services/ocr2/plugins/ocr2keeper/config.go index 7aba35201a3..4b41e5a0285 100644 --- a/core/services/ocr2/plugins/ocr2keeper/config.go +++ b/core/services/ocr2/plugins/ocr2keeper/config.go @@ -61,6 +61,7 
@@ type PluginConfig struct { // CaptureAutomationCustomTelemetry is a bool flag to toggle Custom Telemetry Service CaptureAutomationCustomTelemetry *bool `json:"captureAutomationCustomTelemetry,omitempty"` // UseBufferV1 is a bool flag to toggle the new log buffer implementation + // TODO: (AUTO-9355) remove once we have a single version UseBufferV1 *bool `json:"useBufferV1,omitempty"` } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index f0d039069d7..64833f9269b 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -35,16 +35,16 @@ type LogTriggersOptions struct { // Finality depth is the number of blocks to wait before considering a block final. FinalityDepth int64 - // v1 config + // TODO: (AUTO-9355) remove once we have a single version BufferVersion BufferVersion - + // LogLimit is the minimum number of logs to process in a single block window. LogLimit uint32 - + // BlockRate determines the block window for log processing. BlockRate uint32 } // BufferVersion is the version of the log buffer. 
-// TODO: remove once we have a single version +// TODO: (AUTO-9355) remove once we have a single version type BufferVersion string const ( From 7a0aca501fcf01ae13746705b3cc6a8b87c9b14c Mon Sep 17 00:00:00 2001 From: amirylm Date: Fri, 5 Apr 2024 11:06:55 +0300 Subject: [PATCH 54/58] review fix --- .../evmregistry/v21/logprovider/provider.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 8d80507efc0..00129ee8cc4 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -197,10 +197,8 @@ func (p *logEventProvider) Start(context.Context) error { for { select { case <-ticker.C: - if p.bufferV1 != nil { - if err := p.bufferV1.SyncFilters(p.filterStore); err != nil { - p.lggr.Warnw("failed to sync filters", "err", err) - } + if err := p.syncBufferFilters(); err != nil { + p.lggr.Warnw("failed to sync buffer filters", "err", err) } case <-ctx.Done(): return @@ -521,3 +519,16 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ return merr } + +func (p *logEventProvider) syncBufferFilters() error { + p.lock.RLock() + buffVersion := p.opts.BufferVersion + p.lock.RUnlock() + + switch buffVersion { + case BufferVersionV1: + return p.bufferV1.SyncFilters(p.filterStore) + default: + return nil + } +} From b0233af61914d8b199fda580c74bf8935f322503 Mon Sep 17 00:00:00 2001 From: amirylm Date: Fri, 5 Apr 2024 17:04:07 +0300 Subject: [PATCH 55/58] lock while SetConfig --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 00129ee8cc4..b07b08d3354 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -131,6 +131,9 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big } func (p *logEventProvider) SetConfig(cfg ocr2keepers.LogEventProviderConfig) { + p.lock.Lock() + defer p.lock.Unlock() + blockRate := cfg.BlockRate logLimit := cfg.LogLimit From f014da621cef7fbf31ec5ab2b6425a9875bbd2f3 Mon Sep 17 00:00:00 2001 From: amirylm Date: Sun, 7 Apr 2024 15:25:19 +0300 Subject: [PATCH 56/58] remove redundant defaulting --- .../ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index ca31b5f0a20..55240915409 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -66,11 +66,7 @@ func (o *logBufferOptions) override(lookback, blockRate, logLimit uint32) { } func (o *logBufferOptions) windows() int { - blockRate := o.blockRate.Load() - if blockRate == 0 { - blockRate = 1 - } - return int(math.Ceil(float64(o.lookback.Load()) / float64(blockRate))) + return int(math.Ceil(float64(o.lookback.Load()) / float64(o.blockRate.Load()))) } type logBuffer struct { From 3e3bfb069472e914027d25b9145be8aedeb32207 Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 8 Apr 2024 08:51:52 +0300 Subject: [PATCH 57/58] align trigger states + comments --- .../evmregistry/v21/logprovider/buffer_v1.go | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 55240915409..778acdf6470 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -200,17 +200,22 @@ func (b *logBuffer) setUpkeepQueue(uid *big.Int, buf *upkeepLogQueue) { b.queues[uid.String()] = buf } -// TODO: separate files +// TODO (AUTO-9256) separate files +// logTriggerState represents the state of a log in the buffer. type logTriggerState uint8 const ( - logTriggerStateUnknown logTriggerState = iota - logTriggerStateSeen - logTriggerStateVisited - logTriggerStateDropped + // the log was dropped due to buffer limits + logTriggerStateDropped logTriggerState = iota + // the log was enqueued by the buffer + logTriggerStateEnqueued + // the log was visited/dequeued from the buffer + logTriggerStateDequeued ) +// logTriggerStateEntry represents the state of a log in the buffer and the block number of the log. +// TODO (AUTO-10013) handling of reorgs might require to store the block hash as well. 
type logTriggerStateEntry struct { state logTriggerState block int64 @@ -276,7 +281,7 @@ func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, results = append(results, l) lid := logID(l) if s, ok := q.states[lid]; ok { - s.state = logTriggerStateVisited + s.state = logTriggerStateDequeued q.states[lid] = s } continue @@ -315,7 +320,7 @@ func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Lo // q.lggr.Debugw("Skipping known log", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) continue } - q.states[lid] = logTriggerStateEntry{state: logTriggerStateSeen, block: log.BlockNumber} + q.states[lid] = logTriggerStateEntry{state: logTriggerStateEnqueued, block: log.BlockNumber} added++ logs = append(logs, log) } @@ -399,6 +404,7 @@ func (q *upkeepLogQueue) clean(blockThreshold int64) int { return dropped } +// cleanStates removes states that are older than blockThreshold. // NOTE: this method is not thread safe and should be called within a lock. 
func (q *upkeepLogQueue) cleanStates(blockThreshold int64) { for lid, s := range q.states { From 49f1c8ab162455e756447b7de0b35e20552fc9b6 Mon Sep 17 00:00:00 2001 From: amirylm Date: Mon, 8 Apr 2024 09:07:13 +0300 Subject: [PATCH 58/58] fix func name --- .../evmregistry/v21/logprovider/buffer_v1.go | 4 ++-- .../evmregistry/v21/logprovider/buffer_v1_test.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go index 778acdf6470..fbc1da075df 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -94,7 +94,7 @@ func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32) LogB func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { buf, ok := b.getUpkeepQueue(uid) if !ok || buf == nil { - buf = newUpkeepLogBuffer(b.lggr, uid, b.opts) + buf = newUpkeepLogQueue(b.lggr, uid, b.opts) b.setUpkeepQueue(uid, buf) } latestBlock := latestBlockNumber(logs...) 
@@ -237,7 +237,7 @@ type upkeepLogQueue struct { lock sync.RWMutex } -func newUpkeepLogBuffer(lggr logger.Logger, id *big.Int, opts *logBufferOptions) *upkeepLogQueue { +func newUpkeepLogQueue(lggr logger.Logger, id *big.Int, opts *logBufferOptions) *upkeepLogQueue { maxLogs := int(opts.windowLimit.Load()) * opts.windows() // limit per window * windows return &upkeepLogQueue{ lggr: lggr.With("upkeepID", id.String()), diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go index 0029e17f9f3..19f806d35b9 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -272,7 +272,7 @@ func TestLogEventBufferV1_Enqueue(t *testing.T) { func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { t.Run("enqueue dequeue", func(t *testing.T) { - q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) require.Equal(t, 0, dropped) @@ -284,7 +284,7 @@ func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { }) t.Run("enqueue with limits", func(t *testing.T) { - q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) added, dropped := q.enqueue(10, createDummyLogSequence(15, 0, 20, common.HexToHash("0x20"))..., @@ -294,7 +294,7 @@ func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { }) t.Run("dequeue with limits", func(t *testing.T) { - q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 3)) + q := newUpkeepLogQueue(logger.TestLogger(t), 
big.NewInt(1), newLogBufferOptions(10, 1, 3)) added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}, @@ -312,13 +312,13 @@ func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { func TestLogEventBufferV1_UpkeepQueue_sizeOfRange(t *testing.T) { t.Run("empty", func(t *testing.T) { - q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) require.Equal(t, 0, q.sizeOfRange(1, 10)) }) t.Run("happy path", func(t *testing.T) { - q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) require.Equal(t, 0, dropped) @@ -330,7 +330,7 @@ func TestLogEventBufferV1_UpkeepQueue_sizeOfRange(t *testing.T) { func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { t.Run("empty", func(t *testing.T) { - q := newUpkeepLogBuffer(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) q.clean(10) })