diff --git a/.changeset/funny-poets-sneeze.md b/.changeset/funny-poets-sneeze.md new file mode 100644 index 00000000000..214ba4504a6 --- /dev/null +++ b/.changeset/funny-poets-sneeze.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Remove LogPoller filters for outdated Functions coordinator contracts diff --git a/.changeset/pink-ducks-agree.md b/.changeset/pink-ducks-agree.md new file mode 100644 index 00000000000..0b1035c4d84 --- /dev/null +++ b/.changeset/pink-ducks-agree.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Add OCR3 capability contract wrapper diff --git a/.changeset/pretty-experts-unite.md b/.changeset/pretty-experts-unite.md new file mode 100644 index 00000000000..4a1f903d439 --- /dev/null +++ b/.changeset/pretty-experts-unite.md @@ -0,0 +1,7 @@ +--- +"chainlink": patch +--- + +Added log buffer v1 with improved performance, stability, and control over scaling parameters. + +Added a feature flag for using log buffer v1. diff --git a/CHANGELOG.md b/CHANGELOG.md index f60c916a2fc..b5566c64e58 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,8 +10,9 @@ - HeadTracker now respects the `FinalityTagEnabled` config option. If the flag is enabled, HeadTracker backfills blocks up to the latest finalized block provided by the corresponding RPC call. To address potential misconfigurations, `HistoryDepth` is now calculated from the latest finalized block instead of the head. NOTE: Consumers (e.g. TXM and LogPoller) do not fully utilize Finality Tag yet. ... + -## 2.10.0 - UNRELEASED +## 2.10.0 - 2024-04-05 ### Added @@ -20,6 +21,7 @@ - Add preliminary support for "llo" job type (Data Streams V1) - Add `LogPrunePageSize` parameter to the EVM configuration. This parameter controls the number of logs removed during the prune phase in LogPoller. The default value is 0, which deletes all logs at once (exactly how it used to work), so it doesn't require any changes on the product's side. - Add Juels Fee Per Coin data source caching for OCR2 Feeds. The cache is time-based and turned on by default, with a default refresh interval of 5 minutes. It can be configured through pluginconfig using the "juelsPerFeeCoinCacheDuration" and "juelsPerFeeCoinCacheDisabled" tags. The duration tag accepts values between "30s" and "20m"; its default of "0s" is overridden to 5 minutes on cache startup. +- Add rebalancer support for feeds manager ocr2 plugins. ### Fixed @@ -31,8 +33,6 @@ - Minimum required version of Postgres is now >= 12. Postgres 11 was EOL'd in November 2023. Added a new version check that will prevent Chainlink from running on EOL'd Postgres. If you are running Postgres <= 11 you should upgrade to the latest version. The check can be forcibly overridden by setting SKIP_PG_VERSION_CHECK=true.
- Updated the `LimitDefault` and `LimitMax` config types to `uint64` - - ## 2.9.1 - 2024-03-07 ### Changed diff --git a/contracts/scripts/native_solc_compile_all_keystone b/contracts/scripts/native_solc_compile_all_keystone index 3f4d33d6ecc..49bd6527843 100755 --- a/contracts/scripts/native_solc_compile_all_keystone +++ b/contracts/scripts/native_solc_compile_all_keystone @@ -29,3 +29,4 @@ compileContract () { } compileContract keystone/KeystoneForwarder.sol +compileContract keystone/OCR3Capability.sol diff --git a/core/capabilities/targets/write_target.go b/core/capabilities/targets/write_target.go index a7452be7fc8..677b1148ebb 100644 --- a/core/capabilities/targets/write_target.go +++ b/core/capabilities/targets/write_target.go @@ -176,6 +176,24 @@ func (cap *EvmWrite) Execute(ctx context.Context, callback chan<- capabilities.C return err } inputs := inputsAny.(map[string]any) + rep, ok := inputs["report"] + if !ok { + return errors.New("malformed data: inputs doesn't contain a report key") + } + + if rep == nil { + // We received an empty report -- this means we should skip transmission. + cap.lggr.Debugw("Skipping empty report", "request", request) + go func() { + // TODO: cast tx.Error to Err (or Value to Value?) + callback <- capabilities.CapabilityResponse{ + Value: nil, + Err: nil, + } + close(callback) + }() + return nil + } // evaluate any variables in reqConfig.Params args, err := evaluateParams(reqConfig.Params, inputs) @@ -222,7 +240,7 @@ func (cap *EvmWrite) Execute(ctx context.Context, callback chan<- capabilities.C if err != nil { return err } - fmt.Printf("Transaction submitted %v", tx.ID) + cap.lggr.Debugw("Transaction submitted", "request", request, "transaction", tx) go func() { + // TODO: cast tx.Error to Err (or Value to Value?)
callback <- capabilities.CapabilityResponse{ diff --git a/core/capabilities/targets/write_target_test.go b/core/capabilities/targets/write_target_test.go index c71c84e172e..fd68234ca70 100644 --- a/core/capabilities/targets/write_target_test.go +++ b/core/capabilities/targets/write_target_test.go @@ -90,3 +90,55 @@ func TestEvmWrite(t *testing.T) { response := <-ch require.Nil(t, response.Err) } + +func TestEvmWrite_EmptyReport(t *testing.T) { + chain := evmmocks.NewChain(t) + + txManager := txmmocks.NewMockEvmTxManager(t) + chain.On("ID").Return(big.NewInt(11155111)) + chain.On("TxManager").Return(txManager) + + cfg := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { + a := testutils.NewAddress() + addr, err := types.NewEIP55Address(a.Hex()) + require.NoError(t, err) + c.EVM[0].ChainWriter.FromAddress = &addr + + forwarderA := testutils.NewAddress() + forwarderAddr, err := types.NewEIP55Address(forwarderA.Hex()) + require.NoError(t, err) + c.EVM[0].ChainWriter.ForwarderAddress = &forwarderAddr + }) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + chain.On("Config").Return(evmcfg) + + capability := targets.NewEvmWrite(chain, logger.TestLogger(t)) + ctx := testutils.Context(t) + + config, err := values.NewMap(map[string]any{ + "abi": "receive(report bytes)", + "params": []any{"$(report)"}, + }) + require.NoError(t, err) + + inputs, err := values.NewMap(map[string]any{ + "report": nil, + }) + require.NoError(t, err) + + req := capabilities.CapabilityRequest{ + Metadata: capabilities.RequestMetadata{ + WorkflowID: "hello", + }, + Config: config, + Inputs: inputs, + } + + ch := make(chan capabilities.CapabilityResponse) + + err = capability.Execute(ctx, ch, req) + require.NoError(t, err) + + response := <-ch + require.Nil(t, response.Err) +} diff --git a/core/gethwrappers/keystone/generated/ocr3_capability/ocr3_capability.go b/core/gethwrappers/keystone/generated/ocr3_capability/ocr3_capability.go new file mode 100644 index 00000000000..ad1173b3acd --- /dev/null +++ b/core/gethwrappers/keystone/generated/ocr3_capability/ocr3_capability.go @@ -0,0 +1,962 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package ocr3_capability + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var OCR3CapabilityMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ReportInvalid\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReportingUnsupported\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutab
ility\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"transmitters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b50600133806000816200006b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200009e576200009e81620000ac565b505050151560805262000157565b336001600160a01b03821603620001065760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000062565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b608051611f7d6200017360003960006104a40152611f7d6000f3fe608060405234801561001057600080fd5b50600436106100a35760003560e01c80638da5cb5b11610076578063b1dc65a41161005b578063b1dc65a414610187578063e3d0e7121461019a578063f2fde38b146101ad57600080fd5b80638da5cb5b1461013f578063afcb95d71461016757600080fd5b8063181f5a77146100a857806379ba5097146100f057806381411834146100fa57806381ff70481461010f575b600080fd5b604080518082018252600e81527f4b657973746f6e6520302e302e30000000000000000000000000000000000000602082015290516100e791906117e8565b60405180910390f35b6100f86101c0565b005b6101026102c2565b6040516100e79190611853565b6004546002546040805163ffffffff808516825264010000000090940490931660208401528201526060016100e7565b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100e7565b6040805160018152600060208201819052918101919091526060016100e7565b6100f86101953660046118b2565b610331565b6100f86101a8366004611b7c565b610a62565b6100f86101bb366004611c49565b61143d565b60015473ffffffffffffffffffffffffffffffffffffffff163314610246576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b6000805
4337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6060600780548060200260200160405190810160405280929190818152602001828054801561032757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116102fc575b5050505050905090565b60005a604080516020601f8b018190048102820181019092528981529192508a3591818c01359161038791849163ffffffff851691908e908e908190840183828082843760009201919091525061145192505050565b6103bd576040517f0be3632800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805183815262ffffff600884901c1660208201527fb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62910160405180910390a16040805160608101825260025480825260035460ff80821660208501526101009091041692820192909252908314610492576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f636f6e666967446967657374206d69736d617463680000000000000000000000604482015260640161023d565b6104a08b8b8b8b8b8b61145a565b60007f0000000000000000000000000000000000000000000000000000000000000000156104fd576002826020015183604001516104de9190611cc2565b6104e89190611ce1565b6104f3906001611cc2565b60ff169050610513565b602082015161050d906001611cc2565b60ff1690505b88811461057c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f77726f6e67206e756d626572206f66207369676e617475726573000000000000604482015260640161023d565b8887146105e5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f7369676e617475726573206f7574206f6620726567697374726174696f6e0000604482015260640161023d565b3360009081526005602090815260408083208151808301909252805460ff8082168452929391929184019161010090910416600281111561062857610628611d2a565b600281111561063957610639611d2a565b905250905060028160200151600281111561065657610656611d2a565b14801561069d57506007816000015160ff168154811061067857610678611c64565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff1633145b610703576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f756e617574686f72697a6564207472616e736d69747465720000000000000000604482015260640161023d565b5050505050610710611765565b6000808a8a604051610723929190611d59565b60405190819003812061073a918e90602001611d69565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181528282528051602091820120838301909252600080845290830152915060005b89811015610a445760006001848984602081106107a3576107a3611c64565b6107b091901a601b611cc2565b8e8e868181106107c2576107c2611c64565b905060200201358d8d878181106107db576107db611c64565b9050602002013560405160008152602001604052604051610818949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa15801561083a573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526005602090815290849020838501909452835460ff808216855292965092945084019161010090041660028111156108ba576108ba611d2a565b60028111156108cb576108cb611d2a565b90525092506001836020015160028111156108e8576108e8611d2a565b1461094f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f61646472657373206e6f7420617574686f72697a656420746f
207369676e0000604482015260640161023d565b8251600090879060ff16601f811061096957610969611c64565b602002015173ffffffffffffffffffffffffffffffffffffffff16146109eb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f6e6f6e2d756e69717565207369676e6174757265000000000000000000000000604482015260640161023d565b8086846000015160ff16601f8110610a0557610a05611c64565b73ffffffffffffffffffffffffffffffffffffffff9092166020929092020152610a30600186611cc2565b94505080610a3d90611d7d565b9050610784565b505050610a55833383858e8e611511565b5050505050505050505050565b855185518560ff16601f831115610ad5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f746f6f206d616e79207369676e65727300000000000000000000000000000000604482015260640161023d565b60008111610b3f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f66206d75737420626520706f7369746976650000000000000000000000000000604482015260640161023d565b818314610bcd576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f6f7261636c6520616464726573736573206f7574206f6620726567697374726160448201527f74696f6e00000000000000000000000000000000000000000000000000000000606482015260840161023d565b610bd8816003611db5565b8311610c40576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6661756c74792d6f7261636c65206620746f6f20686967680000000000000000604482015260640161023d565b610c48611543565b6040805160c0810182528a8152602081018a905260ff8916918101919091526060810187905267ffffffffffffffff8616608082015260a081018590525b60065415610e3b57600654600090610ca090600190611dcc565b9050600060068281548110610cb757610cb7611c64565b60009182526020822001546007805473ffffffffffffffffffffffffffffffffffffffff90921693509084908110610cf157610cf1611c64565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff85811684526005909252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090811690915592909116808452922080549091169055600680549192509080610d7157610d71611ddf565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff00000000000000000000000000000000000000001690550190556007805480610dda57610dda611ddf565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905550610c86915050565b60005b8151518110156112a05760006005600084600001518481518110610e6457610e64611c64565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff166002811115610eae57610eae611d2a565b14610f15576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265706561746564207369676e65722061646472657373000000000000000000604482015260640161023d565b6040805180820190915260ff82168152600160208201528251805160059160009185908110610f4657610f46611c64565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001617610100836002811115610fe757610fe7611d2a565b021790555060009150610ff79050565b600560008460200151848151811061101157611011611c64565b60209081029190910181015173fffffffffffffffffffffffffffff
fffffffffff16825281019190915260400160002054610100900460ff16600281111561105b5761105b611d2a565b146110c2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f7265706561746564207472616e736d6974746572206164647265737300000000604482015260640161023d565b6040805180820190915260ff8216815260208101600281525060056000846020015184815181106110f5576110f5611c64565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000161761010083600281111561119657611196611d2a565b0217905550508251805160069250839081106111b4576111b4611c64565b602090810291909101810151825460018101845560009384529282902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909316929092179091558201518051600791908390811061123057611230611c64565b60209081029190910181015182546001810184556000938452919092200180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff90921691909117905561129981611d7d565b9050610e3e565b506040810151600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff909216919091179055600480547fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff811664010000000063ffffffff438116820292831785559083048116936001939092600092611332928692908216911617611e0e565b92506101000a81548163ffffffff021916908363ffffffff1602179055506113914630600460009054906101000a900463ffffffff1663ffffffff16856000015186602001518760400151886060015189608001518a60a001516115c6565b6002819055825180516003805460ff909216610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff90921691909117905560045460208501516040808701516060880151608089015160a08a015193517f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e0598611430988b98919763ffffffff909216969095919491939192611e32565b60405180910390a1610a55565b611445611543565b61144e81611670565b50565b60019392505050565b6000611467826020611db5565b611472856020611db5565b61147e88610144611ec8565b6114889190611ec8565b6114929190611ec8565b61149d906000611ec8565b9050368114611508576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f63616c6c64617461206c656e677468206d69736d617463680000000000000000604482015260640161023d565b50505050505050565b6040517f0750181900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005473ffffffffffffffffffffffffffffffffffffffff1633146115c4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161023d565b565b6000808a8a8a8a8a8a8a8a8a6040516020016115ea99989796959493929190611edb565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179b9a5050505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff8216036116ef576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161023d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffff
ffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b604051806103e00160405280601f906020820280368337509192915050565b6000815180845260005b818110156117aa5760208185018101518683018201520161178e565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b6020815260006117fb6020830184611784565b9392505050565b600081518084526020808501945080840160005b8381101561184857815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101611816565b509495945050505050565b6020815260006117fb6020830184611802565b60008083601f84011261187857600080fd5b50813567ffffffffffffffff81111561189057600080fd5b6020830191508360208260051b85010111156118ab57600080fd5b9250929050565b60008060008060008060008060e0898b0312156118ce57600080fd5b606089018a8111156118df57600080fd5b8998503567ffffffffffffffff808211156118f957600080fd5b818b0191508b601f83011261190d57600080fd5b81358181111561191c57600080fd5b8c602082850101111561192e57600080fd5b6020830199508098505060808b013591508082111561194c57600080fd5b6119588c838d01611866565b909750955060a08b013591508082111561197157600080fd5b5061197e8b828c01611866565b999c989b50969995989497949560c00135949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715611a0d57611a0d611997565b604052919050565b803573ffffffffffffffffffffffffffffffffffffffff81168114611a3957600080fd5b919050565b600082601f830112611a4f57600080fd5b8135602067ffffffffffffffff821115611a6b57611a6b611997565b8160051b611a7a8282016119c6565b9283528481018201928281019087851115611a9457600080fd5b83870192505b84831015611aba57611aab83611a15565b82529183019190830190611a9a565b979650505050505050565b803560ff81168114611a3957600080fd5b600082601f830112611ae757600080fd5b813567ffffffffffffffff811115611b0157611b01611997565b611b3260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116016119c6565b818152846020838601011115611b4757600080fd5b816020850160208301376000918101602001919091529392505050565b803567ffffffffffffffff81168114611a3957600080fd5b60008060008060008060c08789031215611b9557600080fd5b863567ffffffffffffffff80821115611bad57600080fd5b611bb98a838b01611a3e565b97506020890135915080821115611bcf57600080fd5b611bdb8a838b01611a3e565b9650611be960408a01611ac5565b95506060890135915080821115611bff57600080fd5b611c0b8a838b01611ad6565b9450611c1960808a01611b64565b935060a0890135915080821115611c2f57600080fd5b50611c3c89828a01611ad6565b9150509295509295509295565b600060208284031215611c5b57600080fd5b6117fb82611a15565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60ff8181168382160190811115611cdb57611cdb611c93565b92915050565b600060ff831680611d1b577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b8060ff84160491505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b8183823760009101908152919050565b828152606082602083013760800192915050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611dae57611dae611c93565b5060010190565b8082028115828204841417611cdb57611cdb611c93565b81810381811115611cdb57611cdb611c93565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b63f
fffffff818116838216019080821115611e2b57611e2b611c93565b5092915050565b600061012063ffffffff808d1684528b6020850152808b16604085015250806060840152611e628184018a611802565b90508281036080840152611e768189611802565b905060ff871660a084015282810360c0840152611e938187611784565b905067ffffffffffffffff851660e0840152828103610100840152611eb88185611784565b9c9b505050505050505050505050565b80820180821115611cdb57611cdb611c93565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b166040850152816060850152611f228285018b611802565b91508382036080850152611f36828a611802565b915060ff881660a085015283820360c0850152611f538288611784565b90861660e08501528381036101008501529050611eb8818561178456fea164736f6c6343000813000a", +} + +var OCR3CapabilityABI = OCR3CapabilityMetaData.ABI + +var OCR3CapabilityBin = OCR3CapabilityMetaData.Bin + +func DeployOCR3Capability(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *OCR3Capability, error) { + parsed, err := OCR3CapabilityMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR3CapabilityBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR3Capability{address: address, abi: *parsed, OCR3CapabilityCaller: OCR3CapabilityCaller{contract: contract}, OCR3CapabilityTransactor: OCR3CapabilityTransactor{contract: contract}, OCR3CapabilityFilterer: OCR3CapabilityFilterer{contract: contract}}, nil +} + +type OCR3Capability struct { + address common.Address + abi abi.ABI + OCR3CapabilityCaller + OCR3CapabilityTransactor + OCR3CapabilityFilterer +} + +type OCR3CapabilityCaller struct { + contract *bind.BoundContract +} + +type OCR3CapabilityTransactor struct { + contract *bind.BoundContract +} + +type OCR3CapabilityFilterer struct { + contract *bind.BoundContract +} + +type OCR3CapabilitySession struct { + Contract *OCR3Capability + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR3CapabilityCallerSession struct { + Contract *OCR3CapabilityCaller + CallOpts bind.CallOpts +} + +type OCR3CapabilityTransactorSession struct { + Contract *OCR3CapabilityTransactor + TransactOpts bind.TransactOpts +} + +type OCR3CapabilityRaw struct { + Contract *OCR3Capability +} + +type OCR3CapabilityCallerRaw struct { + Contract *OCR3CapabilityCaller +} + +type OCR3CapabilityTransactorRaw struct { + Contract *OCR3CapabilityTransactor +} + +func NewOCR3Capability(address common.Address, backend bind.ContractBackend) (*OCR3Capability, error) { + abi, err := abi.JSON(strings.NewReader(OCR3CapabilityABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR3Capability(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR3Capability{address: address, abi: abi, OCR3CapabilityCaller: OCR3CapabilityCaller{contract: contract}, OCR3CapabilityTransactor: OCR3CapabilityTransactor{contract: contract}, OCR3CapabilityFilterer: OCR3CapabilityFilterer{contract: contract}}, nil +} + +func NewOCR3CapabilityCaller(address common.Address, caller bind.ContractCaller) (*OCR3CapabilityCaller, error) { + contract, err := bindOCR3Capability(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR3CapabilityCaller{contract: contract}, nil +} + +func NewOCR3CapabilityTransactor(address common.Address, transactor 
bind.ContractTransactor) (*OCR3CapabilityTransactor, error) { + contract, err := bindOCR3Capability(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR3CapabilityTransactor{contract: contract}, nil +} + +func NewOCR3CapabilityFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR3CapabilityFilterer, error) { + contract, err := bindOCR3Capability(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR3CapabilityFilterer{contract: contract}, nil +} + +func bindOCR3Capability(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR3CapabilityMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR3Capability *OCR3CapabilityRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR3Capability.Contract.OCR3CapabilityCaller.contract.Call(opts, result, method, params...) +} + +func (_OCR3Capability *OCR3CapabilityRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR3Capability.Contract.OCR3CapabilityTransactor.contract.Transfer(opts) +} + +func (_OCR3Capability *OCR3CapabilityRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR3Capability.Contract.OCR3CapabilityTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR3Capability *OCR3CapabilityCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR3Capability.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR3Capability *OCR3CapabilityTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR3Capability.Contract.contract.Transfer(opts) +} + +func (_OCR3Capability *OCR3CapabilityTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR3Capability.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR3Capability *OCR3CapabilityCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _OCR3Capability.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_OCR3Capability *OCR3CapabilitySession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _OCR3Capability.Contract.LatestConfigDetails(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _OCR3Capability.Contract.LatestConfigDetails(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _OCR3Capability.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_OCR3Capability *OCR3CapabilitySession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _OCR3Capability.Contract.LatestConfigDigestAndEpoch(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _OCR3Capability.Contract.LatestConfigDigestAndEpoch(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR3Capability.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR3Capability *OCR3CapabilitySession) Owner() (common.Address, error) { + return _OCR3Capability.Contract.Owner(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCallerSession) Owner() (common.Address, error) { + return _OCR3Capability.Contract.Owner(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCaller) Transmitters(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _OCR3Capability.contract.Call(opts, &out, "transmitters") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_OCR3Capability *OCR3CapabilitySession) Transmitters() ([]common.Address, error) { + return _OCR3Capability.Contract.Transmitters(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCallerSession) Transmitters() ([]common.Address, error) { + return _OCR3Capability.Contract.Transmitters(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _OCR3Capability.contract.Call(opts, &out, "typeAndVersion") + + if err != 
nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_OCR3Capability *OCR3CapabilitySession) TypeAndVersion() (string, error) { + return _OCR3Capability.Contract.TypeAndVersion(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityCallerSession) TypeAndVersion() (string, error) { + return _OCR3Capability.Contract.TypeAndVersion(&_OCR3Capability.CallOpts) +} + +func (_OCR3Capability *OCR3CapabilityTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR3Capability.contract.Transact(opts, "acceptOwnership") +} + +func (_OCR3Capability *OCR3CapabilitySession) AcceptOwnership() (*types.Transaction, error) { + return _OCR3Capability.Contract.AcceptOwnership(&_OCR3Capability.TransactOpts) +} + +func (_OCR3Capability *OCR3CapabilityTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR3Capability.Contract.AcceptOwnership(&_OCR3Capability.TransactOpts) +} + +func (_OCR3Capability *OCR3CapabilityTransactor) SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR3Capability.contract.Transact(opts, "setConfig", _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR3Capability *OCR3CapabilitySession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR3Capability.Contract.SetConfig(&_OCR3Capability.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR3Capability *OCR3CapabilityTransactorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR3Capability.Contract.SetConfig(&_OCR3Capability.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR3Capability *OCR3CapabilityTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR3Capability.contract.Transact(opts, "transferOwnership", to) +} + +func (_OCR3Capability *OCR3CapabilitySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR3Capability.Contract.TransferOwnership(&_OCR3Capability.TransactOpts, to) +} + +func (_OCR3Capability *OCR3CapabilityTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR3Capability.Contract.TransferOwnership(&_OCR3Capability.TransactOpts, to) +} + +func (_OCR3Capability *OCR3CapabilityTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR3Capability.contract.Transact(opts, "transmit", reportContext, report, rs, ss, rawVs) +} + +func (_OCR3Capability *OCR3CapabilitySession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR3Capability.Contract.Transmit(&_OCR3Capability.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +func (_OCR3Capability *OCR3CapabilityTransactorSession) 
Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR3Capability.Contract.Transmit(&_OCR3Capability.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +type OCR3CapabilityConfigSetIterator struct { + Event *OCR3CapabilityConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR3CapabilityConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR3CapabilityConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR3CapabilityConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR3CapabilityConfigSetIterator) Error() error { + return it.fail +} + +func (it *OCR3CapabilityConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR3CapabilityConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_OCR3Capability *OCR3CapabilityFilterer) FilterConfigSet(opts *bind.FilterOpts) (*OCR3CapabilityConfigSetIterator, error) { + + logs, sub, err := _OCR3Capability.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &OCR3CapabilityConfigSetIterator{contract: _OCR3Capability.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_OCR3Capability *OCR3CapabilityFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR3CapabilityConfigSet) (event.Subscription, error) { + + logs, sub, err := _OCR3Capability.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR3CapabilityConfigSet) + if err := _OCR3Capability.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR3Capability *OCR3CapabilityFilterer) ParseConfigSet(log types.Log) (*OCR3CapabilityConfigSet, error) { + event := new(OCR3CapabilityConfigSet) + if err := _OCR3Capability.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR3CapabilityOwnershipTransferRequestedIterator struct { + Event *OCR3CapabilityOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR3CapabilityOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR3CapabilityOwnershipTransferRequested) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR3CapabilityOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR3CapabilityOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR3CapabilityOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR3CapabilityOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR3Capability *OCR3CapabilityFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR3CapabilityOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR3Capability.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR3CapabilityOwnershipTransferRequestedIterator{contract: _OCR3Capability.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR3Capability *OCR3CapabilityFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR3CapabilityOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR3Capability.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR3CapabilityOwnershipTransferRequested) + if err := _OCR3Capability.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR3Capability *OCR3CapabilityFilterer) ParseOwnershipTransferRequested(log types.Log) (*OCR3CapabilityOwnershipTransferRequested, error) { + event := new(OCR3CapabilityOwnershipTransferRequested) + if err := _OCR3Capability.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR3CapabilityOwnershipTransferredIterator struct { + Event *OCR3CapabilityOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR3CapabilityOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR3CapabilityOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR3CapabilityOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR3CapabilityOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR3CapabilityOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR3CapabilityOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR3Capability *OCR3CapabilityFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR3CapabilityOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR3Capability.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR3CapabilityOwnershipTransferredIterator{contract: _OCR3Capability.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_OCR3Capability *OCR3CapabilityFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR3CapabilityOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR3Capability.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR3CapabilityOwnershipTransferred) + if err := _OCR3Capability.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR3Capability *OCR3CapabilityFilterer) ParseOwnershipTransferred(log types.Log) (*OCR3CapabilityOwnershipTransferred, error) { + event := new(OCR3CapabilityOwnershipTransferred) + if err := _OCR3Capability.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR3CapabilityTransmittedIterator struct { + Event *OCR3CapabilityTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR3CapabilityTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR3CapabilityTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event 
= new(OCR3CapabilityTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR3CapabilityTransmittedIterator) Error() error { + return it.fail +} + +func (it *OCR3CapabilityTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR3CapabilityTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_OCR3Capability *OCR3CapabilityFilterer) FilterTransmitted(opts *bind.FilterOpts) (*OCR3CapabilityTransmittedIterator, error) { + + logs, sub, err := _OCR3Capability.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &OCR3CapabilityTransmittedIterator{contract: _OCR3Capability.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_OCR3Capability *OCR3CapabilityFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *OCR3CapabilityTransmitted) (event.Subscription, error) { + + logs, sub, err := _OCR3Capability.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR3CapabilityTransmitted) + if err := _OCR3Capability.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR3Capability *OCR3CapabilityFilterer) ParseTransmitted(log types.Log) (*OCR3CapabilityTransmitted, error) { + event := new(OCR3CapabilityTransmitted) + if err := _OCR3Capability.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_OCR3Capability *OCR3Capability) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR3Capability.abi.Events["ConfigSet"].ID: + return _OCR3Capability.ParseConfigSet(log) + case _OCR3Capability.abi.Events["OwnershipTransferRequested"].ID: + return _OCR3Capability.ParseOwnershipTransferRequested(log) + case _OCR3Capability.abi.Events["OwnershipTransferred"].ID: + return _OCR3Capability.ParseOwnershipTransferred(log) + case _OCR3Capability.abi.Events["Transmitted"].ID: + return _OCR3Capability.ParseTransmitted(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR3CapabilityConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (OCR3CapabilityOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OCR3CapabilityOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OCR3CapabilityTransmitted) Topic() common.Hash { + return 
common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (_OCR3Capability *OCR3Capability) Address() common.Address { + return _OCR3Capability.address +} + +type OCR3CapabilityInterface interface { + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Transmitters(opts *bind.CallOpts) ([]common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*OCR3CapabilityConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR3CapabilityConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*OCR3CapabilityConfigSet, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR3CapabilityOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR3CapabilityOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OCR3CapabilityOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR3CapabilityOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR3CapabilityOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OCR3CapabilityOwnershipTransferred, error) + + FilterTransmitted(opts *bind.FilterOpts) (*OCR3CapabilityTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *OCR3CapabilityTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*OCR3CapabilityTransmitted, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt index 8dad729b196..b9d8bfbfefc 100644 --- a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -1,2 +1,3 @@ GETH_VERSION: 1.13.8 forwarder: ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.abi ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.bin 4886b538e1fdc8aaf860901de36269e0c35acfd3e6eb190654d693ff9dbd4b6d +ocr3_capability: ../../../contracts/solc/v0.8.19/OCR3Capability/OCR3Capability.abi ../../../contracts/solc/v0.8.19/OCR3Capability/OCR3Capability.bin 9dcbdf55bd5729ba266148da3f17733eb592c871c2108ccca546618628fd9ad2 diff --git 
a/core/gethwrappers/keystone/go_generate.go b/core/gethwrappers/keystone/go_generate.go index 75800132f8e..0c49456f29c 100644 --- a/core/gethwrappers/keystone/go_generate.go +++ b/core/gethwrappers/keystone/go_generate.go @@ -5,3 +5,4 @@ package gethwrappers // Keystone //go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.abi ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.bin KeystoneForwarder forwarder +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/OCR3Capability/OCR3Capability.abi ../../../contracts/solc/v0.8.19/OCR3Capability/OCR3Capability.bin OCR3Capability ocr3_capability diff --git a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go index f5cb1badb95..2dd5e1eb68a 100644 --- a/core/services/chainlink/relayer_factory.go +++ b/core/services/chainlink/relayer_factory.go @@ -68,6 +68,7 @@ func (r *RelayerFactory) NewEVM(ctx context.Context, config EVMFactoryConfig) (m relayerOpts := evmrelay.RelayerOpts{ DB: ccOpts.SqlxDB, + DS: ccOpts.DB, QConfig: ccOpts.AppConfig.Database(), CSAETHKeystore: config.CSAETHKeystore, MercuryPool: r.MercuryPool, diff --git a/core/services/job/spawner_test.go b/core/services/job/spawner_test.go index b6de9d790fa..ac0783e9868 100644 --- a/core/services/job/spawner_test.go +++ b/core/services/job/spawner_test.go @@ -287,6 +287,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { evmRelayer, err := evmrelayer.NewRelayer(lggr, chain, evmrelayer.RelayerOpts{ DB: db, + DS: db, QConfig: testopts.GeneralConfig.Database(), CSAETHKeystore: keyStore, }) diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index a053b53992d..7b4200efd68 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -59,6 +59,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21" ocr2keeper21core "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" ocr2vrfconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/config" ocr2coordinator "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/coordinator" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin" @@ -1313,6 +1314,14 @@ func (d *Delegate) newServicesOCR2Keepers21( return nil, errors.New("could not coerce PluginProvider to AutomationProvider") } + // TODO: (AUTO-9355) remove once we remove v0 + if useBufferV1 := cfg.UseBufferV1 != nil && *cfg.UseBufferV1; useBufferV1 { + logProviderFeatures, ok := keeperProvider.LogEventProvider().(logprovider.LogEventProviderFeatures) + if ok { + logProviderFeatures.WithBufferVersion("v1") + } + } + services, err := ocr2keeper.EVMDependencies21(kb) if err != nil { return nil, errors.Wrap(err, "could not build dependencies for ocr2 keepers") diff --git a/core/services/ocr2/plugins/ocr2keeper/config.go b/core/services/ocr2/plugins/ocr2keeper/config.go index ec56f9c6993..4b41e5a0285 100644 --- a/core/services/ocr2/plugins/ocr2keeper/config.go +++ b/core/services/ocr2/plugins/ocr2keeper/config.go @@ -60,6 +60,9 @@ type PluginConfig struct { ContractVersion string `json:"contractVersion"` // 
CaptureAutomationCustomTelemetry is a bool flag to toggle Custom Telemetry Service CaptureAutomationCustomTelemetry *bool `json:"captureAutomationCustomTelemetry,omitempty"` + // UseBufferV1 is a bool flag to toggle the new log buffer implementation + // TODO: (AUTO-9355) remove once we have a single version + UseBufferV1 *bool `json:"useBufferV1,omitempty"` } func ValidatePluginConfig(cfg PluginConfig) error { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go new file mode 100644 index 00000000000..fbc1da075df --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1.go @@ -0,0 +1,426 @@ +package logprovider + +import ( + "math" + "math/big" + "sort" + "sync" + "sync/atomic" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics" +) + +type BufferedLog struct { + ID *big.Int + Log logpoller.Log +} + +type LogBuffer interface { + // Enqueue adds logs to the buffer and might also drop logs if the limit for the + // given upkeep was exceeded. Returns the number of logs that were added and number of logs that were dropped. + Enqueue(id *big.Int, logs ...logpoller.Log) (added int, dropped int) + // Dequeue pulls logs from the buffer that are within the given block window, + // with a maximum number of logs per upkeep and a total maximum number of logs to return. + // It also accepts a function to select upkeeps. + // Returns logs (associated with upkeeps) and the number of remaining + // logs in that window for the involved upkeeps. + Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) + // SetConfig sets the buffer size and the maximum number of logs to keep for each upkeep. + SetConfig(lookback, blockRate, logLimit uint32) + // NumOfUpkeeps returns the number of upkeeps that are being tracked by the buffer. + NumOfUpkeeps() int + // SyncFilters removes upkeeps that are not in the filter store.
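+ // In this change the provider invokes SyncFilters on a timer (see bufferSyncInterval + // in provider.go further down in this diff), so upkeeps that left the filter store + // stop holding buffer capacity within roughly one sync interval.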
+ SyncFilters(filterStore UpkeepFilterStore) error +} + +func DefaultUpkeepSelector(id *big.Int) bool { + return true +} + +type logBufferOptions struct { + // number of blocks to keep in the buffer + lookback *atomic.Uint32 + // blockRate is the number of blocks per window + blockRate *atomic.Uint32 + // max number of logs to keep in the buffer for each upkeep per window (LogLimit*10) + windowLimit *atomic.Uint32 +} + +func newLogBufferOptions(lookback, blockRate, logLimit uint32) *logBufferOptions { + opts := &logBufferOptions{ + windowLimit: new(atomic.Uint32), + lookback: new(atomic.Uint32), + blockRate: new(atomic.Uint32), + } + opts.override(lookback, blockRate, logLimit) + + return opts +} + +func (o *logBufferOptions) override(lookback, blockRate, logLimit uint32) { + o.windowLimit.Store(logLimit * 10) + o.lookback.Store(lookback) + o.blockRate.Store(blockRate) +} + +func (o *logBufferOptions) windows() int { + return int(math.Ceil(float64(o.lookback.Load()) / float64(o.blockRate.Load()))) +} + +type logBuffer struct { + lggr logger.Logger + opts *logBufferOptions + // last block number seen by the buffer + lastBlockSeen *atomic.Int64 + // map of upkeep id to its queue + queues map[string]*upkeepLogQueue + lock sync.RWMutex +} + +func NewLogBuffer(lggr logger.Logger, lookback, blockRate, logLimit uint32) LogBuffer { + return &logBuffer{ + lggr: lggr.Named("KeepersRegistry.LogEventBufferV1"), + opts: newLogBufferOptions(lookback, blockRate, logLimit), + lastBlockSeen: new(atomic.Int64), + queues: make(map[string]*upkeepLogQueue), + } +} + +// Enqueue adds logs to the buffer and might also drop logs if the limit for the +// given upkeep was exceeded. It will create a new buffer if it does not exist. +// Returns the number of logs that were added and number of logs that were dropped. +func (b *logBuffer) Enqueue(uid *big.Int, logs ...logpoller.Log) (int, int) { + buf, ok := b.getUpkeepQueue(uid) + if !ok || buf == nil { + buf = newUpkeepLogQueue(b.lggr, uid, b.opts) + b.setUpkeepQueue(uid, buf) + } + latestBlock := latestBlockNumber(logs...) + if b.lastBlockSeen.Load() < latestBlock { + b.lastBlockSeen.Store(latestBlock) + } + blockThreshold := b.lastBlockSeen.Load() - int64(b.opts.lookback.Load()) + if blockThreshold <= 0 { + blockThreshold = 1 + } + return buf.enqueue(blockThreshold, logs...) +} + +// Dequeue greedily pulls logs from the buffers. +// Returns logs and the number of remaining logs in the buffer. +func (b *logBuffer) Dequeue(block int64, blockRate, upkeepLimit, maxResults int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { + b.lock.RLock() + defer b.lock.RUnlock() + + start, end := getBlockWindow(block, blockRate) + return b.dequeue(start, end, upkeepLimit, maxResults, upkeepSelector) +} + +// dequeue pulls logs from the buffers, depending on the given selector (upkeepSelector), +// in the block range [start,end] with a minimum number of results per upkeep (upkeepLimit) +// and a maximum total number of results (capacity). +// Returns logs and the number of remaining logs in the buffer for the given range and selector. +// NOTE: this method is not thread safe and should be called within a lock.
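+// Illustrative walk-through (values not from this change): with two upkeeps holding +// 3 logs each in [start,end], upkeepLimit=2 and capacity=3, one queue contributes 2 +// logs and the other 1 (capacity is exhausted), and remainingLogs reports the 3 logs +// left behind; iteration order over the queues map is not deterministic.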
+func (b *logBuffer) dequeue(start, end int64, upkeepLimit, capacity int, upkeepSelector func(id *big.Int) bool) ([]BufferedLog, int) { + var result []BufferedLog + var remainingLogs int + for _, q := range b.queues { + if !upkeepSelector(q.id) { + // if the upkeep is not selected, skip it + continue + } + logsInRange := q.sizeOfRange(start, end) + if logsInRange == 0 { + // if there are no logs in the range, skip the upkeep + continue + } + if capacity == 0 { + // if there is no more capacity for results, just count the remaining logs + remainingLogs += logsInRange + continue + } + if upkeepLimit > capacity { + // adjust limit if it is higher than the actual capacity + upkeepLimit = capacity + } + logs, remaining := q.dequeue(start, end, upkeepLimit) + for _, l := range logs { + result = append(result, BufferedLog{ID: q.id, Log: l}) + capacity-- + } + remainingLogs += remaining + } + return result, remainingLogs +} + +func (b *logBuffer) SetConfig(lookback, blockRate, logLimit uint32) { + b.lock.Lock() + defer b.lock.Unlock() + + b.opts.override(lookback, blockRate, logLimit) +} + +func (b *logBuffer) NumOfUpkeeps() int { + b.lock.RLock() + defer b.lock.RUnlock() + + return len(b.queues) +} + +func (b *logBuffer) SyncFilters(filterStore UpkeepFilterStore) error { + b.lock.Lock() + defer b.lock.Unlock() + + for upkeepID := range b.queues { + uid := new(big.Int) + _, ok := uid.SetString(upkeepID, 10) + if ok && !filterStore.Has(uid) { + // remove upkeep that is not in the filter store + delete(b.queues, upkeepID) + } + } + + return nil +} + +func (b *logBuffer) getUpkeepQueue(uid *big.Int) (*upkeepLogQueue, bool) { + b.lock.RLock() + defer b.lock.RUnlock() + + ub, ok := b.queues[uid.String()] + return ub, ok +} + +func (b *logBuffer) setUpkeepQueue(uid *big.Int, buf *upkeepLogQueue) { + b.lock.Lock() + defer b.lock.Unlock() + + b.queues[uid.String()] = buf +} + +// TODO (AUTO-9256) separate files + +// logTriggerState represents the state of a log in the buffer. +type logTriggerState uint8 + +const ( + // the log was dropped due to buffer limits + logTriggerStateDropped logTriggerState = iota + // the log was enqueued by the buffer + logTriggerStateEnqueued + // the log was visited/dequeued from the buffer + logTriggerStateDequeued +) + +// logTriggerStateEntry represents the state of a log in the buffer and the block number of the log. +// TODO (AUTO-10013) handling of reorgs might require storing the block hash as well. +type logTriggerStateEntry struct { + state logTriggerState + block int64 +} + +// upkeepLogQueue is a priority queue for logs associated with a specific upkeep. +// It keeps track of the logs that were already visited and the capacity of the queue. +type upkeepLogQueue struct { + lggr logger.Logger + + id *big.Int + opts *logBufferOptions + + // logs is the buffer of logs for the upkeep + logs []logpoller.Log + // states keeps track of the state of the logs that are known to the queue + // and the block number they were seen at + states map[string]logTriggerStateEntry + lock sync.RWMutex +} + +func newUpkeepLogQueue(lggr logger.Logger, id *big.Int, opts *logBufferOptions) *upkeepLogQueue { + maxLogs := int(opts.windowLimit.Load()) * opts.windows() // limit per window * windows + return &upkeepLogQueue{ + lggr: lggr.With("upkeepID", id.String()), + id: id, + opts: opts, + logs: make([]logpoller.Log, 0, maxLogs), + states: make(map[string]logTriggerStateEntry), + } +} + +// sizeOfRange returns the number of logs in the buffer that are within the given block range.
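+// The range is inclusive on both ends, matching the [start,end] windows produced by +// getBlockWindow; e.g. a single log at block 20 is counted by sizeOfRange(1, 20) but +// not by sizeOfRange(1, 19).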
+func (q *upkeepLogQueue) sizeOfRange(start, end int64) int { + q.lock.RLock() + defer q.lock.RUnlock() + + size := 0 + for _, l := range q.logs { + if l.BlockNumber >= start && l.BlockNumber <= end { + size++ + } + } + return size +} + +// dequeue pulls logs from the buffer that are within the given block range, +// with a limit of logs to pull. Returns logs and the number of remaining logs in the buffer. +func (q *upkeepLogQueue) dequeue(start, end int64, limit int) ([]logpoller.Log, int) { + q.lock.Lock() + defer q.lock.Unlock() + + if len(q.logs) == 0 { + return nil, 0 + } + + var results []logpoller.Log + var remaining int + updatedLogs := make([]logpoller.Log, 0) + for _, l := range q.logs { + if l.BlockNumber >= start && l.BlockNumber <= end { + if len(results) < limit { + results = append(results, l) + lid := logID(l) + if s, ok := q.states[lid]; ok { + s.state = logTriggerStateDequeued + q.states[lid] = s + } + continue + } + remaining++ + } + updatedLogs = append(updatedLogs, l) + } + + if len(results) > 0 { + q.logs = updatedLogs + q.lggr.Debugw("Dequeued logs", "start", start, "end", end, "limit", limit, "results", len(results), "remaining", remaining) + } + + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionEgress).Add(float64(len(results))) + + return results, remaining +} + +// enqueue adds logs to the buffer and might also drop logs if the limit for the +// given upkeep was exceeded. Additionally, it will drop logs that are older than blockThreshold. +// Returns the number of logs that were added and number of logs that were dropped. +func (q *upkeepLogQueue) enqueue(blockThreshold int64, logsToAdd ...logpoller.Log) (int, int) { + q.lock.Lock() + defer q.lock.Unlock() + + logs := q.logs + var added int + for _, log := range logsToAdd { + if log.BlockNumber < blockThreshold { + // q.lggr.Debugw("Skipping log from old block", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) + continue + } + lid := logID(log) + if _, ok := q.states[lid]; ok { + // q.lggr.Debugw("Skipping known log", "blockThreshold", blockThreshold, "logBlock", log.BlockNumber, "logIndex", log.LogIndex) + continue + } + q.states[lid] = logTriggerStateEntry{state: logTriggerStateEnqueued, block: log.BlockNumber} + added++ + logs = append(logs, log) + } + q.logs = logs + + var dropped int + if added > 0 { + q.orderLogs() + dropped = q.clean(blockThreshold) + q.lggr.Debugw("Enqueued logs", "added", added, "dropped", dropped, "blockThreshold", blockThreshold, "q size", len(q.logs), "visited size", len(q.states)) + } + + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionIngress).Add(float64(added)) + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionDropped).Add(float64(dropped)) + + return added, dropped +} + +// orderLogs sorts the logs in the buffer. +// NOTE: this method is not thread safe and should be called within a lock. +func (q *upkeepLogQueue) orderLogs() { + // sort logs by block number, tx hash and log index + // to keep the q sorted and to ensure that logs can be + // grouped by block windows for the cleanup + sort.SliceStable(q.logs, func(i, j int) bool { + return LogSorter(q.logs[i], q.logs[j]) + }) +} + +// clean removes logs that are older than blockThreshold and drops logs if the limit for the +// given upkeep was exceeded. Returns the number of logs that were dropped. +// NOTE: this method is not thread safe and should be called within a lock. 
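+// Sketch of the intended behaviour (illustrative numbers, not from this change): with +// blockRate=5 and windowLimit=10, a window holding 12 logs keeps the first 10 in +// LogSorter order and drops 2, while any log below blockThreshold is expired and its +// state entry removed.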
+func (q *upkeepLogQueue) clean(blockThreshold int64) int { + var dropped, expired int + blockRate := int(q.opts.blockRate.Load()) + windowLimit := int(q.opts.windowLimit.Load()) + updated := make([]logpoller.Log, 0) + // helper variables to keep track of the current window capacity + currentWindowCapacity, currentWindowStart := 0, int64(0) + for _, l := range q.logs { + if blockThreshold > l.BlockNumber { // old log, removed + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionExpired).Inc() + // q.lggr.Debugw("Expiring old log", "blockNumber", l.BlockNumber, "blockThreshold", blockThreshold, "logIndex", l.LogIndex) + logid := logID(l) + delete(q.states, logid) + expired++ + continue + } + start, _ := getBlockWindow(l.BlockNumber, blockRate) + if start != currentWindowStart { + // new window, reset capacity + currentWindowStart = start + currentWindowCapacity = 0 + } + currentWindowCapacity++ + // if capacity has been reached, drop the log + if currentWindowCapacity > windowLimit { + lid := logID(l) + if s, ok := q.states[lid]; ok { + s.state = logTriggerStateDropped + q.states[lid] = s + } + dropped++ + prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionDropped).Inc() + q.lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", l.BlockNumber, + "blockHash", l.BlockHash, "txHash", l.TxHash, "logIndex", l.LogIndex, "len updated", len(updated), + "currentWindowStart", currentWindowStart, "currentWindowCapacity", currentWindowCapacity, + "maxLogsPerWindow", windowLimit, "blockRate", blockRate) + continue + } + updated = append(updated, l) + } + + if dropped > 0 || expired > 0 { + q.lggr.Debugw("Cleaned logs", "dropped", dropped, "expired", expired, "blockThreshold", blockThreshold, "len updated", len(updated), "len before", len(q.logs)) + q.logs = updated + } + + q.cleanStates(blockThreshold) + + return dropped +} + +// cleanStates removes states that are older than blockThreshold. +// NOTE: this method is not thread safe and should be called within a lock. +func (q *upkeepLogQueue) cleanStates(blockThreshold int64) { + for lid, s := range q.states { + if s.block <= blockThreshold { + delete(q.states, lid) + } + } +} + +// getBlockWindow returns the start and end block of the window for the given block. 
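+// For example (mirroring TestLogEventBufferV1_BlockWindow below): +// +// getBlockWindow(81, 4) // -> start=80, end=83 +// getBlockWindow(81, 1) // -> start=81, end=81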
+func getBlockWindow(block int64, blockRate int) (start int64, end int64) { + windowSize := int64(blockRate) + if windowSize == 0 { + return block, block + } + start = block - (block % windowSize) + end = start + windowSize - 1 + return +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go new file mode 100644 index 00000000000..19f806d35b9 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_v1_test.go @@ -0,0 +1,472 @@ +package logprovider + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestLogEventBufferV1(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) + + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.Enqueue(big.NewInt(2), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + ) + results, remaining := buf.Dequeue(int64(1), 10, 1, 2, DefaultUpkeepSelector) + require.Equal(t, 2, len(results)) + require.Equal(t, 2, remaining) + require.True(t, results[0].ID.Cmp(results[1].ID) != 0) + results, remaining = buf.Dequeue(int64(1), 10, 1, 2, DefaultUpkeepSelector) + require.Equal(t, 2, len(results)) + require.Equal(t, 0, remaining) +} + +func TestLogEventBufferV1_SyncFilters(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 20, 1) + + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.Enqueue(big.NewInt(2), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + ) + filterStore := NewUpkeepFilterStore() + filterStore.AddActiveUpkeeps(upkeepFilter{upkeepID: big.NewInt(1)}) + + require.Equal(t, 2, buf.NumOfUpkeeps()) + require.NoError(t, buf.SyncFilters(filterStore)) + require.Equal(t, 1, buf.NumOfUpkeeps()) +} + +func TestLogEventBufferV1_Dequeue(t *testing.T) { + tests := []struct { + name string + logsInBuffer map[*big.Int][]logpoller.Log + args dequeueArgs + lookback int + results []logpoller.Log + remaining int + }{ + { + name: "empty", + logsInBuffer: map[*big.Int][]logpoller.Log{}, + args: newDequeueArgs(10, 1, 1, 10, nil), + lookback: 20, + results: []logpoller.Log{}, + }, + { + name: "happy path", + logsInBuffer: map[*big.Int][]logpoller.Log{ + big.NewInt(1): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 0}, + {BlockNumber: 14, TxHash: common.HexToHash("0x15"), LogIndex: 1}, + }, + }, + args: newDequeueArgs(10, 5, 3, 10, nil), + lookback: 20, + results: []logpoller.Log{ + {}, {}, + }, + }, + { + name: "with upkeep limits", + logsInBuffer: map[*big.Int][]logpoller.Log{ + big.NewInt(1): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 1}, + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 0}, + {BlockNumber: 13, TxHash: common.HexToHash("0x13"), LogIndex: 0}, + {BlockNumber: 13, TxHash: common.HexToHash("0x13"), LogIndex: 1}, + 
{BlockNumber: 14, TxHash: common.HexToHash("0x14"), LogIndex: 1}, + {BlockNumber: 14, TxHash: common.HexToHash("0x14"), LogIndex: 2}, + }, + big.NewInt(2): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 11}, + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 10}, + {BlockNumber: 13, TxHash: common.HexToHash("0x13"), LogIndex: 10}, + {BlockNumber: 13, TxHash: common.HexToHash("0x13"), LogIndex: 11}, + {BlockNumber: 14, TxHash: common.HexToHash("0x14"), LogIndex: 11}, + {BlockNumber: 14, TxHash: common.HexToHash("0x14"), LogIndex: 12}, + }, + }, + args: newDequeueArgs(10, 5, 2, 10, nil), + lookback: 20, + results: []logpoller.Log{ + {}, {}, {}, {}, + }, + remaining: 8, + }, + { + name: "with max results", + logsInBuffer: map[*big.Int][]logpoller.Log{ + big.NewInt(1): append(createDummyLogSequence(2, 0, 12, common.HexToHash("0x12")), createDummyLogSequence(2, 0, 13, common.HexToHash("0x13"))...), + big.NewInt(2): append(createDummyLogSequence(2, 10, 12, common.HexToHash("0x12")), createDummyLogSequence(2, 10, 13, common.HexToHash("0x13"))...), + }, + args: newDequeueArgs(10, 5, 3, 4, nil), + lookback: 20, + results: []logpoller.Log{ + {}, {}, {}, {}, + }, + remaining: 4, + }, + { + name: "with upkeep selector", + logsInBuffer: map[*big.Int][]logpoller.Log{ + big.NewInt(1): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 0}, + {BlockNumber: 14, TxHash: common.HexToHash("0x15"), LogIndex: 1}, + }, + }, + args: newDequeueArgs(10, 5, 5, 10, func(id *big.Int) bool { return false }), + lookback: 20, + results: []logpoller.Log{}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), uint32(tc.lookback), uint32(tc.args.blockRate), uint32(tc.args.upkeepLimit)) + for id, logs := range tc.logsInBuffer { + added, dropped := buf.Enqueue(id, logs...) 
+ require.Equal(t, len(logs), added+dropped) + } + results, remaining := buf.Dequeue(tc.args.block, tc.args.blockRate, tc.args.upkeepLimit, tc.args.maxResults, tc.args.upkeepSelector) + require.Equal(t, len(tc.results), len(results)) + require.Equal(t, tc.remaining, remaining) + }) + } +} + +func TestLogEventBufferV1_Enqueue(t *testing.T) { + tests := []struct { + name string + logsToAdd map[*big.Int][]logpoller.Log + added, dropped map[string]int + sizeOfRange map[*big.Int]int + rangeStart, rangeEnd int64 + lookback, blockRate, upkeepLimit uint32 + }{ + { + name: "empty", + logsToAdd: map[*big.Int][]logpoller.Log{}, + added: map[string]int{}, + dropped: map[string]int{}, + sizeOfRange: map[*big.Int]int{}, + rangeStart: 0, + rangeEnd: 10, + blockRate: 1, + upkeepLimit: 1, + lookback: 20, + }, + { + name: "happy path", + logsToAdd: map[*big.Int][]logpoller.Log{ + big.NewInt(1): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 0}, + {BlockNumber: 14, TxHash: common.HexToHash("0x15"), LogIndex: 1}, + }, + big.NewInt(2): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 11}, + }, + }, + added: map[string]int{ + big.NewInt(1).String(): 2, + big.NewInt(2).String(): 1, + }, + dropped: map[string]int{ + big.NewInt(1).String(): 0, + big.NewInt(2).String(): 0, + }, + sizeOfRange: map[*big.Int]int{ + big.NewInt(1): 2, + big.NewInt(2): 1, + }, + rangeStart: 10, + rangeEnd: 20, + blockRate: 5, + upkeepLimit: 1, + lookback: 20, + }, + { + name: "above limits", + logsToAdd: map[*big.Int][]logpoller.Log{ + big.NewInt(1): createDummyLogSequence(11, 0, 12, common.HexToHash("0x12")), + big.NewInt(2): { + {BlockNumber: 12, TxHash: common.HexToHash("0x12"), LogIndex: 11}, + }, + }, + added: map[string]int{ + big.NewInt(1).String(): 11, + big.NewInt(2).String(): 1, + }, + dropped: map[string]int{ + big.NewInt(1).String(): 1, + big.NewInt(2).String(): 0, + }, + sizeOfRange: map[*big.Int]int{ + big.NewInt(1): 10, + big.NewInt(2): 1, + }, + rangeStart: 10, + rangeEnd: 20, + blockRate: 10, + upkeepLimit: 1, + lookback: 20, + }, + { + name: "out of block range", + logsToAdd: map[*big.Int][]logpoller.Log{ + big.NewInt(1): append(createDummyLogSequence(2, 0, 1, common.HexToHash("0x1")), createDummyLogSequence(2, 0, 100, common.HexToHash("0x1"))...), + }, + added: map[string]int{ + big.NewInt(1).String(): 2, + }, + dropped: map[string]int{ + big.NewInt(1).String(): 0, + }, + sizeOfRange: map[*big.Int]int{ + big.NewInt(1): 2, + }, + rangeStart: 1, + rangeEnd: 101, + blockRate: 10, + upkeepLimit: 10, + lookback: 20, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), tc.lookback, tc.blockRate, tc.upkeepLimit) + for id, logs := range tc.logsToAdd { + added, dropped := buf.Enqueue(id, logs...) 
+ sid := id.String() + if _, ok := tc.added[sid]; !ok { + tc.added[sid] = 0 + } + if _, ok := tc.dropped[sid]; !ok { + tc.dropped[sid] = 0 + } + require.Equal(t, tc.added[sid], added) + require.Equal(t, tc.dropped[sid], dropped) + } + for id, size := range tc.sizeOfRange { + q, ok := buf.(*logBuffer).getUpkeepQueue(id) + require.True(t, ok) + require.Equal(t, size, q.sizeOfRange(tc.rangeStart, tc.rangeEnd)) + } + }) + } +} + +func TestLogEventBufferV1_UpkeepQueue(t *testing.T) { + t.Run("enqueue dequeue", func(t *testing.T) { + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) + require.Equal(t, 0, dropped) + require.Equal(t, 1, added) + require.Equal(t, 1, q.sizeOfRange(1, 20)) + logs, remaining := q.dequeue(19, 21, 10) + require.Equal(t, 1, len(logs)) + require.Equal(t, 0, remaining) + }) + + t.Run("enqueue with limits", func(t *testing.T) { + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + added, dropped := q.enqueue(10, + createDummyLogSequence(15, 0, 20, common.HexToHash("0x20"))..., + ) + require.Equal(t, 5, dropped) + require.Equal(t, 15, added) + }) + + t.Run("dequeue with limits", func(t *testing.T) { + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 3)) + + added, dropped := q.enqueue(10, + logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + ) + require.Equal(t, 0, dropped) + require.Equal(t, 3, added) + + logs, remaining := q.dequeue(19, 21, 2) + require.Equal(t, 2, len(logs)) + require.Equal(t, 1, remaining) + }) +} + +func TestLogEventBufferV1_UpkeepQueue_sizeOfRange(t *testing.T) { + t.Run("empty", func(t *testing.T) { + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + require.Equal(t, 0, q.sizeOfRange(1, 10)) + }) + + t.Run("happy path", func(t *testing.T) { + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + added, dropped := q.enqueue(10, logpoller.Log{BlockNumber: 20, TxHash: common.HexToHash("0x1"), LogIndex: 0}) + require.Equal(t, 0, dropped) + require.Equal(t, 1, added) + require.Equal(t, 0, q.sizeOfRange(1, 10)) + require.Equal(t, 1, q.sizeOfRange(1, 20)) + }) +} + +func TestLogEventBufferV1_UpkeepQueue_clean(t *testing.T) { + t.Run("empty", func(t *testing.T) { + q := newUpkeepLogQueue(logger.TestLogger(t), big.NewInt(1), newLogBufferOptions(10, 1, 1)) + + q.clean(10) + }) + + t.Run("happy path", func(t *testing.T) { + buf := NewLogBuffer(logger.TestLogger(t), 10, 5, 1) + + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x111"), LogIndex: 0}, + logpoller.Log{BlockNumber: 11, TxHash: common.HexToHash("0x111"), LogIndex: 1}, + ) + + q, ok := buf.(*logBuffer).getUpkeepQueue(big.NewInt(1)) + require.True(t, ok) + require.Equal(t, 4, q.sizeOfRange(1, 11)) + + buf.Enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), LogIndex: 0}, + logpoller.Log{BlockNumber: 17, TxHash: common.HexToHash("0x171"), 
LogIndex: 1}, + ) + + require.Equal(t, 4, q.sizeOfRange(1, 18)) + require.Equal(t, 0, q.clean(12)) + require.Equal(t, 2, q.sizeOfRange(1, 18)) + q.lock.Lock() + defer q.lock.Unlock() + require.Equal(t, 2, len(q.states)) + }) +} + +func TestLogEventBufferV1_BlockWindow(t *testing.T) { + tests := []struct { + name string + block int64 + blockRate int + wantStart int64 + wantEnd int64 + }{ + { + name: "block 0, blockRate 1", + block: 0, + blockRate: 1, + wantStart: 0, + wantEnd: 0, + }, + { + name: "block 81, blockRate 1", + block: 81, + blockRate: 1, + wantStart: 81, + wantEnd: 81, + }, + { + name: "block 0, blockRate 4", + block: 0, + blockRate: 4, + wantStart: 0, + wantEnd: 3, + }, + { + name: "block 81, blockRate 4", + block: 81, + blockRate: 4, + wantStart: 80, + wantEnd: 83, + }, + { + name: "block 83, blockRate 4", + block: 83, + blockRate: 4, + wantStart: 80, + wantEnd: 83, + }, + { + name: "block 84, blockRate 4", + block: 84, + blockRate: 4, + wantStart: 84, + wantEnd: 87, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + start, end := getBlockWindow(tc.block, tc.blockRate) + require.Equal(t, tc.wantStart, start) + require.Equal(t, tc.wantEnd, end) + }) + } +} + +type dequeueArgs struct { + block int64 + blockRate int + upkeepLimit int + maxResults int + upkeepSelector func(id *big.Int) bool +} + +func newDequeueArgs(block int64, blockRate int, upkeepLimit int, maxResults int, upkeepSelector func(id *big.Int) bool) dequeueArgs { + args := dequeueArgs{ + block: block, + blockRate: blockRate, + upkeepLimit: upkeepLimit, + maxResults: maxResults, + upkeepSelector: upkeepSelector, + } + + if upkeepSelector == nil { + args.upkeepSelector = DefaultUpkeepSelector + } + if blockRate == 0 { + args.blockRate = 1 + } + if maxResults == 0 { + args.maxResults = 10 + } + if upkeepLimit == 0 { + args.upkeepLimit = 1 + } + + return args +} + +func createDummyLogSequence(n, startIndex int, block int64, tx common.Hash) []logpoller.Log { + logs := make([]logpoller.Log, n) + for i := 0; i < n; i++ { + logs[i] = logpoller.Log{ + BlockNumber: block, + TxHash: tx, + LogIndex: int64(i + startIndex), + } + } + return logs +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 263fa69223f..64833f9269b 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -4,8 +4,6 @@ import ( "math/big" "time" - "golang.org/x/time/rate" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/logger" @@ -17,7 +15,7 @@ import ( func New(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateStore core.UpkeepStateReader, finalityDepth uint32, chainID *big.Int) (LogEventProvider, LogRecoverer) { filterStore := NewUpkeepFilterStore() packer := NewLogEventsPacker() - opts := NewOptions(int64(finalityDepth)) + opts := NewOptions(int64(finalityDepth), chainID) provider := NewLogProvider(lggr, poller, chainID, packer, filterStore, opts) recoverer := NewLogRecoverer(lggr, poller, c, stateStore, packer, filterStore, opts) @@ -27,22 +25,36 @@ func New(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateS // LogTriggersOptions holds the options for the log trigger components. 
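+// A minimal construction sketch (finalityDepth is a placeholder; the chain ID picks +// the BlockRate and LogLimit defaults defined below): +// +// opts := NewOptions(finalityDepth, big.NewInt(42161)) // Arbitrum: BlockRate=4, LogLimit=1 +// opts.BufferVersion = BufferVersionV1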
type LogTriggersOptions struct { + chainID *big.Int // LookbackBlocks is the number of blocks the provider will look back for logs. // The recoverer will scan for logs up to this depth. // NOTE: MUST be set to a greater-or-equal to the chain's finality depth. LookbackBlocks int64 // ReadInterval is the interval to fetch logs in the background. ReadInterval time.Duration - // BlockRateLimit is the rate limit on the range of blocks the we fetch logs for. - BlockRateLimit rate.Limit - // blockLimitBurst is the burst upper limit on the range of blocks the we fetch logs for. - BlockLimitBurst int // Finality depth is the number of blocks to wait before considering a block final. FinalityDepth int64 + + // TODO: (AUTO-9355) remove once we have a single version + BufferVersion BufferVersion + // LogLimit is the minimum number of logs to process in a single block window. + LogLimit uint32 + // BlockRate determines the block window for log processing. + BlockRate uint32 } -func NewOptions(finalityDepth int64) LogTriggersOptions { +// BufferVersion is the version of the log buffer. +// TODO: (AUTO-9355) remove once we have a single version +type BufferVersion string + +const ( + BufferVersionDefault BufferVersion = "" + BufferVersionV1 BufferVersion = "v1" +) + +func NewOptions(finalityDepth int64, chainID *big.Int) LogTriggersOptions { opts := new(LogTriggersOptions) + opts.chainID = chainID opts.Defaults(finalityDepth) return *opts } @@ -60,13 +72,35 @@ func (o *LogTriggersOptions) Defaults(finalityDepth int64) { if o.ReadInterval == 0 { o.ReadInterval = time.Second } - if o.BlockLimitBurst == 0 { - o.BlockLimitBurst = int(o.LookbackBlocks) - } - if o.BlockRateLimit == 0 { - o.BlockRateLimit = rate.Every(o.ReadInterval) - } if o.FinalityDepth == 0 { o.FinalityDepth = finalityDepth } + if o.BlockRate == 0 { + o.BlockRate = o.defaultBlockRate() + } + if o.LogLimit == 0 { + o.LogLimit = o.defaultLogLimit() + } +} + +func (o *LogTriggersOptions) defaultBlockRate() uint32 { + switch o.chainID.Int64() { + case 42161, 421613, 421614: // Arbitrum + return 4 + default: + return 1 + } +} + +func (o *LogTriggersOptions) defaultLogLimit() uint32 { + switch o.chainID.Int64() { + case 42161, 421613, 421614: // Arbitrum + return 1 + case 1, 4, 5, 42, 11155111: // Eth + return 20 + case 10, 420, 56, 97, 137, 80001, 43113, 43114, 8453, 84531: // Optimism, BSC, Polygon, Avax, Base + return 5 + default: + return 2 + } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go index 44780cbc4b1..c0f204aa57b 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go @@ -5,7 +5,6 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" - "golang.org/x/time/rate" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" ) @@ -21,9 +20,6 @@ type upkeepFilter struct { // lastPollBlock is the last block number the logs were fetched for this upkeep // used by log event provider. lastPollBlock int64 - // blockLimiter is used to limit the number of blocks to fetch logs for an upkeep. - // used by log event provider. - blockLimiter *rate.Limiter // lastRePollBlock is the last block number the logs were recovered for this upkeep // used by log recoverer. 
lastRePollBlock int64 @@ -42,7 +38,6 @@ func (f upkeepFilter) Clone() upkeepFilter { configUpdateBlock: f.configUpdateBlock, lastPollBlock: f.lastPollBlock, lastRePollBlock: f.lastRePollBlock, - blockLimiter: f.blockLimiter, } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 51cdeccafdf..8108f1a3466 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -2,7 +2,6 @@ package logprovider_test import ( "context" - "errors" "math/big" "testing" "time" @@ -15,15 +14,12 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - "golang.org/x/time/rate" ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" @@ -37,90 +33,115 @@ import ( ) func TestIntegration_LogEventProvider(t *testing.T) { - ctx, cancel := context.WithCancel(testutils.Context(t)) - defer cancel() + tests := []struct { + name string + bufferVersion logprovider.BufferVersion + logLimit uint32 + }{ + { + name: "default buffer", + bufferVersion: logprovider.BufferVersionDefault, + logLimit: 10, + }, + { + name: "buffer v1", + bufferVersion: logprovider.BufferVersionV1, + logLimit: 10, + }, + } - backend, stopMining, accounts := setupBackend(t) - defer stopMining() - carrol := accounts[2] + for _, tc := range tests { + bufferVersion, logLimit := tc.bufferVersion, tc.logLimit + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() - db := setupDB(t) - defer db.Close() + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] - opts := logprovider.NewOptions(200) - opts.ReadInterval = time.Second / 2 - lp, ethClient := setupDependencies(t, db, backend) - filterStore := logprovider.NewUpkeepFilterStore() - provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) - logProvider := provider.(logprovider.LogEventProviderTest) + db := setupDB(t) + defer db.Close() - n := 10 + opts := logprovider.NewOptions(200, big.NewInt(1)) + opts.ReadInterval = time.Second / 2 + opts.BufferVersion = bufferVersion + opts.LogLimit = logLimit - backend.Commit() - lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) + logProvider := provider.(logprovider.LogEventProviderTest) - ids, addrs, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) - lp.PollAndSaveLogs(ctx, int64(n)) + n := 10 - go func() { - if err := logProvider.Start(ctx); err != nil { - t.Logf("error starting log provider: %s", err) - t.Fail() - } - }() - defer logProvider.Close() + backend.Commit() + 
lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - logsRounds := 10 + ids, addrs, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) + lp.PollAndSaveLogs(ctx, int64(n)) - poll := pollFn(ctx, t, lp, ethClient) + go func() { + if err := logProvider.Start(ctx); err != nil { + t.Logf("error starting log provider: %s", err) + t.Fail() + } + }() + defer logProvider.Close() - triggerEvents(ctx, t, backend, carrol, logsRounds, poll, contracts...) + logsRounds := 10 - poll(backend.Commit()) + poll := pollFn(ctx, t, lp, ethClient) - waitLogPoller(ctx, t, backend, lp, ethClient) + triggerEvents(ctx, t, backend, carrol, logsRounds, poll, contracts...) - waitLogProvider(ctx, t, logProvider, 3) + poll(backend.Commit()) - allPayloads := collectPayloads(ctx, t, logProvider, n, 5) - require.GreaterOrEqual(t, len(allPayloads), n, - "failed to get logs after restart") + waitLogPoller(ctx, t, backend, lp, ethClient) - t.Run("Restart", func(t *testing.T) { - t.Log("restarting log provider") - // assuming that our service was closed and restarted, - // we should be able to backfill old logs and fetch new ones - filterStore := logprovider.NewUpkeepFilterStore() - logProvider2 := logprovider.NewLogProvider(logger.TestLogger(t), lp, big.NewInt(1), logprovider.NewLogEventsPacker(), filterStore, opts) + waitLogProvider(ctx, t, logProvider, 3) - poll(backend.Commit()) - go func() { - if err2 := logProvider2.Start(ctx); err2 != nil { - t.Logf("error starting log provider: %s", err2) - t.Fail() - } - }() - defer logProvider2.Close() - - // re-register filters - for i, id := range ids { - err := logProvider2.RegisterFilter(ctx, logprovider.FilterOptions{ - UpkeepID: id, - TriggerConfig: newPlainLogTriggerConfig(addrs[i]), - // using block number at which the upkeep was registered, - // before we emitted any logs - UpdateBlock: uint64(n), - }) - require.NoError(t, err) - } + allPayloads := collectPayloads(ctx, t, logProvider, n, logsRounds/2) + require.GreaterOrEqual(t, len(allPayloads), n, + "failed to get logs after restart") - waitLogProvider(ctx, t, logProvider2, 2) + t.Run("Restart", func(t *testing.T) { + t.Log("restarting log provider") + // assuming that our service was closed and restarted, + // we should be able to backfill old logs and fetch new ones + filterStore := logprovider.NewUpkeepFilterStore() + logProvider2 := logprovider.NewLogProvider(logger.TestLogger(t), lp, big.NewInt(1), logprovider.NewLogEventsPacker(), filterStore, opts) - t.Log("getting logs after restart") - logsAfterRestart := collectPayloads(ctx, t, logProvider2, n, 5) - require.GreaterOrEqual(t, len(logsAfterRestart), n, - "failed to get logs after restart") - }) + poll(backend.Commit()) + go func() { + if err2 := logProvider2.Start(ctx); err2 != nil { + t.Logf("error starting log provider: %s", err2) + t.Fail() + } + }() + defer logProvider2.Close() + + // re-register filters + for i, id := range ids { + err := logProvider2.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: newPlainLogTriggerConfig(addrs[i]), + // using block number at which the upkeep was registered, + // before we emitted any logs + UpdateBlock: uint64(n), + }) + require.NoError(t, err) + } + + waitLogProvider(ctx, t, logProvider2, 2) + + t.Log("getting logs after restart") + logsAfterRestart := collectPayloads(ctx, t, logProvider2, n, 5) + require.GreaterOrEqual(t, len(logsAfterRestart), n, + "failed to get logs after restart") + }) + }) + } } func 
TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { @@ -198,258 +219,79 @@ func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { } func TestIntegration_LogEventProvider_Backfill(t *testing.T) { - ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) - defer cancel() - - backend, stopMining, accounts := setupBackend(t) - defer stopMining() - carrol := accounts[2] - - db := setupDB(t) - defer db.Close() - - opts := logprovider.NewOptions(200) - opts.ReadInterval = time.Second / 4 - lp, ethClient := setupDependencies(t, db, backend) - filterStore := logprovider.NewUpkeepFilterStore() - provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) - logProvider := provider.(logprovider.LogEventProviderTest) - - n := 10 - - backend.Commit() - lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - _, _, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) - - poll := pollFn(ctx, t, lp, ethClient) - - rounds := 8 - for i := 0; i < rounds; i++ { - poll(backend.Commit()) - triggerEvents(ctx, t, backend, carrol, n, poll, contracts...) - poll(backend.Commit()) - } - - waitLogPoller(ctx, t, backend, lp, ethClient) - - // starting the log provider should backfill logs - go func() { - if startErr := logProvider.Start(ctx); startErr != nil { - t.Logf("error starting log provider: %s", startErr) - t.Fail() - } - }() - defer logProvider.Close() - - waitLogProvider(ctx, t, logProvider, 3) - - allPayloads := collectPayloads(ctx, t, logProvider, n, 5) - require.GreaterOrEqual(t, len(allPayloads), len(contracts), "failed to backfill logs") -} - -func TestIntegration_LogEventProvider_RateLimit(t *testing.T) { - setupTest := func( - t *testing.T, - opts *logprovider.LogTriggersOptions, - ) ( - context.Context, - *backends.SimulatedBackend, - func(blockHash common.Hash), - logprovider.LogEventProviderTest, - []*big.Int, - func(), - ) { - ctx, cancel := context.WithCancel(testutils.Context(t)) - backend, stopMining, accounts := setupBackend(t) - userContractAccount := accounts[2] - db := setupDB(t) - - deferFunc := func() { - cancel() - stopMining() - _ = db.Close() - } - lp, ethClient := setupDependencies(t, db, backend) - filterStore := logprovider.NewUpkeepFilterStore() - provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, opts) - logProvider := provider.(logprovider.LogEventProviderTest) - backend.Commit() - lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - - rounds := 5 - numberOfUserContracts := 10 - poll := pollFn(ctx, t, lp, ethClient) - - // deployUpkeepCounter creates 'n' blocks and 'n' contracts - ids, _, contracts := deployUpkeepCounter( - ctx, - t, - numberOfUserContracts, - ethClient, - backend, - userContractAccount, - logProvider) - - // have log poller save logs for current blocks - lp.PollAndSaveLogs(ctx, int64(numberOfUserContracts)) - - for i := 0; i < rounds; i++ { - triggerEvents( - ctx, - t, - backend, - userContractAccount, - numberOfUserContracts, - poll, - contracts...) 
- - for dummyBlocks := 0; dummyBlocks < numberOfUserContracts; dummyBlocks++ { - _ = backend.Commit() - } - - poll(backend.Commit()) - } - + tests := []struct { + name string + bufferVersion logprovider.BufferVersion + logLimit uint32 + }{ { - // total block history at this point should be 566 - var minimumBlockCount int64 = 500 - latestBlock, _ := lp.LatestBlock(ctx) - - assert.GreaterOrEqual(t, latestBlock.BlockNumber, minimumBlockCount, "to ensure the integrety of the test, the minimum block count before the test should be %d but got %d", minimumBlockCount, latestBlock) - } - - require.NoError(t, logProvider.ReadLogs(ctx, ids...)) - - return ctx, backend, poll, logProvider, ids, deferFunc + name: "default buffer", + bufferVersion: logprovider.BufferVersionDefault, + logLimit: 10, + }, + { + name: "buffer v1", + bufferVersion: logprovider.BufferVersionV1, + logLimit: 10, + }, } - // polling for logs at approximately the same rate as a chain produces - // blocks should not encounter rate limits - t.Run("should allow constant polls within the rate and burst limit", func(t *testing.T) { - ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ - LookbackBlocks: 200, - // BlockRateLimit is set low to ensure the test does not exceed the - // rate limit - BlockRateLimit: rate.Every(50 * time.Millisecond), - // BlockLimitBurst is just set to a non-zero value - BlockLimitBurst: 5, - }) - - defer deferFunc() + for _, tc := range tests { + bufferVersion, limitLow := tc.bufferVersion, tc.logLimit + t.Run(tc.name, func(t *testing.T) { - // set the wait time between reads higher than the rate limit - readWait := 50 * time.Millisecond - timer := time.NewTimer(readWait) + ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) + defer cancel() - for i := 0; i < 4; i++ { - <-timer.C + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] - // advance 1 block for every read - poll(backend.Commit()) - - err := logProvider.ReadLogs(ctx, ids...) 
- if err != nil { - assert.False(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") - } + db := setupDB(t) + defer db.Close() - timer.Reset(readWait) - } + opts := logprovider.NewOptions(200, big.NewInt(1)) + opts.ReadInterval = time.Second / 4 + opts.BufferVersion = bufferVersion + opts.LogLimit = limitLow - poll(backend.Commit()) + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) + logProvider := provider.(logprovider.LogEventProviderTest) - _, err := logProvider.GetLatestPayloads(ctx) + n := 10 - require.NoError(t, err) - }) + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + _, _, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) - t.Run("should produce a rate limit error for over burst limit", func(t *testing.T) { - ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ - LookbackBlocks: 200, - // BlockRateLimit is set low to ensure the test does not exceed the - // rate limit - BlockRateLimit: rate.Every(50 * time.Millisecond), - // BlockLimitBurst is just set to a non-zero value - BlockLimitBurst: 5, - }) + poll := pollFn(ctx, t, lp, ethClient) - defer deferFunc() - - // set the wait time between reads higher than the rate limit - readWait := 50 * time.Millisecond - timer := time.NewTimer(readWait) - - for i := 0; i < 4; i++ { - <-timer.C - - // advance 4 blocks for every read - for x := 0; x < 4; x++ { + rounds := 8 + for i := 0; i < rounds; i++ { + poll(backend.Commit()) + triggerEvents(ctx, t, backend, carrol, n, poll, contracts...) poll(backend.Commit()) } - err := logProvider.ReadLogs(ctx, ids...) - if err != nil { - assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") - } - - timer.Reset(readWait) - } - - poll(backend.Commit()) + waitLogPoller(ctx, t, backend, lp, ethClient) - _, err := logProvider.GetLatestPayloads(ctx) + // starting the log provider should backfill logs + go func() { + if startErr := logProvider.Start(ctx); startErr != nil { + t.Logf("error starting log provider: %s", startErr) + t.Fail() + } + }() + defer logProvider.Close() - require.NoError(t, err) - }) + waitLogProvider(ctx, t, logProvider, 3) - t.Run("should allow polling after lookback number of blocks have passed", func(t *testing.T) { - ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ - // BlockRateLimit is set low to ensure the test does not exceed the - // rate limit - BlockRateLimit: rate.Every(50 * time.Millisecond), - // BlockLimitBurst is set low to ensure the test exceeds the burst limit - BlockLimitBurst: 5, - // LogBlocksLookback is set low to reduce the number of blocks required - // to reset the block limiter to maxBurst - LookbackBlocks: 50, + allPayloads := collectPayloads(ctx, t, logProvider, n*rounds, 5) + require.GreaterOrEqual(t, len(allPayloads), len(contracts), "failed to backfill logs") }) - - defer deferFunc() - - // simulate a burst in unpolled blocks - for i := 0; i < 20; i++ { - _ = backend.Commit() - } - - poll(backend.Commit()) - - // all entries should error at this point because there are too many - // blocks to processes - err := logProvider.ReadLogs(ctx, ids...) 
- if err != nil { - assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") - } - - // progress the chain by the same number of blocks as the lookback limit - // to trigger the usage of maxBurst - for i := 0; i < 50; i++ { - _ = backend.Commit() - } - - poll(backend.Commit()) - - // all entries should reset to the maxBurst because they are beyond - // the log lookback - err = logProvider.ReadLogs(ctx, ids...) - if err != nil { - assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") - } - - poll(backend.Commit()) - - _, err = logProvider.GetLatestPayloads(ctx) - - require.NoError(t, err) - }) + } } func TestIntegration_LogRecoverer_Backfill(t *testing.T) { @@ -533,7 +375,6 @@ func collectPayloads(ctx context.Context, t *testing.T, logProvider logprovider. for ctx.Err() == nil && len(allPayloads) < n && rounds > 0 { logs, err := logProvider.GetLatestPayloads(ctx) require.NoError(t, err) - require.LessOrEqual(t, len(logs), logprovider.AllowedLogsPerUpkeep, "failed to get all logs") allPayloads = append(allPayloads, logs...) rounds-- } @@ -670,10 +511,10 @@ func setupDependencies(t *testing.T, db *sqlx.DB, backend *backends.SimulatedBac return lp, ethClient } -func setup(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateStore evmregistry21.UpkeepStateReader, filterStore logprovider.UpkeepFilterStore, opts *logprovider.LogTriggersOptions) (logprovider.LogEventProvider, logprovider.LogRecoverer) { +func setup(lggr logger.Logger, poller logpoller.LogPoller, c evmclient.Client, stateStore evmregistry21.UpkeepStateReader, filterStore logprovider.UpkeepFilterStore, opts *logprovider.LogTriggersOptions) (logprovider.LogEventProvider, logprovider.LogRecoverer) { packer := logprovider.NewLogEventsPacker() if opts == nil { - o := logprovider.NewOptions(200) + o := logprovider.NewOptions(200, big.NewInt(1)) opts = &o } provider := logprovider.NewLogProvider(lggr, poller, big.NewInt(1), packer, filterStore, *opts) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go new file mode 100644 index 00000000000..9156e341688 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log.go @@ -0,0 +1,69 @@ +package logprovider + +import ( + "encoding/hex" + + ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" +) + +// LogSorter sorts the logs based on block number, tx hash and log index. +// returns true if b should come before a. +func LogSorter(a, b logpoller.Log) bool { + return LogComparator(a, b) > 0 +} + +// LogComparator compares the logs based on block number, log index. +// tx hash is also checked in case the log index is not unique within a block. 
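+// For example (mirroring TestLogComparatorSorter below), a log at block 1 compares +// as -1 against a log at block 4, regardless of tx hash or log index.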
+// +// Returns: +// +// -1 if a < b +// 0 if a == b +// +1 if a > b +func LogComparator(a, b logpoller.Log) int { + blockDiff := int(a.BlockNumber - b.BlockNumber) + if blockDiff != 0 { + return normalizeCompareResult(blockDiff) + } + logIndexDiff := int(a.LogIndex - b.LogIndex) + if logIndexDiff != 0 { + return normalizeCompareResult(logIndexDiff) + } + return a.TxHash.Big().Cmp(b.TxHash.Big()) +} + +// normalizeCompareResult normalizes the result of a comparison to -1, 0, 1 +func normalizeCompareResult(res int) int { + switch { + case res < 0: + return -1 + case res > 0: + return 1 + default: + return 0 + } +} + +// logID returns a unique identifier for a log, which is a hex string +// of ocr2keepers.LogTriggerExtension.LogIdentifier() +func logID(l logpoller.Log) string { + ext := ocr2keepers.LogTriggerExtension{ + Index: uint32(l.LogIndex), + } + copy(ext.TxHash[:], l.TxHash[:]) + copy(ext.BlockHash[:], l.BlockHash[:]) + return hex.EncodeToString(ext.LogIdentifier()) +} + +// latestBlockNumber returns the latest block number from the given logs +func latestBlockNumber(logs ...logpoller.Log) int64 { + var latest int64 + for _, l := range logs { + if l.BlockNumber > latest { + latest = l.BlockNumber + } + } + return latest +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go new file mode 100644 index 00000000000..9ee8e98a996 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_test.go @@ -0,0 +1,133 @@ +package logprovider + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" +) + +func TestLogComparatorSorter(t *testing.T) { + tests := []struct { + name string + a logpoller.Log + b logpoller.Log + wantCmp int + wantSort bool + }{ + { + name: "a == b", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + wantCmp: 0, + wantSort: false, + }, + { + name: "a < b: block number", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + b: logpoller.Log{ + BlockNumber: 4, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + wantCmp: -1, + wantSort: false, + }, + { + name: "a < b: log index", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + wantCmp: -1, + wantSort: false, + }, + { + name: "a > b: block number", + a: logpoller.Log{ + BlockNumber: 3, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + b: logpoller.Log{ + BlockNumber: 2, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + wantCmp: 1, + wantSort: true, + }, + { + name: "a > b: log index", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 4, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + wantCmp: 1, + wantSort: true, + }, + { + name: "a > b: tx hash", + a: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x21"), + LogIndex: 2, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + wantCmp: 1, + wantSort: true, + }, + { + name: "a < b: tx hash", + a: logpoller.Log{ + BlockNumber: 1, +
TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + b: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x4"), + LogIndex: 2, + }, + wantCmp: -1, + wantSort: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.wantCmp, LogComparator(tc.a, tc.b)) + require.Equal(t, tc.wantSort, LogSorter(tc.a, tc.b)) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 60505a2989e..b07b08d3354 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -46,6 +46,10 @@ var ( // reorgBuffer is the number of blocks to add as a buffer to the block range when reading logs. reorgBuffer = int64(32) readerThreads = 4 + + bufferSyncInterval = 10 * time.Minute + // logLimitMinimum is how low the log limit can go. + logLimitMinimum = 1 ) // LogTriggerConfig is an alias for log trigger config. @@ -79,8 +83,13 @@ type LogEventProviderTest interface { CurrentPartitionIdx() uint64 } +type LogEventProviderFeatures interface { + WithBufferVersion(v BufferVersion) +} + var _ LogEventProvider = &logEventProvider{} var _ LogEventProviderTest = &logEventProvider{} +var _ LogEventProviderFeatures = &logEventProvider{} // logEventProvider manages log filters for upkeeps and enables to read the log events. type logEventProvider struct { @@ -98,6 +107,7 @@ type logEventProvider struct { filterStore UpkeepFilterStore buffer *logEventBuffer + bufferV1 LogBuffer opts LogTriggersOptions @@ -107,18 +117,12 @@ type logEventProvider struct { } func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big.Int, packer LogDataPacker, filterStore UpkeepFilterStore, opts LogTriggersOptions) *logEventProvider { - defaultBlockRate := defaultBlockRateForChain(chainID) - defaultLogLimit := defaultLogLimitForChain(chainID) - - // TODO apply these to the log buffer later - _ = defaultBlockRate - _ = defaultLogLimit - return &logEventProvider{ threadCtrl: utils.NewThreadControl(), lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), + bufferV1: NewLogBuffer(lggr, uint32(opts.LookbackBlocks), opts.BlockRate, opts.LogLimit), poller: poller, opts: opts, filterStore: filterStore, @@ -127,20 +131,38 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big } func (p *logEventProvider) SetConfig(cfg ocr2keepers.LogEventProviderConfig) { + p.lock.Lock() + defer p.lock.Unlock() + blockRate := cfg.BlockRate logLimit := cfg.LogLimit if blockRate == 0 { - blockRate = defaultBlockRateForChain(p.chainID) + blockRate = p.opts.defaultBlockRate() } if logLimit == 0 { - logLimit = defaultLogLimitForChain(p.chainID) + logLimit = p.opts.defaultLogLimit() } p.lggr.With("where", "setConfig").Infow("setting config ", "bockRate", blockRate, "logLimit", logLimit) - // TODO set block rate and log limit on the buffer - //p.buffer.SetConfig(blockRate, logLimit) + atomic.StoreUint32(&p.opts.BlockRate, blockRate) + atomic.StoreUint32(&p.opts.LogLimit, logLimit) + + switch p.opts.BufferVersion { + case BufferVersionV1: + p.bufferV1.SetConfig(uint32(p.opts.LookbackBlocks), blockRate, logLimit) + default: + } +} + +func (p *logEventProvider) WithBufferVersion(v BufferVersion) { + p.lock.Lock() + 
defer p.lock.Unlock()
+
+	p.lggr.Debugw("with buffer version", "version", v)
+
+	p.opts.BufferVersion = v
 }
 
 func (p *logEventProvider) Start(context.Context) error {
@@ -169,6 +191,24 @@ func (p *logEventProvider) Start(context.Context) error {
 		})
 	})
 
+	p.threadCtrl.Go(func(ctx context.Context) {
+		// sync filters with buffer periodically,
+		// to ensure that inactive upkeeps won't waste capacity.
+		ticker := time.NewTicker(bufferSyncInterval)
+		defer ticker.Stop()
+
+		for {
+			select {
+			case <-ticker.C:
+				if err := p.syncBufferFilters(); err != nil {
+					p.lggr.Warnw("failed to sync buffer filters", "err", err)
+				}
+			case <-ctx.Done():
+				return
+			}
+		}
+	})
+
 	return nil
 	})
 }
@@ -190,33 +230,94 @@ func (p *logEventProvider) GetLatestPayloads(ctx context.Context) ([]ocr2keepers
 		return nil, fmt.Errorf("%w: %s", ErrHeadNotAvailable, err)
 	}
 	prommetrics.AutomationLogProviderLatestBlock.Set(float64(latest.BlockNumber))
-	start := latest.BlockNumber - p.opts.LookbackBlocks
-	if start <= 0 {
-		start = 1
+	payloads := p.getLogsFromBuffer(latest.BlockNumber)
+
+	if len(payloads) > 0 {
+		p.lggr.Debugw("Fetched payloads from buffer", "latestBlock", latest.BlockNumber, "payloads", len(payloads))
 	}
-	logs := p.buffer.dequeueRange(start, latest.BlockNumber, AllowedLogsPerUpkeep, MaxPayloads)
-
-	// p.lggr.Debugw("got latest logs from buffer", "latest", latest, "diff", diff, "logs", len(logs))
+	return payloads, nil
+}
 
+func (p *logEventProvider) createPayload(id *big.Int, log logpoller.Log) (ocr2keepers.UpkeepPayload, error) {
+	trig := logToTrigger(log)
+	checkData, err := p.packer.PackLogData(log)
+	if err != nil {
+		p.lggr.Warnw("failed to pack log data", "err", err, "log", log, "id", id)
+		return ocr2keepers.UpkeepPayload{}, err
+	}
+	payload, err := core.NewUpkeepPayload(id, trig, checkData)
+	if err != nil {
+		p.lggr.Warnw("failed to create upkeep payload", "err", err, "id", id, "trigger", trig, "checkData", checkData)
+		return ocr2keepers.UpkeepPayload{}, err
+	}
+	return payload, nil
+}
+
+// getBufferDequeueArgs returns the arguments for the buffer to dequeue logs.
+// It adjusts the log limit low based on the number of upkeeps, to ensure that more upkeeps get slots in the result set.
+func (p *logEventProvider) getBufferDequeueArgs() (blockRate, logLimitLow, maxResults, numOfUpkeeps int) {
+	blockRate, logLimitLow, maxResults, numOfUpkeeps = int(p.opts.BlockRate), int(p.opts.LogLimit), MaxPayloads, p.bufferV1.NumOfUpkeeps()
+	// in case we have more upkeeps than the max results, we reduce the log limit low
+	// so that more upkeeps will get slots in the result set.
+	for numOfUpkeeps > maxResults/logLimitLow {
+		if logLimitLow == logLimitMinimum {
+			// Log limit low can't go less than logLimitMinimum (1).
+			// If some upkeeps are not getting slots in the result set, they are supposed to be picked up
+			// in the next iteration if the range is still applicable.
+			// TODO: alerts to notify the system is at full capacity.
+			// TODO: handle this case properly by distributing available slots across upkeeps to avoid
+			// starvation when log volume is high.
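+			// For example (illustrative numbers, not config defaults): with
+			// maxResults = 100 and logLimitLow starting at 5, up to 20 upkeeps
+			// can each get 5 slots; past 100 upkeeps even a limit of 1 cannot
+			// give every upkeep a slot, so we warn and continue at the minimum.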
+ p.lggr.Warnw("The system is at full capacity", "maxResults", maxResults, "numOfUpkeeps", numOfUpkeeps, "logLimitLow", logLimitLow) + break + } + p.lggr.Debugw("Too many upkeeps, reducing the log limit low", "maxResults", maxResults, "numOfUpkeeps", numOfUpkeeps, "logLimitLow_before", logLimitLow) + logLimitLow-- + } + return +} + +func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.UpkeepPayload { var payloads []ocr2keepers.UpkeepPayload - for _, l := range logs { - log := l.log - trig := logToTrigger(log) - checkData, err := p.packer.PackLogData(log) - if err != nil { - p.lggr.Warnw("failed to pack log data", "err", err, "log", log) - continue + + start := latestBlock - p.opts.LookbackBlocks + if start <= 0 { // edge case when the chain is new (e.g. tests) + start = 1 + } + + switch p.opts.BufferVersion { + case BufferVersionV1: + // in v1, we use a greedy approach - we keep dequeuing logs until we reach the max results or cover the entire range. + blockRate, logLimitLow, maxResults, _ := p.getBufferDequeueArgs() + for len(payloads) < maxResults && start <= latestBlock { + logs, remaining := p.bufferV1.Dequeue(start, blockRate, logLimitLow, maxResults-len(payloads), DefaultUpkeepSelector) + if len(logs) > 0 { + p.lggr.Debugw("Dequeued logs", "start", start, "latestBlock", latestBlock, "logs", len(logs)) + } + for _, l := range logs { + payload, err := p.createPayload(l.ID, l.Log) + if err == nil { + payloads = append(payloads, payload) + } + } + if remaining > 0 { + p.lggr.Debugw("Remaining logs", "start", start, "latestBlock", latestBlock, "remaining", remaining) + // TODO: handle remaining logs in a better way than consuming the entire window, e.g. do not repeat more than x times + continue + } + start += int64(blockRate) } - payload, err := core.NewUpkeepPayload(l.upkeepID, trig, checkData) - if err != nil { - p.lggr.Warnw("failed to create upkeep payload", "err", err, "id", l.upkeepID, "trigger", trig, "checkData", checkData) - continue + default: + logs := p.buffer.dequeueRange(start, latestBlock, AllowedLogsPerUpkeep, MaxPayloads) + for _, l := range logs { + payload, err := p.createPayload(l.upkeepID, l.log) + if err == nil { + payloads = append(payloads, payload) + } } - - payloads = append(payloads, payload) } - return payloads, nil + return payloads } // ReadLogs fetches the logs for the given upkeeps. @@ -380,8 +481,6 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ // special case of a new blockchain (e.g. simulated chain) lookbackBlocks = latest - 1 } - // maxBurst will be used to increase the burst limit to allow a long range scan - maxBurst := int(lookbackBlocks + 1) for i, filter := range filters { if len(filter.addr) == 0 { @@ -391,13 +490,6 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ // range should not exceed [lookbackBlocks, latest] if start < latest-lookbackBlocks { start = latest - lookbackBlocks - filter.blockLimiter.SetBurst(maxBurst) - } - - resv := filter.blockLimiter.ReserveN(time.Now(), int(latest-start)) - if !resv.OK() { - merr = errors.Join(merr, fmt.Errorf("%w: %s", ErrBlockLimitExceeded, filter.upkeepID.String())) - continue } // adding a buffer to check for reorged logs. 
start = start - reorgBuffer @@ -408,8 +500,6 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ // query logs based on contract address, event sig, and blocks logs, err := p.poller.LogsWithSigs(ctx, start, latest, []common.Hash{filter.topics[0]}, common.BytesToAddress(filter.addr)) if err != nil { - // cancel limit reservation as we failed to get logs - resv.Cancel() if ctx.Err() != nil { // exit if the context was canceled return merr @@ -419,15 +509,12 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ } filteredLogs := filter.Select(logs...) - // if this limiter's burst was set to the max -> - // reset it and cancel the reservation to allow further processing - if filter.blockLimiter.Burst() == maxBurst { - resv.Cancel() - filter.blockLimiter.SetBurst(p.opts.BlockLimitBurst) + switch p.opts.BufferVersion { + case BufferVersionV1: + p.bufferV1.Enqueue(filter.upkeepID, filteredLogs...) + default: + p.buffer.enqueue(filter.upkeepID, filteredLogs...) } - - p.buffer.enqueue(filter.upkeepID, filteredLogs...) - // Update the lastPollBlock for filter in slice this is then // updated into filter store in updateFiltersLastPoll filters[i].lastPollBlock = latest @@ -436,24 +523,15 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ return merr } -func defaultBlockRateForChain(chainID *big.Int) uint32 { - switch chainID.Int64() { - case 42161, 421613, 421614: // Arbitrum - return 4 - default: - return 1 - } -} +func (p *logEventProvider) syncBufferFilters() error { + p.lock.RLock() + buffVersion := p.opts.BufferVersion + p.lock.RUnlock() -func defaultLogLimitForChain(chainID *big.Int) uint32 { - switch chainID.Int64() { - case 42161, 421613, 421614: // Arbitrum - return 1 - case 1, 4, 5, 42, 11155111: // Eth - return 20 - case 10, 420, 56, 97, 137, 80001, 43113, 43114, 8453, 84531: // Optimism, BSC, Polygon, Avax, Base - return 5 + switch buffVersion { + case BufferVersionV1: + return p.bufferV1.SyncFilters(p.filterStore) default: - return 1 + return nil } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go index ae6a373ad22..db47ac2ecd8 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go @@ -9,7 +9,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "golang.org/x/time/rate" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" ) @@ -84,8 +83,7 @@ func (p *logEventProvider) RegisterFilter(ctx context.Context, opts FilterOption filter = *currentFilter } else { // new filter filter = upkeepFilter{ - upkeepID: upkeepID, - blockLimiter: rate.NewLimiter(p.opts.BlockRateLimit, p.opts.BlockLimitBurst), + upkeepID: upkeepID, } } filter.lastPollBlock = 0 diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go index 96a397827be..26e989c7466 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go @@ -100,7 +100,7 @@ func TestLogEventProvider_LifeCycle(t *testing.T) { }, } - p := 
NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -152,7 +152,7 @@ func TestEventLogProvider_RefreshActiveUpkeeps(t *testing.T) { mp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{}, nil) mp.On("ReplayAsync", mock.Anything).Return(nil) - p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) require.NoError(t, p.RegisterFilter(ctx, FilterOptions{ UpkeepID: core.GenUpkeepID(types.LogTrigger, "1111").BigInt(), @@ -231,7 +231,7 @@ func TestLogEventProvider_ValidateLogTriggerConfig(t *testing.T) { }, } - p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { err := p.validateLogTriggerConfig(tc.cfg) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index ade2c630ebd..57da895403e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "golang.org/x/time/rate" ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" @@ -22,7 +21,7 @@ import ( ) func TestLogEventProvider_GetFilters(t *testing.T) { - p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) _, f := newEntry(p, 1) p.filterStore.AddActiveUpkeeps(f) @@ -64,7 +63,7 @@ func TestLogEventProvider_GetFilters(t *testing.T) { } func TestLogEventProvider_UpdateEntriesLastPoll(t *testing.T) { - p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), nil, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200, big.NewInt(1))) n := 10 @@ -177,7 +176,7 @@ func TestLogEventProvider_ScheduleReadJobs(t *testing.T) { ctx := testutils.Context(t) readInterval := 10 * time.Millisecond - opts := NewOptions(200) + opts := NewOptions(200, big.NewInt(1)) opts.ReadInterval = readInterval p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, NewUpkeepFilterStore(), opts) @@ -255,7 +254,7 @@ func TestLogEventProvider_ReadLogs(t *testing.T) { }, nil) filterStore := NewUpkeepFilterStore() - p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, filterStore, NewOptions(200)) + p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, filterStore, NewOptions(200, big.NewInt(1))) var ids []*big.Int for i := 0; i < 10; i++ { @@ 
-310,10 +309,9 @@ func newEntry(p *logEventProvider, i int, args ...string) (LogTriggerConfig, upk topics := make([]common.Hash, len(filter.EventSigs)) copy(topics, filter.EventSigs) f := upkeepFilter{ - upkeepID: uid, - addr: filter.Addresses[0].Bytes(), - topics: topics, - blockLimiter: rate.NewLimiter(p.opts.BlockRateLimit, p.opts.BlockLimitBurst), + upkeepID: uid, + addr: filter.Addresses[0].Bytes(), + topics: topics, } return cfg, f } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go index 26c56c23b8c..5ef321cbf7d 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go @@ -100,8 +100,8 @@ func NewLogRecoverer(lggr logger.Logger, poller logpoller.LogPoller, client clie threadCtrl: utils.NewThreadControl(), - blockTime: &atomic.Int64{}, - lookbackBlocks: &atomic.Int64{}, + blockTime: new(atomic.Int64), + lookbackBlocks: new(atomic.Int64), interval: opts.ReadInterval * 5, pending: make([]ocr2keepers.UpkeepPayload, 0), diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go index 54338207190..65a05b2537e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go @@ -34,7 +34,7 @@ func TestLogRecoverer_GetRecoverables(t *testing.T) { ctx := testutils.Context(t) lp := &lpmocks.LogPoller{} lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: 100}, nil) - r := NewLogRecoverer(logger.TestLogger(t), lp, nil, nil, nil, nil, NewOptions(200)) + r := NewLogRecoverer(logger.TestLogger(t), lp, nil, nil, nil, nil, NewOptions(200, big.NewInt(1))) tests := []struct { name string @@ -1152,7 +1152,7 @@ func TestLogRecoverer_pending(t *testing.T) { maxPendingPayloadsPerUpkeep = origMaxPendingPayloadsPerUpkeep }() - r := NewLogRecoverer(logger.TestLogger(t), nil, nil, nil, nil, nil, NewOptions(200)) + r := NewLogRecoverer(logger.TestLogger(t), nil, nil, nil, nil, nil, NewOptions(200, big.NewInt(1))) r.lock.Lock() r.pending = tc.exist for i, p := range tc.new { @@ -1233,7 +1233,7 @@ func setupTestRecoverer(t *testing.T, interval time.Duration, lookbackBlocks int lp := new(lpmocks.LogPoller) statesReader := new(mocks.UpkeepStateReader) filterStore := NewUpkeepFilterStore() - opts := NewOptions(lookbackBlocks) + opts := NewOptions(lookbackBlocks, big.NewInt(1)) opts.ReadInterval = interval / 5 opts.LookbackBlocks = lookbackBlocks recoverer := NewLogRecoverer(logger.TestLogger(t), lp, nil, statesReader, &mockedPacker{}, filterStore, opts) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics/metrics.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics/metrics.go index 6b68f5c6afd..682b8710c0c 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics/metrics.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics/metrics.go @@ -41,6 +41,7 @@ const ( LogBufferFlowDirectionIngress = "ingress" LogBufferFlowDirectionEgress = "egress" LogBufferFlowDirectionDropped = "dropped" + LogBufferFlowDirectionExpired = "expired" ) // Automation metrics diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go 
b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go index 4aa9b0cb7dc..288e7e74fdb 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go @@ -54,6 +54,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" @@ -118,7 +119,7 @@ func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) { require.NoError(t, err) registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) - setupNodes(t, nodeKeys, registry, backend, steve) + setupNodes(t, nodeKeys, registry, backend, steve, false) <-time.After(time.Second * 5) @@ -172,311 +173,368 @@ func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) { } func TestIntegration_KeeperPluginLogUpkeep(t *testing.T) { - g := gomega.NewWithT(t) - - // setup blockchain - sergey := testutils.MustNewSimTransactor(t) // owns all the link - steve := testutils.MustNewSimTransactor(t) // registry owner - carrol := testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - sergey.From: {Balance: assets.Ether(10000).ToInt()}, - steve.From: {Balance: assets.Ether(10000).ToInt()}, - carrol.From: {Balance: assets.Ether(10000).ToInt()}, - } - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + tests := []struct { + name string + logBufferVersion logprovider.BufferVersion + }{ + { + name: "default buffer", + logBufferVersion: logprovider.BufferVersionDefault, + }, + { + name: "buffer v1", + logBufferVersion: logprovider.BufferVersionV1, + }, } - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewWithT(t) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(10000).ToInt()}, + steve.From: {Balance: assets.Ether(10000).ToInt()}, + carrol.From: {Balance: assets.Ether(10000).ToInt()}, + } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - // Deploy registry - linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) - 
require.NoError(t, err) - gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() - registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) - setupNodes(t, nodeKeys, registry, backend, steve) - upkeeps := 1 + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) - _, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1)))) - require.NoError(t, err) + registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + setupNodes(t, nodeKeys, registry, backend, steve, tc.logBufferVersion == logprovider.BufferVersionV1) + upkeeps := 1 - backend.Commit() + _, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1)))) + require.NoError(t, err) - ids, addrs, contracts := deployUpkeeps(t, backend, carrol, steve, linkToken, registry, upkeeps) - require.Equal(t, upkeeps, len(ids)) - require.Equal(t, len(ids), len(contracts)) - require.Equal(t, len(ids), len(addrs)) + backend.Commit() - backend.Commit() + ids, addrs, contracts := deployUpkeeps(t, backend, carrol, steve, linkToken, registry, upkeeps) + require.Equal(t, upkeeps, len(ids)) + require.Equal(t, len(ids), len(contracts)) + require.Equal(t, len(ids), len(addrs)) - emits := 1 - go emitEvents(testutils.Context(t), t, emits, contracts, carrol, func() { - backend.Commit() - }) - - listener, done := listenPerformed(t, backend, registry, ids, int64(1)) - g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) - done() + backend.Commit() - t.Run("recover logs", func(t *testing.T) { - addr, contract := addrs[0], contracts[0] - upkeepID := registerUpkeep(t, registry, addr, carrol, steve, backend) - backend.Commit() - t.Logf("Registered new upkeep %s for address %s", upkeepID.String(), addr.String()) - // Emit 100 logs in a burst - recoverEmits := 100 - i := 0 - emitEvents(testutils.Context(t), t, 100, []*log_upkeep_counter_wrapper.LogUpkeepCounter{contract}, carrol, func() { - i++ - if i%(recoverEmits/4) == 0 { + emits := 1 + go emitEvents(testutils.Context(t), t, emits, contracts, carrol, func() { backend.Commit() - time.Sleep(time.Millisecond * 250) // otherwise we get "invalid transaction nonce" errors - } - }) + }) - beforeDummyBlocks := backend.Blockchain().CurrentBlock().Number.Uint64() + listener, done := listenPerformed(t, backend, registry, ids, int64(1)) + g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) + done() - // Mine enough blocks to ensure these logs don't fall into log 
provider range - dummyBlocks := 500 - for i := 0; i < dummyBlocks; i++ { - backend.Commit() - time.Sleep(time.Millisecond * 10) - } + t.Run("recover logs", func(t *testing.T) { + addr, contract := addrs[0], contracts[0] + upkeepID := registerUpkeep(t, registry, addr, carrol, steve, backend) + backend.Commit() + t.Logf("Registered new upkeep %s for address %s", upkeepID.String(), addr.String()) + // Emit 100 logs in a burst + recoverEmits := 100 + i := 0 + emitEvents(testutils.Context(t), t, 100, []*log_upkeep_counter_wrapper.LogUpkeepCounter{contract}, carrol, func() { + i++ + if i%(recoverEmits/4) == 0 { + backend.Commit() + time.Sleep(time.Millisecond * 250) // otherwise we get "invalid transaction nonce" errors + } + }) - t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks) + beforeDummyBlocks := backend.Blockchain().CurrentBlock().Number.Uint64() - listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits) - g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) - done() - }) -} + // Mine enough blocks to ensure these logs don't fall into log provider range + dummyBlocks := 500 + for i := 0; i < dummyBlocks; i++ { + backend.Commit() + time.Sleep(time.Millisecond * 10) + } -func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) { - g := gomega.NewWithT(t) + t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks) - // setup blockchain - linkOwner := testutils.MustNewSimTransactor(t) // owns all the link - registryOwner := testutils.MustNewSimTransactor(t) // registry owner - upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, - registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, - upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, + listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) + }) + }) } +} - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} +func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) { + tests := []struct { + name string + logBufferVersion logprovider.BufferVersion + }{ + { + name: "default buffer", + logBufferVersion: logprovider.BufferVersionDefault, + }, + { + name: "buffer v1", + logBufferVersion: logprovider.BufferVersionV1, + }, } - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewWithT(t) + + // setup blockchain + linkOwner := testutils.MustNewSimTransactor(t) // owns all the link + registryOwner := testutils.MustNewSimTransactor(t) // registry owner + upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, + registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, + upkeepOwner.From: {Balance: 
assets.Ether(10000).ToInt()}, + } - // Deploy registry - linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) - require.NoError(t, err) + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) + require.NoError(t, err) - registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) - _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) - const upkeepCount = 10 - const mercuryFailCount = upkeepCount * 3 * 2 + registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) - // testing with the mercury server involves mocking responses. currently, - // there is not a way to connect a mercury call to an upkeep id (though we - // could add custom headers) so the test must be fairly basic and just - // count calls before switching to successes - var ( - mu sync.Mutex - count int - ) + _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner, tc.logBufferVersion == logprovider.BufferVersionV1) - mercuryServer.RegisterHandler(func(w http.ResponseWriter, r *http.Request) { - mu.Lock() - defer mu.Unlock() + const upkeepCount = 10 + const mercuryFailCount = upkeepCount * 3 * 2 - count++ + // testing with the mercury server involves mocking responses. currently, + // there is not a way to connect a mercury call to an upkeep id (though we + // could add custom headers) so the test must be fairly basic and just + // count calls before switching to successes + var ( + mu sync.Mutex + count int + ) - _ = r.ParseForm() + mercuryServer.RegisterHandler(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() - t.Logf("MercuryHTTPServe:RequestURI: %s", r.RequestURI) + count++ - for key, value := range r.Form { - t.Logf("MercuryHTTPServe:FormValue: key: %s; value: %s;", key, value) - } + _ = r.ParseForm() - // the streams lookup retries against the remote server 3 times before - // returning a result as retryable. - // the simulation here should force the streams lookup process to return - // retryable 2 times. 
- // the total count of failures should be (upkeepCount * 3 * tryCount) - if count <= mercuryFailCount { - w.WriteHeader(http.StatusNotFound) + t.Logf("MercuryHTTPServe:RequestURI: %s", r.RequestURI) - return - } + for key, value := range r.Form { + t.Logf("MercuryHTTPServe:FormValue: key: %s; value: %s;", key, value) + } - // start sending success messages - output := `{"chainlinkBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}` + // the streams lookup retries against the remote server 3 times before + // returning a result as retryable. + // the simulation here should force the streams lookup process to return + // retryable 2 times. 
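+				// concretely: upkeepCount = 10, 3 tries per lookup, 2 retryable rounds
+				// => mercuryFailCount = 10 * 3 * 2 = 60 mocked failures before success.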
+ // the total count of failures should be (upkeepCount * 3 * tryCount) + if count <= mercuryFailCount { + w.WriteHeader(http.StatusNotFound) - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(output)) - }) + return + } - defer mercuryServer.Stop() + // start sending success messages + output := `{"chainlinkBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}` - _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) - require.NoError(t, err) + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(output)) + }) - backend.Commit() + defer mercuryServer.Stop() - feeds, err := newFeedLookupUpkeepController(backend, registryOwner) - require.NoError(t, err, "no error expected from creating a feed lookup controller") + _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) + require.NoError(t, err) - // deploy multiple upkeeps that listen to a log emitter and need to be - // performed for each log event - _ = feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, func(int) bool { - return false - }) - _ = feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken) - _ = feeds.EnableMercury(t, backend, registry, registryOwner) - _ = feeds.VerifyEnv(t, backend, registry, registryOwner) + backend.Commit() - // start emitting events in a separate go-routine - // feed lookup relies on a single contract event log to perform multiple - // listener contracts - go func() { - // only 1 event is necessary to make all 10 upkeeps eligible - _ = feeds.EmitEvents(t, backend, 1, func() { - // pause per emit for expected block production time - time.Sleep(3 * time.Second) + feeds, err := newFeedLookupUpkeepController(backend, registryOwner) + require.NoError(t, err, "no error expected from creating a feed lookup controller") + + // deploy multiple upkeeps that listen to a log emitter and need to be + // performed for each log event + _ = feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, func(int) bool { + return false + }) + _ = feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken) + _ = 
feeds.EnableMercury(t, backend, registry, registryOwner) + _ = feeds.VerifyEnv(t, backend, registry, registryOwner) + + // start emitting events in a separate go-routine + // feed lookup relies on a single contract event log to perform multiple + // listener contracts + go func() { + // only 1 event is necessary to make all 10 upkeeps eligible + _ = feeds.EmitEvents(t, backend, 1, func() { + // pause per emit for expected block production time + time.Sleep(3 * time.Second) + }) + }() + + listener, done := listenPerformed(t, backend, registry, feeds.UpkeepsIds(), int64(1)) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) }) - }() - - listener, done := listenPerformed(t, backend, registry, feeds.UpkeepsIds(), int64(1)) - g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) - - done() + } } func TestIntegration_KeeperPluginLogUpkeep_ErrHandler(t *testing.T) { - g := gomega.NewWithT(t) - - // setup blockchain - linkOwner := testutils.MustNewSimTransactor(t) // owns all the link - registryOwner := testutils.MustNewSimTransactor(t) // registry owner - upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, - registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, - upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, + tests := []struct { + name string + logBufferVersion logprovider.BufferVersion + }{ + { + name: "default buffer", + logBufferVersion: logprovider.BufferVersionDefault, + }, + { + name: "buffer v1", + logBufferVersion: logprovider.BufferVersionV1, + }, } - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} - } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewWithT(t) + + // setup blockchain + linkOwner := testutils.MustNewSimTransactor(t) // owns all the link + registryOwner := testutils.MustNewSimTransactor(t) // registry owner + upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, + registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, + upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, + } - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - // Deploy registry - linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) - require.NoError(t, err) + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() - 
gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) + require.NoError(t, err) - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) - registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) - _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner) + registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) - upkeepCount := 10 + _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner, tc.logBufferVersion == logprovider.BufferVersionV1) - errResponses := []int{ - http.StatusUnauthorized, - http.StatusBadRequest, - http.StatusInternalServerError, - } - startMercuryServer(t, mercuryServer, func(i int) (int, []byte) { - var resp int - if i < len(errResponses) { - resp = errResponses[i] - } - if resp == 0 { - resp = http.StatusNotFound - } - return resp, nil - }) - defer mercuryServer.Stop() + upkeepCount := 10 - _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) - require.NoError(t, err) + errResponses := []int{ + http.StatusUnauthorized, + http.StatusBadRequest, + http.StatusInternalServerError, + http.StatusNotFound, + http.StatusNotFound, + http.StatusNotFound, + http.StatusUnauthorized, + } + startMercuryServer(t, mercuryServer, func(i int) (int, []byte) { + var resp int + if i < len(errResponses) { + resp = errResponses[i] + } + if resp == 0 { + resp = http.StatusNotFound + } + return resp, nil + }) + defer mercuryServer.Stop() - backend.Commit() + _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) + require.NoError(t, err) - feeds, err := newFeedLookupUpkeepController(backend, registryOwner) - require.NoError(t, err, "no error expected from creating a feed lookup controller") + backend.Commit() - // deploy multiple upkeeps that listen to a log emitter and need to be - // performed for each log event - checkResultsProvider := func(i int) bool { - return i%2 == 1 - } - require.NoError(t, feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, checkResultsProvider)) - require.NoError(t, feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken)) - require.NoError(t, feeds.EnableMercury(t, backend, registry, registryOwner)) - require.NoError(t, feeds.VerifyEnv(t, backend, registry, registryOwner)) - - startBlock := backend.Blockchain().CurrentBlock().Number.Int64() - // start emitting events in a separate go-routine - // feed lookup relies on a single contract event log to perform multiple - // listener contracts - go func() { - // only 1 event is necessary to make all 10 upkeeps eligible - _ = feeds.EmitEvents(t, backend, 1, func() { - // pause per emit for expected block production time - time.Sleep(3 * time.Second) - }) - }() + feeds, err := 
newFeedLookupUpkeepController(backend, registryOwner) + require.NoError(t, err, "no error expected from creating a feed lookup controller") - go makeDummyBlocks(t, backend, 3*time.Second, 1000) + // deploy multiple upkeeps that listen to a log emitter and need to be + // performed for each log event + checkResultsProvider := func(i int) bool { + return i%2 == 1 + } + require.NoError(t, feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, checkResultsProvider)) + require.NoError(t, feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken)) + require.NoError(t, feeds.EnableMercury(t, backend, registry, registryOwner)) + require.NoError(t, feeds.VerifyEnv(t, backend, registry, registryOwner)) + + startBlock := backend.Blockchain().CurrentBlock().Number.Int64() + // start emitting events in a separate go-routine + // feed lookup relies on a single contract event log to perform multiple + // listener contracts + go func() { + // only 1 event is necessary to make all 10 upkeeps eligible + _ = feeds.EmitEvents(t, backend, 1, func() { + // pause per emit for expected block production time + time.Sleep(3 * time.Second) + }) + }() + + go makeDummyBlocks(t, backend, 3*time.Second, 1000) + + idsToCheck := make([]*big.Int, 0) + for i, uid := range feeds.UpkeepsIds() { + if checkResultsProvider(i) { + idsToCheck = append(idsToCheck, uid) + } + } - idsToCheck := make([]*big.Int, 0) - for i, uid := range feeds.UpkeepsIds() { - if checkResultsProvider(i) { - idsToCheck = append(idsToCheck, uid) - } + listener, done := listenPerformed(t, backend, registry, idsToCheck, startBlock) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) + }) } - - listener, done := listenPerformed(t, backend, registry, idsToCheck, startBlock) - g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) - done() } func startMercuryServer(t *testing.T, mercuryServer *mercury.SimulatedMercuryServer, responder func(i int) (int, []byte)) { @@ -586,7 +644,7 @@ func listenPerformed(t *testing.T, backend *backends.SimulatedBackend, registry return listenPerformedN(t, backend, registry, ids, startBlock, 0) } -func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IKeeperRegistryMaster, backend *backends.SimulatedBackend, usr *bind.TransactOpts) ([]Node, *mercury.SimulatedMercuryServer) { +func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IKeeperRegistryMaster, backend *backends.SimulatedBackend, usr *bind.TransactOpts, useBufferV1 bool) ([]Node, *mercury.SimulatedMercuryServer) { lggr := logger.TestLogger(t) mServer := mercury.NewSimulatedMercuryServer() mServer.Start() @@ -660,7 +718,8 @@ func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IK cacheEvictionInterval = "1s" mercuryCredentialName = "%s" contractVersion = "v2.1" - `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName)) + useBufferV1 = %v + `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName, useBufferV1)) } // Setup config on contract diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index ddddb82aaed..95cf9efc944 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -21,6 +21,7 @@ import ( ocrtypes 
"github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr" @@ -70,7 +71,8 @@ func init() { var _ commontypes.Relayer = &Relayer{} //nolint:staticcheck type Relayer struct { - db *sqlx.DB + db *sqlx.DB // legacy: prefer to use ds instead + ds sqlutil.DataSource chain legacyevm.Chain lggr logger.Logger ks CSAETHKeystore @@ -93,7 +95,8 @@ type CSAETHKeystore interface { } type RelayerOpts struct { - *sqlx.DB + *sqlx.DB // legacy: prefer to use ds instead + DS sqlutil.DataSource pg.QConfig CSAETHKeystore MercuryPool wsrpc.Pool @@ -104,6 +107,9 @@ func (c RelayerOpts) Validate() error { if c.DB == nil { err = errors.Join(err, errors.New("nil DB")) } + if c.DS == nil { + err = errors.Join(err, errors.New("nil DataSource")) + } if c.QConfig == nil { err = errors.Join(err, errors.New("nil QConfig")) } @@ -129,6 +135,7 @@ func NewRelayer(lggr logger.Logger, chain legacyevm.Chain, opts RelayerOpts) (*R cdcFactory := llo.NewChannelDefinitionCacheFactory(lggr, lloORM, chain.LogPoller()) return &Relayer{ db: opts.DB, + ds: opts.DS, chain: chain, lggr: lggr, ks: opts.CSAETHKeystore, @@ -588,7 +595,7 @@ func (r *Relayer) NewMedianProvider(rargs commontypes.RelayArgs, pargs commontyp return nil, err } - medianContract, err := newMedianContract(configWatcher.ContractConfigTracker(), configWatcher.contractAddress, configWatcher.chain, rargs.JobID, r.db, lggr) + medianContract, err := newMedianContract(configWatcher.ContractConfigTracker(), configWatcher.contractAddress, configWatcher.chain, rargs.JobID, r.ds, lggr) if err != nil { return nil, err } diff --git a/core/services/relay/evm/evm_test.go b/core/services/relay/evm/evm_test.go index 41e51a7ab8f..d53fe910bc3 100644 --- a/core/services/relay/evm/evm_test.go +++ b/core/services/relay/evm/evm_test.go @@ -7,6 +7,7 @@ import ( "github.com/jmoiron/sqlx" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" @@ -16,6 +17,7 @@ func TestRelayerOpts_Validate(t *testing.T) { cfg := configtest.NewTestGeneralConfig(t) type fields struct { DB *sqlx.DB + DS sqlutil.DataSource QConfig pg.QConfig CSAETHKeystore evm.CSAETHKeystore } @@ -28,20 +30,23 @@ func TestRelayerOpts_Validate(t *testing.T) { name: "all invalid", fields: fields{ DB: nil, + DS: nil, QConfig: nil, CSAETHKeystore: nil, }, wantErrContains: `nil DB +nil DataSource nil QConfig nil Keystore`, }, { - name: "missing db, keystore", + name: "missing db, ds, keystore", fields: fields{ DB: nil, QConfig: cfg.Database(), }, wantErrContains: `nil DB +nil DataSource nil Keystore`, }, } @@ -49,6 +54,7 @@ nil Keystore`, t.Run(tt.name, func(t *testing.T) { c := evm.RelayerOpts{ DB: tt.fields.DB, + DS: tt.fields.DS, QConfig: tt.fields.QConfig, CSAETHKeystore: tt.fields.CSAETHKeystore, } diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index 471f18b4b0e..4e37770f90e 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -410,6 +410,7 @@ func (l *logPollerWrapper) handleRouteUpdate(ctx context.Context, 
activeCoordina
 	}
 	l.lggr.Debugw("LogPollerWrapper: new routes", "activeCoordinator", activeCoordinator.Hex(), "proposedCoordinator", proposedCoordinator.Hex())
+
 	l.activeCoordinator = activeCoordinator
 	l.proposedCoordinator = proposedCoordinator
 
@@ -419,10 +420,28 @@ func (l *logPollerWrapper) handleRouteUpdate(ctx context.Context, activeCoordina
 			l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "err", err)
 		}
 	}
+
+	filters := l.logPoller.GetFilters()
+	for _, filter := range filters {
+		// skip filters that don't carry this wrapper's prefix (the length check
+		// guards the slice expression against shorter filter names)
+		if len(filter.Name) < len(l.filterPrefix()) || filter.Name[:len(l.filterPrefix())] != l.filterPrefix() {
+			continue
+		}
+		if filter.Name == l.filterName(l.activeCoordinator) || filter.Name == l.filterName(l.proposedCoordinator) {
+			continue
+		}
+		if err := l.logPoller.UnregisterFilter(ctx, filter.Name); err != nil {
+			l.lggr.Errorw("LogPollerWrapper: Failed to unregister filter", "filterName", filter.Name, "err", err)
+			continue
+		}
+		l.lggr.Debugw("LogPollerWrapper: Successfully unregistered filter", "filterName", filter.Name)
+	}
+}
+
+func (l *logPollerWrapper) filterPrefix() string {
+	return "FunctionsLogPollerWrapper:" + l.pluginConfig.DONID
 }
 
-func filterName(addr common.Address) string {
-	return logpoller.FilterName("FunctionsLogPollerWrapper", addr.String())
+func (l *logPollerWrapper) filterName(addr common.Address) string {
+	return logpoller.FilterName(l.filterPrefix(), addr.String())
 }
 
 func (l *logPollerWrapper) registerFilters(ctx context.Context, coordinatorAddress common.Address) error {
@@ -432,7 +451,7 @@ func (l *logPollerWrapper) registerFilters(ctx context.Context, coordinatorAddre
 	return l.logPoller.RegisterFilter(
 		ctx,
 		logpoller.Filter{
-			Name: filterName(coordinatorAddress),
+			Name: l.filterName(coordinatorAddress),
 			EventSigs: []common.Hash{
 				functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(),
 				functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(),
diff --git a/core/services/relay/evm/functions/logpoller_wrapper_test.go b/core/services/relay/evm/functions/logpoller_wrapper_test.go
index b9a1684050d..583e6617417 100644
--- a/core/services/relay/evm/functions/logpoller_wrapper_test.go
+++ b/core/services/relay/evm/functions/logpoller_wrapper_test.go
@@ -95,6 +95,7 @@ func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) {
 	lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{}, nil)
 	client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil)
 	lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil)
+	lp.On("GetFilters").Return(map[string]logpoller.Filter{}, nil)
 
 	subscriber := newSubscriber(1)
 	lpWrapper.SubscribeToUpdates(ctx, "mock_subscriber", subscriber)
@@ -127,6 +128,8 @@ func TestLogPollerWrapper_LatestEvents_ReorgHandling(t *testing.T) {
 	lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil)
 	client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil)
 	lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil)
+	lp.On("GetFilters").Return(map[string]logpoller.Filter{}, nil)
+
 	subscriber := newSubscriber(1)
 	lpWrapper.SubscribeToUpdates(ctx, "mock_subscriber", subscriber)
 	mockedLog := getMockedRequestLog(t)
@@ -213,3 +216,34 @@ func TestLogPollerWrapper_FilterPreviouslyDetectedEvents_FiltersPreviouslyDetect
 	assert.Equal(t, 0, len(mockedDetectedEvents.detectedEventsOrdered))
 	assert.Equal(t, 0, len(mockedDetectedEvents.isPreviouslyDetected))
 }
+
+func 
TestLogPollerWrapper_UnregisterOldFiltersOnRouteUpgrade(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + lp, lpWrapper, _ := setUp(t, 100_000) // check only once + wrapper := lpWrapper.(*logPollerWrapper) + + activeCoord := common.HexToAddress("0x1") + proposedCoord := common.HexToAddress("0x2") + newActiveCoord := proposedCoord + newProposedCoord := common.HexToAddress("0x3") + + wrapper.activeCoordinator = activeCoord + wrapper.proposedCoordinator = proposedCoord + activeCoordFilterName := wrapper.filterName(activeCoord) + proposedCoordFilterName := wrapper.filterName(proposedCoord) + newProposedCoordFilterName := wrapper.filterName(newProposedCoord) + + lp.On("RegisterFilter", ctx, mock.Anything).Return(nil) + existingFilters := map[string]logpoller.Filter{ + activeCoordFilterName: {Name: activeCoordFilterName}, + proposedCoordFilterName: {Name: proposedCoordFilterName}, + newProposedCoordFilterName: {Name: newProposedCoordFilterName}, + } + lp.On("GetFilters").Return(existingFilters, nil) + lp.On("UnregisterFilter", ctx, activeCoordFilterName).Return(nil) + + wrapper.handleRouteUpdate(ctx, newActiveCoord, newProposedCoord) + + lp.AssertCalled(t, "UnregisterFilter", ctx, activeCoordFilterName) +} diff --git a/core/services/relay/evm/median.go b/core/services/relay/evm/median.go index e3200d8e867..2407cff7140 100644 --- a/core/services/relay/evm/median.go +++ b/core/services/relay/evm/median.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/jmoiron/sqlx" "github.com/pkg/errors" "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median" @@ -15,6 +14,7 @@ import ( ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" offchain_aggregator_wrapper "github.com/smartcontractkit/chainlink/v2/core/internal/gethwrappers2/generated/offchainaggregator" "github.com/smartcontractkit/chainlink/v2/core/logger" @@ -30,7 +30,7 @@ type medianContract struct { requestRoundTracker *RequestRoundTracker } -func newMedianContract(configTracker types.ContractConfigTracker, contractAddress common.Address, chain legacyevm.Chain, specID int32, db *sqlx.DB, lggr logger.Logger) (*medianContract, error) { +func newMedianContract(configTracker types.ContractConfigTracker, contractAddress common.Address, chain legacyevm.Chain, specID int32, ds sqlutil.DataSource, lggr logger.Logger) (*medianContract, error) { lggr = lggr.Named("MedianContract") contract, err := offchain_aggregator_wrapper.NewOffchainAggregator(contractAddress, chain.Client()) if err != nil { @@ -58,16 +58,15 @@ func newMedianContract(configTracker types.ContractConfigTracker, contractAddres chain.LogBroadcaster(), specID, lggr, - db, - NewRoundRequestedDB(db.DB, specID, lggr), + ds, + NewRoundRequestedDB(ds, specID, lggr), chain.Config().EVM(), - chain.Config().Database(), ), }, nil } -func (oc *medianContract) Start(context.Context) error { +func (oc *medianContract) Start(ctx context.Context) error { return oc.StartOnce("MedianContract", func() error { - return oc.requestRoundTracker.Start() + return oc.requestRoundTracker.Start(ctx) }) } diff --git a/core/services/relay/evm/mocks/request_round_db.go b/core/services/relay/evm/mocks/request_round_db.go index 
eb27e8bd526..725fc6e6b37 100644 --- a/core/services/relay/evm/mocks/request_round_db.go +++ b/core/services/relay/evm/mocks/request_round_db.go @@ -3,9 +3,12 @@ package mocks import ( - pg "github.com/smartcontractkit/chainlink/v2/core/services/pg" - ocr2aggregator "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" + context "context" + + evm "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" mock "github.com/stretchr/testify/mock" + + ocr2aggregator "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" ) // RequestRoundDB is an autogenerated mock type for the RequestRoundDB type @@ -13,9 +16,9 @@ type RequestRoundDB struct { mock.Mock } -// LoadLatestRoundRequested provides a mock function with given fields: -func (_m *RequestRoundDB) LoadLatestRoundRequested() (ocr2aggregator.OCR2AggregatorRoundRequested, error) { - ret := _m.Called() +// LoadLatestRoundRequested provides a mock function with given fields: _a0 +func (_m *RequestRoundDB) LoadLatestRoundRequested(_a0 context.Context) (ocr2aggregator.OCR2AggregatorRoundRequested, error) { + ret := _m.Called(_a0) if len(ret) == 0 { panic("no return value specified for LoadLatestRoundRequested") @@ -23,17 +26,17 @@ func (_m *RequestRoundDB) LoadLatestRoundRequested() (ocr2aggregator.OCR2Aggrega var r0 ocr2aggregator.OCR2AggregatorRoundRequested var r1 error - if rf, ok := ret.Get(0).(func() (ocr2aggregator.OCR2AggregatorRoundRequested, error)); ok { - return rf() + if rf, ok := ret.Get(0).(func(context.Context) (ocr2aggregator.OCR2AggregatorRoundRequested, error)); ok { + return rf(_a0) } - if rf, ok := ret.Get(0).(func() ocr2aggregator.OCR2AggregatorRoundRequested); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) ocr2aggregator.OCR2AggregatorRoundRequested); ok { + r0 = rf(_a0) } else { r0 = ret.Get(0).(ocr2aggregator.OCR2AggregatorRoundRequested) } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -41,17 +44,35 @@ func (_m *RequestRoundDB) LoadLatestRoundRequested() (ocr2aggregator.OCR2Aggrega return r0, r1 } -// SaveLatestRoundRequested provides a mock function with given fields: tx, rr -func (_m *RequestRoundDB) SaveLatestRoundRequested(tx pg.Queryer, rr ocr2aggregator.OCR2AggregatorRoundRequested) error { - ret := _m.Called(tx, rr) +// SaveLatestRoundRequested provides a mock function with given fields: ctx, rr +func (_m *RequestRoundDB) SaveLatestRoundRequested(ctx context.Context, rr ocr2aggregator.OCR2AggregatorRoundRequested) error { + ret := _m.Called(ctx, rr) if len(ret) == 0 { panic("no return value specified for SaveLatestRoundRequested") } var r0 error - if rf, ok := ret.Get(0).(func(pg.Queryer, ocr2aggregator.OCR2AggregatorRoundRequested) error); ok { - r0 = rf(tx, rr) + if rf, ok := ret.Get(0).(func(context.Context, ocr2aggregator.OCR2AggregatorRoundRequested) error); ok { + r0 = rf(ctx, rr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Transact provides a mock function with given fields: _a0, _a1 +func (_m *RequestRoundDB) Transact(_a0 context.Context, _a1 func(evm.RequestRoundDB) error) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Transact") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, func(evm.RequestRoundDB) error) error); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) } diff --git a/core/services/relay/evm/request_round_db.go 
b/core/services/relay/evm/request_round_db.go index b3a5b01bc2c..2b6ae10782d 100644 --- a/core/services/relay/evm/request_round_db.go +++ b/core/services/relay/evm/request_round_db.go @@ -1,43 +1,50 @@ package evm import ( - "database/sql" + "context" "encoding/json" "github.com/pkg/errors" "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) // RequestRoundDB stores requested rounds for querying by the median plugin. type RequestRoundDB interface { - SaveLatestRoundRequested(tx pg.Queryer, rr ocr2aggregator.OCR2AggregatorRoundRequested) error - LoadLatestRoundRequested() (rr ocr2aggregator.OCR2AggregatorRoundRequested, err error) + SaveLatestRoundRequested(ctx context.Context, rr ocr2aggregator.OCR2AggregatorRoundRequested) error + LoadLatestRoundRequested(context.Context) (rr ocr2aggregator.OCR2AggregatorRoundRequested, err error) + Transact(context.Context, func(db RequestRoundDB) error) error } var _ RequestRoundDB = &requestRoundDB{} //go:generate mockery --quiet --name RequestRoundDB --output ./mocks/ --case=underscore type requestRoundDB struct { - *sql.DB + ds sqlutil.DataSource oracleSpecID int32 lggr logger.Logger } // NewDB returns a new DB scoped to this oracleSpecID -func NewRoundRequestedDB(sqldb *sql.DB, oracleSpecID int32, lggr logger.Logger) *requestRoundDB { - return &requestRoundDB{sqldb, oracleSpecID, lggr} +func NewRoundRequestedDB(ds sqlutil.DataSource, oracleSpecID int32, lggr logger.Logger) *requestRoundDB { + return &requestRoundDB{ds, oracleSpecID, lggr} } -func (d *requestRoundDB) SaveLatestRoundRequested(tx pg.Queryer, rr ocr2aggregator.OCR2AggregatorRoundRequested) error { +func (d *requestRoundDB) Transact(ctx context.Context, fn func(db RequestRoundDB) error) error { + return sqlutil.Transact(ctx, func(ds sqlutil.DataSource) RequestRoundDB { + return NewRoundRequestedDB(ds, d.oracleSpecID, d.lggr) + }, d.ds, nil, fn) +} + +func (d *requestRoundDB) SaveLatestRoundRequested(ctx context.Context, rr ocr2aggregator.OCR2AggregatorRoundRequested) error { rawLog, err := json.Marshal(rr.Raw) if err != nil { return errors.Wrap(err, "could not marshal log as JSON") } - _, err = tx.Exec(` + _, err = d.ds.ExecContext(ctx, ` INSERT INTO ocr2_latest_round_requested (ocr2_oracle_spec_id, requester, config_digest, epoch, round, raw) VALUES ($1,$2,$3,$4,$5,$6) ON CONFLICT (ocr2_oracle_spec_id) DO UPDATE SET requester = EXCLUDED.requester, @@ -50,9 +57,9 @@ VALUES ($1,$2,$3,$4,$5,$6) ON CONFLICT (ocr2_oracle_spec_id) DO UPDATE SET return errors.Wrap(err, "could not save latest round requested") } -func (d *requestRoundDB) LoadLatestRoundRequested() (ocr2aggregator.OCR2AggregatorRoundRequested, error) { +func (d *requestRoundDB) LoadLatestRoundRequested(ctx context.Context) (ocr2aggregator.OCR2AggregatorRoundRequested, error) { rr := ocr2aggregator.OCR2AggregatorRoundRequested{} - rows, err := d.Query(` + rows, err := d.ds.QueryContext(ctx, ` SELECT requester, config_digest, epoch, round, raw FROM ocr2_latest_round_requested WHERE ocr2_oracle_spec_id = $1 diff --git a/core/services/relay/evm/request_round_db_test.go b/core/services/relay/evm/request_round_db_test.go index d10d6a41a61..10932c4e229 100644 --- a/core/services/relay/evm/request_round_db_test.go +++ b/core/services/relay/evm/request_round_db_test.go @@ 
-12,7 +12,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/testhelpers" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" ) @@ -23,8 +22,8 @@ func Test_DB_LatestRoundRequested(t *testing.T) { require.NoError(t, err) lggr := logger.TestLogger(t) - db := evm.NewRoundRequestedDB(sqlDB.DB, 1, lggr) - db2 := evm.NewRoundRequestedDB(sqlDB.DB, 2, lggr) + db := evm.NewRoundRequestedDB(sqlDB, 1, lggr) + db2 := evm.NewRoundRequestedDB(sqlDB, 2, lggr) rawLog := cltest.LogFromFixture(t, "../../../testdata/jsonrpc/round_requested_log_1_1.json") @@ -38,8 +37,8 @@ func Test_DB_LatestRoundRequested(t *testing.T) { t.Run("saves latest round requested", func(t *testing.T) { ctx := testutils.Context(t) - err := pg.SqlxTransaction(ctx, sqlDB, logger.TestLogger(t), func(q pg.Queryer) error { - return db.SaveLatestRoundRequested(q, rr) + err := db.Transact(ctx, func(tx evm.RequestRoundDB) error { + return tx.SaveLatestRoundRequested(ctx, rr) }) require.NoError(t, err) @@ -54,19 +53,20 @@ func Test_DB_LatestRoundRequested(t *testing.T) { Raw: rawLog, } - err = pg.SqlxTransaction(ctx, sqlDB, logger.TestLogger(t), func(q pg.Queryer) error { - return db.SaveLatestRoundRequested(q, rr) + err = db.Transact(ctx, func(tx evm.RequestRoundDB) error { + return tx.SaveLatestRoundRequested(ctx, rr) }) require.NoError(t, err) }) t.Run("loads latest round requested", func(t *testing.T) { + ctx := testutils.Context(t) // There is no round for db2 - lrr, err := db2.LoadLatestRoundRequested() + lrr, err := db2.LoadLatestRoundRequested(ctx) require.NoError(t, err) require.Equal(t, 0, int(lrr.Epoch)) - lrr, err = db.LoadLatestRoundRequested() + lrr, err = db.LoadLatestRoundRequested(ctx) require.NoError(t, err) assert.Equal(t, rr, lrr) diff --git a/core/services/relay/evm/request_round_tracker.go b/core/services/relay/evm/request_round_tracker.go index 1e77ce28089..bb39271f278 100644 --- a/core/services/relay/evm/request_round_tracker.go +++ b/core/services/relay/evm/request_round_tracker.go @@ -9,19 +9,17 @@ import ( gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/pkg/errors" - "github.com/jmoiron/sqlx" - "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/log" offchain_aggregator_wrapper "github.com/smartcontractkit/chainlink/v2/core/internal/gethwrappers2/generated/offchainaggregator" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) // RequestRoundTracker subscribes to new request round logs. 
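The hunks above and below migrate the OCR2 round-request persistence from the legacy `pg.Queryer`/`pg.Q` helpers to the context-aware `sqlutil.DataSource`, with transaction control moved behind `RequestRoundDB.Transact`, which rebinds a fresh store to the transactional data source before invoking the callback. A minimal sketch of that rebinding pattern, using only `database/sql` (the `rounds` table, `Store`, and `Transact` names are illustrative stand-ins, not Chainlink APIs):

```go
package store

import (
	"context"
	"database/sql"
)

// DataSource is the subset of *sql.DB and *sql.Tx behavior the store needs,
// mirroring the role sqlutil.DataSource plays in the diff above.
type DataSource interface {
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
}

// Store is scoped to one data source and one spec ID, like requestRoundDB.
type Store struct {
	ds     DataSource
	specID int32
}

func NewStore(ds DataSource, specID int32) *Store {
	return &Store{ds: ds, specID: specID}
}

// Save writes through whatever data source the store was built with: the
// connection pool outside a transaction, or a *sql.Tx inside one.
func (s *Store) Save(ctx context.Context, raw []byte) error {
	_, err := s.ds.ExecContext(ctx,
		`INSERT INTO rounds (spec_id, raw) VALUES ($1, $2)
		 ON CONFLICT (spec_id) DO UPDATE SET raw = EXCLUDED.raw`, s.specID, raw)
	return err
}

// Transact opens a transaction, rebinds a new Store to it, and commits or
// rolls back around the callback -- the same shape as requestRoundDB.Transact.
func Transact(ctx context.Context, db *sql.DB, specID int32, fn func(*Store) error) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	if err := fn(NewStore(tx, specID)); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}
```

Because the callback receives a store bound to the transaction, a caller like `RequestRoundTracker.HandleLog` can save the round and mark the log broadcast consumed atomically, without threading a queryer through every method signature.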
@@ -35,7 +33,7 @@ type RequestRoundTracker struct { jobID int32 lggr logger.SugaredLogger odb RequestRoundDB - q pg.Q + ds sqlutil.DataSource blockTranslator ocrcommon.BlockTranslator // Start/Stop lifecycle @@ -56,10 +54,9 @@ func NewRequestRoundTracker( logBroadcaster log.Broadcaster, jobID int32, lggr logger.Logger, - db *sqlx.DB, + ds sqlutil.DataSource, odb RequestRoundDB, chain ocrcommon.Config, - qConfig pg.QConfig, ) (o *RequestRoundTracker) { ctx, cancel := context.WithCancel(context.Background()) return &RequestRoundTracker{ @@ -70,7 +67,7 @@ func NewRequestRoundTracker( jobID: jobID, lggr: logger.Sugared(lggr), odb: odb, - q: pg.NewQ(db, lggr, qConfig), + ds: ds, blockTranslator: ocrcommon.NewBlockTranslator(chain, ethClient, lggr), ctx: ctx, ctxCancel: cancel, @@ -79,9 +76,9 @@ func NewRequestRoundTracker( // Start must be called before logs can be delivered // It ought to be called before starting OCR -func (t *RequestRoundTracker) Start() error { +func (t *RequestRoundTracker) Start(ctx context.Context) error { return t.StartOnce("RequestRoundTracker", func() (err error) { - t.latestRoundRequested, err = t.odb.LoadLatestRoundRequested() + t.latestRoundRequested, err = t.odb.LoadLatestRoundRequested(ctx) if err != nil { return errors.Wrap(err, "RequestRoundTracker#Start: failed to load latest round requested") } @@ -141,8 +138,9 @@ func (t *RequestRoundTracker) HandleLog(lb log.Broadcast) { return } if IsLaterThan(raw, t.latestRoundRequested.Raw) { - err = t.q.Transaction(func(q pg.Queryer) error { - if err = t.odb.SaveLatestRoundRequested(q, *rr); err != nil { + ctx := context.TODO() //TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 + err = t.odb.Transact(ctx, func(tx RequestRoundDB) error { + if err = tx.SaveLatestRoundRequested(ctx, *rr); err != nil { return err } return t.logBroadcaster.MarkConsumed(t.ctx, lb) diff --git a/core/services/relay/evm/request_round_tracker_test.go b/core/services/relay/evm/request_round_tracker_test.go index cb2ee2a8d72..324b76dc6de 100644 --- a/core/services/relay/evm/request_round_tracker_test.go +++ b/core/services/relay/evm/request_round_tracker_test.go @@ -93,7 +93,6 @@ func newContractTrackerUni(t *testing.T, opts ...interface{}) (uni contractTrack db, uni.db, chain.EVM(), - chain.Database(), ) return uni @@ -174,6 +173,12 @@ func Test_OCRContractTracker_HandleLog_OCRContractLatestRoundRequested(t *testin uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.MatchedBy(func(rr ocr2aggregator.OCR2AggregatorRoundRequested) bool { return rr.Epoch == 1 && rr.Round == 1 })).Return(nil) + transact := uni.db.On("Transact", mock.Anything, mock.Anything) + transact.Run(func(args mock.Arguments) { + fn := args[1].(func(evm.RequestRoundDB) error) + err2 := fn(uni.db) + transact.ReturnArguments = []any{err2} + }) uni.requestRoundTracker.HandleLog(logBroadcast) @@ -245,6 +250,12 @@ func Test_OCRContractTracker_HandleLog_OCRContractLatestRoundRequested(t *testin uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.Anything).Return(errors.New("something exploded")) + transact := uni.db.On("Transact", mock.Anything, mock.Anything) + transact.Run(func(args mock.Arguments) { + fn := args[1].(func(evm.RequestRoundDB) error) + err := fn(uni.db) + transact.ReturnArguments = []any{err} + }) uni.requestRoundTracker.HandleLog(logBroadcast) @@ -271,9 +282,10 @@ func Test_OCRContractTracker_HandleLog_OCRContractLatestRoundRequested(t *testin uni.lb.On("Register", 
uni.requestRoundTracker, mock.Anything).Return(func() { eventuallyCloseLogBroadcaster.ItHappened() }) uni.lb.On("IsConnected").Return(true).Maybe() - uni.db.On("LoadLatestRoundRequested").Return(rr, nil) + uni.db.On("LoadLatestRoundRequested", mock.Anything).Return(rr, nil) - require.NoError(t, uni.requestRoundTracker.Start()) + ctx := testutils.Context(t) + require.NoError(t, uni.requestRoundTracker.Start(ctx)) configDigest, epoch, round, err := uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) require.NoError(t, err) diff --git a/core/services/workflows/delegate.go b/core/services/workflows/delegate.go index 2c95b478709..fb9540844fa 100644 --- a/core/services/workflows/delegate.go +++ b/core/services/workflows/delegate.go @@ -35,13 +35,13 @@ consensus: aggregation_config: "0x1111111111111111111100000000000000000000000000000000000000000000": deviation: "0.001" - heartbeat: "30m" + heartbeat: 3600 "0x2222222222222222222200000000000000000000000000000000000000000000": deviation: "0.001" - heartbeat: "30m" + heartbeat: 3600 "0x3333333333333333333300000000000000000000000000000000000000000000": deviation: "0.001" - heartbeat: "30m" + heartbeat: 3600 encoder: "EVM" encoder_config: abi: "mercury_reports bytes[]" @@ -49,19 +49,17 @@ consensus: targets: - type: "write_polygon-testnet-mumbai" inputs: - report: - - "$(evm_median.outputs.reports)" + report: "$(evm_median.outputs.report)" config: address: "0x3F3554832c636721F1fD1822Ccca0354576741Ef" - params: ["$(inputs.report)"] + params: ["$(report)"] abi: "receive(report bytes)" - type: "write_ethereum-testnet-sepolia" inputs: - report: - - "$(evm_median.outputs.reports)" + report: "$(evm_median.outputs.report)" config: address: "0x54e220867af6683aE6DcBF535B4f952cB5116510" - params: ["$(inputs.report)"] + params: ["$(report)"] abi: "receive(report bytes)" ` diff --git a/core/services/workflows/engine.go b/core/services/workflows/engine.go index dfc2fb347ae..8198152fb14 100644 --- a/core/services/workflows/engine.go +++ b/core/services/workflows/engine.go @@ -564,6 +564,7 @@ func NewEngine(cfg Config) (engine *Engine, err error) { // - that there are no step `ref` called `trigger` as this is reserved for any triggers // - that there are no duplicate `ref`s // - that the `ref` for any triggers is empty -- and filled in with `trigger` + // - that the resulting graph is connected (i.e. no disjoint subgraphs exist) // - etc. 
workflow, err := Parse(cfg.Spec) diff --git a/core/services/workflows/engine_test.go b/core/services/workflows/engine_test.go index a87e841121d..e456eefb729 100644 --- a/core/services/workflows/engine_test.go +++ b/core/services/workflows/engine_test.go @@ -86,9 +86,9 @@ func TestEngineWithHardcodedWorkflow(t *testing.T) { "v1.0.0", ), func(req capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { - list := req.Inputs.Underlying["report"].(*values.List) + m := req.Inputs.Underlying["report"].(*values.Map) return capabilities.CapabilityResponse{ - Value: list.Underlying[0], + Value: m, }, nil }, ) @@ -152,11 +152,10 @@ consensus: targets: - type: "write_polygon-testnet-mumbai" inputs: - report: - - "$(evm_median.outputs.reports)" + report: "$(evm_median.outputs.report)" config: address: "0x3F3554832c636721F1fD1822Ccca0354576741Ef" - params: ["$(inputs.report)"] + params: ["$(report)"] abi: "receive(report bytes)" ` ) @@ -207,9 +206,9 @@ func mockConsensus() *mockCapability { ), func(req capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { obs := req.Inputs.Underlying["observations"] - reports := obs.(*values.List) + report := obs.(*values.List) rm := map[string]any{ - "reports": reports.Underlying[0], + "report": report.Underlying[0], } rv, err := values.NewMap(rm) if err != nil { @@ -232,9 +231,9 @@ func mockTarget() *mockCapability { "v1.0.0", ), func(req capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { - list := req.Inputs.Underlying["report"].(*values.List) + m := req.Inputs.Underlying["report"].(*values.Map) return capabilities.CapabilityResponse{ - Value: list.Underlying[0], + Value: m, }, nil }, ) @@ -314,11 +313,10 @@ consensus: targets: - type: "write_polygon-testnet-mumbai" inputs: - report: - - "$(evm_median.outputs.reports)" + report: "$(evm_median.outputs.report)" config: address: "0x3F3554832c636721F1fD1822Ccca0354576741Ef" - params: ["$(inputs.report)"] + params: ["$(report)"] abi: "receive(report bytes)" ` ) diff --git a/core/services/workflows/models.go b/core/services/workflows/models.go index 3c15c1bc778..9c1c56d6054 100644 --- a/core/services/workflows/models.go +++ b/core/services/workflows/models.go @@ -1,6 +1,7 @@ package workflows import ( + "errors" "fmt" "github.com/dominikbraun/graph" @@ -182,6 +183,10 @@ func Parse(yamlWorkflow string) (*workflow, error) { } step.dependencies = refs + if stepRef != keywordTrigger && len(refs) == 0 { + return nil, errors.New("all non-trigger steps must have a dependent ref") + } + for _, r := range refs { innerErr = g.AddEdge(r, step.Ref) if innerErr != nil { diff --git a/core/services/workflows/models_test.go b/core/services/workflows/models_test.go index 93b5bf64f56..61aced2ed19 100644 --- a/core/services/workflows/models_test.go +++ b/core/services/workflows/models_test.go @@ -186,6 +186,31 @@ targets: "a-target": {}, }, }, + { + name: "non-trigger step with no dependent refs", + yaml: ` +triggers: + - type: "a-trigger" + - type: "a-second-trigger" +actions: + - type: "an-action" + ref: "an-action" + inputs: + hello: "world" +consensus: + - type: "a-consensus" + ref: "a-consensus" + inputs: + trigger_output: $(trigger.outputs) + action_output: $(an-action.outputs) +targets: + - type: "a-target" + ref: "a-target" + inputs: + consensus_output: $(a-consensus.outputs) +`, + errMsg: "all non-trigger steps must have a dependent ref", + }, } for _, tc := range testCases { diff --git a/core/services/workflows/models_yaml.go b/core/services/workflows/models_yaml.go 
index aceabb44ec2..396811c3729 100644 --- a/core/services/workflows/models_yaml.go +++ b/core/services/workflows/models_yaml.go @@ -1,12 +1,14 @@ package workflows import ( + "bytes" "encoding/json" "fmt" "slices" "strings" "github.com/invopop/jsonschema" + "github.com/shopspring/decimal" "sigs.k8s.io/yaml" ) @@ -71,6 +73,84 @@ func (w workflowSpecYaml) toWorkflowSpec() workflowSpec { } } +type mapping map[string]any + +func (m *mapping) UnmarshalJSON(b []byte) error { + mp := map[string]any{} + + d := json.NewDecoder(bytes.NewReader(b)) + d.UseNumber() + + err := d.Decode(&mp) + if err != nil { + return err + } + + nm, err := convertNumbers(mp) + if err != nil { + return err + } + + *m = (mapping)(nm) + return err +} + +func convertNumber(el any) (any, error) { + switch elv := el.(type) { + case json.Number: + if strings.Contains(elv.String(), ".") { + f, err := elv.Float64() + if err == nil { + return decimal.NewFromFloat(f), nil + } + } + + return elv.Int64() + default: + return el, nil + } +} + +func convertNumbers(m map[string]any) (map[string]any, error) { + nm := map[string]any{} + for k, v := range m { + switch tv := v.(type) { + case map[string]any: + cm, err := convertNumbers(tv) + if err != nil { + return nil, err + } + + nm[k] = cm + case []any: + na := make([]any, len(tv)) + for i, v := range tv { + cv, err := convertNumber(v) + if err != nil { + return nil, err + } + + na[i] = cv + } + + nm[k] = na + default: + cv, err := convertNumber(v) + if err != nil { + return nil, err + } + + nm[k] = cv + } + } + + return nm, nil +} + +func (m mapping) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]any(m)) +} + // stepDefinitionYaml is the YAML representation of a step in a workflow. // // It allows for multiple ways of defining a step, which we later @@ -131,7 +211,7 @@ type stepDefinitionYaml struct { // - Input reference cannot be resolved. // - Input is defined on triggers // NOTE: Should introduce a custom validator to cover trigger case - Inputs map[string]any `json:"inputs,omitempty"` + Inputs mapping `json:"inputs,omitempty"` // The configuration of a Capability will be done using the “config” property. Each capability is responsible for defining an external interface used during setup. This interface may be unique or identical, meaning multiple Capabilities might use the same configuration properties. // @@ -149,7 +229,7 @@ type stepDefinitionYaml struct { // address: "0xaabbcc" // method: "updateFeedValues(report bytes, role uint8)" // params: [$(inputs.report), 1] - Config map[string]any `json:"config" jsonschema:"required"` + Config mapping `json:"config" jsonschema:"required"` } // toStepDefinition converts a stepDefinitionYaml to a stepDefinition. 
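The `mapping` type above exists because a stock `json.Unmarshal` into `map[string]any` decodes every JSON number as `float64`, which loses integer precision above 2^53 and erases the integer-versus-decimal distinction the engine's configs rely on (e.g. `heartbeat: 3600` must round-trip as `int64(3600)`). A small self-contained demonstration of the failure mode and of the `UseNumber` decoding that `mapping.UnmarshalJSON` builds on (variable names are illustrative):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`{"heartbeat": 3600, "big": 9007199254740993, "deviation": 0.001}`)

	// Default decoding: every number becomes a float64, and 9007199254740993
	// (2^53 + 1) cannot be represented exactly in a float64.
	var plain map[string]any
	_ = json.Unmarshal(data, &plain)
	fmt.Printf("%T %v\n", plain["big"], plain["big"]) // float64 9.007199254740992e+15

	// With UseNumber the decoder keeps numbers as json.Number strings, so
	// integers can be recovered exactly -- this is what mapping.UnmarshalJSON
	// does before handing off to convertNumbers.
	d := json.NewDecoder(bytes.NewReader(data))
	d.UseNumber()
	var exact map[string]any
	_ = d.Decode(&exact)
	i, _ := exact["big"].(json.Number).Int64()
	fmt.Printf("%T %v\n", i, i) // int64 9007199254740993
}
```

From a `json.Number`, `convertNumber` can then branch on the presence of a decimal point: integral values parse exactly via `Int64()`, while fractional values go through `decimal.NewFromFloat`, which is exactly what `TestMappingCustomType` below asserts.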
diff --git a/core/services/workflows/models_yaml_test.go b/core/services/workflows/models_yaml_test.go index 8f2461c49b5..411781a3782 100644 --- a/core/services/workflows/models_yaml_test.go +++ b/core/services/workflows/models_yaml_test.go @@ -8,6 +8,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/santhosh-tekuri/jsonschema/v5" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "sigs.k8s.io/yaml" @@ -55,7 +57,7 @@ func TestWorkflowSpecMarshalling(t *testing.T) { t.Run("Type coercion", func(t *testing.T) { workflowBytes := fixtureReader("workflow_1") - spec := workflowSpec{} + spec := workflowSpecYaml{} err := yaml.Unmarshal(workflowBytes, &spec) require.NoError(t, err) @@ -108,8 +110,8 @@ func TestWorkflowSpecMarshalling(t *testing.T) { numbers, ok := booleanCoercions["numbers"] require.True(t, ok, "expected numbers to be present in boolean_coercions") for _, v := range numbers.([]interface{}) { - _, ok = v.(float64) - require.True(t, ok, "expected float64 but got %T", v) + _, ok = v.(int64) + require.True(t, ok, "expected int64 but got %T", v) } }) @@ -227,3 +229,30 @@ func TestJsonSchema(t *testing.T) { }) }) } + +func TestParsesIntsCorrectly(t *testing.T) { + wf, err := Parse(hardcodedWorkflow) + require.NoError(t, err) + + n, err := wf.Vertex("evm_median") + require.NoError(t, err) + + assert.Equal(t, int64(3600), n.Config["aggregation_config"].(map[string]any)["0x1111111111111111111100000000000000000000000000000000000000000000"].(map[string]any)["heartbeat"]) + +} + +func TestMappingCustomType(t *testing.T) { + m := mapping(map[string]any{}) + data := ` +{ + "foo": 100, + "bar": 100.00, + "baz": { "gnat": 11.10 } +}` + + err := m.UnmarshalJSON([]byte(data)) + require.NoError(t, err) + assert.Equal(t, int64(100), m["foo"], m) + assert.Equal(t, decimal.NewFromFloat(100.00), m["bar"], m) + assert.Equal(t, decimal.NewFromFloat(11.10), m["baz"].(map[string]any)["gnat"], m) +} diff --git a/core/services/workflows/state.go b/core/services/workflows/state.go index f70b4661897..c229b14e1dd 100644 --- a/core/services/workflows/state.go +++ b/core/services/workflows/state.go @@ -216,6 +216,20 @@ func deepMap(input any, transform func(el string) (any, error)) (any, error) { } return nv, nil + case mapping: + // coerce mapping to map[string]any + mp := map[string]any(tv) + + nm := map[string]any{} + for k, v := range mp { + nv, err := deepMap(v, transform) + if err != nil { + return nil, err + } + + nm[k] = nv + } + return nm, nil case map[string]any: nm := map[string]any{} for k, v := range tv { diff --git a/core/services/workflows/testdata/fixtures/workflows/workflow_schema.json b/core/services/workflows/testdata/fixtures/workflows/workflow_schema.json index 04400ce20fc..2cb02c7921d 100644 --- a/core/services/workflows/testdata/fixtures/workflows/workflow_schema.json +++ b/core/services/workflows/testdata/fixtures/workflows/workflow_schema.json @@ -3,6 +3,9 @@ "$id": "https://github.com/smartcontractkit/chainlink/v2/core/services/workflows/workflow-spec-yaml", "$ref": "#/$defs/workflowSpecYaml", "$defs": { + "mapping": { + "type": "object" + }, "stepDefinitionType": { "oneOf": [ { @@ -48,10 +51,10 @@ "pattern": "^[a-z0-9_]+$" }, "inputs": { - "type": "object" + "$ref": "#/$defs/mapping" }, "config": { - "type": "object" + "$ref": "#/$defs/mapping" } }, "additionalProperties": false, @@ -97,4 +100,4 @@ ] } } -} \ No newline at end of file +}
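The new guard in `Parse` (models.go above) rejects any non-trigger step whose inputs reference no upstream step: such a step would be a disconnected vertex in the workflow graph, unreachable during execution. A simplified standalone sketch of that check, where the regexp-based ref extraction is an illustrative stand-in for the engine's recursive walk over the inputs mapping:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

// refPattern matches $(step_ref. ...) interpolations; a hypothetical
// simplification of the workflow engine's real parser.
var refPattern = regexp.MustCompile(`\$\(([a-z0-9_-]+)\.`)

// dependencyRefs extracts the upstream step refs an inputs blob depends on.
func dependencyRefs(inputs string) []string {
	var refs []string
	for _, m := range refPattern.FindAllStringSubmatch(inputs, -1) {
		refs = append(refs, m[1])
	}
	return refs
}

// validateStep mirrors the guard added to Parse: every non-trigger step must
// depend on at least one ref, otherwise the graph would contain a disjoint
// subgraph that could never run.
func validateStep(stepRef, inputs string) error {
	const keywordTrigger = "trigger"
	if stepRef != keywordTrigger && len(dependencyRefs(inputs)) == 0 {
		return errors.New("all non-trigger steps must have a dependent ref")
	}
	return nil
}

func main() {
	fmt.Println(validateStep("a-target", `consensus_output: $(a-consensus.outputs)`)) // <nil>
	fmt.Println(validateStep("an-action", `hello: "world"`))                          // error
}
```

This is the scenario exercised by the "non-trigger step with no dependent refs" test case above: `an-action` consumes only a literal input, so it never acquires an edge in the graph and the spec is rejected at parse time.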