From a97d944ab5b5135f4db873c87035162ff45d995f Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Thu, 1 Aug 2024 11:37:30 +0200 Subject: [PATCH] Feature/agg oracle (#23) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * implementation completed, missing tests * WIP * WIP * WIP * sync refactor * decouple sync processors from EVM * Add CLI for aggOracle * WIP * wip * WIP * start reorg detector * fix docker * pass test with docker * add TODO * go mod tidy * Add PR review suggestions from Stefan-Ethernal * fix UTs * Add PR review suggestions from joanestebanr --------- Co-authored-by: Stefan Negovanović --- Dockerfile | 7 +- Makefile | 2 +- aggoracle/chaingersender/evm.go | 117 ++++ aggoracle/config.go | 25 + aggoracle/e2e_test.go | 214 +++++++ aggoracle/mock_ethtxmanager_test.go | 127 +++++ aggoracle/oracle.go | 110 ++++ cmd/main.go | 4 +- cmd/run.go | 104 ++++ common/common.go | 26 +- config/config.go | 9 + config/default.go | 45 ++ go.mod | 2 +- go.sum | 4 +- l1infotreesync/downloader.go | 49 ++ l1infotreesync/e2e_test.go | 82 +++ l1infotreesync/l1infotreesync.go | 124 +++++ l1infotreesync/mock_reorgdetector_test.go | 73 +++ l1infotreesync/processor.go | 523 ++++++++++++++++++ l1infotreesync/processor_test.go | 3 + localbridgesync/downloader.go | 201 +------ localbridgesync/driver.go | 140 ----- localbridgesync/e2e_test.go | 3 + localbridgesync/localbridgesync.go | 67 ++- localbridgesync/processor.go | 59 +- localbridgesync/processor_test.go | 169 +++--- localbridgesync/types.go | 42 -- reorgdetector/mock_eth_client.go | 19 +- reorgdetector/reorgdetector.go | 4 + sync/common.go | 21 + sync/driver.go | 14 + sync/evmdownloader.go | 208 +++++++ .../evmdownloader_test.go | 311 ++++------- sync/evmdriver.go | 151 +++++ .../driver_test.go => sync/evmdriver_test.go | 101 ++-- sync/evmtypes.go | 15 + .../mock_downloader_test.go | 41 +- {localbridgesync => sync}/mock_l2_test.go | 2 +- .../mock_processor_test.go | 26 +- .../mock_reorgdetector_test.go | 16 +- test/Makefile | 25 +- 41 files changed, 2503 insertions(+), 782 deletions(-) create mode 100644 aggoracle/chaingersender/evm.go create mode 100644 aggoracle/config.go create mode 100644 aggoracle/e2e_test.go create mode 100644 aggoracle/mock_ethtxmanager_test.go create mode 100644 aggoracle/oracle.go create mode 100644 l1infotreesync/downloader.go create mode 100644 l1infotreesync/e2e_test.go create mode 100644 l1infotreesync/l1infotreesync.go create mode 100644 l1infotreesync/mock_reorgdetector_test.go create mode 100644 l1infotreesync/processor.go create mode 100644 l1infotreesync/processor_test.go delete mode 100644 localbridgesync/driver.go create mode 100644 localbridgesync/e2e_test.go delete mode 100644 localbridgesync/types.go create mode 100644 sync/common.go create mode 100644 sync/driver.go create mode 100644 sync/evmdownloader.go rename localbridgesync/downloader_test.go => sync/evmdownloader_test.go (55%) create mode 100644 sync/evmdriver.go rename localbridgesync/driver_test.go => sync/evmdriver_test.go (67%) create mode 100644 sync/evmtypes.go rename {localbridgesync => sync}/mock_downloader_test.go (54%) rename {localbridgesync => sync}/mock_l2_test.go (99%) rename {localbridgesync => sync}/mock_processor_test.go (70%) rename {localbridgesync => sync}/mock_reorgdetector_test.go (82%) diff --git a/Dockerfile b/Dockerfile index 0b509075..719868f3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,9 @@ # CONTAINER FOR BUILDING BINARY -FROM golang:1.22.4 AS build +FROM 
golang:1.22.5-alpine3.20 AS build WORKDIR $GOPATH/src/github.com/0xPolygon/cdk +RUN apk update && apk add --no-cache make build-base git # INSTALL DEPENDENCIES COPY go.mod go.sum /src/ RUN cd /src && go mod download @@ -12,9 +13,9 @@ COPY . /src RUN cd /src && make build # CONTAINER FOR RUNNING BINARY -FROM alpine:3.18.4 +FROM alpine:3.20 COPY --from=build /src/dist/cdk /app/cdk RUN mkdir /app/data && apk update && apk add postgresql15-client EXPOSE 8123 -CMD ["/bin/sh", "-c", "/app/cdk run"] +CMD ["/bin/sh", "-c", "/app/cdk run"] \ No newline at end of file diff --git a/Makefile b/Makefile index d092a0f3..e081dcb7 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ else endif GOBASE := $(shell pwd) GOBIN := $(GOBASE)/dist -GOENVVARS := GOBIN=$(GOBIN) CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) +GOENVVARS := GOBIN=$(GOBIN) CGO_ENABLED=1 GOOS=linux GOARCH=$(ARCH) GOBINARY := cdk GOCMD := $(GOBASE)/cmd diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go new file mode 100644 index 00000000..859f4b8b --- /dev/null +++ b/aggoracle/chaingersender/evm.go @@ -0,0 +1,117 @@ +package chaingersender + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitroot" + cfgTypes "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +type EthClienter interface { + ethereum.LogFilterer + ethereum.BlockNumberReader + ethereum.ChainReader + bind.ContractBackend +} + +type EthTxManager interface { + Remove(ctx context.Context, id common.Hash) error + ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error) + Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) + Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) +} + +type EVMChainGERSender struct { + gerContract *pessimisticglobalexitroot.Pessimisticglobalexitroot + gerAddr common.Address + sender common.Address + client EthClienter + ethTxMan EthTxManager + gasOffset uint64 + waitPeriodMonitorTx time.Duration +} + +type EVMConfig struct { + GlobalExitRootL2Addr common.Address `mapstructure:"GlobalExitRootL2"` + URLRPCL2 string `mapstructure:"URLRPCL2"` + ChainIDL2 uint64 `mapstructure:"ChainIDL2"` + GasOffset uint64 `mapstructure:"GasOffset"` + WaitPeriodMonitorTx cfgTypes.Duration `mapstructure:"WaitPeriodMonitorTx"` + SenderAddr common.Address `mapstructure:"SenderAddr"` + EthTxManager ethtxmanager.Config `mapstructure:"EthTxManager"` +} + +func NewEVMChainGERSender( + l2GlobalExitRoot, sender common.Address, + l2Client EthClienter, + ethTxMan EthTxManager, + gasOffset uint64, + waitPeriodMonitorTx time.Duration, +) (*EVMChainGERSender, error) { + gerContract, err := pessimisticglobalexitroot.NewPessimisticglobalexitroot(l2GlobalExitRoot, l2Client) + if err != nil { + return nil, err + } + return &EVMChainGERSender{ + gerContract: gerContract, + gerAddr: l2GlobalExitRoot, + sender: sender, + client: l2Client, + ethTxMan: ethTxMan, + gasOffset: gasOffset, + waitPeriodMonitorTx: waitPeriodMonitorTx, + }, nil +} + +func (c *EVMChainGERSender) IsGERAlreadyInjected(ger common.Hash) (bool, 
error) { + timestamp, err := c.gerContract.GlobalExitRootMap(&bind.CallOpts{Pending: false}, ger) + if err != nil { + return false, fmt.Errorf("error calling gerContract.GlobalExitRootMap: %w", err) + } + return timestamp.Cmp(big.NewInt(0)) != 0, nil +} + +func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger common.Hash) error { + abi, err := pessimisticglobalexitroot.PessimisticglobalexitrootMetaData.GetAbi() + if err != nil { + return err + } + data, err := abi.Pack("updateGlobalExitRoot", ger) + if err != nil { + return err + } + id, err := c.ethTxMan.Add(ctx, &c.gerAddr, nil, big.NewInt(0), data, c.gasOffset, nil) + if err != nil { + return err + } + for { + time.Sleep(c.waitPeriodMonitorTx) + log.Debugf("waiting for tx %s to be mined", id.Hex()) + res, err := c.ethTxMan.Result(ctx, id) + if err != nil { + log.Error("error calling ethTxMan.Result: ", err) + } + switch res.Status { + case ethtxmanager.MonitoredTxStatusCreated, + ethtxmanager.MonitoredTxStatusSent: + continue + case ethtxmanager.MonitoredTxStatusFailed: + return fmt.Errorf("tx %s failed", res.ID) + case ethtxmanager.MonitoredTxStatusMined, + ethtxmanager.MonitoredTxStatusSafe, + ethtxmanager.MonitoredTxStatusFinalized: + return nil + default: + log.Error("unexpected tx status: ", res.Status) + } + } +} diff --git a/aggoracle/config.go b/aggoracle/config.go new file mode 100644 index 00000000..2dd39403 --- /dev/null +++ b/aggoracle/config.go @@ -0,0 +1,25 @@ +package aggoracle + +import ( + "github.com/0xPolygon/cdk/aggoracle/chaingersender" + "github.com/0xPolygon/cdk/config/types" +) + +type TargetChainType string + +const ( + EVMChain TargetChainType = "EVM" +) + +var ( + SupportedChainTypes = []TargetChainType{EVMChain} +) + +type Config struct { + TargetChainType TargetChainType `mapstructure:"TargetChainType"` + URLRPCL1 string `mapstructure:"URLRPCL1"` + // TODO: BlockFinality doesnt work as per the jsonschema + BlockFinality string `jsonschema:"enum=latest,enum=safe, enum=pending, enum=finalized" mapstructure:"BlockFinality"` + WaitPeriodNextGER types.Duration `mapstructure:"WaitPeriodNextGER"` + EVMSender chaingersender.EVMConfig `mapstructure:"EVMSender"` +} diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go new file mode 100644 index 00000000..ce081c35 --- /dev/null +++ b/aggoracle/e2e_test.go @@ -0,0 +1,214 @@ +package aggoracle_test + +import ( + "context" + "errors" + "fmt" + "math/big" + "strconv" + "testing" + "time" + + gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" + gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0" + "github.com/0xPolygon/cdk/aggoracle" + "github.com/0xPolygon/cdk/aggoracle/chaingersender" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/reorgdetector" + ethtxmanager "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" + mock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestEVM(t *testing.T) { + ctx := context.Background() + l1Client, syncer, gerL1Contract, authL1 := commonSetup(t) + sender := evmSetup(t) + oracle, err := 
aggoracle.New(sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond) + require.NoError(t, err) + go oracle.Start(ctx) + + runTest(t, gerL1Contract, sender, l1Client, authL1) +} + +func commonSetup(t *testing.T) ( + *simulated.Backend, + *l1infotreesync.L1InfoTreeSync, + *gerContractL1.Globalexitrootnopush0, + *bind.TransactOpts, +) { + // Config and spin up + ctx := context.Background() + // Simulated L1 + privateKeyL1, err := crypto.GenerateKey() + require.NoError(t, err) + authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) + require.NoError(t, err) + l1Client, gerL1Addr, gerL1Contract, err := newSimulatedL1(authL1) + require.NoError(t, err) + // Reorg detector + dbPathReorgDetector := t.TempDir() + reorg, err := reorgdetector.New(ctx, l1Client.Client(), dbPathReorgDetector) + require.NoError(t, err) + // Syncer + dbPathSyncer := t.TempDir() + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, 10, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) + require.NoError(t, err) + go syncer.Start(ctx) + + return l1Client, syncer, gerL1Contract, authL1 +} + +func evmSetup(t *testing.T) aggoracle.ChainSender { + privateKeyL2, err := crypto.GenerateKey() + require.NoError(t, err) + authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(1337)) + require.NoError(t, err) + l2Client, gerL2Addr, _, err := newSimulatedEVMAggSovereignChain(authL2) + require.NoError(t, err) + ethTxManMock := aggoracle.NewEthTxManagerMock(t) + // id, err := c.ethTxMan.Add(ctx, &c.gerAddr, nil, big.NewInt(0), tx.Data(), c.gasOffset, nil) + ethTxManMock.On("Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + ctx := context.Background() + nonce, err := l2Client.Client().PendingNonceAt(ctx, authL2.From) + if err != nil { + log.Error(err) + return + } + gas, err := l2Client.Client().EstimateGas(ctx, ethereum.CallMsg{ + From: authL2.From, + To: args.Get(1).(*common.Address), + Value: big.NewInt(0), + Data: args.Get(4).([]byte), + }) + if err != nil { + log.Error(err) + res, err := l2Client.Client().CallContract(ctx, ethereum.CallMsg{ + From: authL2.From, + To: args.Get(1).(*common.Address), + Value: big.NewInt(0), + Data: args.Get(4).([]byte), + }, nil) + log.Debugf("contract call: %s", res) + if err != nil { + log.Error(err) + } + return + } + price, err := l2Client.Client().SuggestGasPrice(ctx) + if err != nil { + log.Error(err) + } + tx := types.NewTx(&types.LegacyTx{ + To: args.Get(1).(*common.Address), + Nonce: nonce, + Value: big.NewInt(0), + Data: args.Get(4).([]byte), + Gas: gas, + GasPrice: price, + }) + tx.Gas() + signedTx, err := authL2.Signer(authL2.From, tx) + if err != nil { + log.Error(err) + return + } + err = l2Client.Client().SendTransaction(ctx, signedTx) + if err != nil { + log.Error(err) + return + } + l2Client.Commit() + }). + Return(common.Hash{}, nil) + // res, err := c.ethTxMan.Result(ctx, id) + ethTxManMock.On("Result", mock.Anything, mock.Anything). 
+ Return(ethtxmanager.MonitoredTxResult{Status: ethtxmanager.MonitoredTxStatusMined}, nil) + sender, err := chaingersender.NewEVMChainGERSender(gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) + require.NoError(t, err) + + return sender +} + +func newSimulatedL1(auth *bind.TransactOpts) ( + client *simulated.Backend, + gerAddr common.Address, + gerContract *gerContractL1.Globalexitrootnopush0, + err error, +) { + balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + address := auth.From + genesisAlloc := map[common.Address]types.Account{ + address: { + Balance: balance, + }, + } + blockGasLimit := uint64(999999999999999999) //nolint:gomnd + client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + + gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(auth, client.Client(), auth.From, auth.From) + + client.Commit() + return +} + +func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( + client *simulated.Backend, + gerAddr common.Address, + gerContract *gerContractEVMChain.Pessimisticglobalexitrootnopush0, + err error, +) { + balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + address := auth.From + genesisAlloc := map[common.Address]types.Account{ + address: { + Balance: balance, + }, + } + blockGasLimit := uint64(999999999999999999) //nolint:gomnd + client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + + gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0(auth, client.Client(), auth.From) + if err != nil { + return + } + client.Commit() + + _GLOBAL_EXIT_ROOT_SETTER_ROLE := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") + _, err = gerContract.GrantRole(auth, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From) + client.Commit() + hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From) + if !hasRole { + err = errors.New("failed to set role") + } + return +} + +func runTest( + t *testing.T, + gerL1Contract *gerContractL1.Globalexitrootnopush0, + sender aggoracle.ChainSender, + l1Client *simulated.Backend, + authL1 *bind.TransactOpts, +) { + for i := 0; i < 10; i++ { + _, err := gerL1Contract.UpdateExitRoot(authL1, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + l1Client.Commit() + time.Sleep(time.Millisecond * 50) + expectedGER, err := gerL1Contract.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + isInjected, err := sender.IsGERAlreadyInjected(expectedGER) + require.NoError(t, err) + require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:]))) + } +} diff --git a/aggoracle/mock_ethtxmanager_test.go b/aggoracle/mock_ethtxmanager_test.go new file mode 100644 index 00000000..37bcbeda --- /dev/null +++ b/aggoracle/mock_ethtxmanager_test.go @@ -0,0 +1,127 @@ +// Code generated by mockery v2.22.1. DO NOT EDIT. 
+ +package aggoracle + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + context "context" + + ethtxmanager "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthTxManagerMock is an autogenerated mock type for the EthTxManager type +type EthTxManagerMock struct { + mock.Mock +} + +// Add provides a mock function with given fields: ctx, to, forcedNonce, value, data, gasOffset, sidecar +func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { + ret := _m.Called(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { + return rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { + r0 = rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { + r1 = rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Remove provides a mock function with given fields: ctx, id +func (_m *EthTxManagerMock) Remove(ctx context.Context, id common.Hash) error { + ret := _m.Called(ctx, id) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Result provides a mock function with given fields: ctx, id +func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) { + ret := _m.Called(ctx, id) + + var r0 ethtxmanager.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (ethtxmanager.MonitoredTxResult, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ethtxmanager.MonitoredTxResult); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(ethtxmanager.MonitoredTxResult) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResultsByStatus provides a mock function with given fields: ctx, statuses +func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error) { + ret := _m.Called(ctx, statuses) + + var r0 []ethtxmanager.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error)); ok { + return rf(ctx, statuses) + } + if rf, ok := ret.Get(0).(func(context.Context, []ethtxmanager.MonitoredTxStatus) []ethtxmanager.MonitoredTxResult); ok { + r0 = rf(ctx, statuses) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ethtxmanager.MonitoredTxResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ethtxmanager.MonitoredTxStatus) error); ok { + r1 = rf(ctx, statuses) + } else { + 
r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewEthTxManagerMock interface { + mock.TestingT + Cleanup(func()) +} + +// NewEthTxManagerMock creates a new instance of EthTxManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewEthTxManagerMock(t mockConstructorTestingTNewEthTxManagerMock) *EthTxManagerMock { + mock := &EthTxManagerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go new file mode 100644 index 00000000..f22ee1f0 --- /dev/null +++ b/aggoracle/oracle.go @@ -0,0 +1,110 @@ +package aggoracle + +import ( + "context" + "math/big" + "time" + + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" +) + +type L1InfoTreer interface { + GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) +} + +type ChainSender interface { + IsGERAlreadyInjected(ger common.Hash) (bool, error) + UpdateGERWaitUntilMined(ctx context.Context, ger common.Hash) error +} + +type AggOracle struct { + ticker *time.Ticker + l1Client ethereum.ChainReader + l1Info L1InfoTreer + chainSender ChainSender + blockFinality *big.Int +} + +func New( + chainSender ChainSender, + l1Client ethereum.ChainReader, + l1InfoTreeSyncer L1InfoTreer, + blockFinalityType etherman.BlockNumberFinality, + waitPeriodNextGER time.Duration, +) (*AggOracle, error) { + ticker := time.NewTicker(waitPeriodNextGER) + finality, err := blockFinalityType.ToBlockNum() + if err != nil { + return nil, err + } + return &AggOracle{ + ticker: ticker, + l1Client: l1Client, + l1Info: l1InfoTreeSyncer, + chainSender: chainSender, + blockFinality: finality, + }, nil +} + +func (a *AggOracle) Start(ctx context.Context) { + var ( + blockNumToFetch uint64 + gerToInject common.Hash + err error + ) + for { + select { + case <-a.ticker.C: + blockNumToFetch, gerToInject, err = a.getLastFinalisedGER(ctx, blockNumToFetch) + if err != nil { + if err == l1infotreesync.ErrBlockNotProcessed { + log.Debugf("syncer is not ready for the block %d", blockNumToFetch) + } else if err == l1infotreesync.ErrNotFound { + blockNumToFetch = 0 + log.Debugf("syncer has not found any GER until block %d", blockNumToFetch) + } else { + log.Error("error calling getLastFinalisedGER: ", err) + } + continue + } + if alreadyInjected, err := a.chainSender.IsGERAlreadyInjected(gerToInject); err != nil { + log.Error("error calling isGERAlreadyInjected: ", err) + continue + } else if alreadyInjected { + log.Debugf("GER %s already injected", gerToInject.Hex()) + continue + } + log.Infof("injecting new GER: %s", gerToInject.Hex()) + if err := a.chainSender.UpdateGERWaitUntilMined(ctx, gerToInject); err != nil { + log.Errorf("error calling updateGERWaitUntilMined, when trying to inject GER %s: %v", gerToInject.Hex(), err) + continue + } + log.Infof("GER %s injected", gerToInject.Hex()) + case <-ctx.Done(): + return + } + } +} + +// getLastFinalisedGER tries to return a finalised GER: +// If blockNumToFetch != 0: it will try to fetch it until the given block +// Else it will ask the L1 client for the latest finalised block and use that +// If it fails to get the GER from the syncer, it will retunr the block number that used to query +func (a *AggOracle) getLastFinalisedGER(ctx context.Context, blockNumToFetch uint64) 
(uint64, common.Hash, error) { + if blockNumToFetch == 0 { + header, err := a.l1Client.HeaderByNumber(ctx, a.blockFinality) + if err != nil { + return 0, common.Hash{}, err + } + blockNumToFetch = header.Number.Uint64() + } + info, err := a.l1Info.GetLatestInfoUntilBlock(ctx, blockNumToFetch) + if err != nil { + return blockNumToFetch, common.Hash{}, err + } + return 0, info.GlobalExitRoot, nil +} diff --git a/cmd/main.go b/cmd/main.go index ce319729..a13f43e1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -16,6 +16,8 @@ const ( SEQUENCE_SENDER = "sequence-sender" // AGGREGATOR name to identify the aggregator component AGGREGATOR = "aggregator" + // AGGORACLE name to identify the aggoracle component + AGGORACLE = "aggoracle" ) const ( @@ -47,7 +49,7 @@ var ( Aliases: []string{"co"}, Usage: "List of components to run", Required: false, - Value: cli.NewStringSlice(SEQUENCE_SENDER, AGGREGATOR), + Value: cli.NewStringSlice(SEQUENCE_SENDER, AGGREGATOR, AGGORACLE), } ) diff --git a/cmd/run.go b/cmd/run.go index 15d24c29..f7d36b74 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -10,6 +10,8 @@ import ( zkevm "github.com/0xPolygon/cdk" dataCommitteeClient "github.com/0xPolygon/cdk-data-availability/client" + "github.com/0xPolygon/cdk/aggoracle" + "github.com/0xPolygon/cdk/aggoracle/chaingersender" "github.com/0xPolygon/cdk/aggregator" "github.com/0xPolygon/cdk/aggregator/db" "github.com/0xPolygon/cdk/config" @@ -18,7 +20,9 @@ import ( "github.com/0xPolygon/cdk/etherman" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" "github.com/0xPolygon/cdk/etherman/contracts" + "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/sequencesender" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state" @@ -26,6 +30,9 @@ import ( "github.com/0xPolygon/cdk/translator" ethtxman "github.com/0xPolygonHermez/zkevm-ethtx-manager/etherman" "github.com/0xPolygonHermez/zkevm-ethtx-manager/etherman/etherscan" + "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + ethtxlog "github.com/0xPolygonHermez/zkevm-ethtx-manager/log" + "github.com/ethereum/go-ethereum/ethclient" "github.com/jackc/pgx/v4/pgxpool" "github.com/urfave/cli/v2" ) @@ -63,6 +70,17 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } }() + case AGGORACLE: + l1Client, err := ethclient.Dial(c.AggOracle.URLRPCL1) + if err != nil { + log.Fatal(err) + } + reorgDetector := newReorgDetectorL1(cliCtx.Context, *c, l1Client) + go reorgDetector.Start(cliCtx.Context) + syncer := newL1InfoTreeSyncer(cliCtx.Context, *c, l1Client, reorgDetector) + go syncer.Start(cliCtx.Context) + aggOracle := createAggoracle(*c, l1Client, syncer) + go aggOracle.Start(cliCtx.Context) } } @@ -180,6 +198,55 @@ func newTxBuilder(cfg config.Config, ethman *etherman.Client) (txbuilder.TxBuild return txBuilder, err } +func createAggoracle(cfg config.Config, l1Client *ethclient.Client, syncer *l1infotreesync.L1InfoTreeSync) *aggoracle.AggOracle { + var sender aggoracle.ChainSender + switch cfg.AggOracle.TargetChainType { + case aggoracle.EVMChain: + cfg.AggOracle.EVMSender.EthTxManager.Log = ethtxlog.Config{ + Environment: ethtxlog.LogEnvironment(cfg.Log.Environment), + Level: cfg.Log.Level, + Outputs: cfg.Log.Outputs, + } + ethTxManager, err := ethtxmanager.New(cfg.AggOracle.EVMSender.EthTxManager) + if err != nil { + log.Fatal(err) + } + go ethTxManager.Start() + l2CLient, err := ethclient.Dial(cfg.AggOracle.EVMSender.URLRPCL2) + if err != nil { + 
log.Fatal(err) + } + sender, err = chaingersender.NewEVMChainGERSender( + cfg.AggOracle.EVMSender.GlobalExitRootL2Addr, + cfg.AggOracle.EVMSender.SenderAddr, + l2CLient, + ethTxManager, + cfg.AggOracle.EVMSender.GasOffset, + cfg.AggOracle.EVMSender.WaitPeriodMonitorTx.Duration, + ) + if err != nil { + log.Fatal(err) + } + default: + log.Fatalf( + "Unsupported chaintype %s. Supported values: %v", + cfg.AggOracle.TargetChainType, aggoracle.SupportedChainTypes, + ) + } + aggOracle, err := aggoracle.New( + sender, + l1Client, + syncer, + etherman.BlockNumberFinality(cfg.AggOracle.BlockFinality), + cfg.AggOracle.WaitPeriodNextGER.Duration, + ) + if err != nil { + log.Fatal(err) + } + + return aggOracle +} + func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavailability.DataAvailability, error) { if !c.Common.IsValidiumMode { return nil, nil @@ -292,3 +359,40 @@ func newState(c *config.Config, l2ChainID uint64, sqlDB *pgxpool.Pool) *state.St st := state.NewState(stateCfg, stateDb) return st } + +func newReorgDetectorL1( + ctx context.Context, + cfg config.Config, + l1Client *ethclient.Client, +) *reorgdetector.ReorgDetector { + rd, err := reorgdetector.New(ctx, l1Client, cfg.ReorgDetectorL1.DBPath) + if err != nil { + log.Fatal(err) + } + return rd +} + +func newL1InfoTreeSyncer( + ctx context.Context, + cfg config.Config, + l1Client *ethclient.Client, + reorgDetector *reorgdetector.ReorgDetector, +) *l1infotreesync.L1InfoTreeSync { + syncer, err := l1infotreesync.New( + ctx, + cfg.L1InfoTreeSync.DBPath, + cfg.L1InfoTreeSync.GlobalExitRootAddr, + cfg.L1InfoTreeSync.SyncBlockChunkSize, + etherman.BlockNumberFinality(cfg.L1InfoTreeSync.BlockFinality), + reorgDetector, + l1Client, + cfg.L1InfoTreeSync.WaitForNewBlocksPeriod.Duration, + cfg.L1InfoTreeSync.InitialBlock, + cfg.L1InfoTreeSync.RetryAfterErrorPeriod.Duration, + cfg.L1InfoTreeSync.MaxRetryAttemptsAfterError, + ) + if err != nil { + log.Fatal(err) + } + return syncer +} diff --git a/common/common.go b/common/common.go index 4e93d5f8..ebbafd69 100644 --- a/common/common.go +++ b/common/common.go @@ -9,17 +9,29 @@ import ( "github.com/iden3/go-iden3-crypto/keccak256" ) -// BlockNum2Bytes converts a block number to a byte slice -func BlockNum2Bytes(blockNum uint64) []byte { - key := make([]byte, 8) - binary.LittleEndian.PutUint64(key, blockNum) +// Uint64ToBytes converts a uint64 to a byte slice +func Uint64ToBytes(num uint64) []byte { + bytes := make([]byte, 8) + binary.LittleEndian.PutUint64(bytes, num) + return bytes +} + +// BytesToUint64 converts a byte slice to a uint64 +func BytesToUint64(bytes []byte) uint64 { + return binary.LittleEndian.Uint64(bytes) +} + +// Uint32To2Bytes converts a uint32 to a byte slice +func Uint32ToBytes(num uint32) []byte { + key := make([]byte, 4) + binary.LittleEndian.PutUint32(key, num) return key } -// Bytes2BlockNum converts a byte slice to a block number -func Bytes2BlockNum(key []byte) uint64 { - return binary.LittleEndian.Uint64(key) +// BytesToUint32 converts a byte slice to a uint32 +func BytesToUint32(bytes []byte) uint32 { + return binary.LittleEndian.Uint32(bytes) } func CalculateAccInputHash( diff --git a/config/config.go b/config/config.go index 02ede0fb..ed452417 100644 --- a/config/config.go +++ b/config/config.go @@ -5,10 +5,13 @@ import ( "path/filepath" "strings" + "github.com/0xPolygon/cdk/aggoracle" "github.com/0xPolygon/cdk/aggregator" "github.com/0xPolygon/cdk/common" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" + 
"github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/sequencesender" "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" "github.com/mitchellh/mapstructure" @@ -70,6 +73,12 @@ type Config struct { // Common Config that affects all the services Common common.Config + // Configuration of the reorg detector service to be used for the L1 + ReorgDetectorL1 reorgdetector.Config + // Configuration of the aggOracle service + AggOracle aggoracle.Config + // Configuration of the L1 Info Treee Sync service + L1InfoTreeSync l1infotreesync.Config } // Default parses the default configuration values. diff --git a/config/default.go b/config/default.go index 7b12e6f3..85dbd1cd 100644 --- a/config/default.go +++ b/config/default.go @@ -118,4 +118,49 @@ SequencerPrivateKey = {} [Aggregator.Synchronizer.Etherman] [Aggregator.Synchronizer.Etherman.Validium] Enabled = false + +[ReorgDetectorL1] +DBPath = "/tmp/reorgdetector" + +[L1InfoTreeSync] +DBPath = "/tmp/L1InfoTreeSync" +GlobalExitRootAddr="0x8464135c8F25Da09e49BC8782676a84730C318bC" +SyncBlockChunkSize=10 +BlockFinality="latest" +URLRPCL1="http://test-aggoracle-l1:8545" +WaitForNewBlocksPeriod="100ms" +InitialBlock=0 + +[AggOracle] +TargetChainType="EVM" +URLRPCL1="http://test-aggoracle-l1:8545" +BlockFinality="latest" +WaitPeriodNextGER="100ms" + [EVMSender] + GlobalExitRootL2="0x8464135c8F25Da09e49BC8782676a84730C318bC" + URLRPCL2="http://test-aggoracle-l2:8545" + ChainIDL2=1337 + GasOffset=0 + WaitPeriodMonitorTx="100ms" + SenderAddr="0x70997970c51812dc3a010c7d01b50e0d17dc79c8" + [SequenceSender.EthTxManager] + FrequencyToMonitorTxs = "1s" + WaitTxToBeMined = "2s" + GetReceiptMaxTime = "250ms" + GetReceiptWaitInterval = "1s" + PrivateKeys = [ + {Path = "/app/keystore/aggoracle.keystore", Password = "testonly"}, + ] + ForcedGas = 0 + GasPriceMarginFactor = 1 + MaxGasPriceLimit = 0 + PersistenceFilename = "/tmp/ethtxmanager.json" + ReadPendingL1Txs = false + SafeStatusL1NumberOfBlocks = 5 + FinalizedStatusL1NumberOfBlocks = 10 + [SequenceSender.EthTxManager.Etherman] + URL = "http://test-aggoracle-l2" + MultiGasProvider = false + L1ChainID = 1337 + HTTPHeaders = [] ` diff --git a/go.mod b/go.mod index e2f17883..5b2fe026 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240726125827-301fa4c59245 github.com/0xPolygon/cdk-data-availability v0.0.8 github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d - github.com/0xPolygonHermez/zkevm-data-streamer v0.2.2 + github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-RC4 github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.9 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.6.3-0.20240712085301-0310358abb59 github.com/ethereum/go-ethereum v1.14.5 diff --git a/go.sum b/go.sum index 3a9df8da..a1c78e52 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/0xPolygon/cdk-data-availability v0.0.8 h1:bMmOYZ7Ei683y80ric3KzMPXtRG github.com/0xPolygon/cdk-data-availability v0.0.8/go.mod h1:3XkZ0zn0GsvAT01MPQMmukF534CVSFmtrcoK3F/BK6Q= github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d h1:sxh6hZ2jF/sxxj2jd5o1vuNNCZjYmn4aRG9SRlVaEFs= github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.2 h1:XRMTk+W6vtJVGVjuEznfWyNt7HkRkkuSmlN5Y6p60Sc= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.2/go.mod 
h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-RC4 h1:+4K+xSzv0ImbK30B/T9FauNTrTFUmWcNKYhIgwsE4C4= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-RC4/go.mod h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.9 h1:vrAezzwTNke6NroDAltGh1k2AJ6ibmZPBsG0bCltbRc= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.9/go.mod h1:pRqfLQVM3nbzdhy3buqjAgcVyNDKAXOHqTSgkwiKpic= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.6.3-0.20240712085301-0310358abb59 h1:Qwh92vFEXnpmDggQaZA3648viEQfLdMnAw/WFSY+2i8= diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go new file mode 100644 index 00000000..255395dd --- /dev/null +++ b/l1infotreesync/downloader.go @@ -0,0 +1,49 @@ +package l1infotreesync + +import ( + "fmt" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonzkevmglobalexitrootv2" + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" +) + +var ( + updateL1InfoTreeSignature = crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)")) +) + +type EthClienter interface { + ethereum.LogFilterer + ethereum.BlockNumberReader + ethereum.ChainReader + bind.ContractBackend +} + +func buildAppender(client EthClienter, globalExitRoot common.Address) (sync.LogAppenderMap, error) { + contract, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client) + if err != nil { + return nil, err + } + appender := make(sync.LogAppenderMap) + appender[updateL1InfoTreeSignature] = func(b *sync.EVMBlock, l types.Log) error { + l1InfoTreeUpdate, err := contract.ParseUpdateL1InfoTree(l) + if err != nil { + return fmt.Errorf( + "error parsing log %+v using contract.ParseUpdateL1InfoTree: %v", + l, err, + ) + } + b.Events = append(b.Events, Event{ + MainnetExitRoot: l1InfoTreeUpdate.MainnetExitRoot, + RollupExitRoot: l1InfoTreeUpdate.RollupExitRoot, + ParentHash: b.ParentHash, + Timestamp: b.Timestamp, + }) + return nil + } + return appender, nil +} diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go new file mode 100644 index 00000000..82bef733 --- /dev/null +++ b/l1infotreesync/e2e_test.go @@ -0,0 +1,82 @@ +package l1infotreesync + +import ( + "context" + "fmt" + "math/big" + "strconv" + "testing" + "time" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/reorgdetector" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func newSimulatedClient(auth *bind.TransactOpts) ( + client *simulated.Backend, + gerAddr common.Address, + gerContract *globalexitrootnopush0.Globalexitrootnopush0, + err error, +) { + balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd + address := auth.From + genesisAlloc := map[common.Address]types.Account{ + address: { + Balance: balance, + }, + } + blockGasLimit := uint64(999999999999999999) //nolint:gomnd + client = simulated.NewBackend(genesisAlloc, 
simulated.WithBlockGasLimit(blockGasLimit)) + + gerAddr, _, gerContract, err = globalexitrootnopush0.DeployGlobalexitrootnopush0(auth, client.Client(), auth.From, auth.From) + + client.Commit() + return +} + +func TestE2E(t *testing.T) { + ctx := context.Background() + dbPath := t.TempDir() + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) + require.NoError(t, err) + rdm := NewReorgDetectorMock(t) + rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{}, nil) + rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + client, gerAddr, gerSc, err := newSimulatedClient(auth) + require.NoError(t, err) + syncer, err := New(ctx, dbPath, gerAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) + require.NoError(t, err) + go syncer.Start(ctx) + + // Update GER 10 times + // TODO: test syncer restart + for i := 0; i < 10; i++ { + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + client.Commit() + // Let the processor catch up + time.Sleep(time.Millisecond * 10) + + expectedRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + info2, err := syncer.GetInfoByIndex(ctx, uint32(i)) + require.NoError(t, err, fmt.Sprintf("index: %d, root: %s", i, common.Bytes2Hex(expectedRoot[:]))) + require.Equal(t, common.Hash(expectedGER), info2.GlobalExitRoot, fmt.Sprintf("index: %d", i)) + info, err := syncer.GetInfoByRoot(ctx, expectedRoot) + require.NoError(t, err, fmt.Sprintf("index: %d, expected root: %s, actual root: %s", i, common.Bytes2Hex(expectedRoot[:]), info2.L1InfoTreeRoot)) + require.Equal(t, common.Hash(expectedRoot), info.L1InfoTreeRoot) + require.Equal(t, info, info2) + } +} diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go new file mode 100644 index 00000000..7b3be753 --- /dev/null +++ b/l1infotreesync/l1infotreesync.go @@ -0,0 +1,124 @@ +package l1infotreesync + +import ( + "context" + "time" + + "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" +) + +const ( + reorgDetectorID = "l1infotreesync" + downloadBufferSize = 1000 +) + +type Config struct { + DBPath string `mapstructure:"DBPath"` + GlobalExitRootAddr common.Address `mapstructure:"GlobalExitRootAddr"` + SyncBlockChunkSize uint64 `mapstructure:"SyncBlockChunkSize"` + // TODO: BlockFinality doesnt work as per the jsonschema + BlockFinality string `jsonschema:"enum=latest,enum=safe, enum=pending, enum=finalized" mapstructure:"BlockFinality"` + URLRPCL1 string `mapstructure:"URLRPCL1"` + WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` + InitialBlock uint64 `mapstructure:"InitialBlock"` + RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"` + MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` +} + +type L1InfoTreeSync struct { + processor *processor + driver *sync.EVMDriver +} + +func New( + ctx context.Context, + dbPath string, + globalExitRoot common.Address, + syncBlockChunkSize uint64, + blockFinalityType etherman.BlockNumberFinality, + rd sync.ReorgDetector, + l1Client EthClienter, + waitForNewBlocksPeriod time.Duration, + initialBlock uint64, 
+ retryAfterErrorPeriod time.Duration, + maxRetryAttemptsAfterError int, +) (*L1InfoTreeSync, error) { + processor, err := newProcessor(ctx, dbPath) + if err != nil { + return nil, err + } + // TODO: get the initialBlock from L1 to simplify config + lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) + if err != nil { + return nil, err + } + if initialBlock > 0 && lastProcessedBlock < initialBlock-1 { + err = processor.ProcessBlock(sync.Block{ + Num: initialBlock - 1, + }) + if err != nil { + return nil, err + } + } + rh := &sync.RetryHandler{ + RetryAfterErrorPeriod: retryAfterErrorPeriod, + MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, + } + + appender, err := buildAppender(l1Client, globalExitRoot) + if err != nil { + return nil, err + } + downloader, err := sync.NewEVMDownloader( + l1Client, + syncBlockChunkSize, + blockFinalityType, + waitForNewBlocksPeriod, + appender, + []common.Address{globalExitRoot}, + rh, + ) + if err != nil { + return nil, err + } + + driver, err := sync.NewEVMDriver(rd, processor, downloader, reorgDetectorID, downloadBufferSize, rh) + if err != nil { + return nil, err + } + return &L1InfoTreeSync{ + processor: processor, + driver: driver, + }, nil +} + +func (s *L1InfoTreeSync) Start(ctx context.Context) { + s.driver.Sync(ctx) +} + +func (s *L1InfoTreeSync) ComputeMerkleProofByIndex(ctx context.Context, index uint32) ([][32]byte, common.Hash, error) { + return s.processor.ComputeMerkleProofByIndex(ctx, index) +} + +func (s *L1InfoTreeSync) ComputeMerkleProofByRoot(ctx context.Context, root common.Hash) ([][32]byte, common.Hash, error) { + return s.processor.ComputeMerkleProofByRoot(ctx, root) +} + +func (s *L1InfoTreeSync) GetInfoByRoot(ctx context.Context, root common.Hash) (*L1InfoTreeLeaf, error) { + return s.processor.GetInfoByRoot(ctx, root) +} + +func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { + return s.processor.GetLatestInfoUntilBlock(ctx, blockNum) +} + +func (s *L1InfoTreeSync) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTreeLeaf, error) { + return s.processor.GetInfoByIndex(ctx, index) +} + +func (s *L1InfoTreeSync) GetInfoByHash(ctx context.Context, hash []byte) (*L1InfoTreeLeaf, error) { + return s.processor.GetInfoByHash(ctx, hash) +} diff --git a/l1infotreesync/mock_reorgdetector_test.go b/l1infotreesync/mock_reorgdetector_test.go new file mode 100644 index 00000000..22d174d4 --- /dev/null +++ b/l1infotreesync/mock_reorgdetector_test.go @@ -0,0 +1,73 @@ +// Code generated by mockery v2.22.1. DO NOT EDIT. 
+ +package l1infotreesync + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + reorgdetector "github.com/0xPolygon/cdk/reorgdetector" +) + +// ReorgDetectorMock is an autogenerated mock type for the ReorgDetector type +type ReorgDetectorMock struct { + mock.Mock +} + +// AddBlockToTrack provides a mock function with given fields: ctx, id, blockNum, blockHash +func (_m *ReorgDetectorMock) AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error { + ret := _m.Called(ctx, id, blockNum, blockHash) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, common.Hash) error); ok { + r0 = rf(ctx, id, blockNum, blockHash) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Subscribe provides a mock function with given fields: id +func (_m *ReorgDetectorMock) Subscribe(id string) (*reorgdetector.Subscription, error) { + ret := _m.Called(id) + + var r0 *reorgdetector.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(string) (*reorgdetector.Subscription, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) *reorgdetector.Subscription); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*reorgdetector.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewReorgDetectorMock interface { + mock.TestingT + Cleanup(func()) +} + +// NewReorgDetectorMock creates a new instance of ReorgDetectorMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewReorgDetectorMock(t mockConstructorTestingTNewReorgDetectorMock) *ReorgDetectorMock { + mock := &ReorgDetectorMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go new file mode 100644 index 00000000..35191062 --- /dev/null +++ b/l1infotreesync/processor.go @@ -0,0 +1,523 @@ +package l1infotreesync + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/l1infotree" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sync" + ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "golang.org/x/crypto/sha3" +) + +const ( + // rootTable stores the L1 info tree roots + // Key: root (common.Hash) + // Value: hash of the leaf that caused the update (common.Hash) + rootTable = "l1infotreesync-root" + // indexTable stores the L1 info tree indexes + // Key: index (uint32 converted to bytes) + // Value: hash of the leaf that caused the update (common.Hash) + indexTable = "l1infotreesync-index" + // infoTable stores the information of the tree (the leaves). Note that the value + // of rootTable and indexTable references the key of the infoTable + // Key: hash of the leaf that caused the update (common.Hash) + // Value: JSON of storeLeaf struct + infoTable = "l1infotreesync-info" + // blockTable stores the first and last index of L1 Info Tree that have been updated on + // a block. This is useful in case there are blocks with multiple updates and a reorg is needed. 
+	// Or for when querying by block number
+	// Key: block number (uint64 converted to bytes)
+	// Value: JSON of blockWithLeafs
+	blockTable = "l1infotreesync-block"
+	// lastBlockTable used to store the last block processed. This is needed to know the last processed block
+	// when it doesn't have events that make other tables get populated
+	// Key: it's always lastBlockKey
+	// Value: block number (uint64 converted to bytes)
+	lastBlockTable = "l1infotreesync-lastBlock"
+
+	treeHeight uint8 = 32
+)
+
+var (
+	ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet")
+	ErrNotFound          = errors.New("not found")
+	ErrNoBlock0          = errors.New("blockNum must be greater than 0")
+	lastBlockKey         = []byte("lb")
+)
+
+type processor struct {
+	db   kv.RwDB
+	tree *l1infotree.L1InfoTree
+}
+
+type Event struct {
+	MainnetExitRoot ethCommon.Hash
+	RollupExitRoot  ethCommon.Hash
+	ParentHash      ethCommon.Hash
+	Timestamp       uint64
+}
+
+type L1InfoTreeLeaf struct {
+	L1InfoTreeRoot    ethCommon.Hash
+	L1InfoTreeIndex   uint32
+	PreviousBlockHash ethCommon.Hash
+	BlockNumber       uint64
+	Timestamp         uint64
+	MainnetExitRoot   ethCommon.Hash
+	RollupExitRoot    ethCommon.Hash
+	GlobalExitRoot    ethCommon.Hash
+}
+
+type storeLeaf struct {
+	MainnetExitRoot ethCommon.Hash
+	RollupExitRoot  ethCommon.Hash
+	ParentHash      ethCommon.Hash
+	InfoRoot        ethCommon.Hash
+	Index           uint32
+	Timestamp       uint64
+	BlockNumber     uint64
+}
+
+type blockWithLeafs struct {
+	// inclusive
+	FirstIndex uint32
+	// not inclusive
+	LastIndex uint32
+}
+
+func (l *storeLeaf) GlobalExitRoot() ethCommon.Hash {
+	var gerBytes [32]byte
+	hasher := sha3.NewLegacyKeccak256()
+	hasher.Write(l.MainnetExitRoot[:])
+	hasher.Write(l.RollupExitRoot[:])
+	copy(gerBytes[:], hasher.Sum(nil))
+	return gerBytes
+}
+
+func tableCfgFunc(defaultBuckets kv.TableCfg) kv.TableCfg {
+	return kv.TableCfg{
+		rootTable:      {},
+		indexTable:     {},
+		infoTable:      {},
+		blockTable:     {},
+		lastBlockTable: {},
+	}
+}
+
+func newProcessor(ctx context.Context, dbPath string) (*processor, error) {
+	db, err := mdbx.NewMDBX(nil).
+		Path(dbPath).
+		WithTableCfg(tableCfgFunc).
+		Open()
+	if err != nil {
+		return nil, err
+	}
+	p := &processor{
+		db: db,
+	}
+
+	tx, err := p.db.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+	leaves, err := p.getAllLeavesHashed(tx)
+	if err != nil {
+		return nil, err
+	}
+	tree, err := l1infotree.NewL1InfoTree(treeHeight, leaves)
+	if err != nil {
+		return nil, err
+	}
+	p.tree = tree
+	return p, nil
+}
+
+func (p *processor) getAllLeavesHashed(tx kv.Tx) ([][32]byte, error) {
+	// TODO: same comment about refactor that appears at ComputeMerkleProofByIndex
+	index, err := p.getLastIndex(tx)
+	if err == ErrNotFound || index == 0 {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return p.getHashedLeaves(tx, index)
+}
+
+func (p *processor) ComputeMerkleProofByIndex(ctx context.Context, index uint32) ([][32]byte, ethCommon.Hash, error) {
+	// TODO: refactor the tree to store the nodes so it's not needed to load all the leaves and compute the tree
+	// every time this function is called. 
Since it's not a sparse MT, an alternative could be to store the proofs + // as part of the info + tx, err := p.db.BeginRo(ctx) + if err != nil { + return nil, ethCommon.Hash{}, err + } + defer tx.Rollback() + + leaves, err := p.getHashedLeaves(tx, index) + if err != nil { + return nil, ethCommon.Hash{}, err + } + return p.tree.ComputeMerkleProof(index, leaves) +} + +func (p *processor) getHashedLeaves(tx kv.Tx, untilIndex uint32) ([][32]byte, error) { + leaves := [][32]byte{} + for i := uint32(0); i <= untilIndex; i++ { + info, err := p.getInfoByIndexWithTx(tx, i) + if err != nil { + return nil, err + } + h := l1infotree.HashLeafData(info.GlobalExitRoot, info.PreviousBlockHash, info.Timestamp) + leaves = append(leaves, h) + } + return leaves, nil +} + +func (p *processor) ComputeMerkleProofByRoot(ctx context.Context, root ethCommon.Hash) ([][32]byte, ethCommon.Hash, error) { + info, err := p.GetInfoByRoot(ctx, root) + if err != nil { + return nil, ethCommon.Hash{}, err + } + return p.ComputeMerkleProofByIndex(ctx, info.L1InfoTreeIndex) +} + +func (p *processor) GetInfoByRoot(ctx context.Context, root ethCommon.Hash) (*L1InfoTreeLeaf, error) { + tx, err := p.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + hash, err := tx.GetOne(rootTable, root[:]) + if err != nil { + return nil, err + } + if hash == nil { + return nil, ErrNotFound + } + return p.getInfoByHashWithTx(tx, hash) +} + +// GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. +// If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned +func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { + if blockNum == 0 { + return nil, ErrNoBlock0 + } + tx, err := p.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + lpb, err := p.getLastProcessedBlockWithTx(tx) + if err != nil { + return nil, err + } + if lpb < blockNum { + return nil, ErrBlockNotProcessed + } + iter, err := tx.RangeDescend(blockTable, common.Uint64ToBytes(blockNum), common.Uint64ToBytes(0), 1) + if err != nil { + return nil, fmt.Errorf( + "error calling RangeDescend(blockTable, %d, 0, 1): %w", blockNum, err, + ) + } + k, v, err := iter.Next() + if err != nil { + return nil, err + } + if k == nil { + return nil, ErrNotFound + } + blk := blockWithLeafs{} + if err := json.Unmarshal(v, &blk); err != nil { + return nil, err + } + hash, err := tx.GetOne(indexTable, common.Uint32ToBytes(blk.LastIndex-1)) + if err != nil { + return nil, err + } + return p.getInfoByHashWithTx(tx, hash) +} + +func (p *processor) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTreeLeaf, error) { + tx, err := p.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + return p.getInfoByIndexWithTx(tx, index) +} + +func (p *processor) getInfoByIndexWithTx(tx kv.Tx, index uint32) (*L1InfoTreeLeaf, error) { + hash, err := tx.GetOne(indexTable, common.Uint32ToBytes(index)) + if err != nil { + return nil, err + } + if hash == nil { + return nil, ErrNotFound + } + return p.getInfoByHashWithTx(tx, hash) +} + +func (p *processor) GetInfoByHash(ctx context.Context, hash []byte) (*L1InfoTreeLeaf, error) { + tx, err := p.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + return p.getInfoByHashWithTx(tx, hash) +} + +func (p *processor) getInfoByHashWithTx(tx kv.Tx, hash []byte) (*L1InfoTreeLeaf, error) { + infoBytes, err := tx.GetOne(infoTable, hash) + if err 
!= nil { + return nil, err + } + if infoBytes == nil { + return nil, ErrNotFound + } + var info storeLeaf + if err := json.Unmarshal(infoBytes, &info); err != nil { + return nil, err + } + + return &L1InfoTreeLeaf{ + L1InfoTreeRoot: info.InfoRoot, + L1InfoTreeIndex: info.Index, + PreviousBlockHash: info.ParentHash, + BlockNumber: info.BlockNumber, + Timestamp: info.Timestamp, + MainnetExitRoot: info.MainnetExitRoot, + RollupExitRoot: info.RollupExitRoot, + GlobalExitRoot: info.GlobalExitRoot(), + }, nil +} + +func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { + tx, err := p.db.BeginRo(ctx) + if err != nil { + return 0, err + } + defer tx.Rollback() + return p.getLastProcessedBlockWithTx(tx) +} + +func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) { + blockNumBytes, err := tx.GetOne(lastBlockTable, lastBlockKey) + if err != nil { + return 0, err + } else if blockNumBytes == nil { + return 0, nil + } + return common.BytesToUint64(blockNumBytes), nil +} + +func (p *processor) Reorg(firstReorgedBlock uint64) error { + tx, err := p.db.BeginRw(context.Background()) + if err != nil { + return err + } + c, err := tx.Cursor(blockTable) + if err != nil { + return err + } + defer c.Close() + firstKey := common.Uint64ToBytes(firstReorgedBlock) + for blkKey, blkValue, err := c.Seek(firstKey); blkKey != nil; blkKey, blkValue, err = c.Next() { + if err != nil { + tx.Rollback() + return err + } + var blk blockWithLeafs + if err := json.Unmarshal(blkValue, &blk); err != nil { + tx.Rollback() + return err + } + for i := blk.FirstIndex; i < blk.LastIndex; i++ { + if err := p.deleteLeaf(tx, i); err != nil { + tx.Rollback() + return err + } + } + if err := tx.Delete(blockTable, blkKey); err != nil { + tx.Rollback() + return err + } + } + if err := p.updateLastProcessedBlock(tx, firstReorgedBlock-1); err != nil { + tx.Rollback() + return err + } + leaves, err := p.getAllLeavesHashed(tx) + if err != nil { + tx.Rollback() + return err + } + tree, err := l1infotree.NewL1InfoTree(treeHeight, leaves) + if err != nil { + tx.Rollback() + return err + } + p.tree = tree + return tx.Commit() +} + +func (p *processor) deleteLeaf(tx kv.RwTx, index uint32) error { + // TODO: do we need to do something with p.tree here? 
+	// Get leaf info to delete all relations
+	hash, err := tx.GetOne(indexTable, common.Uint32ToBytes(index))
+	if err != nil {
+		return err
+	}
+	if hash == nil {
+		return ErrNotFound
+	}
+	infoBytes, err := tx.GetOne(infoTable, hash)
+	if err != nil {
+		return err
+	}
+	if infoBytes == nil {
+		return ErrNotFound
+	}
+	var info storeLeaf
+	if err := json.Unmarshal(infoBytes, &info); err != nil {
+		return err
+	}
+
+	// Delete
+	if err := tx.Delete(rootTable, info.InfoRoot[:]); err != nil {
+		return err
+	}
+	if err := tx.Delete(indexTable, common.Uint32ToBytes(index)); err != nil {
+		return err
+	}
+	if err := tx.Delete(infoTable, hash); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ProcessBlock processes the leaves of the L1 info tree found on a block.
+// This function can be called without leaves with the intention to track the last processed block
+func (p *processor) ProcessBlock(b sync.Block) error {
+	tx, err := p.db.BeginRw(context.Background())
+	if err != nil {
+		return err
+	}
+	events := make([]Event, 0, len(b.Events))
+	if len(b.Events) > 0 {
+		var initialIndex uint32
+		lastIndex, err := p.getLastIndex(tx)
+		if err == ErrNotFound {
+			initialIndex = 0
+		} else if err != nil {
+			tx.Rollback()
+			return err
+		} else {
+			initialIndex = lastIndex + 1
+		}
+		for i, e := range b.Events {
+			event := e.(Event)
+			events = append(events, event)
+			leafToStore := storeLeaf{
+				Index:           initialIndex + uint32(i),
+				MainnetExitRoot: event.MainnetExitRoot,
+				RollupExitRoot:  event.RollupExitRoot,
+				ParentHash:      event.ParentHash,
+				Timestamp:       event.Timestamp,
+				BlockNumber:     b.Num,
+			}
+			if err := p.addLeaf(tx, leafToStore); err != nil {
+				tx.Rollback()
+				return err
+			}
+		}
+		bwl := blockWithLeafs{
+			FirstIndex: initialIndex,
+			LastIndex:  initialIndex + uint32(len(b.Events)),
+		}
+		blockValue, err := json.Marshal(bwl)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+		if err := tx.Put(blockTable, common.Uint64ToBytes(b.Num), blockValue); err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+	if err := p.updateLastProcessedBlock(tx, b.Num); err != nil {
+		tx.Rollback()
+		return err
+	}
+	log.Debugf("block %d processed with events: %+v", b.Num, events)
+	return tx.Commit()
+}
+
+func (p *processor) getLastIndex(tx kv.Tx) (uint32, error) {
+	bNum, err := p.getLastProcessedBlockWithTx(tx)
+	if err != nil {
+		return 0, err
+	}
+	if bNum == 0 {
+		return 0, nil
+	}
+	iter, err := tx.RangeDescend(blockTable, common.Uint64ToBytes(bNum), common.Uint64ToBytes(0), 1)
+	if err != nil {
+		return 0, err
+	}
+	_, blkBytes, err := iter.Next()
+	if err != nil {
+		return 0, err
+	}
+	if blkBytes == nil {
+		return 0, ErrNotFound
+	}
+	var blk blockWithLeafs
+	if err := json.Unmarshal(blkBytes, &blk); err != nil {
+		return 0, err
+	}
+	return blk.LastIndex - 1, nil
+}
+
+func (p *processor) addLeaf(tx kv.RwTx, leaf storeLeaf) error {
+	// Update tree
+	hash := l1infotree.HashLeafData(leaf.GlobalExitRoot(), leaf.ParentHash, leaf.Timestamp)
+	root, err := p.tree.AddLeaf(leaf.Index, hash)
+	if err != nil {
+		return err
+	}
+	leaf.InfoRoot = root
+	// store info
+	leafValue, err := json.Marshal(leaf)
+	if err != nil {
+		return err
+	}
+	if err := tx.Put(infoTable, hash[:], leafValue); err != nil {
+		return err
+	}
+	// store index relation
+	if err := tx.Put(indexTable, common.Uint32ToBytes(leaf.Index), hash[:]); err != nil {
+		return err
+	}
+	// store root relation
+	if err := tx.Put(rootTable, root.Bytes(), hash[:]); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (p *processor) updateLastProcessedBlock(tx kv.RwTx, 
blockNum uint64) error { + blockNumBytes := common.Uint64ToBytes(blockNum) + return tx.Put(lastBlockTable, lastBlockKey, blockNumBytes) +} diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go new file mode 100644 index 00000000..01550f31 --- /dev/null +++ b/l1infotreesync/processor_test.go @@ -0,0 +1,3 @@ +package l1infotreesync + +// TODO: add unit test diff --git a/localbridgesync/downloader.go b/localbridgesync/downloader.go index 9763f818..5b6ab8f6 100644 --- a/localbridgesync/downloader.go +++ b/localbridgesync/downloader.go @@ -1,14 +1,13 @@ package localbridgesync import ( - "context" + "fmt" "math/big" "time" "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridge" "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sync" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -33,164 +32,26 @@ type EthClienter interface { bind.ContractBackend } -type downloaderInterface interface { - waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) - getEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []block - getLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log - appendLog(b *block, l types.Log) - getBlockHeader(ctx context.Context, blockNum uint64) blockHeader -} - -type downloader struct { - syncBlockChunkSize uint64 - downloaderInterface -} - -func newDownloader( - bridgeAddr common.Address, - ethClient EthClienter, - syncBlockChunkSize uint64, - blockFinalityType etherman.BlockNumberFinality, -) (*downloader, error) { - bridgeContractV1, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, ethClient) +func buildAppender(client EthClienter, bridge common.Address) (sync.LogAppenderMap, error) { + bridgeContractV1, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridge, client) if err != nil { return nil, err } - bridgeContractV2, err := polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, ethClient) + bridgeContractV2, err := polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridge, client) if err != nil { return nil, err } - finality, err := blockFinalityType.ToBlockNum() - if err != nil { - return nil, err - } - return &downloader{ - syncBlockChunkSize: syncBlockChunkSize, - downloaderInterface: &downloaderImplementation{ - bridgeAddr: bridgeAddr, - bridgeContractV1: bridgeContractV1, - bridgeContractV2: bridgeContractV2, - ethClient: ethClient, - blockFinality: finality, - }, - }, nil -} - -func (d *downloader) download(ctx context.Context, fromBlock uint64, downloadedCh chan block) { - lastBlock := d.waitForNewBlocks(ctx, 0) - for { - select { - case <-ctx.Done(): - log.Debug("closing channel") - close(downloadedCh) - return - default: - } - toBlock := fromBlock + d.syncBlockChunkSize - if toBlock > lastBlock { - toBlock = lastBlock - } - if fromBlock > toBlock { - log.Debug("waiting for new blocks, last block ", toBlock) - lastBlock = d.waitForNewBlocks(ctx, toBlock) - continue - } - log.Debugf("getting events from blocks %d to %d", fromBlock, toBlock) - blocks := d.getEventsByBlockRange(ctx, fromBlock, toBlock) - for _, b := range blocks { - log.Debugf("sending block %d to the driver (with events)", b.Num) - downloadedCh <- b - } - if len(blocks) == 0 || blocks[len(blocks)-1].Num < toBlock { - // Indicate the last downloaded block if there are not events on it - 
log.Debugf("sending block %d to the driver (without evvents)", toBlock) - downloadedCh <- block{ - blockHeader: d.getBlockHeader(ctx, toBlock), - } - } - fromBlock = toBlock + 1 - } -} - -type downloaderImplementation struct { - bridgeAddr common.Address - bridgeContractV1 *polygonzkevmbridge.Polygonzkevmbridge - bridgeContractV2 *polygonzkevmbridgev2.Polygonzkevmbridgev2 - ethClient EthClienter - blockFinality *big.Int -} - -func (d *downloaderImplementation) waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) { - attempts := 0 - for { - header, err := d.ethClient.HeaderByNumber(ctx, d.blockFinality) - if err != nil { - attempts++ - log.Error("error geting last block num from eth client: ", err) - retryHandler("waitForNewBlocks", attempts) - continue - } - if header.Number.Uint64() > lastBlockSeen { - return header.Number.Uint64() - } - time.Sleep(waitForNewBlocksPeriod) - } -} - -func (d *downloaderImplementation) getEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []block { - blocks := []block{} - logs := d.getLogs(ctx, fromBlock, toBlock) - for _, l := range logs { - if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber { - blocks = append(blocks, block{ - blockHeader: blockHeader{ - Num: l.BlockNumber, - Hash: l.BlockHash, - }, - Events: []BridgeEvent{}, - }) - } - d.appendLog(&blocks[len(blocks)-1], l) - } - - return blocks -} - -func (d *downloaderImplementation) getLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log { - query := ethereum.FilterQuery{ - FromBlock: new(big.Int).SetUint64(fromBlock), - Addresses: []common.Address{d.bridgeAddr}, - Topics: [][]common.Hash{ - {bridgeEventSignature}, - {claimEventSignature}, - {claimEventSignaturePreEtrog}, - }, - ToBlock: new(big.Int).SetUint64(toBlock), - } - attempts := 0 - for { - logs, err := d.ethClient.FilterLogs(ctx, query) - if err != nil { - attempts++ - log.Error("error calling FilterLogs to eth client: ", err) - retryHandler("getLogs", attempts) - continue - } - return logs - } -} + appender := make(sync.LogAppenderMap) -func (d *downloaderImplementation) appendLog(b *block, l types.Log) { - switch l.Topics[0] { - case bridgeEventSignature: - bridge, err := d.bridgeContractV2.ParseBridgeEvent(l) + appender[bridgeEventSignature] = func(b *sync.EVMBlock, l types.Log) error { + bridge, err := bridgeContractV2.ParseBridgeEvent(l) if err != nil { - log.Fatalf( + return fmt.Errorf( "error parsing log %+v using d.bridgeContractV2.ParseBridgeEvent: %v", l, err, ) } - b.Events = append(b.Events, BridgeEvent{Bridge: &Bridge{ + b.Events = append(b.Events, Event{Bridge: &Bridge{ LeafType: bridge.LeafType, OriginNetwork: bridge.OriginNetwork, OriginAddress: bridge.OriginAddress, @@ -200,54 +61,44 @@ func (d *downloaderImplementation) appendLog(b *block, l types.Log) { Metadata: bridge.Metadata, DepositCount: bridge.DepositCount, }}) - case claimEventSignature: - claim, err := d.bridgeContractV2.ParseClaimEvent(l) + return nil + } + + appender[claimEventSignature] = func(b *sync.EVMBlock, l types.Log) error { + claim, err := bridgeContractV2.ParseClaimEvent(l) if err != nil { - log.Fatalf( + return fmt.Errorf( "error parsing log %+v using d.bridgeContractV2.ParseClaimEvent: %v", l, err, ) } - b.Events = append(b.Events, BridgeEvent{Claim: &Claim{ + b.Events = append(b.Events, Event{Claim: &Claim{ GlobalIndex: claim.GlobalIndex, OriginNetwork: claim.OriginNetwork, OriginAddress: claim.OriginAddress, DestinationAddress: claim.DestinationAddress, Amount: claim.Amount, }}) 
-	case claimEventSignaturePreEtrog:
-		claim, err := d.bridgeContractV1.ParseClaimEvent(l)
+		return nil
+	}
+
+	appender[claimEventSignaturePreEtrog] = func(b *sync.EVMBlock, l types.Log) error {
+		claim, err := bridgeContractV1.ParseClaimEvent(l)
 		if err != nil {
-			log.Fatalf(
+			return fmt.Errorf(
 				"error parsing log %+v using d.bridgeContractV1.ParseClaimEvent: %v",
 				l, err,
 			)
 		}
-		b.Events = append(b.Events, BridgeEvent{Claim: &Claim{
+		b.Events = append(b.Events, Event{Claim: &Claim{
 			GlobalIndex:        big.NewInt(int64(claim.Index)),
 			OriginNetwork:      claim.OriginNetwork,
 			OriginAddress:      claim.OriginAddress,
 			DestinationAddress: claim.DestinationAddress,
 			Amount:             claim.Amount,
 		}})
-	default:
-		log.Fatalf("unexpected log %+v", l)
+		return nil
 	}
-}
 
-func (d *downloaderImplementation) getBlockHeader(ctx context.Context, blockNum uint64) blockHeader {
-	attempts := 0
-	for {
-		header, err := d.ethClient.HeaderByNumber(ctx, big.NewInt(int64(blockNum)))
-		if err != nil {
-			attempts++
-			log.Errorf("error getting block header for block %d, err: %v", blockNum, err)
-			retryHandler("getBlockHeader", attempts)
-			continue
-		}
-		return blockHeader{
-			Num:  header.Number.Uint64(),
-			Hash: header.Hash(),
-		}
-	}
+	return appender, nil
 }
diff --git a/localbridgesync/driver.go b/localbridgesync/driver.go
deleted file mode 100644
index 997fd215..00000000
--- a/localbridgesync/driver.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package localbridgesync
-
-import (
-	"context"
-
-	"github.com/0xPolygon/cdk/log"
-	"github.com/0xPolygon/cdk/reorgdetector"
-	"github.com/ethereum/go-ethereum/common"
-)
-
-const (
-	downloadBufferSize = 1000
-	reorgDetectorID    = "localbridgesync"
-)
-
-type downloaderFull interface {
-	downloaderInterface
-	download(ctx context.Context, fromBlock uint64, downloadedCh chan block)
-}
-
-type driver struct {
-	reorgDetector ReorgDetector
-	reorgSub      *reorgdetector.Subscription
-	processor     processorInterface
-	downloader    downloaderFull
-}
-
-type processorInterface interface {
-	getLastProcessedBlock(ctx context.Context) (uint64, error)
-	storeBridgeEvents(blockNum uint64, events []BridgeEvent) error
-	reorg(firstReorgedBlock uint64) error
-}
-
-type ReorgDetector interface {
-	Subscribe(id string) *reorgdetector.Subscription
-	AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error
-}
-
-func newDriver(
-	reorgDetector ReorgDetector,
-	processor processorInterface,
-	downloader downloaderFull,
-) (*driver, error) {
-	reorgSub := reorgDetector.Subscribe(reorgDetectorID)
-	return &driver{
-		reorgDetector: reorgDetector,
-		reorgSub:      reorgSub,
-		processor:     processor,
-		downloader:    downloader,
-	}, nil
-}
-
-func (d *driver) Sync(ctx context.Context) {
-reset:
-	var (
-		lastProcessedBlock uint64
-		attempts           int
-		err                error
-	)
-	for {
-		lastProcessedBlock, err = d.processor.getLastProcessedBlock(ctx)
-		if err != nil {
-			attempts++
-			log.Error("error geting last processed block: ", err)
-			retryHandler("Sync", attempts)
-			continue
-		}
-		break
-	}
-	cancellableCtx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	// start downloading
-	downloadCh := make(chan block, downloadBufferSize)
-	go d.downloader.download(cancellableCtx, lastProcessedBlock, downloadCh)
-
-	for {
-		select {
-		case b := <-downloadCh:
-			log.Debug("handleNewBlock")
-			d.handleNewBlock(ctx, b)
-		case firstReorgedBlock := <-d.reorgSub.FirstReorgedBlock:
-			log.Debug("handleReorg")
-			d.handleReorg(cancel, downloadCh, firstReorgedBlock)
-			goto reset
-		}
-	}
-}
-
-func (d *driver) handleNewBlock(ctx context.Context, b
block) { - attempts := 0 - for { - err := d.reorgDetector.AddBlockToTrack(ctx, reorgDetectorID, b.Num, b.Hash) - if err != nil { - attempts++ - log.Errorf("error adding block %d to tracker: %v", b.Num, err) - retryHandler("handleNewBlock", attempts) - continue - } - break - } - attempts = 0 - for { - err := d.processor.storeBridgeEvents(b.Num, b.Events) - if err != nil { - attempts++ - log.Errorf("error processing events for blcok %d, err: ", b.Num, err) - retryHandler("handleNewBlock", attempts) - continue - } - break - } -} - -func (d *driver) handleReorg( - cancel context.CancelFunc, downloadCh chan block, firstReorgedBlock uint64, -) { - // stop downloader - cancel() - _, ok := <-downloadCh - for ok { - _, ok = <-downloadCh - } - // handle reorg - attempts := 0 - for { - err := d.processor.reorg(firstReorgedBlock) - if err != nil { - attempts++ - log.Errorf( - "error processing reorg, last valid block %d, err: %v", - firstReorgedBlock, err, - ) - retryHandler("handleReorg", attempts) - continue - } - break - } - d.reorgSub.ReorgProcessed <- true -} diff --git a/localbridgesync/e2e_test.go b/localbridgesync/e2e_test.go new file mode 100644 index 00000000..c84d8d33 --- /dev/null +++ b/localbridgesync/e2e_test.go @@ -0,0 +1,3 @@ +package localbridgesync + +// TODO: add E2E test, prolly need a mock contract diff --git a/localbridgesync/localbridgesync.go b/localbridgesync/localbridgesync.go index 42ab67d1..94e60b59 100644 --- a/localbridgesync/localbridgesync.go +++ b/localbridgesync/localbridgesync.go @@ -1,52 +1,87 @@ package localbridgesync import ( + "context" "time" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sync" "github.com/ethereum/go-ethereum/common" ) +const ( + reorgDetectorID = "localbridgesync" + downloadBufferSize = 1000 +) + var ( retryAfterErrorPeriod = time.Second * 10 maxRetryAttemptsAfterError = 5 ) type LocalBridgeSync struct { - *processor - *driver + processor *processor + driver *sync.EVMDriver } func New( + ctx context.Context, dbPath string, bridge common.Address, syncBlockChunkSize uint64, blockFinalityType etherman.BlockNumberFinality, - rd ReorgDetector, + rd sync.ReorgDetector, l2Client EthClienter, + initialBlock uint64, ) (*LocalBridgeSync, error) { - p, err := newProcessor(dbPath) + processor, err := newProcessor(dbPath) if err != nil { return nil, err } - dwn, err := newDownloader(bridge, l2Client, syncBlockChunkSize, blockFinalityType) + lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) if err != nil { return nil, err } - dri, err := newDriver(rd, p, dwn) + if lastProcessedBlock < initialBlock { + err = processor.ProcessBlock(sync.Block{ + Num: initialBlock, + }) + if err != nil { + return nil, err + } + } + rh := &sync.RetryHandler{ + MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, + RetryAfterErrorPeriod: retryAfterErrorPeriod, + } + + appender, err := buildAppender(l2Client, bridge) + if err != nil { + return nil, err + } + downloader, err := sync.NewEVMDownloader( + l2Client, + syncBlockChunkSize, + blockFinalityType, + waitForNewBlocksPeriod, + appender, + []common.Address{bridge}, + rh, + ) if err != nil { return nil, err } - return &LocalBridgeSync{p, dri}, nil -} -func retryHandler(funcName string, attempts int) { - if attempts >= maxRetryAttemptsAfterError { - log.Fatalf( - "%s failed too many times (%d)", - funcName, maxRetryAttemptsAfterError, - ) + driver, err := sync.NewEVMDriver(rd, processor, downloader, reorgDetectorID, downloadBufferSize, rh) + if err != nil { + 
return nil, err } - time.Sleep(retryAfterErrorPeriod) + return &LocalBridgeSync{ + processor: processor, + driver: driver, + }, nil +} + +func (s *LocalBridgeSync) Start(ctx context.Context) { + s.driver.Sync(ctx) } diff --git a/localbridgesync/processor.go b/localbridgesync/processor.go index 69c885b8..5d644a9a 100644 --- a/localbridgesync/processor.go +++ b/localbridgesync/processor.go @@ -4,8 +4,11 @@ import ( "context" "encoding/json" "errors" + "math/big" "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/sync" + ethCommon "github.com/ethereum/go-ethereum/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" ) @@ -20,6 +23,30 @@ var ( lastBlokcKey = []byte("lb") ) +type Bridge struct { + LeafType uint8 + OriginNetwork uint32 + OriginAddress ethCommon.Address + DestinationNetwork uint32 + DestinationAddress ethCommon.Address + Amount *big.Int + Metadata []byte + DepositCount uint32 +} + +type Claim struct { + GlobalIndex *big.Int + OriginNetwork uint32 + OriginAddress ethCommon.Address + DestinationAddress ethCommon.Address + Amount *big.Int +} + +type Event struct { + Bridge *Bridge + Claim *Claim +} + type processor struct { db kv.RwDB } @@ -48,8 +75,8 @@ func newProcessor(dbPath string) (*processor, error) { // If toBlock has not been porcessed yet, ErrBlockNotProcessed will be returned func (p *processor) GetClaimsAndBridges( ctx context.Context, fromBlock, toBlock uint64, -) ([]BridgeEvent, error) { - events := []BridgeEvent{} +) ([]Event, error) { + events := []Event{} tx, err := p.db.BeginRo(ctx) if err != nil { @@ -69,14 +96,14 @@ func (p *processor) GetClaimsAndBridges( } defer c.Close() - for k, v, err := c.Seek(common.BlockNum2Bytes(fromBlock)); k != nil; k, v, err = c.Next() { + for k, v, err := c.Seek(common.Uint64ToBytes(fromBlock)); k != nil; k, v, err = c.Next() { if err != nil { return nil, err } - if common.Bytes2BlockNum(k) > toBlock { + if common.BytesToUint64(k) > toBlock { break } - blockEvents := []BridgeEvent{} + blockEvents := []Event{} err := json.Unmarshal(v, &blockEvents) if err != nil { return nil, err @@ -87,7 +114,7 @@ func (p *processor) GetClaimsAndBridges( return events, nil } -func (p *processor) getLastProcessedBlock(ctx context.Context) (uint64, error) { +func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { tx, err := p.db.BeginRo(ctx) if err != nil { return 0, err @@ -102,11 +129,11 @@ func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) { } else if blockNumBytes == nil { return 0, nil } else { - return common.Bytes2BlockNum(blockNumBytes), nil + return common.BytesToUint64(blockNumBytes), nil } } -func (p *processor) reorg(firstReorgedBlock uint64) error { +func (p *processor) Reorg(firstReorgedBlock uint64) error { tx, err := p.db.BeginRw(context.Background()) if err != nil { return err @@ -116,7 +143,7 @@ func (p *processor) reorg(firstReorgedBlock uint64) error { return err } defer c.Close() - firstKey := common.BlockNum2Bytes(firstReorgedBlock) + firstKey := common.Uint64ToBytes(firstReorgedBlock) for k, _, err := c.Seek(firstKey); k != nil; k, _, err = c.Next() { if err != nil { tx.Rollback() @@ -134,23 +161,27 @@ func (p *processor) reorg(firstReorgedBlock uint64) error { return tx.Commit() } -func (p *processor) storeBridgeEvents(blockNum uint64, events []BridgeEvent) error { +func (p *processor) ProcessBlock(block sync.Block) error { tx, err := p.db.BeginRw(context.Background()) if err != nil { return err } - if len(events) > 0 { + if 
len(block.Events) > 0 { + events := []Event{} + for _, e := range block.Events { + events = append(events, e.(Event)) + } value, err := json.Marshal(events) if err != nil { tx.Rollback() return err } - if err := tx.Put(eventsTable, common.BlockNum2Bytes(blockNum), value); err != nil { + if err := tx.Put(eventsTable, common.Uint64ToBytes(block.Num), value); err != nil { tx.Rollback() return err } } - if err := p.updateLastProcessedBlock(tx, blockNum); err != nil { + if err := p.updateLastProcessedBlock(tx, block.Num); err != nil { tx.Rollback() return err } @@ -158,6 +189,6 @@ func (p *processor) storeBridgeEvents(blockNum uint64, events []BridgeEvent) err } func (p *processor) updateLastProcessedBlock(tx kv.RwTx, blockNum uint64) error { - blockNumBytes := common.BlockNum2Bytes(blockNum) + blockNumBytes := common.Uint64ToBytes(blockNum) return tx.Put(lastBlockTable, lastBlokcKey, blockNumBytes) } diff --git a/localbridgesync/processor_test.go b/localbridgesync/processor_test.go index 8e6884c2..dbf0d74c 100644 --- a/localbridgesync/processor_test.go +++ b/localbridgesync/processor_test.go @@ -7,6 +7,7 @@ import ( "slices" "testing" + "github.com/0xPolygon/cdk/sync" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -45,11 +46,10 @@ func TestProceessor(t *testing.T) { expectedEvents: nil, expectedErr: ErrBlockNotProcessed, }, - &storeBridgeEventsAction{ + &processBlockAction{ p: p, description: "block1", - blockNum: block1.Num, - events: block1.Events, + block: block1, expectedErr: nil, }, // processed: block1 @@ -75,7 +75,7 @@ func TestProceessor(t *testing.T) { ctx: context.Background(), fromBlock: 1, toBlock: 1, - expectedEvents: block1.Events, + expectedEvents: eventsToBridgeEvents(block1.Events), expectedErr: nil, }, &reorgAction{ @@ -94,19 +94,17 @@ func TestProceessor(t *testing.T) { expectedEvents: nil, expectedErr: ErrBlockNotProcessed, }, - &storeBridgeEventsAction{ + &processBlockAction{ p: p, description: "block1 (after it's reorged)", - blockNum: block1.Num, - events: block1.Events, + block: block1, expectedErr: nil, }, // processed: block3 - &storeBridgeEventsAction{ + &processBlockAction{ p: p, description: "block3", - blockNum: block3.Num, - events: block3.Events, + block: block3, expectedErr: nil, }, // processed: block1, block3 @@ -123,17 +121,20 @@ func TestProceessor(t *testing.T) { ctx: context.Background(), fromBlock: 2, toBlock: 2, - expectedEvents: []BridgeEvent{}, + expectedEvents: []Event{}, expectedErr: nil, }, &getClaimsAndBridgesAction{ - p: p, - description: "after block3: range 1, 3", - ctx: context.Background(), - fromBlock: 1, - toBlock: 3, - expectedEvents: append(block1.Events, block3.Events...), - expectedErr: nil, + p: p, + description: "after block3: range 1, 3", + ctx: context.Background(), + fromBlock: 1, + toBlock: 3, + expectedEvents: append( + eventsToBridgeEvents(block1.Events), + eventsToBridgeEvents(block3.Events)..., + ), + expectedErr: nil, }, &reorgAction{ p: p, @@ -162,27 +163,24 @@ func TestProceessor(t *testing.T) { expectedLastProcessedBlock: 1, expectedErr: nil, }, - &storeBridgeEventsAction{ + &processBlockAction{ p: p, description: "block3 after reorg", - blockNum: block3.Num, - events: block3.Events, + block: block3, expectedErr: nil, }, // processed: block1, block3 - &storeBridgeEventsAction{ + &processBlockAction{ p: p, description: "block4", - blockNum: block4.Num, - events: block4.Events, + block: block4, expectedErr: nil, }, // processed: block1, block3, block4 - &storeBridgeEventsAction{ + 
&processBlockAction{ p: p, description: "block5", - blockNum: block5.Num, - events: block5.Events, + block: block5, expectedErr: nil, }, // processed: block1, block3, block4, block5 @@ -194,22 +192,28 @@ func TestProceessor(t *testing.T) { expectedErr: nil, }, &getClaimsAndBridgesAction{ - p: p, - description: "after block5: range 1, 3", - ctx: context.Background(), - fromBlock: 1, - toBlock: 3, - expectedEvents: append(block1.Events, block3.Events...), - expectedErr: nil, + p: p, + description: "after block5: range 1, 3", + ctx: context.Background(), + fromBlock: 1, + toBlock: 3, + expectedEvents: append( + eventsToBridgeEvents(block1.Events), + eventsToBridgeEvents(block3.Events)..., + ), + expectedErr: nil, }, &getClaimsAndBridgesAction{ - p: p, - description: "after block5: range 4, 5", - ctx: context.Background(), - fromBlock: 4, - toBlock: 5, - expectedEvents: append(block4.Events, block5.Events...), - expectedErr: nil, + p: p, + description: "after block5: range 4, 5", + ctx: context.Background(), + fromBlock: 4, + toBlock: 5, + expectedEvents: append( + eventsToBridgeEvents(block4.Events), + eventsToBridgeEvents(block5.Events)..., + ), + expectedErr: nil, }, &getClaimsAndBridgesAction{ p: p, @@ -218,10 +222,10 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 5, expectedEvents: slices.Concat( - block1.Events, - block3.Events, - block4.Events, - block5.Events, + eventsToBridgeEvents(block1.Events), + eventsToBridgeEvents(block3.Events), + eventsToBridgeEvents(block4.Events), + eventsToBridgeEvents(block5.Events), ), expectedErr: nil, }, @@ -237,13 +241,10 @@ func TestProceessor(t *testing.T) { // blocks var ( - block1 = block{ - blockHeader: blockHeader{ - Num: 1, - Hash: common.HexToHash("01"), - }, - Events: []BridgeEvent{ - {Bridge: &Bridge{ + block1 = sync.Block{ + Num: 1, + Events: []interface{}{ + Event{Bridge: &Bridge{ LeafType: 1, OriginNetwork: 1, OriginAddress: common.HexToAddress("01"), @@ -253,7 +254,7 @@ var ( Metadata: common.Hex2Bytes("01"), DepositCount: 1, }}, - {Claim: &Claim{ + Event{Claim: &Claim{ GlobalIndex: big.NewInt(1), OriginNetwork: 1, OriginAddress: common.HexToAddress("01"), @@ -262,13 +263,10 @@ var ( }}, }, } - block3 = block{ - blockHeader: blockHeader{ - Num: 3, - Hash: common.HexToHash("02"), - }, - Events: []BridgeEvent{ - {Bridge: &Bridge{ + block3 = sync.Block{ + Num: 3, + Events: []interface{}{ + Event{Bridge: &Bridge{ LeafType: 2, OriginNetwork: 2, OriginAddress: common.HexToAddress("02"), @@ -278,7 +276,7 @@ var ( Metadata: common.Hex2Bytes("02"), DepositCount: 2, }}, - {Bridge: &Bridge{ + Event{Bridge: &Bridge{ LeafType: 3, OriginNetwork: 3, OriginAddress: common.HexToAddress("03"), @@ -290,27 +288,21 @@ var ( }}, }, } - block4 = block{ - blockHeader: blockHeader{ - Num: 4, - Hash: common.HexToHash("03"), - }, - Events: []BridgeEvent{}, + block4 = sync.Block{ + Num: 4, + Events: []interface{}{}, } - block5 = block{ - blockHeader: blockHeader{ - Num: 5, - Hash: common.HexToHash("04"), - }, - Events: []BridgeEvent{ - {Claim: &Claim{ + block5 = sync.Block{ + Num: 5, + Events: []interface{}{ + Event{Claim: &Claim{ GlobalIndex: big.NewInt(4), OriginNetwork: 4, OriginAddress: common.HexToAddress("04"), DestinationAddress: common.HexToAddress("04"), Amount: big.NewInt(4), }}, - {Claim: &Claim{ + Event{Claim: &Claim{ GlobalIndex: big.NewInt(5), OriginNetwork: 5, OriginAddress: common.HexToAddress("05"), @@ -337,7 +329,7 @@ type getClaimsAndBridgesAction struct { ctx context.Context fromBlock uint64 toBlock uint64 - expectedEvents 
[]BridgeEvent + expectedEvents []Event expectedErr error } @@ -374,7 +366,7 @@ func (a *getLastProcessedBlockAction) desc() string { } func (a *getLastProcessedBlockAction) execute(t *testing.T) { - actualLastProcessedBlock, actualErr := a.p.getLastProcessedBlock(a.ctx) + actualLastProcessedBlock, actualErr := a.p.GetLastProcessedBlock(a.ctx) require.Equal(t, a.expectedLastProcessedBlock, actualLastProcessedBlock) require.Equal(t, a.expectedErr, actualErr) } @@ -397,29 +389,36 @@ func (a *reorgAction) desc() string { } func (a *reorgAction) execute(t *testing.T) { - actualErr := a.p.reorg(a.firstReorgedBlock) + actualErr := a.p.Reorg(a.firstReorgedBlock) require.Equal(t, a.expectedErr, actualErr) } // storeBridgeEvents -type storeBridgeEventsAction struct { +type processBlockAction struct { p *processor description string - blockNum uint64 - events []BridgeEvent + block sync.Block expectedErr error } -func (a *storeBridgeEventsAction) method() string { +func (a *processBlockAction) method() string { return "storeBridgeEvents" } -func (a *storeBridgeEventsAction) desc() string { +func (a *processBlockAction) desc() string { return a.description } -func (a *storeBridgeEventsAction) execute(t *testing.T) { - actualErr := a.p.storeBridgeEvents(a.blockNum, a.events) +func (a *processBlockAction) execute(t *testing.T) { + actualErr := a.p.ProcessBlock(a.block) require.Equal(t, a.expectedErr, actualErr) } + +func eventsToBridgeEvents(events []interface{}) []Event { + bridgeEvents := []Event{} + for _, event := range events { + bridgeEvents = append(bridgeEvents, event.(Event)) + } + return bridgeEvents +} diff --git a/localbridgesync/types.go b/localbridgesync/types.go deleted file mode 100644 index 3a6a508e..00000000 --- a/localbridgesync/types.go +++ /dev/null @@ -1,42 +0,0 @@ -package localbridgesync - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" -) - -type Bridge struct { - LeafType uint8 - OriginNetwork uint32 - OriginAddress common.Address - DestinationNetwork uint32 - DestinationAddress common.Address - Amount *big.Int - Metadata []byte - DepositCount uint32 -} - -type Claim struct { - // TODO: pre uLxLy there was Index instead of GlobalIndex, should we treat this differently? - GlobalIndex *big.Int - OriginNetwork uint32 - OriginAddress common.Address - DestinationAddress common.Address - Amount *big.Int -} - -type BridgeEvent struct { - Bridge *Bridge - Claim *Claim -} - -type block struct { - blockHeader - Events []BridgeEvent -} - -type blockHeader struct { - Num uint64 - Hash common.Hash -} diff --git a/reorgdetector/mock_eth_client.go b/reorgdetector/mock_eth_client.go index add883f6..85376cc4 100644 --- a/reorgdetector/mock_eth_client.go +++ b/reorgdetector/mock_eth_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.1. DO NOT EDIT. +// Code generated by mockery v2.22.1. DO NOT EDIT. 
package reorgdetector @@ -20,10 +20,6 @@ type EthClientMock struct { func (_m *EthClientMock) BlockNumber(ctx context.Context) (uint64, error) { ret := _m.Called(ctx) - if len(ret) == 0 { - panic("no return value specified for BlockNumber") - } - var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { @@ -48,10 +44,6 @@ func (_m *EthClientMock) BlockNumber(ctx context.Context) (uint64, error) { func (_m *EthClientMock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { ret := _m.Called(ctx, number) - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - var r0 *types.Header var r1 error if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { @@ -74,12 +66,13 @@ func (_m *EthClientMock) HeaderByNumber(ctx context.Context, number *big.Int) (* return r0, r1 } -// NewEthClientMock creates a new instance of EthClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewEthClientMock(t interface { +type mockConstructorTestingTNewEthClientMock interface { mock.TestingT Cleanup(func()) -}) *EthClientMock { +} + +// NewEthClientMock creates a new instance of EthClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewEthClientMock(t mockConstructorTestingTNewEthClientMock) *EthClientMock { mock := &EthClientMock{} mock.Mock.Test(t) diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go index 4b5a963b..c0b0caa2 100644 --- a/reorgdetector/reorgdetector.go +++ b/reorgdetector/reorgdetector.go @@ -140,6 +140,10 @@ type ReorgDetector struct { waitPeriodBlockAdder time.Duration } +type Config struct { + DBPath string `mapstructure:"DBPath"` +} + // New creates a new instance of ReorgDetector func New(ctx context.Context, client EthClient, dbPath string) (*ReorgDetector, error) { db, err := mdbx.NewMDBX(nil). 
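The sync package introduced below centralizes the retry loop that each syncer previously reimplemented. As a rough usage sketch (the retry target doSomething is hypothetical; the RetryHandler fields and Handle signature are taken from sync/common.go below), callers count attempts themselves and let Handle sleep between retries, or abort the process once the configured maximum is exceeded; a negative MaxRetryAttemptsAfterError disables the limit:

	rh := &sync.RetryHandler{
		RetryAfterErrorPeriod:      time.Second * 10,
		MaxRetryAttemptsAfterError: 5,
	}
	attempts := 0
	for {
		if err := doSomething(); err != nil { // hypothetical fallible call
			attempts++
			rh.Handle("doSomething", attempts) // sleeps, or log.Fatalf once attempts reach the max
			continue
		}
		break
	}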
diff --git a/sync/common.go b/sync/common.go new file mode 100644 index 00000000..4fe049d5 --- /dev/null +++ b/sync/common.go @@ -0,0 +1,21 @@ +package sync + +import ( + "log" + "time" +) + +type RetryHandler struct { + RetryAfterErrorPeriod time.Duration + MaxRetryAttemptsAfterError int +} + +func (h *RetryHandler) Handle(funcName string, attempts int) { + if h.MaxRetryAttemptsAfterError > -1 && attempts >= h.MaxRetryAttemptsAfterError { + log.Fatalf( + "%s failed too many times (%d)", + funcName, h.MaxRetryAttemptsAfterError, + ) + } + time.Sleep(h.RetryAfterErrorPeriod) +} diff --git a/sync/driver.go b/sync/driver.go new file mode 100644 index 00000000..bd066ba1 --- /dev/null +++ b/sync/driver.go @@ -0,0 +1,14 @@ +package sync + +import "context" + +type Block struct { + Num uint64 + Events []interface{} +} + +type ProcessorInterface interface { + GetLastProcessedBlock(ctx context.Context) (uint64, error) + ProcessBlock(block Block) error + Reorg(firstReorgedBlock uint64) error +} diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go new file mode 100644 index 00000000..ad452856 --- /dev/null +++ b/sync/evmdownloader.go @@ -0,0 +1,208 @@ +package sync + +import ( + "context" + "math/big" + "time" + + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +type EthClienter interface { + ethereum.LogFilterer + ethereum.BlockNumberReader + ethereum.ChainReader + bind.ContractBackend +} + +type evmDownloaderInterface interface { + waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) + getEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock + getLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log + getBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader +} + +type LogAppenderMap map[common.Hash]func(b *EVMBlock, l types.Log) error + +type EVMDownloader struct { + syncBlockChunkSize uint64 + evmDownloaderInterface +} + +func NewEVMDownloader( + ethClient EthClienter, + syncBlockChunkSize uint64, + blockFinalityType etherman.BlockNumberFinality, + waitForNewBlocksPeriod time.Duration, + appender LogAppenderMap, + adressessToQuery []common.Address, + rh *RetryHandler, +) (*EVMDownloader, error) { + finality, err := blockFinalityType.ToBlockNum() + if err != nil { + return nil, err + } + topicsToQuery := [][]common.Hash{} + for topic := range appender { + topicsToQuery = append(topicsToQuery, []common.Hash{topic}) + } + return &EVMDownloader{ + syncBlockChunkSize: syncBlockChunkSize, + evmDownloaderInterface: &downloaderImplementation{ + ethClient: ethClient, + blockFinality: finality, + waitForNewBlocksPeriod: waitForNewBlocksPeriod, + appender: appender, + topicsToQuery: topicsToQuery, + adressessToQuery: adressessToQuery, + rh: rh, + }, + }, nil +} + +func (d *EVMDownloader) download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) { + lastBlock := d.waitForNewBlocks(ctx, 0) + for { + select { + case <-ctx.Done(): + log.Debug("closing channel") + close(downloadedCh) + return + default: + } + toBlock := fromBlock + d.syncBlockChunkSize + if toBlock > lastBlock { + toBlock = lastBlock + } + if fromBlock > toBlock { + log.Debug("waiting for new blocks, last block ", toBlock) + lastBlock = d.waitForNewBlocks(ctx, toBlock) + continue + } + log.Debugf("getting events from blocks %d to %d", fromBlock, 
toBlock)
+		blocks := d.getEventsByBlockRange(ctx, fromBlock, toBlock)
+		for _, b := range blocks {
+			log.Debugf("sending block %d to the driver (with events)", b.Num)
+			downloadedCh <- b
+		}
+		if len(blocks) == 0 || blocks[len(blocks)-1].Num < toBlock {
+			// Indicate the last downloaded block if there are not events on it
+			log.Debugf("sending block %d to the driver (without events)", toBlock)
+			downloadedCh <- EVMBlock{
+				EVMBlockHeader: d.getBlockHeader(ctx, toBlock),
+			}
+		}
+		fromBlock = toBlock + 1
+	}
+}
+
+type downloaderImplementation struct {
+	ethClient              EthClienter
+	blockFinality          *big.Int
+	waitForNewBlocksPeriod time.Duration
+	appender               LogAppenderMap
+	topicsToQuery          [][]common.Hash
+	adressessToQuery       []common.Address
+	rh                     *RetryHandler
+}
+
+func (d *downloaderImplementation) waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) {
+	attempts := 0
+	ticker := time.NewTicker(d.waitForNewBlocksPeriod)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			log.Info("context cancelled")
+			return lastBlockSeen
+		case <-ticker.C:
+			header, err := d.ethClient.HeaderByNumber(ctx, d.blockFinality)
+			if err != nil {
+				attempts++
+				log.Error("error getting last block num from eth client: ", err)
+				d.rh.Handle("waitForNewBlocks", attempts)
+				continue
+			}
+			if header.Number.Uint64() > lastBlockSeen {
+				return header.Number.Uint64()
+			}
+		}
+	}
+}
+
+func (d *downloaderImplementation) getEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock {
+	blocks := []EVMBlock{}
+	logs := d.getLogs(ctx, fromBlock, toBlock)
+	for _, l := range logs {
+		if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber {
+			b := d.getBlockHeader(ctx, l.BlockNumber)
+			blocks = append(blocks, EVMBlock{
+				EVMBlockHeader: EVMBlockHeader{
+					Num:        l.BlockNumber,
+					Hash:       l.BlockHash,
+					Timestamp:  b.Timestamp,
+					ParentHash: b.ParentHash,
+				},
+				Events: []interface{}{},
+			})
+		}
+
+		attempts := 0
+		for {
+			err := d.appender[l.Topics[0]](&blocks[len(blocks)-1], l)
+			if err != nil {
+				attempts++
+				log.Error("error trying to append log: ", err)
+				d.rh.Handle("appendLog", attempts)
+				continue
+			}
+			break
+		}
+	}
+
+	return blocks
+}
+
+func (d *downloaderImplementation) getLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log {
+	query := ethereum.FilterQuery{
+		FromBlock: new(big.Int).SetUint64(fromBlock),
+		Addresses: d.adressessToQuery,
+		Topics:    d.topicsToQuery,
+		ToBlock:   new(big.Int).SetUint64(toBlock),
+	}
+	attempts := 0
+	for {
+		logs, err := d.ethClient.FilterLogs(ctx, query)
+		if err != nil {
+			attempts++
+			log.Error("error calling FilterLogs to eth client: ", err)
+			d.rh.Handle("getLogs", attempts)
+			continue
+		}
+		return logs
+	}
+}
+
+func (d *downloaderImplementation) getBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader {
+	attempts := 0
+	for {
+		header, err := d.ethClient.HeaderByNumber(ctx, big.NewInt(int64(blockNum)))
+		if err != nil {
+			attempts++
+			log.Errorf("error getting block header for block %d, err: %v", blockNum, err)
+			d.rh.Handle("getBlockHeader", attempts)
+			continue
+		}
+		return EVMBlockHeader{
+			Num:        header.Number.Uint64(),
+			Hash:       header.Hash(),
+			ParentHash: header.ParentHash,
+			Timestamp:  header.Time,
+		}
+	}
+}
diff --git a/localbridgesync/downloader_test.go b/sync/evmdownloader_test.go
similarity index 55%
rename from localbridgesync/downloader_test.go
rename to sync/evmdownloader_test.go
index 553efbec..2f5a7ee5 100644
--- a/localbridgesync/downloader_test.go
+++ b/sync/evmdownloader_test.go
@@
-1,46 +1,45 @@ -package localbridgesync +package sync import ( "context" "errors" "math/big" + "strconv" "testing" "time" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridge" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" - cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) var ( - contractAddr = common.HexToAddress("1234567890") + contractAddr = common.HexToAddress("f00") + eventSignature = crypto.Keccak256Hash([]byte("foo")) ) const ( syncBlockChunck = uint64(10) ) +type testEvent common.Hash + func TestGetEventsByBlockRange(t *testing.T) { type testCase struct { description string inputLogs []types.Log fromBlock, toBlock uint64 - expectedBlocks []block + expectedBlocks []EVMBlock } testCases := []testCase{} - clientMock := NewL2Mock(t) ctx := context.Background() - d, err := newDownloader(contractAddr, clientMock, syncBlockChunck, etherman.LatestBlock) - require.NoError(t, err) + d, clientMock := NewTestDownloader(t) // case 0: single block, no events case0 := testCase{ @@ -48,26 +47,23 @@ func TestGetEventsByBlockRange(t *testing.T) { inputLogs: []types.Log{}, fromBlock: 1, toBlock: 3, - expectedBlocks: []block{}, + expectedBlocks: []EVMBlock{}, } testCases = append(testCases, case0) // case 1: single block, single event - logC1, bridgeC1 := generateBridge(t, 3) + logC1, updateC1 := generateEvent(3) logsC1 := []types.Log{ *logC1, } - blocksC1 := []block{ + blocksC1 := []EVMBlock{ { - blockHeader: blockHeader{ - Num: logC1.BlockNumber, - Hash: logC1.BlockHash, - }, - Events: []BridgeEvent{ - { - Bridge: &bridgeC1, - }, + EVMBlockHeader: EVMBlockHeader{ + Num: logC1.BlockNumber, + Hash: logC1.BlockHash, + ParentHash: common.HexToHash("foo"), }, + Events: []interface{}{updateC1}, }, } case1 := testCase{ @@ -80,27 +76,28 @@ func TestGetEventsByBlockRange(t *testing.T) { testCases = append(testCases, case1) // case 2: single block, multiple events - logC2_1, bridgeC2_1 := generateBridge(t, 5) - logC2_2, bridgeC2_2 := generateBridge(t, 5) - logC2_3, claimC2_1 := generateClaimV1(t, 5) - logC2_4, claimC2_2 := generateClaimV2(t, 5) + logC2_1, updateC2_1 := generateEvent(5) + logC2_2, updateC2_2 := generateEvent(5) + logC2_3, updateC2_3 := generateEvent(5) + logC2_4, updateC2_4 := generateEvent(5) logsC2 := []types.Log{ *logC2_1, *logC2_2, *logC2_3, *logC2_4, } - blocksC2 := []block{ + blocksC2 := []EVMBlock{ { - blockHeader: blockHeader{ - Num: logC2_1.BlockNumber, - Hash: logC2_1.BlockHash, + EVMBlockHeader: EVMBlockHeader{ + Num: logC2_1.BlockNumber, + Hash: logC2_1.BlockHash, + ParentHash: common.HexToHash("foo"), }, - Events: []BridgeEvent{ - {Bridge: &bridgeC2_1}, - {Bridge: &bridgeC2_2}, - {Claim: &claimC2_1}, - {Claim: &claimC2_2}, + Events: []interface{}{ + updateC2_1, + updateC2_2, + updateC2_3, + updateC2_4, }, }, } @@ -114,35 +111,37 @@ func TestGetEventsByBlockRange(t *testing.T) { testCases = append(testCases, case2) // case 3: multiple blocks, some events - logC3_1, bridgeC3_1 := generateBridge(t, 7) - logC3_2, bridgeC3_2 := generateBridge(t, 7) - logC3_3, claimC3_1 := generateClaimV1(t, 8) - logC3_4, claimC3_2 := generateClaimV2(t, 8) + 
logC3_1, updateC3_1 := generateEvent(7) + logC3_2, updateC3_2 := generateEvent(7) + logC3_3, updateC3_3 := generateEvent(8) + logC3_4, updateC3_4 := generateEvent(8) logsC3 := []types.Log{ *logC3_1, *logC3_2, *logC3_3, *logC3_4, } - blocksC3 := []block{ + blocksC3 := []EVMBlock{ { - blockHeader: blockHeader{ - Num: logC3_1.BlockNumber, - Hash: logC3_1.BlockHash, + EVMBlockHeader: EVMBlockHeader{ + Num: logC3_1.BlockNumber, + Hash: logC3_1.BlockHash, + ParentHash: common.HexToHash("foo"), }, - Events: []BridgeEvent{ - {Bridge: &bridgeC3_1}, - {Bridge: &bridgeC3_2}, + Events: []interface{}{ + updateC3_1, + updateC3_2, }, }, { - blockHeader: blockHeader{ - Num: logC3_3.BlockNumber, - Hash: logC3_3.BlockHash, + EVMBlockHeader: EVMBlockHeader{ + Num: logC3_3.BlockNumber, + Hash: logC3_3.BlockHash, + ParentHash: common.HexToHash("foo"), }, - Events: []BridgeEvent{ - {Claim: &claimC3_1}, - {Claim: &claimC3_2}, + Events: []interface{}{ + updateC3_3, + updateC3_4, }, }, } @@ -160,114 +159,40 @@ func TestGetEventsByBlockRange(t *testing.T) { FromBlock: new(big.Int).SetUint64(tc.fromBlock), Addresses: []common.Address{contractAddr}, Topics: [][]common.Hash{ - {bridgeEventSignature}, - {claimEventSignature}, - {claimEventSignaturePreEtrog}, + {eventSignature}, }, ToBlock: new(big.Int).SetUint64(tc.toBlock), } clientMock. On("FilterLogs", mock.Anything, query). Return(tc.inputLogs, nil) + for _, b := range tc.expectedBlocks { + clientMock. + On("HeaderByNumber", mock.Anything, big.NewInt(int64(b.Num))). + Return(&types.Header{ + Number: big.NewInt(int64(b.Num)), + ParentHash: common.HexToHash("foo"), + }, nil) + } actualBlocks := d.getEventsByBlockRange(ctx, tc.fromBlock, tc.toBlock) require.Equal(t, tc.expectedBlocks, actualBlocks, tc.description) } } -func generateBridge(t *testing.T, blockNum uint32) (*types.Log, Bridge) { - b := Bridge{ - LeafType: 1, - OriginNetwork: blockNum, - OriginAddress: contractAddr, - DestinationNetwork: blockNum, - DestinationAddress: contractAddr, - Amount: big.NewInt(int64(blockNum)), - Metadata: common.Hex2Bytes("01"), - DepositCount: blockNum, - } - abi, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - require.NoError(t, err) - event, err := abi.EventByID(bridgeEventSignature) - require.NoError(t, err) - data, err := event.Inputs.Pack( - b.LeafType, - b.OriginNetwork, - b.OriginAddress, - b.DestinationNetwork, - b.DestinationAddress, - b.Amount, - b.Metadata, - b.DepositCount, - ) - require.NoError(t, err) - log := &types.Log{ - Address: contractAddr, - BlockNumber: uint64(blockNum), - BlockHash: common.BytesToHash(cdkcommon.BlockNum2Bytes(uint64(blockNum))), - Topics: []common.Hash{bridgeEventSignature}, - Data: data, - } - return log, b -} - -func generateClaimV1(t *testing.T, blockNum uint32) (*types.Log, Claim) { - abi, err := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi() - require.NoError(t, err) - event, err := abi.EventByID(claimEventSignaturePreEtrog) - require.NoError(t, err) - return generateClaim(t, blockNum, event, true) -} - -func generateClaimV2(t *testing.T, blockNum uint32) (*types.Log, Claim) { - abi, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - require.NoError(t, err) - event, err := abi.EventByID(claimEventSignature) - require.NoError(t, err) - return generateClaim(t, blockNum, event, false) -} - -func generateClaim(t *testing.T, blockNum uint32, event *abi.Event, isV1 bool) (*types.Log, Claim) { - c := Claim{ - GlobalIndex: big.NewInt(int64(blockNum)), - OriginNetwork: blockNum, - OriginAddress: 
contractAddr, - DestinationAddress: contractAddr, - Amount: big.NewInt(int64(blockNum)), - } - var ( - data []byte - err error - signature common.Hash - ) - if isV1 { - data, err = event.Inputs.Pack( - uint32(c.GlobalIndex.Uint64()), - c.OriginNetwork, - c.OriginAddress, - c.DestinationAddress, - c.Amount, - ) - signature = claimEventSignaturePreEtrog - } else { - data, err = event.Inputs.Pack( - c.GlobalIndex, - c.OriginNetwork, - c.OriginAddress, - c.DestinationAddress, - c.Amount, - ) - signature = claimEventSignature - } - require.NoError(t, err) +func generateEvent(blockNum uint32) (*types.Log, testEvent) { + h := common.HexToHash(strconv.Itoa(int(blockNum))) log := &types.Log{ Address: contractAddr, BlockNumber: uint64(blockNum), - BlockHash: common.BytesToHash(cdkcommon.BlockNum2Bytes(uint64(blockNum))), - Topics: []common.Hash{signature}, - Data: data, + BlockHash: h, + Topics: []common.Hash{ + eventSignature, + h, + }, + Data: nil, } - return log, c + return log, testEvent(h) } func TestDownload(t *testing.T) { @@ -275,31 +200,29 @@ func TestDownload(t *testing.T) { NOTE: due to the concurrent nature of this test (the function being tested runs through a goroutine) if the mock doesn't match, the goroutine will get stuck and the test will timeout */ - d := NewDownloaderMock(t) - downloadCh := make(chan block, 1) + d := NewEVMDownloaderMock(t) + downloadCh := make(chan EVMBlock, 1) ctx := context.Background() ctx1, cancel := context.WithCancel(ctx) - expectedBlocks := []block{} - clientMock := NewL2Mock(t) - dwnldr, err := newDownloader(contractAddr, clientMock, syncBlockChunck, etherman.LatestBlock) - require.NoError(t, err) - dwnldr.downloaderInterface = d + expectedBlocks := []EVMBlock{} + dwnldr, _ := NewTestDownloader(t) + dwnldr.evmDownloaderInterface = d d.On("waitForNewBlocks", mock.Anything, uint64(0)). Return(uint64(1)) // iteratiion 0: // last block is 1, download that block (no events and wait) - b1 := block{ - blockHeader: blockHeader{ + b1 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 1, Hash: common.HexToHash("01"), }, } expectedBlocks = append(expectedBlocks, b1) d.On("getEventsByBlockRange", mock.Anything, uint64(0), uint64(1)). - Return([]block{}) + Return([]EVMBlock{}) d.On("getBlockHeader", mock.Anything, uint64(1)). - Return(b1.blockHeader) + Return(b1.EVMBlockHeader) // iteration 1: wait for next block to be created d.On("waitForNewBlocks", mock.Anything, uint64(1)). @@ -307,15 +230,15 @@ func TestDownload(t *testing.T) { Return(uint64(2)).Once() // iteration 2: block 2 has events - b2 := block{ - blockHeader: blockHeader{ + b2 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 2, Hash: common.HexToHash("02"), }, } expectedBlocks = append(expectedBlocks, b2) d.On("getEventsByBlockRange", mock.Anything, uint64(2), uint64(2)). - Return([]block{b2}) + Return([]EVMBlock{b2}) // iteration 3: wait for next block to be created (jump to block 8) d.On("waitForNewBlocks", mock.Anything, uint64(2)). 
@@ -323,35 +246,31 @@ func TestDownload(t *testing.T) { Return(uint64(8)).Once() // iteration 4: blocks 6 and 7 have events - b6 := block{ - blockHeader: blockHeader{ + b6 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 6, Hash: common.HexToHash("06"), }, - Events: []BridgeEvent{ - {Claim: &Claim{OriginNetwork: 6}}, - }, + Events: []interface{}{"06"}, } - b7 := block{ - blockHeader: blockHeader{ + b7 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 7, Hash: common.HexToHash("07"), }, - Events: []BridgeEvent{ - {Bridge: &Bridge{DestinationNetwork: 7}}, - }, + Events: []interface{}{"07"}, } - b8 := block{ - blockHeader: blockHeader{ + b8 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 8, Hash: common.HexToHash("08"), }, } expectedBlocks = append(expectedBlocks, b6, b7, b8) d.On("getEventsByBlockRange", mock.Anything, uint64(3), uint64(8)). - Return([]block{b6, b7}) + Return([]EVMBlock{b6, b7}) d.On("getBlockHeader", mock.Anything, uint64(8)). - Return(b8.blockHeader) + Return(b8.EVMBlockHeader) // iteration 5: wait for next block to be created (jump to block 30) d.On("waitForNewBlocks", mock.Anything, uint64(8)). @@ -359,31 +278,29 @@ func TestDownload(t *testing.T) { Return(uint64(30)).Once() // iteration 6: from block 9 to 19, no events - b19 := block{ - blockHeader: blockHeader{ + b19 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 19, Hash: common.HexToHash("19"), }, } expectedBlocks = append(expectedBlocks, b19) d.On("getEventsByBlockRange", mock.Anything, uint64(9), uint64(19)). - Return([]block{}) + Return([]EVMBlock{}) d.On("getBlockHeader", mock.Anything, uint64(19)). - Return(b19.blockHeader) + Return(b19.EVMBlockHeader) // iteration 7: from block 20 to 30, events on last block - b30 := block{ - blockHeader: blockHeader{ + b30 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 30, Hash: common.HexToHash("30"), }, - Events: []BridgeEvent{ - {Bridge: &Bridge{DestinationNetwork: 30}}, - }, + Events: []interface{}{testEvent(common.HexToHash("30"))}, } expectedBlocks = append(expectedBlocks, b30) d.On("getEventsByBlockRange", mock.Anything, uint64(20), uint64(30)). - Return([]block{b30}) + Return([]EVMBlock{b30}) // iteration 8: wait for next block to be created (jump to block 35) d.On("waitForNewBlocks", mock.Anything, uint64(30)). 
@@ -403,11 +320,8 @@ func TestDownload(t *testing.T) { } func TestWaitForNewBlocks(t *testing.T) { - retryAfterErrorPeriod = time.Millisecond * 100 - clientMock := NewL2Mock(t) ctx := context.Background() - d, err := newDownloader(contractAddr, clientMock, syncBlockChunck, etherman.LatestBlock) - require.NoError(t, err) + d, clientMock := NewTestDownloader(t) // at first attempt currentBlock := uint64(5) @@ -438,18 +352,15 @@ func TestWaitForNewBlocks(t *testing.T) { } func TestGetBlockHeader(t *testing.T) { - retryAfterErrorPeriod = time.Millisecond * 100 - clientMock := NewL2Mock(t) ctx := context.Background() - d, err := newDownloader(contractAddr, clientMock, syncBlockChunck, etherman.LatestBlock) - require.NoError(t, err) + d, clientMock := NewTestDownloader(t) blockNum := uint64(5) blockNumBig := big.NewInt(5) returnedBlock := &types.Header{ Number: blockNumBig, } - expectedBlock := blockHeader{ + expectedBlock := EVMBlockHeader{ Num: 5, Hash: returnedBlock.Hash(), } @@ -465,3 +376,23 @@ func TestGetBlockHeader(t *testing.T) { actualBlock = d.getBlockHeader(ctx, blockNum) assert.Equal(t, expectedBlock, actualBlock) } + +func buildAppender() LogAppenderMap { + appender := make(LogAppenderMap) + appender[eventSignature] = func(b *EVMBlock, l types.Log) error { + b.Events = append(b.Events, testEvent(l.Topics[1])) + return nil + } + return appender +} + +func NewTestDownloader(t *testing.T) (*EVMDownloader, *L2Mock) { + rh := &RetryHandler{ + MaxRetryAttemptsAfterError: 5, + RetryAfterErrorPeriod: time.Millisecond * 100, + } + clientMock := NewL2Mock(t) + d, err := NewEVMDownloader(clientMock, syncBlockChunck, etherman.LatestBlock, time.Millisecond, buildAppender(), []common.Address{contractAddr}, rh) + require.NoError(t, err) + return d, clientMock +} diff --git a/sync/evmdriver.go b/sync/evmdriver.go new file mode 100644 index 00000000..0e20731f --- /dev/null +++ b/sync/evmdriver.go @@ -0,0 +1,151 @@ +package sync + +import ( + "context" + + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/reorgdetector" + "github.com/ethereum/go-ethereum/common" +) + +type evmDownloaderFull interface { + evmDownloaderInterface + download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) +} + +type EVMDriver struct { + reorgDetector ReorgDetector + reorgSub *reorgdetector.Subscription + processor processorInterface + downloader evmDownloaderFull + reorgDetectorID string + downloadBufferSize int + rh *RetryHandler +} + +type processorInterface interface { + GetLastProcessedBlock(ctx context.Context) (uint64, error) + ProcessBlock(block Block) error + Reorg(firstReorgedBlock uint64) error +} + +type ReorgDetector interface { + Subscribe(id string) (*reorgdetector.Subscription, error) + AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error +} + +func NewEVMDriver( + reorgDetector ReorgDetector, + processor processorInterface, + downloader evmDownloaderFull, + reorgDetectorID string, + downloadBufferSize int, + rh *RetryHandler, +) (*EVMDriver, error) { + reorgSub, err := reorgDetector.Subscribe(reorgDetectorID) + if err != nil { + return nil, err + } + return &EVMDriver{ + reorgDetector: reorgDetector, + reorgSub: reorgSub, + processor: processor, + downloader: downloader, + reorgDetectorID: reorgDetectorID, + downloadBufferSize: downloadBufferSize, + rh: rh, + }, nil +} + +func (d *EVMDriver) Sync(ctx context.Context) { +reset: + var ( + lastProcessedBlock uint64 + attempts int + err error + ) + for { + lastProcessedBlock, err = 
d.processor.GetLastProcessedBlock(ctx)
+		if err != nil {
+			attempts++
+			log.Error("error getting last processed block: ", err)
+			d.rh.Handle("Sync", attempts)
+			continue
+		}
+		break
+	}
+	cancellableCtx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// start downloading
+	downloadCh := make(chan EVMBlock, d.downloadBufferSize)
+	go d.downloader.download(cancellableCtx, lastProcessedBlock, downloadCh)
+
+	for {
+		select {
+		case b := <-downloadCh:
+			log.Debug("handleNewBlock")
+			d.handleNewBlock(ctx, b)
+		case firstReorgedBlock := <-d.reorgSub.FirstReorgedBlock:
+			log.Debug("handleReorg")
+			d.handleReorg(cancel, downloadCh, firstReorgedBlock)
+			goto reset
+		}
+	}
+}
+
+func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) {
+	attempts := 0
+	for {
+		err := d.reorgDetector.AddBlockToTrack(ctx, d.reorgDetectorID, b.Num, b.Hash)
+		if err != nil {
+			attempts++
+			log.Errorf("error adding block %d to tracker: %v", b.Num, err)
+			d.rh.Handle("handleNewBlock", attempts)
+			continue
+		}
+		break
+	}
+	attempts = 0
+	for {
+		blockToProcess := Block{
+			Num:    b.Num,
+			Events: b.Events,
+		}
+		err := d.processor.ProcessBlock(blockToProcess)
+		if err != nil {
+			attempts++
+			log.Errorf("error processing events for block %d, err: %v", b.Num, err)
+			d.rh.Handle("handleNewBlock", attempts)
+			continue
+		}
+		break
+	}
+}
+
+func (d *EVMDriver) handleReorg(
+	cancel context.CancelFunc, downloadCh chan EVMBlock, firstReorgedBlock uint64,
+) {
+	// stop downloader
+	cancel()
+	_, ok := <-downloadCh
+	for ok {
+		_, ok = <-downloadCh
+	}
+	// handle reorg
+	attempts := 0
+	for {
+		err := d.processor.Reorg(firstReorgedBlock)
+		if err != nil {
+			attempts++
+			log.Errorf(
+				"error processing reorg, last valid block %d, err: %v",
+				firstReorgedBlock, err,
+			)
+			d.rh.Handle("handleReorg", attempts)
+			continue
+		}
+		break
+	}
+	d.reorgSub.ReorgProcessed <- true
+}
diff --git a/localbridgesync/driver_test.go b/sync/evmdriver_test.go
similarity index 67%
rename from localbridgesync/driver_test.go
rename to sync/evmdriver_test.go
index 543542f7..853dda81 100644
--- a/localbridgesync/driver_test.go
+++ b/sync/evmdriver_test.go
@@ -1,4 +1,4 @@
-package localbridgesync
+package sync
 
 import (
 	"context"
@@ -14,28 +14,35 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+var (
+	reorgDetectorID = "foo"
+)
+
 func TestSync(t *testing.T) {
-	retryAfterErrorPeriod = time.Millisecond * 100
+	rh := &RetryHandler{
+		MaxRetryAttemptsAfterError: 5,
+		RetryAfterErrorPeriod:      time.Millisecond * 100,
+	}
 	rdm := NewReorgDetectorMock(t)
 	pm := NewProcessorMock(t)
-	dm := NewDownloaderMock(t)
+	dm := NewEVMDownloaderMock(t)
 	firstReorgedBlock := make(chan uint64)
 	reorgProcessed := make(chan bool)
 	rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{
 		FirstReorgedBlock: firstReorgedBlock,
 		ReorgProcessed:    reorgProcessed,
-	})
-	driver, err := newDriver(rdm, pm, dm)
+	}, nil)
+	driver, err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh)
 	require.NoError(t, err)
 	ctx := context.Background()
-	expectedBlock1 := block{
-		blockHeader: blockHeader{
+	expectedBlock1 := EVMBlock{
+		EVMBlockHeader: EVMBlockHeader{
 			Num:  3,
 			Hash: common.HexToHash("03"),
 		},
 	}
-	expectedBlock2 := block{
-		blockHeader: blockHeader{
+	expectedBlock2 := EVMBlock{
+		EVMBlockHeader: EVMBlockHeader{
 			Num:  9,
 			Hash: common.HexToHash("09"),
 		},
@@ -47,7 +54,7 @@ func TestSync(t *testing.T) {
 	reorg1Completed := reorgSemaphore{}
 	dm.On("download", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
 		ctx :=
args.Get(0).(context.Context) - downloadedCh := args.Get(2).(chan block) + downloadedCh := args.Get(2).(chan EVMBlock) log.Info("entering mock loop") for { select { @@ -70,22 +77,22 @@ func TestSync(t *testing.T) { }) // Mocking this actions, the driver should "store" all the blocks from the downloader - pm.On("getLastProcessedBlock", ctx). + pm.On("GetLastProcessedBlock", ctx). Return(uint64(3), nil) rdm.On("AddBlockToTrack", ctx, reorgDetectorID, expectedBlock1.Num, expectedBlock1.Hash). Return(nil) - pm.On("storeBridgeEvents", expectedBlock1.Num, expectedBlock1.Events). + pm.On("ProcessBlock", Block{Num: expectedBlock1.Num, Events: expectedBlock1.Events}). Return(nil) rdm.On("AddBlockToTrack", ctx, reorgDetectorID, expectedBlock2.Num, expectedBlock2.Hash). Return(nil) - pm.On("storeBridgeEvents", expectedBlock2.Num, expectedBlock2.Events). + pm.On("ProcessBlock", Block{Num: expectedBlock2.Num, Events: expectedBlock2.Events}). Return(nil) go driver.Sync(ctx) time.Sleep(time.Millisecond * 200) // time to download expectedBlock1 // Trigger reorg 1 reorgedBlock1 := uint64(5) - pm.On("reorg", reorgedBlock1).Return(nil) + pm.On("Reorg", reorgedBlock1).Return(nil) firstReorgedBlock <- reorgedBlock1 ok := <-reorgProcessed require.True(t, ok) @@ -96,25 +103,28 @@ func TestSync(t *testing.T) { // Trigger reorg 2: syncer restarts the porcess reorgedBlock2 := uint64(7) - pm.On("reorg", reorgedBlock2).Return(nil) + pm.On("Reorg", reorgedBlock2).Return(nil) firstReorgedBlock <- reorgedBlock2 ok = <-reorgProcessed require.True(t, ok) } func TestHandleNewBlock(t *testing.T) { - retryAfterErrorPeriod = time.Millisecond * 100 + rh := &RetryHandler{ + MaxRetryAttemptsAfterError: 5, + RetryAfterErrorPeriod: time.Millisecond * 100, + } rdm := NewReorgDetectorMock(t) pm := NewProcessorMock(t) - dm := NewDownloaderMock(t) - rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{}) - driver, err := newDriver(rdm, pm, dm) + dm := NewEVMDownloaderMock(t) + rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{}, nil) + driver, err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh) require.NoError(t, err) ctx := context.Background() // happy path - b1 := block{ - blockHeader: blockHeader{ + b1 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 1, Hash: common.HexToHash("f00"), }, @@ -122,13 +132,13 @@ func TestHandleNewBlock(t *testing.T) { rdm. On("AddBlockToTrack", ctx, reorgDetectorID, b1.Num, b1.Hash). Return(nil) - pm.On("storeBridgeEvents", b1.Num, b1.Events). + pm.On("ProcessBlock", Block{Num: b1.Num, Events: b1.Events}). Return(nil) driver.handleNewBlock(ctx, b1) // reorg deteector fails once - b2 := block{ - blockHeader: blockHeader{ + b2 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 2, Hash: common.HexToHash("f00"), }, @@ -139,13 +149,13 @@ func TestHandleNewBlock(t *testing.T) { rdm. On("AddBlockToTrack", ctx, reorgDetectorID, b2.Num, b2.Hash). Return(nil).Once() - pm.On("storeBridgeEvents", b2.Num, b2.Events). + pm.On("ProcessBlock", Block{Num: b2.Num, Events: b2.Events}). Return(nil) driver.handleNewBlock(ctx, b2) // processor fails once - b3 := block{ - blockHeader: blockHeader{ + b3 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ Num: 3, Hash: common.HexToHash("f00"), }, @@ -153,32 +163,35 @@ func TestHandleNewBlock(t *testing.T) { rdm. On("AddBlockToTrack", ctx, reorgDetectorID, b3.Num, b3.Hash). Return(nil) - pm.On("storeBridgeEvents", b3.Num, b3.Events). + pm.On("ProcessBlock", Block{Num: b3.Num, Events: b3.Events}). 
 		Return(errors.New("foo")).Once()
-	pm.On("storeBridgeEvents", b3.Num, b3.Events).
+	pm.On("ProcessBlock", Block{Num: b3.Num, Events: b3.Events}).
 		Return(nil).Once()
 	driver.handleNewBlock(ctx, b3)
 }
 
 func TestHandleReorg(t *testing.T) {
-	retryAfterErrorPeriod = time.Millisecond * 100
+	rh := &RetryHandler{
+		MaxRetryAttemptsAfterError: 5,
+		RetryAfterErrorPeriod:      time.Millisecond * 100,
+	}
 	rdm := NewReorgDetectorMock(t)
 	pm := NewProcessorMock(t)
-	dm := NewDownloaderMock(t)
+	dm := NewEVMDownloaderMock(t)
 	reorgProcessed := make(chan bool)
 	rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{
 		ReorgProcessed: reorgProcessed,
-	})
-	driver, err := newDriver(rdm, pm, dm)
+	}, nil)
+	driver, err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh)
 	require.NoError(t, err)
 	ctx := context.Background()
 
 	// happy path
 	_, cancel := context.WithCancel(ctx)
-	downloadCh := make(chan block)
+	downloadCh := make(chan EVMBlock)
 	firstReorgedBlock := uint64(5)
-	pm.On("reorg", firstReorgedBlock).Return(nil)
+	pm.On("Reorg", firstReorgedBlock).Return(nil)
 	go driver.handleReorg(cancel, downloadCh, firstReorgedBlock)
 	close(downloadCh)
 	done := <-reorgProcessed
@@ -186,24 +199,24 @@ func TestHandleReorg(t *testing.T) {
 
 	// download ch sends some garbage
 	_, cancel = context.WithCancel(ctx)
-	downloadCh = make(chan block)
+	downloadCh = make(chan EVMBlock)
 	firstReorgedBlock = uint64(6)
-	pm.On("reorg", firstReorgedBlock).Return(nil)
+	pm.On("Reorg", firstReorgedBlock).Return(nil)
 	go driver.handleReorg(cancel, downloadCh, firstReorgedBlock)
-	downloadCh <- block{}
-	downloadCh <- block{}
-	downloadCh <- block{}
+	downloadCh <- EVMBlock{}
+	downloadCh <- EVMBlock{}
+	downloadCh <- EVMBlock{}
 	close(downloadCh)
 	done = <-reorgProcessed
 	require.True(t, done)
 
 	// processor fails 2 times
 	_, cancel = context.WithCancel(ctx)
-	downloadCh = make(chan block)
+	downloadCh = make(chan EVMBlock)
 	firstReorgedBlock = uint64(7)
-	pm.On("reorg", firstReorgedBlock).Return(errors.New("foo")).Once()
-	pm.On("reorg", firstReorgedBlock).Return(errors.New("foo")).Once()
-	pm.On("reorg", firstReorgedBlock).Return(nil).Once()
+	pm.On("Reorg", firstReorgedBlock).Return(errors.New("foo")).Once()
+	pm.On("Reorg", firstReorgedBlock).Return(errors.New("foo")).Once()
+	pm.On("Reorg", firstReorgedBlock).Return(nil).Once()
 	go driver.handleReorg(cancel, downloadCh, firstReorgedBlock)
 	close(downloadCh)
 	done = <-reorgProcessed
diff --git a/sync/evmtypes.go b/sync/evmtypes.go
new file mode 100644
index 00000000..d242dbc4
--- /dev/null
+++ b/sync/evmtypes.go
@@ -0,0 +1,15 @@
+package sync
+
+import "github.com/ethereum/go-ethereum/common"
+
+type EVMBlock struct {
+	EVMBlockHeader
+	Events []interface{}
+}
+
+type EVMBlockHeader struct {
+	Num        uint64
+	Hash       common.Hash
+	ParentHash common.Hash
+	Timestamp  uint64
+}
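The next file is the mockery-generated mock for the unexported `evmDownloaderFull` interface (regenerated by the `generate-mocks-sync` target at the end of this patch). The interface itself is not shown in the diff, but its shape can be read back from the mocked method set; a sketch, with grouping and parameter names assumed:

// Sketch (not part of the patch): the interface EVMDownloaderMock is
// generated from, reconstructed from the mocked methods below.
package sync

import (
	"context"

	"github.com/ethereum/go-ethereum/core/types"
)

type evmDownloaderFull interface {
	// driver-facing entry point: streams downloaded EVMBlocks into downloadedCh
	download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock)
	// helpers the EVM downloader uses to build those blocks
	getBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader
	getEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock
	getLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log
	waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) uint64
}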
diff --git a/localbridgesync/mock_downloader_test.go b/sync/mock_downloader_test.go
similarity index 54%
rename from localbridgesync/mock_downloader_test.go
rename to sync/mock_downloader_test.go
index f2df97d0..738fc873 100644
--- a/localbridgesync/mock_downloader_test.go
+++ b/sync/mock_downloader_test.go
@@ -1,6 +1,6 @@
 // Code generated by mockery v2.22.1. DO NOT EDIT.
 
-package localbridgesync
+package sync
 
 import (
 	context "context"
@@ -9,45 +9,40 @@ import (
 	mock "github.com/stretchr/testify/mock"
 )
 
-// DownloaderMock is an autogenerated mock type for the downloaderFull type
-type DownloaderMock struct {
+// EVMDownloaderMock is an autogenerated mock type for the evmDownloaderFull type
+type EVMDownloaderMock struct {
 	mock.Mock
 }
 
-// appendLog provides a mock function with given fields: b, l
-func (_m *DownloaderMock) appendLog(b *block, l types.Log) {
-	_m.Called(b, l)
-}
-
 // download provides a mock function with given fields: ctx, fromBlock, downloadedCh
-func (_m *DownloaderMock) download(ctx context.Context, fromBlock uint64, downloadedCh chan block) {
+func (_m *EVMDownloaderMock) download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) {
 	_m.Called(ctx, fromBlock, downloadedCh)
 }
 
 // getBlockHeader provides a mock function with given fields: ctx, blockNum
-func (_m *DownloaderMock) getBlockHeader(ctx context.Context, blockNum uint64) blockHeader {
+func (_m *EVMDownloaderMock) getBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader {
 	ret := _m.Called(ctx, blockNum)
 
-	var r0 blockHeader
-	if rf, ok := ret.Get(0).(func(context.Context, uint64) blockHeader); ok {
+	var r0 EVMBlockHeader
+	if rf, ok := ret.Get(0).(func(context.Context, uint64) EVMBlockHeader); ok {
 		r0 = rf(ctx, blockNum)
 	} else {
-		r0 = ret.Get(0).(blockHeader)
+		r0 = ret.Get(0).(EVMBlockHeader)
 	}
 
 	return r0
 }
 
 // getEventsByBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock
-func (_m *DownloaderMock) getEventsByBlockRange(ctx context.Context, fromBlock uint64, toBlock uint64) []block {
+func (_m *EVMDownloaderMock) getEventsByBlockRange(ctx context.Context, fromBlock uint64, toBlock uint64) []EVMBlock {
 	ret := _m.Called(ctx, fromBlock, toBlock)
 
-	var r0 []block
-	if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []block); ok {
+	var r0 []EVMBlock
+	if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []EVMBlock); ok {
 		r0 = rf(ctx, fromBlock, toBlock)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]block)
+			r0 = ret.Get(0).([]EVMBlock)
 		}
 	}
 
@@ -55,7 +50,7 @@ func (_m *DownloaderMock) getEventsByBlockRange(ctx context.Context, fromBlock u
 }
 
 // getLogs provides a mock function with given fields: ctx, fromBlock, toBlock
-func (_m *DownloaderMock) getLogs(ctx context.Context, fromBlock uint64, toBlock uint64) []types.Log {
+func (_m *EVMDownloaderMock) getLogs(ctx context.Context, fromBlock uint64, toBlock uint64) []types.Log {
 	ret := _m.Called(ctx, fromBlock, toBlock)
 
 	var r0 []types.Log
@@ -71,7 +66,7 @@ func (_m *DownloaderMock) getLogs(ctx context.Context, fromBlock uint64, toBlock
 }
 
 // waitForNewBlocks provides a mock function with given fields: ctx, lastBlockSeen
-func (_m *DownloaderMock) waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) uint64 {
+func (_m *EVMDownloaderMock) waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) uint64 {
 	ret := _m.Called(ctx, lastBlockSeen)
 
 	var r0 uint64
@@ -84,14 +79,14 @@ func (_m *DownloaderMock) waitForNewBlocks(ctx context.Context, lastBlockSeen ui
 	return r0
 }
 
-type mockConstructorTestingTNewDownloaderMock interface {
+type mockConstructorTestingTNewEVMDownloaderMock interface {
 	mock.TestingT
 	Cleanup(func())
 }
 
-// NewDownloaderMock creates a new instance of DownloaderMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewDownloaderMock(t mockConstructorTestingTNewDownloaderMock) *DownloaderMock {
-	mock := &DownloaderMock{}
+// NewEVMDownloaderMock creates a new instance of EVMDownloaderMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewEVMDownloaderMock(t mockConstructorTestingTNewEVMDownloaderMock) *EVMDownloaderMock {
+	mock := &EVMDownloaderMock{}
 	mock.Mock.Test(t)
 
 	t.Cleanup(func() { mock.AssertExpectations(t) })
diff --git a/localbridgesync/mock_l2_test.go b/sync/mock_l2_test.go
similarity index 99%
rename from localbridgesync/mock_l2_test.go
rename to sync/mock_l2_test.go
index 78baa614..0d1e03da 100644
--- a/localbridgesync/mock_l2_test.go
+++ b/sync/mock_l2_test.go
@@ -1,6 +1,6 @@
 // Code generated by mockery v2.22.1. DO NOT EDIT.
 
-package localbridgesync
+package sync
 
 import (
 	context "context"
diff --git a/localbridgesync/mock_processor_test.go b/sync/mock_processor_test.go
similarity index 70%
rename from localbridgesync/mock_processor_test.go
rename to sync/mock_processor_test.go
index 4a629f5c..d2c3e299 100644
--- a/localbridgesync/mock_processor_test.go
+++ b/sync/mock_processor_test.go
@@ -1,6 +1,6 @@
 // Code generated by mockery v2.22.1. DO NOT EDIT.
 
-package localbridgesync
+package sync
 
 import (
 	context "context"
@@ -13,8 +13,8 @@ type ProcessorMock struct {
 	mock.Mock
 }
 
-// getLastProcessedBlock provides a mock function with given fields: ctx
-func (_m *ProcessorMock) getLastProcessedBlock(ctx context.Context) (uint64, error) {
+// GetLastProcessedBlock provides a mock function with given fields: ctx
+func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
 	ret := _m.Called(ctx)
 
 	var r0 uint64
@@ -37,13 +37,13 @@ func (_m *ProcessorMock) getLastProcessedBlock(ctx context.Context) (uint64, err
 	return r0, r1
 }
 
-// reorg provides a mock function with given fields: firstReorgedBlock
-func (_m *ProcessorMock) reorg(firstReorgedBlock uint64) error {
-	ret := _m.Called(firstReorgedBlock)
+// ProcessBlock provides a mock function with given fields: block
+func (_m *ProcessorMock) ProcessBlock(block Block) error {
+	ret := _m.Called(block)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(uint64) error); ok {
-		r0 = rf(firstReorgedBlock)
+	if rf, ok := ret.Get(0).(func(Block) error); ok {
+		r0 = rf(block)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -51,13 +51,13 @@ func (_m *ProcessorMock) reorg(firstReorgedBlock uint64) error {
 	return r0
 }
 
-// storeBridgeEvents provides a mock function with given fields: blockNum, events
-func (_m *ProcessorMock) storeBridgeEvents(blockNum uint64, events []BridgeEvent) error {
-	ret := _m.Called(blockNum, events)
+// Reorg provides a mock function with given fields: firstReorgedBlock
+func (_m *ProcessorMock) Reorg(firstReorgedBlock uint64) error {
+	ret := _m.Called(firstReorgedBlock)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(uint64, []BridgeEvent) error); ok {
-		r0 = rf(blockNum, events)
+	if rf, ok := ret.Get(0).(func(uint64) error); ok {
+		r0 = rf(firstReorgedBlock)
 	} else {
 		r0 = ret.Error(0)
 	}
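The last mock covers the `ReorgDetector` dependency. Its contract is easy to read back from the mocked methods: `Subscribe` now returns an error alongside the subscription (the change the next hunk encodes), and `AddBlockToTrack` takes the block number and hash the driver passes in `handleNewBlock`. A sketch, with parameter names assumed from the call sites:

// Sketch (not part of the patch): the ReorgDetector contract consumed by
// the sync package, reconstructed from the mock below and the driver.
package sync

import (
	"context"

	"github.com/0xPolygon/cdk/reorgdetector"
	"github.com/ethereum/go-ethereum/common"
)

type ReorgDetector interface {
	// Subscribe returns the channels the driver selects on
	// (FirstReorgedBlock and ReorgProcessed, per the tests above).
	Subscribe(id string) (*reorgdetector.Subscription, error)
	// AddBlockToTrack registers each processed block for reorg monitoring.
	AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error
}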
diff --git a/localbridgesync/mock_reorgdetector_test.go b/sync/mock_reorgdetector_test.go
similarity index 82%
rename from localbridgesync/mock_reorgdetector_test.go
rename to sync/mock_reorgdetector_test.go
index d11434a1..056da2a1 100644
--- a/localbridgesync/mock_reorgdetector_test.go
+++ b/sync/mock_reorgdetector_test.go
@@ -1,6 +1,6 @@
 // Code generated by mockery v2.22.1. DO NOT EDIT.
 
-package localbridgesync
+package sync
 
 import (
 	context "context"
@@ -32,10 +32,14 @@ func (_m *ReorgDetectorMock) AddBlockToTrack(ctx context.Context, id string, blo
 }
 
 // Subscribe provides a mock function with given fields: id
-func (_m *ReorgDetectorMock) Subscribe(id string) *reorgdetector.Subscription {
+func (_m *ReorgDetectorMock) Subscribe(id string) (*reorgdetector.Subscription, error) {
 	ret := _m.Called(id)
 
 	var r0 *reorgdetector.Subscription
+	var r1 error
+	if rf, ok := ret.Get(0).(func(string) (*reorgdetector.Subscription, error)); ok {
+		return rf(id)
+	}
 	if rf, ok := ret.Get(0).(func(string) *reorgdetector.Subscription); ok {
 		r0 = rf(id)
 	} else {
@@ -44,7 +48,13 @@ func (_m *ReorgDetectorMock) Subscribe(id string) *reorgdetector.Subscription {
 		}
 	}
 
-	return r0
+	if rf, ok := ret.Get(1).(func(string) error); ok {
+		r1 = rf(id)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
 }
 
 type mockConstructorTestingTNewReorgDetectorMock interface {
diff --git a/test/Makefile b/test/Makefile
index 3511b4f7..0e671023 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -1,14 +1,9 @@
 .PHONY: generate-mocks
 generate-mocks:
-	$(MAKE) generate-mocks-localbridgesync
 	$(MAKE) generate-mocks-reorgdetector
-
-.PHONY: generate-mocks-localbridgesync
-generate-mocks-localbridgesync: ## Generates mocks for localbridgesync, using mockery tool
-	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../localbridgesync --output=../localbridgesync --outpkg=localbridgesync --inpackage --structname=L2Mock --filename=mock_l2_test.go
-	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=downloaderFull --dir=../localbridgesync --output=../localbridgesync --outpkg=localbridgesync --inpackage --structname=DownloaderMock --filename=mock_downloader_test.go
-	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../localbridgesync --output=../localbridgesync --outpkg=localbridgesync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go
-	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../localbridgesync --output=../localbridgesync --outpkg=localbridgesync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go
+	$(MAKE) generate-mocks-l1infotreesync
+	$(MAKE) generate-mocks-aggoracle
+	$(MAKE) generate-mocks-sync
 
 .PHONY: generate-mocks-reorgdetector
 generate-mocks-reorgdetector: ## Generates mocks for reorgdetector, using mockery tool
@@ -37,3 +32,17 @@ help: ## Prints this help
 	@grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) \
 		| sort \
 		| awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+.PHONY: generate-mocks-l1infotreesync
+generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync, using mockery tool
+	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go
+
+.PHONY: generate-mocks-aggoracle
+generate-mocks-aggoracle: ## Generates mocks for aggoracle, using mockery tool
+	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../aggoracle/chaingersender --output=../aggoracle --outpkg=aggoracle --structname=EthTxManagerMock --filename=mock_ethtxmanager_test.go
+
+.PHONY: generate-mocks-sync
+generate-mocks-sync: ## Generates mocks for sync, using mockery tool
+	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=L2Mock --filename=mock_l2_test.go
+	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go
+	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go
+	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go