diff --git a/cmd/start.go b/cmd/start.go
index a67159d..6f4b442 100644
--- a/cmd/start.go
+++ b/cmd/start.go
@@ -20,6 +20,7 @@ func startCommand() *cobra.Command {
 		nodeURL, privateKey, timelockAddress, callProxyAddress string
 		fromBlock, pollPeriod, eventListenerPollPeriod         int64
+		dryRun                                                 bool
 	)

 	// Initialize timelock-worker configuration.
@@ -41,6 +42,7 @@
 	startCmd.Flags().Int64Var(&fromBlock, "from-block", timelockConf.FromBlock, "Start watching from this block")
 	startCmd.Flags().Int64Var(&pollPeriod, "poll-period", timelockConf.PollPeriod, "Poll period in seconds")
 	startCmd.Flags().Int64Var(&eventListenerPollPeriod, "event-listener-poll-period", timelockConf.EventListenerPollPeriod, "Event Listener poll period in seconds")
+	startCmd.Flags().BoolVar(&dryRun, "dry-run", timelockConf.DryRun, "Enable \"dry run\" mode -- monitor events but don't trigger any calls")

 	return &startCmd
 }
@@ -87,8 +89,13 @@ func startTimelock(cmd *cobra.Command) {
 		logs.Fatal().Msgf("value of poll-period not set: %s", err.Error())
 	}

+	dryRun, err := cmd.Flags().GetBool("dry-run")
+	if err != nil {
+		logs.Fatal().Msgf("value of dry-run not set: %s", err.Error())
+	}
+
 	tWorker, err := timelock.NewTimelockWorker(nodeURL, timelockAddress, callProxyAddress, privateKey,
-		big.NewInt(fromBlock), pollPeriod, eventListenerPollPeriod, logs)
+		big.NewInt(fromBlock), pollPeriod, eventListenerPollPeriod, dryRun, logs)
 	if err != nil {
 		logs.Fatal().Msgf("error creating the timelock-worker: %s", err.Error())
 	}
diff --git a/go.mod b/go.mod
index 5472f2a..4b3edee 100644
--- a/go.mod
+++ b/go.mod
@@ -5,8 +5,10 @@ go 1.22
 require (
 	github.com/docker/go-connections v0.5.0
 	github.com/ethereum/go-ethereum v1.13.15
+	github.com/google/go-cmp v0.6.0
 	github.com/prometheus/client_golang v1.19.1
 	github.com/rs/zerolog v1.33.0
+	github.com/samber/lo v1.47.0
 	github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240917103524-56f1a8d2cd4b
 	github.com/smartcontractkit/chain-selectors v1.0.17
 	github.com/spf13/cobra v1.8.1
@@ -95,9 +97,9 @@ require (
 	go.uber.org/multierr v1.9.0 // indirect
 	golang.org/x/crypto v0.22.0 // indirect
 	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
-	golang.org/x/sync v0.6.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
 	golang.org/x/sys v0.21.0 // indirect
-	golang.org/x/text v0.15.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 382824a..7fb4ceb 100644
--- a/go.sum
+++ b/go.sum
@@ -269,6 +269,8 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
 github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
+github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
 github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
@@ -389,8 +391,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -430,8 +432,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/pkg/cli/config.go b/pkg/cli/config.go
index e10e198..ca3a8c1 100644
--- a/pkg/cli/config.go
+++ b/pkg/cli/config.go
@@ -3,7 +3,9 @@ package cli
 import (
 	"fmt"
 	"os"
+	"slices"
 	"strconv"
+	"strings"

 	"github.com/spf13/viper"
 )
@@ -17,6 +19,7 @@ type Config struct {
 	FromBlock               int64 `mapstructure:"FROM_BLOCK"`
 	PollPeriod              int64 `mapstructure:"POLL_PERIOD"`
 	EventListenerPollPeriod int64 `mapstructure:"EVENT_LISTENER_POLL_PERIOD"`
+	DryRun                  bool  `mapstructure:"DRY_RUN"`
 }

 // NewTimelockCLI return a new Timelock instance configured.
@@ -80,5 +83,10 @@ func NewTimelockCLI() (*Config, error) {
 		c.EventListenerPollPeriod = int64(pp)
 	}

+	if os.Getenv("DRY_RUN") != "" {
+		trueValues := []string{"true", "yes", "on", "enabled", "1"}
+		c.DryRun = slices.Contains(trueValues, strings.ToLower(os.Getenv("DRY_RUN")))
+	}
+
 	return &c, nil
 }
diff --git a/pkg/cli/config_test.go b/pkg/cli/config_test.go
index ec13ff0..1cf3864 100644
--- a/pkg/cli/config_test.go
+++ b/pkg/cli/config_test.go
@@ -21,7 +21,7 @@ func Test_NewTimelockCLI(t *testing.T) {
 			name: "load from file",
 			setup: func(t *testing.T) {
 				unsetenvs(t, "NODE_URL", "TIMELOCK_ADDRESS", "CALL_PROXY_ADDRESS", "PRIVATE_KEY", "FROM_BLOCK",
-					"POLL_PERIOD", "EVENT_LISTENER_POLL_PERIOD")
+					"POLL_PERIOD", "EVENT_LISTENER_POLL_PERIOD", "DRY_RUN")

 				err := os.WriteFile(configFileName, []byte(string(
 					"NODE_URL=wss://goerli/test\n"+
@@ -30,7 +30,8 @@
 					"PRIVATE_KEY=9876543210\n"+
 					"FROM_BLOCK=1\n"+
 					"POLL_PERIOD=2\n"+
-					"EVENT_LISTENER_POLL_PERIOD=3\n",
+					"EVENT_LISTENER_POLL_PERIOD=3\n"+
+					"DRY_RUN=true\n",
 				)), os.FileMode(0644))
 				require.NoError(t, err)
@@ -44,6 +45,7 @@
 				FromBlock:               1,
 				PollPeriod:              2,
 				EventListenerPollPeriod: 3,
+				DryRun:                  true,
 			},
 		},
 		{
@@ -56,7 +58,8 @@
 					"PRIVATE_KEY=9876543210\n"+
 					"FROM_BLOCK=1\n"+
 					"POLL_PERIOD=2\n"+
-					"EVENT_LISTENER_POLL_PERIOD=3\n",
+					"EVENT_LISTENER_POLL_PERIOD=3\n"+
+					"DRY_RUN=true\n",
 				)), os.FileMode(0644))
 				require.NoError(t, err)
@@ -67,6 +70,7 @@
 				t.Setenv("FROM_BLOCK", "4")
 				t.Setenv("POLL_PERIOD", "5")
 				t.Setenv("EVENT_LISTENER_POLL_PERIOD", "6")
+				t.Setenv("DRY_RUN", "false")

 				t.Cleanup(func() { os.Remove(configFileName) })
 			},
@@ -95,6 +99,7 @@
 				t.Setenv("FROM_BLOCK", "4")
 				t.Setenv("POLL_PERIOD", "5")
 				t.Setenv("EVENT_LISTENER_POLL_PERIOD", "6")
+				t.Setenv("DRY_RUN", "yes")

 				t.Cleanup(func() { os.Remove(configFileName) })
 			},
@@ -106,6 +111,7 @@
 				FromBlock:               4,
 				PollPeriod:              5,
 				EventListenerPollPeriod: 6,
+				DryRun:                  true,
 			},
 		},
 		{
diff --git a/pkg/timelock/const_test.go b/pkg/timelock/const_test.go
index e2e8c01..7eabd85 100644
--- a/pkg/timelock/const_test.go
+++ b/pkg/timelock/const_test.go
@@ -15,5 +15,6 @@ var (
 	testFromBlock               = big.NewInt(0)
 	testPollPeriod              = 5
 	testEventListenerPollPeriod = 0
+	testDryRun                  = false
 	testLogger                  = logger.Logger("info", "human")
 )
diff --git a/pkg/timelock/operations_test.go b/pkg/timelock/operations_test.go
index 4bdbee9..bb797bd 100644
--- a/pkg/timelock/operations_test.go
+++ b/pkg/timelock/operations_test.go
@@ -12,7 +12,7 @@ import (
 func Test_isOperation(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)

 	var ctx context.Context
@@ -56,7 +56,7 @@
 func Test_isReady(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)

 	var ctx context.Context
@@ -100,7 +100,7 @@
 func Test_isDone(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)

 	var ctx context.Context
@@ -144,7 +144,7 @@
 func Test_isPending(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)

 	var ctx context.Context
diff --git a/pkg/timelock/scheduler.go b/pkg/timelock/scheduler.go
index 1f12d3c..bf5409c 100644
--- a/pkg/timelock/scheduler.go
+++ b/pkg/timelock/scheduler.go
@@ -18,26 +18,39 @@ import (
 type operationKey [32]byte

+type Scheduler interface {
+	runScheduler(ctx context.Context) <-chan struct{}
+	addToScheduler(op *contract.TimelockCallScheduled)
+	delFromScheduler(op operationKey)
+	dumpOperationStore(now func() time.Time)
+}
+
+type executeFn func(context.Context, []*contract.TimelockCallScheduled)
+
 // Scheduler represents a scheduler with an in memory store.
 // Whenever accesing the map the mutex should be Locked, to prevent
 // any race condition.
 type scheduler struct {
-	mu     sync.Mutex
-	ticker *time.Ticker
-	add    chan *contract.TimelockCallScheduled
-	del    chan operationKey
-	store  map[operationKey][]*contract.TimelockCallScheduled
-	busy   bool
+	mu        sync.Mutex
+	ticker    *time.Ticker
+	add       chan *contract.TimelockCallScheduled
+	del       chan operationKey
+	store     map[operationKey][]*contract.TimelockCallScheduled
+	busy      bool
+	logger    *zerolog.Logger
+	executeFn executeFn
 }

 // newScheduler returns a new initialized scheduler.
-func newScheduler(tick time.Duration) *scheduler {
+func newScheduler(tick time.Duration, logger *zerolog.Logger, executeFn executeFn) *scheduler {
 	s := &scheduler{
-		ticker: time.NewTicker(tick),
-		add:    make(chan *contract.TimelockCallScheduled),
-		del:    make(chan operationKey),
-		store:  make(map[operationKey][]*contract.TimelockCallScheduled),
-		busy:   false,
+		ticker:    time.NewTicker(tick),
+		add:       make(chan *contract.TimelockCallScheduled),
+		del:       make(chan operationKey),
+		store:     make(map[operationKey][]*contract.TimelockCallScheduled),
+		busy:      false,
+		logger:    logger,
+		executeFn: executeFn,
 	}

 	return s
@@ -50,7 +63,7 @@ func newScheduler(tick time.Duration) *scheduler {
 // call them this way so no process is allowd to add/delete from
 // the store, which could cause race conditions like adding/deleting
 // while the operation is being executed.
-func (tw *Worker) runScheduler(ctx context.Context) <-chan struct{} {
+func (tw *scheduler) runScheduler(ctx context.Context) <-chan struct{} {
 	done := make(chan struct{})

 	go func() {
@@ -67,7 +80,7 @@
 				tw.logger.Debug().Msgf("new scheduler tick: operations in store")
 				tw.setSchedulerBusy()
 				for _, op := range tw.store {
-					tw.execute(ctx, op)
+					tw.executeFn(ctx, op)
 				}
 				tw.setSchedulerFree()
 			} else {
@@ -102,7 +115,7 @@
 }

 // updateSchedulerDelay updates the internal ticker delay, so it can be reconfigured while running.
-func (tw *Worker) updateSchedulerDelay(t time.Duration) {
+func (tw *scheduler) updateSchedulerDelay(t time.Duration) {
 	if t <= 0 {
 		tw.logger.Debug().Msgf("internal min delay not changed, invalid duration: %v", t.String())
 		return
@@ -113,40 +126,38 @@ func (tw *Worker) updateSchedulerDelay(t time.Duration) {
 }

 // addToScheduler adds a new CallSchedule operation safely to the store.
-func (tw *Worker) addToScheduler(op *contract.TimelockCallScheduled) {
+func (tw *scheduler) addToScheduler(op *contract.TimelockCallScheduled) {
 	tw.logger.Debug().Msgf("scheduling operation: %x", op.Id)
 	tw.add <- op
-	tw.logger.Debug().Msgf("operations in scheduler: %v", len(tw.store))
 }

 // delFromScheduler deletes an operation safely from the store.
-func (tw *Worker) delFromScheduler(op operationKey) {
+func (tw *scheduler) delFromScheduler(op operationKey) {
 	tw.logger.Debug().Msgf("de-scheduling operation: %v", op)
 	tw.del <- op
-	tw.logger.Debug().Msgf("operations in scheduler: %v", len(tw.store))
 }

-func (tw *Worker) setSchedulerBusy() {
+func (tw *scheduler) setSchedulerBusy() {
 	tw.logger.Debug().Msgf("setting scheduler busy")
 	tw.mu.Lock()
 	tw.busy = true
 	tw.mu.Unlock()
 }

-func (tw *Worker) setSchedulerFree() {
+func (tw *scheduler) setSchedulerFree() {
 	tw.logger.Debug().Msgf("setting scheduler free")
 	tw.mu.Lock()
 	tw.busy = false
 	tw.mu.Unlock()
 }

-func (tw *Worker) isSchedulerBusy() bool {
+func (tw *scheduler) isSchedulerBusy() bool {
 	return tw.busy
 }

 // dumpOperationStore dumps to the logger and to the log file the current scheduled unexecuted operations.
 // maps in go don't guarantee order, so that's why we have to find the earliest block.
-func (tw *Worker) dumpOperationStore(now func() time.Time) {
+func (tw *scheduler) dumpOperationStore(now func() time.Time) {
 	if len(tw.store) <= 0 {
 		tw.logger.Info().Msgf("no operations to dump")
 		return
@@ -249,3 +260,37 @@ func toEarliestRecord(op *contract.TimelockCallScheduled) string {
 func toSubsequentRecord(op *contract.TimelockCallScheduled) string {
 	return fmt.Sprintf("CallSchedule pending ID: %x\tBlock Number: %v\n", op.Id, op.Raw.BlockNumber)
 }
+
+// ----- nop scheduler -----
+// nopScheduler implements the Scheduler interface but doesn't actually trigger any operations.
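+// It backs the new dry-run mode: incoming Timelock events are still observed and
+// logged, but no execution is ever triggered.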
+type nopScheduler struct {
+	logger *zerolog.Logger
+}
+
+func newNopScheduler(logger *zerolog.Logger) *nopScheduler {
+	return &nopScheduler{logger: logger}
+}
+
+func (s *nopScheduler) runScheduler(ctx context.Context) <-chan struct{} {
+	s.logger.Info().Msg("nop.runScheduler")
+	ch := make(chan struct{})
+
+	go func() {
+		<-ctx.Done()
+		close(ch)
+	}()
+
+	return ch
+}
+
+func (s *nopScheduler) addToScheduler(op *contract.TimelockCallScheduled) {
+	s.logger.Info().Any("op", op).Msg("nop.addToScheduler")
+}
+
+func (s *nopScheduler) delFromScheduler(key operationKey) {
+	s.logger.Info().Any("key", key).Msg("nop.delFromScheduler")
+}
+
+func (s *nopScheduler) dumpOperationStore(now func() time.Time) {
+	s.logger.Info().Msg("nop.dumpOperationStore")
+}
diff --git a/pkg/timelock/scheduler_test.go b/pkg/timelock/scheduler_test.go
index 38d6868..6e5e85a 100644
--- a/pkg/timelock/scheduler_test.go
+++ b/pkg/timelock/scheduler_test.go
@@ -1,22 +1,31 @@
 package timelock

 import (
+	"context"
 	"fmt"
+	"math/big"
+	"math/rand"
 	"os"
 	"reflect"
+	"slices"
 	"strings"
 	"testing"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/google/go-cmp/cmp"
 	"github.com/rs/zerolog"
+	"github.com/samber/lo"
 	"github.com/smartcontractkit/timelock-worker/pkg/timelock/contract"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func Test_newScheduler(t *testing.T) {
-	tScheduler := newScheduler(10 * time.Second)
+	logger := zerolog.Nop()
+	execFn := func(context.Context, []*contract.TimelockCallScheduled) {}
+	tScheduler := newTestScheduler()

 	type args struct {
 		tick time.Duration
@@ -36,7 +45,7 @@ func Test_newScheduler(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got := newScheduler(tt.args.tick)
+			got := newScheduler(tt.args.tick, &logger, execFn)
 			if reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
 				t.Errorf("newScheduler() = %v, want %v", got, tt.want)
 			}
@@ -44,47 +53,45 @@
 		})
 	}
 }

-func TestWorker_updateSchedulerDelay(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+func Test_scheduler_updateSchedulerDelay(t *testing.T) {
+	tScheduler := newTestScheduler()

 	// Should never fail
-	testWorker.updateSchedulerDelay(1 * time.Second)
-	testWorker.updateSchedulerDelay(-1 * time.Second)
-	testWorker.updateSchedulerDelay(0 * time.Second)
+	tScheduler.updateSchedulerDelay(1 * time.Second)
+	tScheduler.updateSchedulerDelay(-1 * time.Second)
+	tScheduler.updateSchedulerDelay(0 * time.Second)
 }

-func TestWorker_isSchedulerBusy(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+func Test_scheduler_isSchedulerBusy(t *testing.T) {
+	tScheduler := newTestScheduler()

-	isBusy := testWorker.isSchedulerBusy()
+	isBusy := tScheduler.isSchedulerBusy()
 	assert.Equal(t, false, isBusy, "scheduler should be busy by default")

-	testWorker.setSchedulerBusy()
-	isBusy = testWorker.isSchedulerBusy()
+	tScheduler.setSchedulerBusy()
+	isBusy = tScheduler.isSchedulerBusy()
 	assert.Equal(t, true, isBusy, "scheduler should be busy after setSchedulerBusy()")

-	testWorker.setSchedulerFree()
-	isBusy = testWorker.isSchedulerBusy()
+	tScheduler.setSchedulerFree()
+	isBusy = tScheduler.isSchedulerBusy()
 	assert.Equal(t, false, isBusy, "scheduler shouldn't be busy after setSchedulerFree()")
 }

-func TestWorker_setSchedulerBusy(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+func Test_scheduler_setSchedulerBusy(t *testing.T) {
+	tScheduler := newTestScheduler()

-	testWorker.setSchedulerBusy()
-	isBusy := testWorker.isSchedulerBusy()
+	tScheduler.setSchedulerBusy()
+	isBusy := tScheduler.isSchedulerBusy()
 	assert.Equal(t, true, isBusy, "scheduler should be busy after setSchedulerBusy()")
 }

-func TestWorker_setSchedulerFree(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+func Test_scheduler_setSchedulerFree(t *testing.T) {
+	logger := zerolog.Nop()
+	execFn := func(context.Context, []*contract.TimelockCallScheduled) {}
+	tScheduler := newScheduler(10 * time.Second, &logger, execFn)

-	testWorker.setSchedulerFree()
-	isBusy := testWorker.isSchedulerBusy()
+	tScheduler.setSchedulerFree()
+	isBusy := tScheduler.isSchedulerBusy()
 	assert.Equal(t, false, isBusy, "scheduler shouldn't be busy after setSchedulerFree()")
 }

@@ -92,7 +99,6 @@ func TestWorker_setSchedulerFree(t *testing.T) {
 func Test_dumpOperationStore(t *testing.T) {
 	var (
 		fName         = logPath + logFile
-		logger        = zerolog.Nop()
 		earliestBlock = 42

 		opKeys = generateOpKeys(t, []string{"1", "2"})
@@ -117,11 +123,9 @@ func Test_dumpOperationStore(t *testing.T) {
 			opKeys[1]: {following},
 		}

-		worker = &Worker{
-			logger: &logger,
-			scheduler: scheduler{
-				store: store,
-			},
+		scheduler = scheduler{
+			store:  store,
+			logger: func(l zerolog.Logger) *zerolog.Logger { return &l }(zerolog.Nop()),
 		}
 	)
@@ -139,7 +143,7 @@ func Test_dumpOperationStore(t *testing.T) {
 	wantPrefix := fmt.Sprintf("Process stopped at %v\n", nowFunc().In(time.UTC))

 	// Write the store to the file.
-	worker.dumpOperationStore(nowFunc)
+	scheduler.dumpOperationStore(nowFunc)

 	// Read the file and compare the contents.
 	gotRead, err := os.ReadFile(fName)
@@ -153,6 +157,48 @@ func Test_dumpOperationStore(t *testing.T) {
 	assert.Equal(t, wantRead, gotRead)
 }

+func Test_scheduler_concurrency(t *testing.T) {
+	const numOps = 100
+	logger := zerolog.Nop()
+	ctx, cancel := context.WithCancel(context.Background())
+
+	executedOps := map[int]uint16{} // {numericOpId: executionCount}
+	executedCh := make(chan operationKey)
+	execFn := func(ctx context.Context, ops []*contract.TimelockCallScheduled) {
+		for _, op := range ops {
+			opNum := int(opIDToNum(t, op.Id))
+			executedOps[opNum] = executedOps[opNum] + 1
+			go func() {
+				time.Sleep(time.Duration(1+rand.Intn(50)) * time.Millisecond)
+				executedCh <- op.Id
+			}()
+		}
+	}
+
+	// run scheduler
+	testScheduler := newScheduler(10*time.Millisecond, &logger, execFn)
+	_ = testScheduler.runScheduler(ctx)
+
+	// run mock event listener
+	go runMockEventListener(t, ctx, cancel, testScheduler, executedCh, numOps)
+
+	// wait for all operations to be executed
+	<-ctx.Done()
+
+	require.GreaterOrEqual(t, len(executedOps), numOps)
+	executedIDs := lo.Keys(executedOps)
+	slices.Sort(executedIDs)
+	require.Empty(t, cmp.Diff(lo.Range(100), executedIDs[:numOps]))
+}
+
+// ----- helpers -----
+
+func newTestScheduler() *scheduler {
+	logger := zerolog.Nop()
+	execFn := func(context.Context, []*contract.TimelockCallScheduled) {}
+	return newScheduler(10 * time.Second, &logger, execFn)
+}
+
 // generateOpKeys generates a slice of operation keys from a slice of strings.
 func generateOpKeys(t *testing.T, in []string) [][32]byte {
 	t.Helper()
@@ -167,3 +213,49 @@ func generateOpKeys(t *testing.T, in []string) [][32]byte {
 	}
 	return opKeys
 }
+
+func runMockEventListener(
+	t *testing.T,
+	ctx context.Context,
+	cancel context.CancelFunc,
+	testScheduler *scheduler,
+	executedCh <-chan operationKey,
+	lastOpID int16,
+) {
+	t.Helper()
+
+	opNum := int64(0)
+
+	ticker := time.NewTicker(15 * time.Millisecond)
+	for {
+		select {
+		case <-ticker.C:
+			op := &contract.TimelockCallScheduled{Id: opID(uint16(opNum)), Index: big.NewInt(0)}
+			opNum += 1
+			testScheduler.addToScheduler(op)

+		case executedOpID := <-executedCh:
+			testScheduler.delFromScheduler(executedOpID)
+			if opIDToNum(t, executedOpID) == lastOpID {
+				cancel()
+			}
+
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func opID(n uint16) [32]byte {
+	id := [32]byte{}
+	id[31] = byte(n)
+	id[30] = byte(n >> 8)
+	return id
+}
+
+func opIDToNum(t *testing.T, opID [32]byte) int16 {
+	t.Helper()
+	opNum, ok := new(big.Int).SetString(fmt.Sprintf("%x", opID), 16)
+	require.True(t, ok)
+	return int16(opNum.Uint64())
+}
diff --git a/pkg/timelock/timelock.go b/pkg/timelock/timelock.go
index 7ceeb6e..c627ec9 100644
--- a/pkg/timelock/timelock.go
+++ b/pkg/timelock/timelock.go
@@ -36,9 +36,10 @@ type Worker struct {
 	fromBlock          *big.Int
 	pollPeriod         int64
 	listenerPollPeriod int64
+	dryRun             bool
 	logger             *zerolog.Logger
 	privateKey         *ecdsa.PrivateKey
-	scheduler
+	scheduler          Scheduler
 }

 var httpSchemes = []string{"http", "https"}
@@ -49,7 +50,7 @@ var validNodeUrlSchemes = []string{"http", "https", "ws", "wss"}
 // It's a singleton, so further executions will retrieve the same timelockWorker.
 func NewTimelockWorker(
 	nodeURL, timelockAddress, callProxyAddress, privateKey string, fromBlock *big.Int,
-	pollPeriod int64, listenerPollPeriod int64, logger *zerolog.Logger,
+	pollPeriod int64, listenerPollPeriod int64, dryRun bool, logger *zerolog.Logger,
 ) (*Worker, error) {
 	// Sanity check on each provided variable before allocating more resources.
 	u, err := url.ParseRequestURI(nodeURL)
@@ -126,9 +127,15 @@ func NewTimelockWorker(
 		fromBlock:          fromBlock,
 		pollPeriod:         pollPeriod,
 		listenerPollPeriod: listenerPollPeriod,
+		dryRun:             dryRun,
 		logger:             logger,
 		privateKey:         privateKeyECDSA,
-		scheduler:          *newScheduler(time.Duration(pollPeriod) * time.Second),
+	}
+
+	if dryRun {
+		tWorker.scheduler = newNopScheduler(logger)
+	} else {
+		tWorker.scheduler = newScheduler(time.Duration(pollPeriod)*time.Second, logger, tWorker.execute)
 	}

 	return tWorker, nil
@@ -143,7 +150,7 @@ func (tw *Worker) Listen(ctx context.Context) error {
 	tw.startLog()

 	// Run the scheduler to add/del operations in a thread-safe way.
-	schedulingDone := tw.runScheduler(ctxwc)
+	schedulingDone := tw.scheduler.runScheduler(ctxwc)

 	// Retrieve historical logs.
 	historyDone, historyCh, err := tw.retrieveHistoricalLogs(ctxwc)
@@ -174,7 +181,7 @@
 	tw.logger.Info().Msg("shutting down timelock-worker")
 	tw.logger.Info().Msg("dumping operation store")
-	tw.dumpOperationStore(time.Now)
+	tw.scheduler.dumpOperationStore(time.Now)

 	// Wait for all goroutines to finish.
 	shutdownCtx, cancel := context.WithTimeout(context.Background(), time.Second*5)
@@ -196,7 +203,7 @@ func (tw *Worker) setupFilterQuery(fromBlock *big.Int) ethereum.FilterQuery {
 // retrieveNewLogs returns a "control channel" and a "logs channels". The logs channel is where
 // new log events will be asynchronously pushed to.
 //
-// The actual retrieveal is performed by either `subscribeNewLogs`, if the node connection
+// The actual retrieval is performed by either `subscribeNewLogs`, if the node connection
 // supports subscriptions, or `pollNewLogs` otherwise. In practice, the ethclient library
 // simply checks if the given node URL is "http(s)" or not.
 func (tw *Worker) retrieveNewLogs(ctx context.Context) (<-chan struct{}, <-chan types.Log, error) {
@@ -457,7 +464,7 @@ func (tw *Worker) handleLog(ctx context.Context, log types.Log) error {
 		if !isDone(ctx, tw.contract, cs.Id) && isOperation(ctx, tw.contract, cs.Id) {
 			tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received", eventCallScheduled)
-			tw.addToScheduler(cs)
+			tw.scheduler.addToScheduler(cs)
 		}

 		// A CallExecuted which is in Done status should delete the task in the scheduler store.
@@ -469,7 +476,7 @@
 			if isDone(ctx, tw.contract, cs.Id) {
 				tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received, skipping operation", eventCallExecuted)
-				tw.delFromScheduler(cs.Id)
+				tw.scheduler.delFromScheduler(cs.Id)
 			}

 		// A Cancelled which is in Done status should delete the task in the scheduler store.
@@ -481,7 +488,7 @@
 			if isDone(ctx, tw.contract, cs.Id) {
 				tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received, cancelling operation", eventCancelled)
-				tw.delFromScheduler(cs.Id)
+				tw.scheduler.delFromScheduler(cs.Id)
 			}

 	default:
 		tw.logger.Info().Str("event", event.Name).Msgf("discarding event")
diff --git a/pkg/timelock/timelock_test.go b/pkg/timelock/timelock_test.go
index 9e668f0..6c27c75 100644
--- a/pkg/timelock/timelock_test.go
+++ b/pkg/timelock/timelock_test.go
@@ -2,16 +2,18 @@ package timelock

 import (
 	"math/big"
-	"reflect"
+	"net/http"
+	"net/http/httptest"
 	"testing"

 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func newTestTimelockWorker(
 	t *testing.T, nodeURL, timelockAddress, callProxyAddress, privateKey string, fromBlock *big.Int,
-	pollPeriod int64, eventListenerPollPeriod int64, logger *zerolog.Logger,
+	pollPeriod int64, eventListenerPollPeriod int64, dryRun bool, logger *zerolog.Logger,
 ) *Worker {
 	assert.NotEmpty(t, nodeURL, "nodeURL is empty. Are environment variabes in const_test.go set?")
 	assert.NotEmpty(t, timelockAddress, "nodeURL is empty. Are environment variabes in const_test.go set?")
@@ -22,7 +24,7 @@ func newTestTimelockWorker(
 	assert.NotNil(t, logger, "logger is nil. Are environment variabes in const_test.go set?")

 	tw, err := NewTimelockWorker(nodeURL, timelockAddress, callProxyAddress, privateKey, fromBlock,
-		pollPeriod, eventListenerPollPeriod, logger)
+		pollPeriod, eventListenerPollPeriod, dryRun, logger)
 	assert.NoError(t, err)
 	assert.NotNil(t, tw)
@@ -30,10 +32,14 @@
 }

 func TestNewTimelockWorker(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+	t.Parallel()
+
+	svr := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) {
+		writer.Write([]byte("Ok"))
+	}))
+	defer svr.Close()

-	type args struct {
+	type argsT struct {
 		nodeURL                 string
 		timelockAddress         string
 		callProxyAddress        string
 		privateKey              string
 		fromBlock               *big.Int
 		pollPeriod              int64
 		eventListenerPollPeriod int64
-		logger                  *zerolog.Logger
+		dryRun                  bool
+		logger                  zerolog.Logger
+	}
+	defaultArgs := argsT{
+		nodeURL:                 svr.URL,
+		timelockAddress:         "0x0000000000000000000000000000000000000001",
+		callProxyAddress:        "0x0000000000000000000000000000000000000002",
+		privateKey:              "1921763610b80b2b147ca676c775c172ba6c037f6ba135792bed6bc458c660f0",
+		fromBlock:               big.NewInt(1),
+		pollPeriod:              900,
+		eventListenerPollPeriod: 60,
+		dryRun:                  false,
+		logger:                  zerolog.Nop(),
 	}
+
 	tests := []struct {
 		name    string
-		args    args
-		want    *Worker
-		wantErr bool
+		setup   func(*argsT)
+		wantErr string
 	}{
 		{
-			name: "NewTimelockWorker new instance is created (success)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: false,
+			name:  "success",
+			setup: func(*argsT) {},
+		},
+		{
+			name:    "failure - invalid host in node url",
+			setup:   func(a *argsT) { a.nodeURL = "wss://invalid.host/rpc" },
+			wantErr: "no such host",
 		},
 		{
-			name: "NewTimelockWorker bad rpc provided (fail)",
-			args: args{
-				nodeURL:          "wss://bad/rpc",
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - invalid url scheme in node url",
+			setup:   func(a *argsT) { a.nodeURL = "invalid://localhost/rpc" },
+			wantErr: "invalid node URL: invalid://localhost/rpc (accepted schemes are: [http https ws wss])",
 		},
 		{
-			name: "NewTimelockWorker bad rpc protocol provided (fail)",
-			args: args{
-				nodeURL:          "https://bad/protocol",
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad timelock address",
+			setup:   func(a *argsT) { a.timelockAddress = "invalid" },
+			wantErr: "timelock address provided is not valid: invalid",
 		},
 		{
-			name: "NewTimelockWorker bad timelock address provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  "0x1234",
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad call proxy address",
+			setup:   func(a *argsT) { a.callProxyAddress = "invalid" },
+			wantErr: "call proxy address provided is not valid: invalid",
 		},
 		{
-			name: "NewTimelockWorker bad call proxy address provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: "0x1234",
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad private key",
+			setup:   func(a *argsT) { a.privateKey = "invalid" },
+			wantErr: "the provided private key is not valid: got invalid",
 		},
 		{
-			name: "NewTimelockWorker bad private key provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       "0123456789",
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad from block",
+			setup:   func(a *argsT) { a.fromBlock = big.NewInt(-1) },
+			wantErr: "from block can't be a negative number (minimum value 0): got -1",
 		},
 		{
-			name: "NewTimelockWorker bad negative from block provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        big.NewInt(-1),
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad poll period",
+			setup:   func(a *argsT) { a.pollPeriod = -1 },
+			wantErr: "poll-period must be a positive non-zero integer: got -1",
 		},
 		{
-			name: "NewTimelockWorker bad poll period provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       0,
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad event listener poll period",
+			setup:   func(a *argsT) { a.eventListenerPollPeriod = -1 },
+			wantErr: "event-listener-poll-period must be a positive non-zero integer: got -1",
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := NewTimelockWorker(tt.args.nodeURL, tt.args.timelockAddress, tt.args.callProxyAddress,
-				tt.args.privateKey, tt.args.fromBlock, tt.args.pollPeriod, tt.args.eventListenerPollPeriod, tt.args.logger)
-			if (err != nil) != tt.wantErr {
-				t.Errorf("NewTimelockWorker() error = %v, wantErr %v", err, tt.wantErr)
-				return
-			}
-			if !(reflect.TypeOf(tt.want) == reflect.TypeOf(got)) {
-				t.Errorf("NewTimelockWorker() = %v, want %v", got, tt.want)
+			t.Parallel()
+
+			args := defaultArgs
+			tt.setup(&args)
+
+			got, err := NewTimelockWorker(args.nodeURL, args.timelockAddress, args.callProxyAddress,
+				args.privateKey, args.fromBlock, args.pollPeriod, args.eventListenerPollPeriod,
+				args.dryRun, &args.logger)
+
+			if tt.wantErr == "" {
+				require.NoError(t, err)
+				require.IsType(t, &Worker{}, got)
+			} else {
+				require.ErrorContains(t, err, tt.wantErr)
 			}
 		})
 	}
 }

 func TestWorker_startLog(t *testing.T) {
+	t.Parallel()
+
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)

 	tests := []struct {
 		name string
diff --git a/tests/integration/suite.go b/tests/integration/suite.go
index ccd7cc3..d9a25c8 100644
--- a/tests/integration/suite.go
+++ b/tests/integration/suite.go
@@ -61,7 +61,8 @@ func (s *integrationTestSuite) KeyedTransactor(privateKey *ecdsa.PrivateKey, cha
 }

 func (s *integrationTestSuite) DeployTimelock(
-	ctx context.Context, transactor *bind.TransactOpts, client *ethclient.Client, adminAccount common.Address,
+	ctx context.Context, transactor *bind.TransactOpts, client *ethclient.Client,
+	adminAccount common.Address, minDelay *big.Int,
 ) (
 	common.Address, *types.Transaction, *types.Receipt, *contracts.RBACTimelock,
 ) {
@@ -71,13 +72,12 @@ func (s *integrationTestSuite) DeployTimelock(
 	bypassers := []common.Address{}

 	address, transaction, contract, err := contracts.DeployRBACTimelock(
-		transactor, client, big.NewInt(10800), adminAccount, proposers, executors, cancellers, bypassers)
+		transactor, client, minDelay, adminAccount, proposers, executors, cancellers, bypassers)
 	s.Require().NoError(err)

 	receipt, err := bind.WaitMined(ctx, client, transaction)
 	s.Require().NoError(err)
 	s.Require().Equal(types.ReceiptStatusSuccessful, receipt.Status)
-
 	s.Logf("timelock address: %v; deploy transaction: %v", address, transaction.Hash())

 	return address, transaction, receipt, contract
@@ -95,7 +95,6 @@ func (s *integrationTestSuite) DeployCallProxy(
 	receipt, err := bind.WaitMined(ctx, client, transaction)
 	s.Require().NoError(err)
 	s.Require().Equal(types.ReceiptStatusSuccessful, receipt.Status)
-
 	s.Logf("call proxy address: %v; deploy transaction: %v", address, transaction.Hash())

 	return address, transaction, receipt, contract
@@ -113,8 +112,25 @@ func (s *integrationTestSuite) UpdateDelay(
 	receipt, err := bind.WaitMined(ctx, client, transaction)
 	s.Require().NoError(err)
 	s.Require().Equal(types.ReceiptStatusSuccessful, receipt.Status)
-
 	s.Logf("update delay transaction: %v", transaction.Hash())

 	return transaction, receipt
 }
+
+func (s *integrationTestSuite) ScheduleBatch(
+	ctx context.Context, transactor *bind.TransactOpts, client *ethclient.Client,
+	timelockContract *contracts.RBACTimelock, calls []contracts.RBACTimelockCall,
+	predecessor [32]byte, salt [32]byte, delay *big.Int,
+) (
+	*types.Transaction, *types.Receipt,
+) {
+	transaction, err := timelockContract.ScheduleBatch(transactor, calls, predecessor, salt, delay)
+	s.Require().NoError(err)
+
+	receipt, err := bind.WaitMined(ctx, client, transaction)
+	s.Require().NoError(err)
+	s.Require().Equal(types.ReceiptStatusSuccessful, receipt.Status)
+	s.Logf("schedule batch transaction: %v", transaction.Hash())
+
+	return transaction, receipt
+}
diff --git a/tests/integration/timelock_test.go b/tests/integration/timelock_test.go
index 43f9b09..bb36a7b 100644
--- a/tests/integration/timelock_test.go
+++ b/tests/integration/timelock_test.go
@@ -3,12 +3,18 @@ package integration
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"math/big"
+	"regexp"
 	"testing"
 	"time"

+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/rs/zerolog"
+	"github.com/samber/lo"
+	contracts "github.com/smartcontractkit/ccip-owner-contracts/gethwrappers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -20,18 +26,19 @@ func (s *integrationTestSuite) TestTimelockWorkerListen() {
 	ctx, cancel := context.WithCancel(s.Ctx)
 	defer cancel()

-	account1 := NewTestAccount(s.T())
-	s.Logf("new account created: %v", account1)
+	account := NewTestAccount(s.T())
+	s.Logf("new account created: %v", account)

-	_, err := s.GethContainer.CreateAccount(ctx, account1.hexAddress, account1.hexPrivateKey, 1)
+	_, err := s.GethContainer.CreateAccount(ctx, account.hexAddress, account.hexPrivateKey, 1)
 	s.Require().NoError(err)

 	// create geth rpc client
 	gethURL := s.GethContainer.HTTPConnStr(s.T(), ctx)
 	client, err := ethclient.DialContext(ctx, gethURL)
 	s.Require().NoError(err)
+	defer client.Close()

-	transactor := s.KeyedTransactor(account1.privateKey, nil)
+	transactor := s.KeyedTransactor(account.privateKey, nil)

 	tests := []struct {
 		name string
@@ -47,11 +54,11 @@ func (s *integrationTestSuite) TestTimelockWorkerListen() {
 			logger := timelockTests.NewTestLogger(zerolog.Nop()) // "zerolog.TestWriter{T: t, Frame: 6}" when debugging

-			timelockAddress, _, _, timelockContract := s.DeployTimelock(ctx, transactor, client, account1.address)
+			timelockAddress, _, _, timelockContract := s.DeployTimelock(ctx, transactor, client, account.address, big.NewInt(1))
 			callProxyAddress, _, _, _ := s.DeployCallProxy(ctx, transactor, client, timelockAddress)

 			go runTimelockWorker(s.T(), sctx, tt.url, timelockAddress.String(), callProxyAddress.String(),
-				account1.hexPrivateKey, big.NewInt(0), int64(60), int64(1), logger.Logger())
+				account.hexPrivateKey, big.NewInt(0), int64(60), int64(1), true, logger.Logger())

 			s.UpdateDelay(ctx, transactor, client, timelockContract, big.NewInt(10))
@@ -60,16 +67,95 @@ func (s *integrationTestSuite) TestTimelockWorkerListen() {
 	}
 }

+func (s *integrationTestSuite) TestTimelockWorkerDryRun() {
+	ctx, cancel := context.WithCancel(s.Ctx)
+	defer cancel()
+
+	account := NewTestAccount(s.T())
+	s.Logf("new account created: %v", account)
+
+	_, err := s.GethContainer.CreateAccount(ctx, account.hexAddress, account.hexPrivateKey, 1)
+	s.Require().NoError(err)
+
+	// create geth rpc client
+	gethURL := s.GethContainer.HTTPConnStr(s.T(), ctx)
+	client, err := ethclient.DialContext(ctx, gethURL)
+	s.Require().NoError(err)
+	defer client.Close()
+
+	transactor := s.KeyedTransactor(account.privateKey, nil)
+
+	tests := []struct {
+		name   string
+		dryRun bool
+		assert func(t *testing.T, logger timelockTests.TestLogger)
+	}{
+		{
+			name:   "dry run enabled",
+			dryRun: true,
+			assert: func(t *testing.T, logger timelockTests.TestLogger) {
+				messages := []string{
+					`"message":"CallScheduled received"`,
+					`"message":"nop.addToScheduler"`,
+				}
+				s.Require().EventuallyWithT(func(t *assert.CollectT) {
+					for _, message := range messages {
+						s.Assert().True(containsMatchingMessage(logger, regexp.MustCompile(message)))
+					}
+				}, 2*time.Second, 100*time.Millisecond)
+			},
+		},
+		{
+			name:   "dry run disabled",
+			dryRun: false,
+			assert: func(t *testing.T, logger timelockTests.TestLogger) {
+				messages := []string{
+					`"message":"scheduling operation: 371141ec10c0cc52996bed94240931136172d0b46bdc4bceaea1ef76675c1237"`,
+					`"message":"scheduled operation: 371141ec10c0cc52996bed94240931136172d0b46bdc4bceaea1ef76675c1237"`,
+				}
+				s.Require().EventuallyWithT(func(t *assert.CollectT) {
+					for _, message := range messages {
+						s.Assert().True(containsMatchingMessage(logger, regexp.MustCompile(message)))
+					}
+				}, 2*time.Second, 100*time.Millisecond)
+			},
+		},
+	}
+	for _, tt := range tests {
+		s.Run(tt.name, func() {
+			tctx, cancel := context.WithCancel(ctx)
+			defer cancel()
+
+			logger := timelockTests.NewTestLogger(zerolog.Nop()) // "zerolog.TestWriter{T: t, Frame: 6}" when debugging
+
+			timelockAddress, _, _, timelockContract := s.DeployTimelock(tctx, transactor, client, account.address, big.NewInt(1))
+			callProxyAddress, _, _, _ := s.DeployCallProxy(tctx, transactor, client, timelockAddress)
+
+			go runTimelockWorker(s.T(), tctx, gethURL, timelockAddress.String(), callProxyAddress.String(),
+				account.hexPrivateKey, big.NewInt(0), int64(1), int64(1), tt.dryRun, logger.Logger())
+
+			calls := []contracts.RBACTimelockCall{{
+				Target: common.HexToAddress("0x0000000000000000000000000000000000000000"),
+				Value:  big.NewInt(1),
+				Data:   hexutil.MustDecode("0x0123456789abcdef"),
+			}}
+			s.ScheduleBatch(tctx, transactor, client, timelockContract, calls, [32]byte{}, [32]byte{}, big.NewInt(1))
+
+			tt.assert(s.T(), logger)
+		})
+	}
+}
+
 // ----- helpers -----

 func runTimelockWorker(
 	t *testing.T, ctx context.Context, nodeURL, timelockAddress, callProxyAddress, privateKey string,
-	fromBlock *big.Int, pollPeriod int64, listenerPollPeriod int64, logger *zerolog.Logger,
+	fromBlock *big.Int, pollPeriod int64, listenerPollPeriod int64, dryRun bool, logger *zerolog.Logger,
 ) {
 	t.Logf("TimelockWorker.Listen(%v, %v, %v, %v, %v, %v, %v)", nodeURL, timelockAddress, callProxyAddress,
 		privateKey, fromBlock, pollPeriod, listenerPollPeriod)

 	timelockWorker, err := timelock.NewTimelockWorker(nodeURL, timelockAddress,
-		callProxyAddress, privateKey, fromBlock, pollPeriod, listenerPollPeriod, logger)
+		callProxyAddress, privateKey, fromBlock, pollPeriod, listenerPollPeriod, dryRun, logger)
 	require.NoError(t, err)
 	require.NotNil(t, timelockWorker)
@@ -97,23 +183,45 @@ func assertCapturedLogMessages(t *testing.T, logger timelockTests.TestLogger) {
 	}, 5*time.Second, 200*time.Millisecond)
 }

+func selectMatchingMessage(logger timelockTests.TestLogger, pattern *regexp.Regexp) []string {
+	return lo.Filter(logger.Messages(), func(loggedMessage string, _ int) bool {
+		return pattern.MatchString(loggedMessage)
+	})
+}
+
+func containsMatchingMessage(logger timelockTests.TestLogger, pattern *regexp.Regexp) bool {
+	return len(selectMatchingMessage(logger, pattern)) > 0
+}
+
 func selectDiscardingEventMessages(t *testing.T, logger timelockTests.TestLogger) []string {
 	t.Helper()

-	selectedEvents := []string{}
-
-	for _, entry := range logger.Messages() {
-		parsedEntry := struct {
-			Message string
-			Event   string
-		}{}
-		err := json.Unmarshal([]byte(entry), &parsedEntry)
+	messages := selectMatchingMessage(logger, regexp.MustCompile(`"message":"discarding event"`))
+	return lo.Map(messages, func(message string, _ int) string {
+		parsedEntry := struct{ Event string }{}
+		err := json.Unmarshal([]byte(message), &parsedEntry)
 		require.NoError(t, err)
+		return parsedEntry.Event
+	})
+}
+
+func assertJSONSubset(t assert.TestingT, expected string, actual string) bool {
+	var expectedJSONAsInterface, actualJSONAsInterface interface{}

-		if parsedEntry.Message == "discarding event" {
-			selectedEvents = append(selectedEvents, parsedEntry.Event)
-		}
+	if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+		return assert.Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()))
 	}

-	return selectedEvents
+	if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+		return assert.Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()))
+	}
+
+	return assert.Subset(t, actualJSONAsInterface, expectedJSONAsInterface)
+}
+
+func requireJSONSubset(t require.TestingT, expected string, actual string) {
+	if assertJSONSubset(t, expected, actual) {
+		return
+	}
+	t.FailNow()
+}
diff --git a/tests/logger.go b/tests/logger.go
index 4d3365c..76f54a4 100644
--- a/tests/logger.go
+++ b/tests/logger.go
@@ -57,7 +57,7 @@ func (tl testLogger) LastMessage() string {
 	tl.mutex.Lock()
 	defer tl.mutex.Unlock()

-	return (*tl.messages)[tl.NumMessages()-1]
+	return (*tl.messages)[len(*tl.messages)-1]
 }

 func (tl testLogger) Messages() []string {
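
For reference, a minimal sketch (not part of the diff) of how a caller constructs the worker in dry-run mode with the new signature. The node URL and addresses are placeholders, and the private key is the test fixture value from the diff above; the same behavior is reachable via the new --dry-run flag or the DRY_RUN environment variable (true/yes/on/enabled/1).

// Dry-run wiring sketch; illustrative values only.
package main

import (
	"context"
	"math/big"
	"os"

	"github.com/rs/zerolog"
	"github.com/smartcontractkit/timelock-worker/pkg/timelock"
)

func main() {
	logger := zerolog.New(os.Stderr)

	// Passing dryRun=true makes NewTimelockWorker select the nopScheduler,
	// so CallScheduled events are logged but never executed.
	worker, err := timelock.NewTimelockWorker(
		"wss://node.example/rpc", // nodeURL -- placeholder
		"0x0000000000000000000000000000000000000001", // timelockAddress -- placeholder
		"0x0000000000000000000000000000000000000002", // callProxyAddress -- placeholder
		"1921763610b80b2b147ca676c775c172ba6c037f6ba135792bed6bc458c660f0", // privateKey -- test fixture
		big.NewInt(0), // fromBlock
		900,           // pollPeriod, in seconds
		60,            // eventListenerPollPeriod, in seconds
		true,          // dryRun
		&logger,
	)
	if err != nil {
		logger.Fatal().Err(err).Msg("creating timelock worker")
	}

	if err := worker.Listen(context.Background()); err != nil {
		logger.Fatal().Err(err).Msg("listening for timelock events")
	}
}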