From ba738cb112613d839217598542949c8e86dd8bc0 Mon Sep 17 00:00:00 2001
From: Gustavo Gama
Date: Tue, 19 Nov 2024 02:53:41 -0300
Subject: [PATCH] review: add "scheduler" and "nopScheduler" types

Instead of using the `dryRun` flag to control whether or not operations
are added to the (standard) scheduler, we now select the type of
scheduler passed to the timelock worker service:

* if dryRun is false, use the standard scheduler
* if dryRun is true, use the new "nop" scheduler, which only logs the
  calls but does not do anything

In practice the "standard scheduler" is a new type + interface as well,
since the existing implementation defined the scheduler as a simple data
type which was associated with the timelock worker via implicit
composition (though all the schedule-related methods were defined on the
timelock worker type).
---
 go.mod                             |   1 +
 pkg/timelock/scheduler.go          |  95 ++++++++++++-----
 pkg/timelock/scheduler_test.go     | 158 +++++++++++++++++++++++------
 pkg/timelock/timelock.go           |  25 +++--
 pkg/timelock/timelock_test.go      |   6 ++
 tests/integration/suite.go         |   7 +-
 tests/integration/timelock_test.go |  21 ++--
 7 files changed, 228 insertions(+), 85 deletions(-)

diff --git a/go.mod b/go.mod
index d9d03d5..a3da24b 100644
--- a/go.mod
+++ b/go.mod
@@ -5,6 +5,7 @@ go 1.22
 require (
 	github.com/docker/go-connections v0.5.0
 	github.com/ethereum/go-ethereum v1.13.15
+	github.com/google/go-cmp v0.6.0
 	github.com/prometheus/client_golang v1.19.0
 	github.com/rs/zerolog v1.31.0
 	github.com/samber/lo v1.47.0
diff --git a/pkg/timelock/scheduler.go b/pkg/timelock/scheduler.go
index 9423670..bf5409c 100644
--- a/pkg/timelock/scheduler.go
+++ b/pkg/timelock/scheduler.go
@@ -18,26 +18,39 @@ import (
 
 type operationKey [32]byte
 
+type Scheduler interface {
+	runScheduler(ctx context.Context) <-chan struct{}
+	addToScheduler(op *contract.TimelockCallScheduled)
+	delFromScheduler(op operationKey)
+	dumpOperationStore(now func() time.Time)
+}
+
+type executeFn func(context.Context, []*contract.TimelockCallScheduled)
+
 // Scheduler represents a scheduler with an in memory store.
 // Whenever accessing the map the mutex should be Locked, to prevent
 // any race condition.
 type scheduler struct {
-	mu     sync.Mutex
-	ticker *time.Ticker
-	add    chan *contract.TimelockCallScheduled
-	del    chan operationKey
-	store  map[operationKey][]*contract.TimelockCallScheduled
-	busy   bool
+	mu        sync.Mutex
+	ticker    *time.Ticker
+	add       chan *contract.TimelockCallScheduled
+	del       chan operationKey
+	store     map[operationKey][]*contract.TimelockCallScheduled
+	busy      bool
+	logger    *zerolog.Logger
+	executeFn executeFn
 }
 
 // newScheduler returns a new initialized scheduler.
-func newScheduler(tick time.Duration) *scheduler {
+func newScheduler(tick time.Duration, logger *zerolog.Logger, executeFn executeFn) *scheduler {
 	s := &scheduler{
-		ticker: time.NewTicker(tick),
-		add:    make(chan *contract.TimelockCallScheduled),
-		del:    make(chan operationKey),
-		store:  make(map[operationKey][]*contract.TimelockCallScheduled),
-		busy:   false,
+		ticker:    time.NewTicker(tick),
+		add:       make(chan *contract.TimelockCallScheduled),
+		del:       make(chan operationKey),
+		store:     make(map[operationKey][]*contract.TimelockCallScheduled),
+		busy:      false,
+		logger:    logger,
+		executeFn: executeFn,
 	}
 
 	return s
@@ -50,7 +63,7 @@ func newScheduler(tick time.Duration) *scheduler {
 // call them this way so no process is allowed to add/delete from
 // the store, which could cause race conditions like adding/deleting
 // while the operation is being executed.
-func (tw *Worker) runScheduler(ctx context.Context) <-chan struct{} {
+func (tw *scheduler) runScheduler(ctx context.Context) <-chan struct{} {
 	done := make(chan struct{})
 
 	go func() {
@@ -67,7 +80,7 @@ func (tw *Worker) runScheduler(ctx context.Context) <-chan struct{} {
 					tw.logger.Debug().Msgf("new scheduler tick: operations in store")
 					tw.setSchedulerBusy()
 					for _, op := range tw.store {
-						tw.execute(ctx, op)
+						tw.executeFn(ctx, op)
 					}
 					tw.setSchedulerFree()
 				} else {
@@ -102,7 +115,7 @@ func (tw *Worker) runScheduler(ctx context.Context) <-chan struct{} {
 }
 
 // updateSchedulerDelay updates the internal ticker delay, so it can be reconfigured while running.
-func (tw *Worker) updateSchedulerDelay(t time.Duration) {
+func (tw *scheduler) updateSchedulerDelay(t time.Duration) {
 	if t <= 0 {
 		tw.logger.Debug().Msgf("internal min delay not changed, invalid duration: %v", t.String())
 		return
@@ -113,44 +126,38 @@ func (tw *Worker) updateSchedulerDelay(t time.Duration) {
 }
 
 // addToScheduler adds a new CallSchedule operation safely to the store.
-func (tw *Worker) addToScheduler(op *contract.TimelockCallScheduled) {
-	tw.mu.Lock()
-	defer tw.mu.Unlock()
+func (tw *scheduler) addToScheduler(op *contract.TimelockCallScheduled) {
 	tw.logger.Debug().Msgf("scheduling operation: %x", op.Id)
 	tw.add <- op
-	tw.logger.Debug().Msgf("operations in scheduler: %v", len(tw.store))
 }
 
 // delFromScheduler deletes an operation safely from the store.
-func (tw *Worker) delFromScheduler(op operationKey) {
-	tw.mu.Lock()
-	defer tw.mu.Unlock()
+func (tw *scheduler) delFromScheduler(op operationKey) {
 	tw.logger.Debug().Msgf("de-scheduling operation: %v", op)
 	tw.del <- op
-	tw.logger.Debug().Msgf("operations in scheduler: %v", len(tw.store))
 }
 
-func (tw *Worker) setSchedulerBusy() {
+func (tw *scheduler) setSchedulerBusy() {
 	tw.logger.Debug().Msgf("setting scheduler busy")
 	tw.mu.Lock()
 	tw.busy = true
 	tw.mu.Unlock()
 }
 
-func (tw *Worker) setSchedulerFree() {
+func (tw *scheduler) setSchedulerFree() {
 	tw.logger.Debug().Msgf("setting scheduler free")
 	tw.mu.Lock()
 	tw.busy = false
 	tw.mu.Unlock()
 }
 
-func (tw *Worker) isSchedulerBusy() bool {
+func (tw *scheduler) isSchedulerBusy() bool {
 	return tw.busy
 }
 
 // dumpOperationStore dumps to the logger and to the log file the current scheduled unexecuted operations.
 // maps in go don't guarantee order, so that's why we have to find the earliest block.
-func (tw *Worker) dumpOperationStore(now func() time.Time) {
+func (tw *scheduler) dumpOperationStore(now func() time.Time) {
 	if len(tw.store) <= 0 {
 		tw.logger.Info().Msgf("no operations to dump")
 		return
@@ -253,3 +260,37 @@ func toEarliestRecord(op *contract.TimelockCallScheduled) string {
 func toSubsequentRecord(op *contract.TimelockCallScheduled) string {
 	return fmt.Sprintf("CallSchedule pending ID: %x\tBlock Number: %v\n", op.Id, op.Raw.BlockNumber)
 }
+
+// ----- nop scheduler -----
+// nopScheduler implements the Scheduler interface but does not actually trigger any operations.
+type nopScheduler struct {
+	logger *zerolog.Logger
+}
+
+func newNopScheduler(logger *zerolog.Logger) *nopScheduler {
+	return &nopScheduler{logger: logger}
+}
+
+func (s *nopScheduler) runScheduler(ctx context.Context) <-chan struct{} {
+	s.logger.Info().Msg("nop.runScheduler")
+	ch := make(chan struct{})
+
+	go func() {
+		<-ctx.Done()
+		close(ch)
+	}()
+
+	return ch
+}
+
+func (s *nopScheduler) addToScheduler(op *contract.TimelockCallScheduled) {
+	s.logger.Info().Any("op", op).Msg("nop.addToScheduler")
+}
+
+func (s *nopScheduler) delFromScheduler(key operationKey) {
+	s.logger.Info().Any("key", key).Msg("nop.delFromScheduler")
+}
+
+func (s *nopScheduler) dumpOperationStore(now func() time.Time) {
+	s.logger.Info().Msg("nop.dumpOperationStore")
+}
diff --git a/pkg/timelock/scheduler_test.go b/pkg/timelock/scheduler_test.go
index c64761f..6e5e85a 100644
--- a/pkg/timelock/scheduler_test.go
+++ b/pkg/timelock/scheduler_test.go
@@ -1,22 +1,31 @@
 package timelock
 
 import (
+	"context"
 	"fmt"
+	"math/big"
+	"math/rand"
 	"os"
 	"reflect"
+	"slices"
 	"strings"
 	"testing"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/google/go-cmp/cmp"
 	"github.com/rs/zerolog"
+	"github.com/samber/lo"
 	"github.com/smartcontractkit/timelock-worker/pkg/timelock/contract"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func Test_newScheduler(t *testing.T) {
-	tScheduler := newScheduler(10 * time.Second)
+	logger := zerolog.Nop()
+	execFn := func(context.Context, []*contract.TimelockCallScheduled) {}
+	tScheduler := newTestScheduler()
 
 	type args struct {
 		tick time.Duration
@@ -36,7 +45,7 @@ func Test_newScheduler(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got := newScheduler(tt.args.tick)
+			got := newScheduler(tt.args.tick, &logger, execFn)
 			if reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
 				t.Errorf("newScheduler() = %v, want %v", got, tt.want)
 			}
@@ -44,47 +53,45 @@ func Test_newScheduler(t *testing.T) {
 	}
 }
 
-func TestWorker_updateSchedulerDelay(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
+func Test_scheduler_updateSchedulerDelay(t *testing.T) {
+	tScheduler := newTestScheduler()
 
 	// Should never fail
-	testWorker.updateSchedulerDelay(1 * time.Second)
-	testWorker.updateSchedulerDelay(-1 * time.Second)
-	testWorker.updateSchedulerDelay(0 * time.Second)
+	tScheduler.updateSchedulerDelay(1 * time.Second)
+	tScheduler.updateSchedulerDelay(-1 * time.Second)
+	tScheduler.updateSchedulerDelay(0 * time.Second)
 }
 
-func TestWorker_isSchedulerBusy(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
+func Test_scheduler_isSchedulerBusy(t *testing.T) {
+	tScheduler := newTestScheduler()
 
-	isBusy := testWorker.isSchedulerBusy()
+	isBusy := tScheduler.isSchedulerBusy()
 	assert.Equal(t, false, isBusy, "scheduler shouldn't be busy by default")
 
-	testWorker.setSchedulerBusy()
-	isBusy = testWorker.isSchedulerBusy()
+	tScheduler.setSchedulerBusy()
+	isBusy = tScheduler.isSchedulerBusy()
 	assert.Equal(t, true, isBusy, "scheduler should be busy after setSchedulerBusy()")
 
-	testWorker.setSchedulerFree()
-	isBusy = testWorker.isSchedulerBusy()
+	tScheduler.setSchedulerFree()
+	isBusy = tScheduler.isSchedulerBusy()
 	assert.Equal(t, false, isBusy, "scheduler shouldn't be busy after setSchedulerFree()")
 }
 
-func TestWorker_setSchedulerBusy(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
+func Test_scheduler_setSchedulerBusy(t *testing.T) {
+	tScheduler := newTestScheduler()
 
-	testWorker.setSchedulerBusy()
-	isBusy := testWorker.isSchedulerBusy()
+	tScheduler.setSchedulerBusy()
+	isBusy := tScheduler.isSchedulerBusy()
 	assert.Equal(t, true, isBusy, "scheduler should be busy after setSchedulerBusy()")
 }
 
-func TestWorker_setSchedulerFree(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
+func Test_scheduler_setSchedulerFree(t *testing.T) {
+	logger := zerolog.Nop()
+	execFn := func(context.Context, []*contract.TimelockCallScheduled) {}
+	tScheduler := newScheduler(10*time.Second, &logger, execFn)
 
-	testWorker.setSchedulerFree()
-	isBusy := testWorker.isSchedulerBusy()
+	tScheduler.setSchedulerFree()
+	isBusy := tScheduler.isSchedulerBusy()
 	assert.Equal(t, false, isBusy, "scheduler shouldn't be busy after setSchedulerFree()")
 }
 
@@ -92,7 +99,6 @@ func TestWorker_setSchedulerFree(t *testing.T) {
 func Test_dumpOperationStore(t *testing.T) {
 	var (
 		fName = logPath + logFile
-		logger        = zerolog.Nop()
 
 		earliestBlock = 42
 		opKeys        = generateOpKeys(t, []string{"1", "2"})
@@ -117,11 +123,9 @@ func Test_dumpOperationStore(t *testing.T) {
 			opKeys[1]: {following},
 		}
 
-		worker = &Worker{
-			logger: &logger,
-			scheduler: scheduler{
-				store: store,
-			},
+		scheduler = scheduler{
+			store:  store,
+			logger: lo.ToPtr(zerolog.Nop()),
 		}
 	)
 
@@ -139,7 +143,7 @@ func Test_dumpOperationStore(t *testing.T) {
 	wantPrefix := fmt.Sprintf("Process stopped at %v\n", nowFunc().In(time.UTC))
 
 	// Write the store to the file.
-	worker.dumpOperationStore(nowFunc)
+	scheduler.dumpOperationStore(nowFunc)
 
 	// Read the file and compare the contents.
 	gotRead, err := os.ReadFile(fName)
@@ -153,6 +157,48 @@ func Test_dumpOperationStore(t *testing.T) {
 	assert.Equal(t, wantRead, gotRead)
 }
 
+func Test_scheduler_concurrency(t *testing.T) {
+	const numOps = 100
+	logger := zerolog.Nop()
+	ctx, cancel := context.WithCancel(context.Background())
+
+	executedOps := map[int]uint16{} // {numericOpId: executionCount}
+	executedCh := make(chan operationKey)
+	execFn := func(ctx context.Context, ops []*contract.TimelockCallScheduled) {
+		for _, op := range ops {
+			opNum := int(opIDToNum(t, op.Id))
+			executedOps[opNum]++
+			go func() {
+				time.Sleep(time.Duration(1+rand.Intn(50)) * time.Millisecond)
+				executedCh <- op.Id
+			}()
+		}
+	}
+
+	// run scheduler
+	testScheduler := newScheduler(10*time.Millisecond, &logger, execFn)
+	_ = testScheduler.runScheduler(ctx)
+
+	// run mock event listener
+	go runMockEventListener(t, ctx, cancel, testScheduler, executedCh, numOps)
+
+	// wait for all operations to be executed
+	<-ctx.Done()
+
+	require.GreaterOrEqual(t, len(executedOps), numOps)
+	executedIDs := lo.Keys(executedOps)
+	slices.Sort(executedIDs)
+	require.Empty(t, cmp.Diff(lo.Range(numOps), executedIDs[:numOps]))
+}
+
+// ----- helpers -----
+
+func newTestScheduler() *scheduler {
+	logger := zerolog.Nop()
+	execFn := func(context.Context, []*contract.TimelockCallScheduled) {}
+	return newScheduler(10*time.Second, &logger, execFn)
+}
+
 // generateOpKeys generates a slice of operation keys from a slice of strings.
 func generateOpKeys(t *testing.T, in []string) [][32]byte {
 	t.Helper()
@@ -167,3 +213,49 @@ func generateOpKeys(t *testing.T, in []string) [][32]byte {
 	}
 	return opKeys
 }
+
+func runMockEventListener(
+	t *testing.T,
+	ctx context.Context,
+	cancel context.CancelFunc,
+	testScheduler *scheduler,
+	executedCh <-chan operationKey,
+	lastOpID int16,
+) {
+	t.Helper()
+
+	opNum := int64(0)
+
+	ticker := time.NewTicker(15 * time.Millisecond)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			op := &contract.TimelockCallScheduled{Id: opID(uint16(opNum)), Index: big.NewInt(0)}
+			opNum++
+			testScheduler.addToScheduler(op)
+
+		case executedOpID := <-executedCh:
+			testScheduler.delFromScheduler(executedOpID)
+			if opIDToNum(t, executedOpID) == lastOpID {
+				cancel()
+			}
+
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func opID(n uint16) [32]byte {
+	id := [32]byte{}
+	id[31] = byte(n)
+	id[30] = byte(n >> 8)
+	return id
+}
+
+func opIDToNum(t *testing.T, opID [32]byte) int16 {
+	t.Helper()
+	opNum, ok := new(big.Int).SetString(fmt.Sprintf("%x", opID), 16)
+	require.True(t, ok)
+	return int16(opNum.Uint64())
+}
diff --git a/pkg/timelock/timelock.go b/pkg/timelock/timelock.go
index 9e67e13..c627ec9 100644
--- a/pkg/timelock/timelock.go
+++ b/pkg/timelock/timelock.go
@@ -39,7 +39,7 @@ type Worker struct {
 	dryRun     bool
 	logger     *zerolog.Logger
 	privateKey *ecdsa.PrivateKey
-	scheduler
+	scheduler  Scheduler
 }
 
 var httpSchemes = []string{"http", "https"}
@@ -130,7 +130,12 @@ func NewTimelockWorker(
 		dryRun:     dryRun,
 		logger:     logger,
 		privateKey: privateKeyECDSA,
-		scheduler:  *newScheduler(time.Duration(pollPeriod) * time.Second),
+	}
+
+	if dryRun {
+		tWorker.scheduler = newNopScheduler(logger)
+	} else {
+		tWorker.scheduler = newScheduler(time.Duration(pollPeriod)*time.Second, logger, tWorker.execute)
 	}
 
 	return tWorker, nil
@@ -145,7 +150,7 @@ func (tw *Worker) Listen(ctx context.Context) error {
 	tw.startLog()
 
 	// Run the scheduler to add/del operations in a thread-safe way.
- schedulingDone := tw.runScheduler(ctxwc) + schedulingDone := tw.scheduler.runScheduler(ctxwc) // Retrieve historical logs. historyDone, historyCh, err := tw.retrieveHistoricalLogs(ctxwc) @@ -176,7 +181,7 @@ func (tw *Worker) Listen(ctx context.Context) error { tw.logger.Info().Msg("shutting down timelock-worker") tw.logger.Info().Msg("dumping operation store") - tw.dumpOperationStore(time.Now) + tw.scheduler.dumpOperationStore(time.Now) // Wait for all goroutines to finish. shutdownCtx, cancel := context.WithTimeout(context.Background(), time.Second*5) @@ -459,9 +464,7 @@ func (tw *Worker) handleLog(ctx context.Context, log types.Log) error { if !isDone(ctx, tw.contract, cs.Id) && isOperation(ctx, tw.contract, cs.Id) { tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received", eventCallScheduled) - if !tw.dryRun { - tw.addToScheduler(cs) - } + tw.scheduler.addToScheduler(cs) } // A CallExecuted which is in Done status should delete the task in the scheduler store. @@ -473,9 +476,7 @@ func (tw *Worker) handleLog(ctx context.Context, log types.Log) error { if isDone(ctx, tw.contract, cs.Id) { tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received, skipping operation", eventCallExecuted) - if !tw.dryRun { - tw.delFromScheduler(cs.Id) - } + tw.scheduler.delFromScheduler(cs.Id) } // A Cancelled which is in Done status should delete the task in the scheduler store. @@ -487,9 +488,7 @@ func (tw *Worker) handleLog(ctx context.Context, log types.Log) error { if isDone(ctx, tw.contract, cs.Id) { tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received, cancelling operation", eventCancelled) - if !tw.dryRun { - tw.delFromScheduler(cs.Id) - } + tw.scheduler.delFromScheduler(cs.Id) } default: tw.logger.Info().Str("event", event.Name).Msgf("discarding event") diff --git a/pkg/timelock/timelock_test.go b/pkg/timelock/timelock_test.go index a393fc4..6c27c75 100644 --- a/pkg/timelock/timelock_test.go +++ b/pkg/timelock/timelock_test.go @@ -32,6 +32,8 @@ func newTestTimelockWorker( } func TestNewTimelockWorker(t *testing.T) { + t.Parallel() + svr := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) { writer.Write([]byte("Ok")) })) @@ -112,6 +114,8 @@ func TestNewTimelockWorker(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + t.Parallel() + args := defaultArgs tt.setup(&args) @@ -130,6 +134,8 @@ func TestNewTimelockWorker(t *testing.T) { } func TestWorker_startLog(t *testing.T) { + t.Parallel() + testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey, testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger) diff --git a/tests/integration/suite.go b/tests/integration/suite.go index db90d40..d9a25c8 100644 --- a/tests/integration/suite.go +++ b/tests/integration/suite.go @@ -78,7 +78,6 @@ func (s *integrationTestSuite) DeployTimelock( receipt, err := bind.WaitMined(ctx, client, transaction) s.Require().NoError(err) s.Require().Equal(types.ReceiptStatusSuccessful, receipt.Status) - s.Logf("timelock address: %v; deploy transaction: %v", address, transaction.Hash()) return address, transaction, receipt, contract @@ -96,7 +95,6 @@ func (s *integrationTestSuite) DeployCallProxy( receipt, err := bind.WaitMined(ctx, client, transaction) s.Require().NoError(err) 
 	s.Require().Equal(types.ReceiptStatusSuccessful, receipt.Status)
-
 	s.Logf("call proxy address: %v; deploy transaction: %v", address, transaction.Hash())
+
 	return address, transaction, receipt, contract
 
@@ -114,7 +112,6 @@ func (s *integrationTestSuite) UpdateDelay(
 	receipt, err := bind.WaitMined(ctx, client, transaction)
 	s.Require().NoError(err)
 	s.Require().Equal(types.ReceiptStatusSuccessful, receipt.Status)
-
 	s.Logf("update delay transaction: %v", transaction.Hash())
 
 	return transaction, receipt
 
@@ -132,8 +129,8 @@ func (s *integrationTestSuite) ScheduleBatch(
 	receipt, err := bind.WaitMined(ctx, client, transaction)
 	s.Require().NoError(err)
-	s.Require().Equal(receipt.Status, types.ReceiptStatusSuccessful)
-
+	s.Require().Equal(types.ReceiptStatusSuccessful, receipt.Status)
 	s.Logf("schedule batch transaction: %v", transaction.Hash())
+
 	return transaction, receipt
 }
diff --git a/tests/integration/timelock_test.go b/tests/integration/timelock_test.go
index a2b71c3..bb36a7b 100644
--- a/tests/integration/timelock_test.go
+++ b/tests/integration/timelock_test.go
@@ -13,10 +13,10 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/rs/zerolog"
+	"github.com/samber/lo"
 	contracts "github.com/smartcontractkit/ccip-owner-contracts/gethwrappers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"github.com/samber/lo"
 
 	"github.com/smartcontractkit/timelock-worker/pkg/timelock"
 	timelockTests "github.com/smartcontractkit/timelock-worker/tests"
@@ -94,7 +94,15 @@ func (s *integrationTestSuite) TestTimelockWorkerDryRun() {
 			name:   "dry run enabled",
 			dryRun: true,
 			assert: func(t *testing.T, logger timelockTests.TestLogger) {
-				requireJSONSubset(s.T(), logger.LastMessage(), `{"message":"CallScheduled received"}`)
+				messages := []string{
+					`"message":"CallScheduled received"`,
+					`"message":"nop.addToScheduler"`,
+				}
+				s.Require().EventuallyWithT(func(t *assert.CollectT) {
+					for _, message := range messages {
+						assert.True(t, containsMatchingMessage(logger, regexp.MustCompile(message)))
+					}
+				}, 2*time.Second, 100*time.Millisecond)
 			},
 		},
 		{
@@ -103,19 +111,18 @@ func (s *integrationTestSuite) TestTimelockWorkerDryRun() {
 			assert: func(t *testing.T, logger timelockTests.TestLogger) {
 				messages := []string{
 					`"message":"scheduling operation: 371141ec10c0cc52996bed94240931136172d0b46bdc4bceaea1ef76675c1237"`,
-					`"message":"operations in scheduler:`,
 					`"message":"scheduled operation: 371141ec10c0cc52996bed94240931136172d0b46bdc4bceaea1ef76675c1237"`,
 				}
 				s.Require().EventuallyWithT(func(t *assert.CollectT) {
 					for _, message := range messages {
-						s.Assert().True(containsMatchingMessage( logger, regexp.MustCompile(message)))
+						assert.True(t, containsMatchingMessage(logger, regexp.MustCompile(message)))
 					}
 				}, 2*time.Second, 100*time.Millisecond)
 			},
 		},
 	}
 	for _, tt := range tests {
-		s.Run(tt.name, func(t *testing.T) {
+		s.Run(tt.name, func() {
 			tctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 
 			timelockAddress, _, timelockContract, _ := s.DeployTimelock(tctx, transactor, client, account.address, big.NewInt(1))
 			callProxyAddress, _, _, _ := s.DeployCallProxy(tctx, transactor, client, timelockAddress)
 
 			go runTimelockWorker(s.T(), tctx, gethURL, timelockAddress.String(), callProxyAddress.String(),
-				account.hexPrivateKey, big.NewInt(0), int64(60), int64(1), tt.dryRun, logger.Logger())
+				account.hexPrivateKey, big.NewInt(0), int64(1), int64(1), tt.dryRun, logger.Logger())
 
 			calls := []contracts.RBACTimelockCall{{
common.HexToAddress("0x000000000000000000000000000000000000000"), @@ -134,7 +141,7 @@ func (s *integrationTestSuite) TestTimelockWorkerDryRun() { }} s.ScheduleBatch(tctx, transactor, client, timelockContract, calls, [32]byte{}, [32]byte{}, big.NewInt(1)) - tt.assert(t, logger) + tt.assert(s.T(), logger) }) } }