From 26394ab317ac06367ab119eb9d678c23dc418c0a Mon Sep 17 00:00:00 2001
From: Gustavo Gama <gustavo.gama@smartcontract.com>
Date: Fri, 15 Nov 2024 16:38:55 -0300
Subject: [PATCH] feat: add DRY_RUN config option

Add a `DRY_RUN` configuration option that controls whether the service
executes operations or simply logs the received events.

This is a temporary feature needed to help test the recently added logic
to monitor contract events using HTTP connections without causing any
side-effects.
---
 cmd/start.go                       |   9 +-
 go.mod                             |   5 +-
 go.sum                             |  10 +-
 pkg/cli/config.go                  |   8 ++
 pkg/cli/config_test.go             |  12 +-
 pkg/timelock/const_test.go         |   1 +
 pkg/timelock/operations_test.go    |   8 +-
 pkg/timelock/scheduler.go          |   4 +
 pkg/timelock/scheduler_test.go     |   8 +-
 pkg/timelock/timelock.go           |  18 ++-
 pkg/timelock/timelock_test.go      | 182 +++++++++++------------------
 tests/integration/suite.go         |  23 +++-
 tests/integration/timelock_test.go | 141 ++++++++++++++++++----
 tests/logger.go                    |   2 +-
 14 files changed, 270 insertions(+), 161 deletions(-)

diff --git a/cmd/start.go b/cmd/start.go
index aad6ef0..60d8cbb 100644
--- a/cmd/start.go
+++ b/cmd/start.go
@@ -19,6 +19,7 @@ func startCommand() *cobra.Command {
 
 		nodeURL, privateKey, timelockAddress, callProxyAddress string
 		fromBlock, pollPeriod, eventListenerPollPeriod         int64
+		dryRun                                                 bool
 	)
 
 	// Initialize timelock-worker configuration.
@@ -40,6 +41,7 @@ func startCommand() *cobra.Command {
 	startCmd.Flags().Int64Var(&fromBlock, "from-block", timelockConf.FromBlock, "Start watching from this block")
 	startCmd.Flags().Int64Var(&pollPeriod, "poll-period", timelockConf.PollPeriod, "Poll period in seconds")
 	startCmd.Flags().Int64Var(&eventListenerPollPeriod, "event-listener-poll-period", timelockConf.EventListenerPollPeriod, "Event Listener poll period in seconds")
+	startCmd.Flags().BoolVar(&dryRun, "dry-run", timelockConf.DryRun, "Enable \"dry run\" mode -- monitor events but don't trigger any calls")
 
 	return &startCmd
 }
@@ -86,8 +88,13 @@ func startTimelock(cmd *cobra.Command) {
 		logs.Fatal().Msgf("value of poll-period not set: %s", err.Error())
 	}
 
+	dryRun, err := cmd.Flags().GetBool("dry-run")
+	if err != nil {
+		logs.Fatal().Msgf("value of dry-run not set: %s", err.Error())
+	}
+
 	tWorker, err := timelock.NewTimelockWorker(nodeURL, timelockAddress, callProxyAddress, privateKey,
-		big.NewInt(fromBlock), pollPeriod, eventListenerPollPeriod, logs)
+		big.NewInt(fromBlock), pollPeriod, eventListenerPollPeriod, dryRun, logs)
 	if err != nil {
 		logs.Fatal().Msgf("error creating the timelock-worker: %s", err.Error())
 	}
diff --git a/go.mod b/go.mod
index 64de241..d9d03d5 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,7 @@ require (
 	github.com/ethereum/go-ethereum v1.13.15
 	github.com/prometheus/client_golang v1.19.0
 	github.com/rs/zerolog v1.31.0
+	github.com/samber/lo v1.47.0
 	github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240917103524-56f1a8d2cd4b
 	github.com/smartcontractkit/chain-selectors v1.0.17
 	github.com/spf13/cobra v1.8.0
@@ -95,9 +96,9 @@ require (
 	go.uber.org/multierr v1.9.0 // indirect
 	golang.org/x/crypto v0.22.0 // indirect
 	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
-	golang.org/x/sync v0.5.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
 	golang.org/x/sys v0.21.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 2472849..25e8d58 100644
--- a/go.sum
+++ b/go.sum
@@ -363,6 +363,8 @@ github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9c
 github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
+github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
 github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
@@ -559,8 +561,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -630,8 +632,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/pkg/cli/config.go b/pkg/cli/config.go
index e10e198..ca3a8c1 100644
--- a/pkg/cli/config.go
+++ b/pkg/cli/config.go
@@ -3,7 +3,9 @@ package cli
 import (
 	"fmt"
 	"os"
+	"slices"
 	"strconv"
+	"strings"
 
 	"github.com/spf13/viper"
 )
@@ -17,6 +19,7 @@ type Config struct {
 	FromBlock               int64  `mapstructure:"FROM_BLOCK"`
 	PollPeriod              int64  `mapstructure:"POLL_PERIOD"`
 	EventListenerPollPeriod int64  `mapstructure:"EVENT_LISTENER_POLL_PERIOD"`
+	DryRun                  bool   `mapstructure:"DRY_RUN"`
 }
 
 // NewTimelockCLI return a new Timelock instance configured.
@@ -80,5 +83,10 @@ func NewTimelockCLI() (*Config, error) {
 		c.EventListenerPollPeriod = int64(pp)
 	}
 
+	if os.Getenv("DRY_RUN") != "" {
+		trueValues := []string{"true", "yes", "on", "enabled", "1"}
+		c.DryRun = slices.Contains(trueValues, strings.ToLower(os.Getenv("DRY_RUN")))
+	}
+
 	return &c, nil
 }
diff --git a/pkg/cli/config_test.go b/pkg/cli/config_test.go
index ec13ff0..1cf3864 100644
--- a/pkg/cli/config_test.go
+++ b/pkg/cli/config_test.go
@@ -21,7 +21,7 @@ func Test_NewTimelockCLI(t *testing.T) {
 			name: "load from file",
 			setup: func(t *testing.T) {
 				unsetenvs(t, "NODE_URL", "TIMELOCK_ADDRESS", "CALL_PROXY_ADDRESS", "PRIVATE_KEY", "FROM_BLOCK",
-					"POLL_PERIOD", "EVENT_LISTENER_POLL_PERIOD")
+					"POLL_PERIOD", "EVENT_LISTENER_POLL_PERIOD", "DRY_RUN")
 
 				err := os.WriteFile(configFileName, []byte(string(
 					"NODE_URL=wss://goerli/test\n"+
@@ -30,7 +30,8 @@ func Test_NewTimelockCLI(t *testing.T) {
 						"PRIVATE_KEY=9876543210\n"+
 						"FROM_BLOCK=1\n"+
 						"POLL_PERIOD=2\n"+
-						"EVENT_LISTENER_POLL_PERIOD=3\n",
+						"EVENT_LISTENER_POLL_PERIOD=3\n"+
+						"DRY_RUN=true\n",
 				)), os.FileMode(0644))
 				require.NoError(t, err)
 
@@ -44,6 +45,7 @@ func Test_NewTimelockCLI(t *testing.T) {
 				FromBlock:               1,
 				PollPeriod:              2,
 				EventListenerPollPeriod: 3,
+				DryRun:                  true,
 			},
 		},
 		{
@@ -56,7 +58,8 @@ func Test_NewTimelockCLI(t *testing.T) {
 						"PRIVATE_KEY=9876543210\n"+
 						"FROM_BLOCK=1\n"+
 						"POLL_PERIOD=2\n"+
-						"EVENT_LISTENER_POLL_PERIOD=3\n",
+						"EVENT_LISTENER_POLL_PERIOD=3\n"+
+						"DRY_RUN=true\n",
 				)), os.FileMode(0644))
 				require.NoError(t, err)
 
@@ -67,6 +70,7 @@ func Test_NewTimelockCLI(t *testing.T) {
 				t.Setenv("FROM_BLOCK", "4")
 				t.Setenv("POLL_PERIOD", "5")
 				t.Setenv("EVENT_LISTENER_POLL_PERIOD", "6")
+				t.Setenv("DRY_RUN", "false")
 
 				t.Cleanup(func() { os.Remove(configFileName) })
 			},
@@ -95,6 +99,7 @@ func Test_NewTimelockCLI(t *testing.T) {
 				t.Setenv("FROM_BLOCK", "4")
 				t.Setenv("POLL_PERIOD", "5")
 				t.Setenv("EVENT_LISTENER_POLL_PERIOD", "6")
+				t.Setenv("DRY_RUN", "yes")
 
 				t.Cleanup(func() { os.Remove(configFileName) })
 			},
@@ -106,6 +111,7 @@ func Test_NewTimelockCLI(t *testing.T) {
 				FromBlock:               4,
 				PollPeriod:              5,
 				EventListenerPollPeriod: 6,
+				DryRun:                  true,
 			},
 		},
 		{
diff --git a/pkg/timelock/const_test.go b/pkg/timelock/const_test.go
index e2e8c01..7eabd85 100644
--- a/pkg/timelock/const_test.go
+++ b/pkg/timelock/const_test.go
@@ -15,5 +15,6 @@ var (
 	testFromBlock               = big.NewInt(0)
 	testPollPeriod              = 5
 	testEventListenerPollPeriod = 0
+	testDryRun                  = false
 	testLogger                  = logger.Logger("info", "human")
 )
diff --git a/pkg/timelock/operations_test.go b/pkg/timelock/operations_test.go
index 4bdbee9..bb797bd 100644
--- a/pkg/timelock/operations_test.go
+++ b/pkg/timelock/operations_test.go
@@ -12,7 +12,7 @@ import (
 
 func Test_isOperation(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	var ctx context.Context
 
@@ -56,7 +56,7 @@ func Test_isOperation(t *testing.T) {
 
 func Test_isReady(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	var ctx context.Context
 
@@ -100,7 +100,7 @@ func Test_isReady(t *testing.T) {
 
 func Test_isDone(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	var ctx context.Context
 
@@ -144,7 +144,7 @@ func Test_isDone(t *testing.T) {
 
 func Test_isPending(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	var ctx context.Context
 
diff --git a/pkg/timelock/scheduler.go b/pkg/timelock/scheduler.go
index c1c1208..5f0a9e1 100644
--- a/pkg/timelock/scheduler.go
+++ b/pkg/timelock/scheduler.go
@@ -112,6 +112,8 @@ func (tw *Worker) updateSchedulerDelay(t time.Duration) {
 
 // addToScheduler adds a new CallSchedule operation safely to the store.
 func (tw *Worker) addToScheduler(op *contract.TimelockCallScheduled) {
+	tw.mu.Lock()
+	defer tw.mu.Unlock()
 	tw.logger.Debug().Msgf("scheduling operation: %x", op.Id)
 	tw.add <- op
 	tw.logger.Debug().Msgf("operations in scheduler: %v", len(tw.store))
@@ -119,6 +121,8 @@ func (tw *Worker) addToScheduler(op *contract.TimelockCallScheduled) {
 
 // delFromScheduler deletes an operation safely from the store.
 func (tw *Worker) delFromScheduler(op operationKey) {
+	tw.mu.Lock()
+	defer tw.mu.Unlock()
 	tw.logger.Debug().Msgf("de-scheduling operation: %v", op)
 	tw.del <- op
 	tw.logger.Debug().Msgf("operations in scheduler: %v", len(tw.store))
diff --git a/pkg/timelock/scheduler_test.go b/pkg/timelock/scheduler_test.go
index 38d6868..c64761f 100644
--- a/pkg/timelock/scheduler_test.go
+++ b/pkg/timelock/scheduler_test.go
@@ -46,7 +46,7 @@ func Test_newScheduler(t *testing.T) {
 
 func TestWorker_updateSchedulerDelay(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	// Should never fail
 	testWorker.updateSchedulerDelay(1 * time.Second)
@@ -56,7 +56,7 @@ func TestWorker_updateSchedulerDelay(t *testing.T) {
 
 func TestWorker_isSchedulerBusy(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	isBusy := testWorker.isSchedulerBusy()
 	assert.Equal(t, false, isBusy, "scheduler should be busy by default")
@@ -72,7 +72,7 @@ func TestWorker_isSchedulerBusy(t *testing.T) {
 
 func TestWorker_setSchedulerBusy(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	testWorker.setSchedulerBusy()
 	isBusy := testWorker.isSchedulerBusy()
@@ -81,7 +81,7 @@ func TestWorker_setSchedulerBusy(t *testing.T) {
 
 func TestWorker_setSchedulerFree(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	testWorker.setSchedulerFree()
 	isBusy := testWorker.isSchedulerBusy()
diff --git a/pkg/timelock/timelock.go b/pkg/timelock/timelock.go
index 50aff84..e5f3e4f 100644
--- a/pkg/timelock/timelock.go
+++ b/pkg/timelock/timelock.go
@@ -36,6 +36,7 @@ type Worker struct {
 	fromBlock          *big.Int
 	pollPeriod         int64
 	listenerPollPeriod int64
+	dryRun             bool
 	logger             *zerolog.Logger
 	privateKey         *ecdsa.PrivateKey
 	scheduler
@@ -49,7 +50,7 @@ var validNodeUrlSchemes = []string{"http", "https", "ws", "wss"}
 // It's a singleton, so further executions will retrieve the same timelockWorker.
 func NewTimelockWorker(
 	nodeURL, timelockAddress, callProxyAddress, privateKey string, fromBlock *big.Int,
-	pollPeriod int64, listenerPollPeriod int64, logger *zerolog.Logger,
+	pollPeriod int64, listenerPollPeriod int64, dryRun bool, logger *zerolog.Logger,
 ) (*Worker, error) {
 	// Sanity check on each provided variable before allocating more resources.
 	u, err := url.ParseRequestURI(nodeURL)
@@ -126,6 +127,7 @@ func NewTimelockWorker(
 		fromBlock:          fromBlock,
 		pollPeriod:         pollPeriod,
 		listenerPollPeriod: listenerPollPeriod,
+		dryRun:             dryRun,
 		logger:             logger,
 		privateKey:         privateKeyECDSA,
 		scheduler:          *newScheduler(time.Duration(pollPeriod) * time.Second),
@@ -196,7 +198,7 @@ func (tw *Worker) setupFilterQuery(fromBlock *big.Int) ethereum.FilterQuery {
 // retrieveNewLogs returns a "control channel" and a "logs channels". The logs channel is where
 // new log events will be asynchronously pushed to.
 //
-// The actual retrieveal is performed by either `subscribeNewLogs`, if the node connection
+// The actual retrieval is performed by either `subscribeNewLogs`, if the node connection
 // supports subscriptions, or `pollNewLogs` otherwise. In practice, the ethclient library
 // simply checks if the given node URL is "http(s)" or not.
 func (tw *Worker) retrieveNewLogs(ctx context.Context) (<-chan struct{}, <-chan types.Log, error) {
@@ -449,7 +451,9 @@ func (tw *Worker) handleLog(ctx context.Context, log types.Log) error {
 
 		if !isDone(ctx, tw.contract, cs.Id) && isOperation(ctx, tw.contract, cs.Id) {
 			tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received", eventCallScheduled)
-			tw.addToScheduler(cs)
+			if !tw.dryRun {
+				tw.addToScheduler(cs)
+			}
 		}
 
 		// A CallExecuted which is in Done status should delete the task in the scheduler store.
@@ -461,7 +465,9 @@ func (tw *Worker) handleLog(ctx context.Context, log types.Log) error {
 
 		if isDone(ctx, tw.contract, cs.Id) {
 			tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received, skipping operation", eventCallExecuted)
-			tw.delFromScheduler(cs.Id)
+			if !tw.dryRun {
+				tw.delFromScheduler(cs.Id)
+			}
 		}
 
 		// A Cancelled which is in Done status should delete the task in the scheduler store.
@@ -473,7 +479,9 @@ func (tw *Worker) handleLog(ctx context.Context, log types.Log) error {
 
 		if isDone(ctx, tw.contract, cs.Id) {
 			tw.logger.Info().Hex(fieldTXHash, cs.Raw.TxHash[:]).Uint64(fieldBlockNumber, cs.Raw.BlockNumber).Msgf("%s received, cancelling operation", eventCancelled)
-			tw.delFromScheduler(cs.Id)
+			if !tw.dryRun {
+				tw.delFromScheduler(cs.Id)
+			}
 		}
 	default:
 		tw.logger.Info().Str("event", event.Name).Msgf("discarding event")
diff --git a/pkg/timelock/timelock_test.go b/pkg/timelock/timelock_test.go
index 9e668f0..a393fc4 100644
--- a/pkg/timelock/timelock_test.go
+++ b/pkg/timelock/timelock_test.go
@@ -2,16 +2,18 @@ package timelock
 
 import (
 	"math/big"
-	"reflect"
+	"net/http"
+	"net/http/httptest"
 	"testing"
 
 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func newTestTimelockWorker(
 	t *testing.T, nodeURL, timelockAddress, callProxyAddress, privateKey string, fromBlock *big.Int,
-	pollPeriod int64, eventListenerPollPeriod int64, logger *zerolog.Logger,
+	pollPeriod int64, eventListenerPollPeriod int64, dryRun bool, logger *zerolog.Logger,
 ) *Worker {
 	assert.NotEmpty(t, nodeURL, "nodeURL is empty. Are environment variabes in const_test.go set?")
 	assert.NotEmpty(t, timelockAddress, "nodeURL is empty. Are environment variabes in const_test.go set?")
@@ -22,7 +24,7 @@ func newTestTimelockWorker(
 	assert.NotNil(t, logger, "logger is nil. Are environment variabes in const_test.go set?")
 
 	tw, err := NewTimelockWorker(nodeURL, timelockAddress, callProxyAddress, privateKey, fromBlock,
-		pollPeriod, eventListenerPollPeriod, logger)
+		pollPeriod, eventListenerPollPeriod, dryRun, logger)
 	assert.NoError(t, err)
 	assert.NotNil(t, tw)
 
@@ -30,10 +32,12 @@ func newTestTimelockWorker(
 }
 
 func TestNewTimelockWorker(t *testing.T) {
-	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+	svr := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) {
+		writer.Write([]byte("Ok"))
+	}))
+	defer svr.Close()
 
-	type args struct {
+	type argsT struct {
 		nodeURL                 string
 		timelockAddress         string
 		callProxyAddress        string
@@ -41,137 +45,85 @@ func TestNewTimelockWorker(t *testing.T) {
 		fromBlock               *big.Int
 		pollPeriod              int64
 		eventListenerPollPeriod int64
-		logger                  *zerolog.Logger
+		dryRun                  bool
+		logger                  zerolog.Logger
+	}
+	defaultArgs := argsT{
+		nodeURL:                 svr.URL,
+		timelockAddress:         "0x0000000000000000000000000000000000000001",
+		callProxyAddress:        "0x0000000000000000000000000000000000000002",
+		privateKey:              "1921763610b80b2b147ca676c775c172ba6c037f6ba135792bed6bc458c660f0",
+		fromBlock:               big.NewInt(1),
+		pollPeriod:              900,
+		eventListenerPollPeriod: 60,
+		dryRun:                  false,
+		logger:                  zerolog.Nop(),
 	}
+
 	tests := []struct {
 		name    string
-		args    args
-		want    *Worker
-		wantErr bool
+		setup   func(*argsT)
+		wantErr string
 	}{
 		{
-			name: "NewTimelockWorker new instance is created (success)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: false,
+			name:  "success",
+			setup: func(*argsT) {},
+		},
+		{
+			name:    "failure - invalid host in node url",
+			setup:   func(a *argsT) { a.nodeURL = "wss://invalid.host/rpc" },
+			wantErr: "no such host",
 		},
 		{
-			name: "NewTimelockWorker bad rpc provided (fail)",
-			args: args{
-				nodeURL:          "wss://bad/rpc",
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - invalid url scheme in node url",
+			setup:   func(a *argsT) { a.nodeURL = "invalid://localhost/rpc" },
+			wantErr: "invalid node URL: invalid://localhost/rpc (accepted schemes are: [http https ws wss])",
 		},
 		{
-			name: "NewTimelockWorker bad rpc protocol provided (fail)",
-			args: args{
-				nodeURL:          "https://bad/protocol",
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad timelock address",
+			setup:   func(a *argsT) { a.timelockAddress = "invalid" },
+			wantErr: "timelock address provided is not valid: invalid",
 		},
 		{
-			name: "NewTimelockWorker bad timelock address provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  "0x1234",
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad call proxy address",
+			setup:   func(a *argsT) { a.callProxyAddress = "invalid" },
+			wantErr: "call proxy address provided is not valid: invalid",
 		},
 		{
-			name: "NewTimelockWorker bad call proxy address provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: "0x1234",
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad private key",
+			setup:   func(a *argsT) { a.privateKey = "invalid" },
+			wantErr: "the provided private key is not valid: got invalid",
 		},
 		{
-			name: "NewTimelockWorker bad private key provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       "0123456789",
-				fromBlock:        testFromBlock,
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad from block",
+			setup:   func(a *argsT) { a.fromBlock = big.NewInt(-1) },
+			wantErr: "from block can't be a negative number (minimum value 0): got -1",
 		},
 		{
-			name: "NewTimelockWorker bad negative from block provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        big.NewInt(-1),
-				pollPeriod:       int64(testPollPeriod),
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad poll period",
+			setup:   func(a *argsT) { a.pollPeriod = -1 },
+			wantErr: "poll-period must be a positive non-zero integer: got -1",
 		},
 		{
-			name: "NewTimelockWorker bad poll period provided (fail)",
-			args: args{
-				nodeURL:          testNodeURL,
-				timelockAddress:  testTimelockAddress,
-				callProxyAddress: testCallProxyAddress,
-				privateKey:       testPrivateKey,
-				fromBlock:        testFromBlock,
-				pollPeriod:       0,
-				logger:           testLogger,
-			},
-			want:    testWorker,
-			wantErr: true,
+			name:    "failure - bad event listener poll period",
+			setup:   func(a *argsT) { a.eventListenerPollPeriod = -1 },
+			wantErr: "event-listener-poll-period must be a positive non-zero integer: got -1",
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := NewTimelockWorker(tt.args.nodeURL, tt.args.timelockAddress, tt.args.callProxyAddress,
-				tt.args.privateKey, tt.args.fromBlock, tt.args.pollPeriod, tt.args.eventListenerPollPeriod, tt.args.logger)
-			if (err != nil) != tt.wantErr {
-				t.Errorf("NewTimelockWorker() error = %v, wantErr %v", err, tt.wantErr)
-				return
-			}
-			if !(reflect.TypeOf(tt.want) == reflect.TypeOf(got)) {
-				t.Errorf("NewTimelockWorker() = %v, want %v", got, tt.want)
+			args := defaultArgs
+			tt.setup(&args)
+
+			got, err := NewTimelockWorker(args.nodeURL, args.timelockAddress, args.callProxyAddress,
+				args.privateKey, args.fromBlock, args.pollPeriod, args.eventListenerPollPeriod,
+				args.dryRun, &args.logger)
+
+			if tt.wantErr == "" {
+				require.NoError(t, err)
+				require.IsType(t, &Worker{}, got)
+			} else {
+				require.ErrorContains(t, err, tt.wantErr)
 			}
 		})
 	}
@@ -179,7 +131,7 @@ func TestNewTimelockWorker(t *testing.T) {
 
 func TestWorker_startLog(t *testing.T) {
 	testWorker := newTestTimelockWorker(t, testNodeURL, testTimelockAddress, testCallProxyAddress, testPrivateKey,
-		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testLogger)
+		testFromBlock, int64(testPollPeriod), int64(testEventListenerPollPeriod), testDryRun, testLogger)
 
 	tests := []struct {
 		name string
diff --git a/tests/integration/suite.go b/tests/integration/suite.go
index ddd4f66..d163ba3 100644
--- a/tests/integration/suite.go
+++ b/tests/integration/suite.go
@@ -65,7 +65,8 @@ func (s *integrationTestSuite) KeyedTransactor(privateKey *ecdsa.PrivateKey, cha
 }
 
 func (s *integrationTestSuite) DeployTimelock(
-	ctx context.Context, transactor *bind.TransactOpts, client *ethclient.Client, adminAccount common.Address,
+	ctx context.Context, transactor *bind.TransactOpts, client *ethclient.Client,
+	adminAccount common.Address, minDelay *big.Int,
 ) (
 	common.Address, *types.Transaction, *types.Receipt, *contracts.RBACTimelock,
 ) {
@@ -75,7 +76,7 @@ func (s *integrationTestSuite) DeployTimelock(
 	bypassers := []common.Address{}
 
 	address, transaction, contract, err := contracts.DeployRBACTimelock(
-		transactor, client, big.NewInt(10800), adminAccount, proposers, executors, cancellers, bypassers)
+		transactor, client, minDelay, adminAccount, proposers, executors, cancellers, bypassers)
 	s.Require().NoError(err)
 
 	receipt, err := bind.WaitMined(ctx, client, transaction)
@@ -119,3 +120,21 @@ func (s *integrationTestSuite) UpdateDelay(
 	s.Logf("update delay transaction: %v", transaction.Hash())
 	return transaction, receipt
 }
+
+func (s *integrationTestSuite) ScheduleBatch(
+	ctx context.Context, transactor *bind.TransactOpts, client *ethclient.Client,
+	timelockContract *contracts.RBACTimelock, calls []contracts.RBACTimelockCall,
+	predecessor [32]byte, salt [32]byte, delay *big.Int,
+) (
+	*types.Transaction, *types.Receipt,
+) {
+	transaction, err := timelockContract.ScheduleBatch(transactor, calls, predecessor, salt, delay)
+	s.Require().NoError(err)
+
+	receipt, err := bind.WaitMined(ctx, client, transaction)
+	s.Require().NoError(err)
+	s.Require().Equal(receipt.Status, types.ReceiptStatusSuccessful)
+
+	s.Logf("schedule batch transaction: %v", transaction.Hash())
+	return transaction, receipt
+}
diff --git a/tests/integration/timelock_test.go b/tests/integration/timelock_test.go
index ab2ae2a..ca13495 100644
--- a/tests/integration/timelock_test.go
+++ b/tests/integration/timelock_test.go
@@ -3,14 +3,20 @@ package integration
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"math/big"
+	"regexp"
 	"testing"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/rs/zerolog"
+	"github.com/samber/lo"
+	contracts "github.com/smartcontractkit/ccip-owner-contracts/gethwrappers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/smartcontractkit/timelock-worker/pkg/timelock"
 	timelockTests "github.com/smartcontractkit/timelock-worker/tests"
@@ -20,18 +26,19 @@ func (s *integrationTestSuite) TestTimelockWorkerListen() {
 	ctx, cancel := context.WithCancel(s.Ctx)
 	defer cancel()
 
-	account1 := NewTestAccount(s.T())
-	s.Logf("new account created: %v", account1)
+	account := NewTestAccount(s.T())
+	s.Logf("new account created: %v", account)
 
-	_, err := s.GethContainer.CreateAccount(ctx, account1.hexAddress, account1.hexPrivateKey, 1)
+	_, err := s.GethContainer.CreateAccount(ctx, account.hexAddress, account.hexPrivateKey, 1)
 	s.Require().NoError(err)
 
 	// create geth rpc client
 	gethURL := s.GethContainer.HTTPConnStr(s.T(), ctx)
 	client, err := ethclient.DialContext(ctx, gethURL)
 	s.Require().NoError(err)
+	defer client.Close()
 
-	transactor := s.KeyedTransactor(account1.privateKey, nil)
+	transactor := s.KeyedTransactor(account.privateKey, nil)
 
 	tests := []struct {
 		name string
@@ -47,11 +54,11 @@ func (s *integrationTestSuite) TestTimelockWorkerListen() {
 
 			logger := timelockTests.NewTestLogger(zerolog.Nop()) // "zerolog.TestWriter{T: t, Frame: 6}" when debugging
 
-			timelockAddress, _, _, timelockContract := s.DeployTimelock(ctx, transactor, client, account1.address)
+			timelockAddress, _, _, timelockContract := s.DeployTimelock(ctx, transactor, client, account.address, big.NewInt(1))
 			callProxyAddress, _, _, _ := s.DeployCallProxy(ctx, transactor, client, timelockAddress)
 
 			go runTimelockWorker(s.T(), sctx, tt.url, timelockAddress.String(), callProxyAddress.String(),
-				account1.hexPrivateKey, big.NewInt(0), int64(60), int64(1), logger.Logger())
+				account.hexPrivateKey, big.NewInt(0), int64(60), int64(1), true, logger.Logger())
 
 			s.UpdateDelay(ctx, transactor, client, timelockContract, big.NewInt(10))
 
@@ -60,16 +67,88 @@ func (s *integrationTestSuite) TestTimelockWorkerListen() {
 	}
 }
 
+func (s *integrationTestSuite) TestTimelockWorkerDryRun() {
+	ctx, cancel := context.WithCancel(s.Ctx)
+	defer cancel()
+
+	account := NewTestAccount(s.T())
+	s.Logf("new account created: %v", account)
+
+	_, err := s.GethContainer.CreateAccount(ctx, account.hexAddress, account.hexPrivateKey, 1)
+	s.Require().NoError(err)
+
+	// create geth rpc client
+	gethURL := s.GethContainer.HTTPConnStr(s.T(), ctx)
+	client, err := ethclient.DialContext(ctx, gethURL)
+	s.Require().NoError(err)
+	defer client.Close()
+
+	transactor := s.KeyedTransactor(account.privateKey, nil)
+
+	tests := []struct {
+		name   string
+		dryRun bool
+		assert func(t *testing.T, logger timelockTests.TestLogger)
+	}{
+		{
+			name:   "dry run enabled",
+			dryRun: true,
+			assert: func(t *testing.T, logger timelockTests.TestLogger) {
+				requireJSONSubset(t, logger.LastMessage(), `{"message":"CallScheduled received"}`)
+			},
+		},
+		{
+			name:   "dry run disabled",
+			dryRun: false,
+			assert: func(t *testing.T, logger timelockTests.TestLogger) {
+				messages := []string{
+					`"message":"scheduling operation: 371141ec10c0cc52996bed94240931136172d0b46bdc4bceaea1ef76675c1237"`,
+					`"message":"operations in scheduler:`,
+					`"message":"scheduled operation: 371141ec10c0cc52996bed94240931136172d0b46bdc4bceaea1ef76675c1237"`,
+				}
+				s.Require().EventuallyWithT(func(t *assert.CollectT) {
+					for _, message := range messages {
+						assert.True(t, containsMatchingMessage(logger, regexp.MustCompile(message)))
+					}
+				}, 2*time.Second, 100*time.Millisecond)
+			},
+		},
+	}
+	for _, tt := range tests {
+		s.Run(tt.name, func(t *testing.T) {
+			tctx, cancel := context.WithCancel(ctx)
+			defer cancel()
+
+			logger := timelockTests.NewTestLogger(zerolog.Nop()) // "zerolog.TestWriter{T: t, Frame: 6}" when debugging
+
+			timelockAddress, _, _, timelockContract := s.DeployTimelock(tctx, transactor, client, account.address, big.NewInt(1))
+			callProxyAddress, _, _, _ := s.DeployCallProxy(tctx, transactor, client, timelockAddress)
+
+			go runTimelockWorker(s.T(), tctx, gethURL, timelockAddress.String(), callProxyAddress.String(),
+				account.hexPrivateKey, big.NewInt(0), int64(60), int64(1), tt.dryRun, logger.Logger())
+
+			calls := []contracts.RBACTimelockCall{{
+				Target: common.HexToAddress("0x0000000000000000000000000000000000000000"),
+				Value:  big.NewInt(1),
+				Data:   hexutil.MustDecode("0x0123456789abcdef"),
+			}}
+			s.ScheduleBatch(tctx, transactor, client, timelockContract, calls, [32]byte{}, [32]byte{}, big.NewInt(1))
+
+			tt.assert(t, logger)
+		})
+	}
+}
+
 // ----- helpers -----
 
 func runTimelockWorker(
 	t *testing.T, ctx context.Context, nodeURL, timelockAddress, callProxyAddress, privateKey string,
-	fromBlock *big.Int, pollPeriod int64, listenerPollPeriod int64, logger *zerolog.Logger,
+	fromBlock *big.Int, pollPeriod int64, listenerPollPeriod int64, dryRun bool, logger *zerolog.Logger,
 ) {
 	t.Logf("TimelockWorker.Listen(%v, %v, %v, %v, %v, %v, %v)", nodeURL, timelockAddress,
 		callProxyAddress, privateKey, fromBlock, pollPeriod, listenerPollPeriod)
 	timelockWorker, err := timelock.NewTimelockWorker(nodeURL, timelockAddress,
-		callProxyAddress, privateKey, fromBlock, pollPeriod, listenerPollPeriod, logger)
+		callProxyAddress, privateKey, fromBlock, pollPeriod, listenerPollPeriod, dryRun, logger)
 	require.NoError(t, err)
 	require.NotNil(t, timelockWorker)
 
@@ -97,23 +176,45 @@ func assertCapturedLogMessages(t *testing.T, logger timelockTests.TestLogger) {
 	}, 5*time.Second, 200*time.Millisecond)
 }
 
+func selectMatchingMessage(logger timelockTests.TestLogger, pattern *regexp.Regexp) []string {
+	return lo.Filter(logger.Messages(), func(loggedMessage string, _ int) bool {
+		return pattern.MatchString(loggedMessage)
+	})
+}
+
+func containsMatchingMessage(logger timelockTests.TestLogger, pattern *regexp.Regexp) bool {
+	return len(selectMatchingMessage(logger, pattern)) > 0
+}
+
 func selectDiscardingEventMessages(t *testing.T, logger timelockTests.TestLogger) []string {
 	t.Helper()
 
-	selectedEvents := []string{}
-
-	for _, entry := range logger.Messages() {
-		parsedEntry := struct {
-			Message string
-			Event   string
-		}{}
-		err := json.Unmarshal([]byte(entry), &parsedEntry)
+	messages := selectMatchingMessage(logger, regexp.MustCompile(`"message":"discarding event"`))
+	return lo.Map(messages, func(message string, _ int) string {
+		parsedEntry := struct{ Event string }{}
+		err := json.Unmarshal([]byte(message), &parsedEntry)
 		require.NoError(t, err)
+		return parsedEntry.Event
+	})
+}
+
+func assertJSONSubset(t assert.TestingT, expected string, actual string) bool {
+	var expectedJSONAsInterface, actualJSONAsInterface interface{}
 
-		if parsedEntry.Message == "discarding event" {
-			selectedEvents = append(selectedEvents, parsedEntry.Event)
-		}
+	if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+		return assert.Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()))
 	}
 
-	return selectedEvents
+	if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+		return assert.Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()))
+	}
+
+	return assert.Subset(t, expectedJSONAsInterface, actualJSONAsInterface)
+}
+
+func requireJSONSubset(t require.TestingT, expected string, actual string) {
+	if assertJSONSubset(t, expected, actual) {
+		return
+	}
+	t.FailNow()
 }
diff --git a/tests/logger.go b/tests/logger.go
index 79366fe..69251cf 100644
--- a/tests/logger.go
+++ b/tests/logger.go
@@ -55,7 +55,7 @@ func (tl testLogger) LastMessage() string {
 	tl.mutex.Lock()
 	defer tl.mutex.Unlock()
 
-	return (*tl.messages)[tl.NumMessages()-1]
+	return (*tl.messages)[len(*tl.messages)-1]
 }
 
 func (tl testLogger) Messages() []string {