From b5076b13e307a513d4cb6c42e924f1e71b524a24 Mon Sep 17 00:00:00 2001 From: Dmytro Haidashenko <34754799+dhaidashenko@users.noreply.github.com> Date: Tue, 30 Jul 2024 12:24:52 +0200 Subject: [PATCH 1/5] No new finalized Heads Implementation (#13907) * No new finalized Heads Implementation * fixed tests * update defaults for NoNewFinalizedHeadsThreshold * Update common/client/node_lifecycle.go Co-authored-by: amit-momin <108959691+amit-momin@users.noreply.github.com> * Update common/client/node_lifecycle_test.go Co-authored-by: amit-momin <108959691+amit-momin@users.noreply.github.com> * Update common/client/node_lifecycle_test.go Co-authored-by: amit-momin <108959691+amit-momin@users.noreply.github.com> * rename HeadIsNotIncreasing to NoNewHead * move and add docs for syncIssue consts * rename syncIssue to syncStatus --------- Co-authored-by: amit-momin <108959691+amit-momin@users.noreply.github.com> --- .changeset/wild-seals-look.md | 5 + common/client/mock_node_client_test.go | 194 +++++-- common/client/mock_rpc_test.go | 138 ++++- common/client/mocks/config.go | 13 +- common/client/models.go | 45 ++ common/client/models_test.go | 34 ++ common/client/multi_node.go | 9 + common/client/node.go | 1 + common/client/node_fsm.go | 7 +- common/client/node_lifecycle.go | 363 ++++++++---- common/client/node_lifecycle_test.go | 524 ++++++++++++++---- common/client/types.go | 6 +- core/chains/evm/client/chain_client_test.go | 2 +- core/chains/evm/client/config_builder.go | 30 +- core/chains/evm/client/config_builder_test.go | 5 +- core/chains/evm/client/evm_client.go | 4 +- core/chains/evm/client/evm_client_test.go | 4 +- core/chains/evm/client/helpers_test.go | 4 +- core/chains/evm/client/mocks/rpc_client.go | 138 ++++- core/chains/evm/client/rpc_client.go | 65 ++- core/chains/evm/client/rpc_client_test.go | 16 +- core/chains/evm/config/chain_scoped.go | 4 + core/chains/evm/config/config.go | 1 + core/chains/evm/config/toml/config.go | 43 +- core/chains/evm/config/toml/defaults.go | 4 + .../config/toml/defaults/Avalanche_Fuji.toml | 1 + .../toml/defaults/Avalanche_Mainnet.toml | 1 + .../evm/config/toml/defaults/BSC_Mainnet.toml | 1 + .../evm/config/toml/defaults/BSC_Testnet.toml | 1 + .../config/toml/defaults/Base_Mainnet.toml | 1 + .../config/toml/defaults/Base_Sepolia.toml | 1 + .../config/toml/defaults/Celo_Mainnet.toml | 1 + .../config/toml/defaults/Celo_Testnet.toml | 1 + .../toml/defaults/Ethereum_Mainnet.toml | 1 + .../config/toml/defaults/Gnosis_Chiado.toml | 1 + .../config/toml/defaults/Gnosis_Mainnet.toml | 1 + .../toml/defaults/Optimism_Mainnet.toml | 1 + .../toml/defaults/Optimism_Sepolia.toml | 1 + .../config/toml/defaults/Polygon_Amoy.toml | 1 + .../config/toml/defaults/Polygon_Mainnet.toml | 1 + .../config/toml/defaults/WeMix_Mainnet.toml | 1 + .../config/toml/defaults/WeMix_Testnet.toml | 1 + .../evm/config/toml/defaults/fallback.toml | 1 + core/config/docs/chains-evm.toml | 5 + core/services/chainlink/config_test.go | 28 +- .../chainlink/testdata/config-full.toml | 1 + .../config-multi-chain-effective.toml | 3 + core/web/resolver/testdata/config-full.toml | 1 + .../config-multi-chain-effective.toml | 3 + docs/CONFIG.md | 68 +++ .../disk-based-logging-disabled.txtar | 1 + .../validate/disk-based-logging-no-dir.txtar | 1 + .../node/validate/disk-based-logging.txtar | 1 + testdata/scripts/node/validate/invalid.txtar | 1 + testdata/scripts/node/validate/valid.txtar | 1 + 55 files changed, 1452 insertions(+), 339 deletions(-) create mode 100644 .changeset/wild-seals-look.md diff --git 
a/.changeset/wild-seals-look.md b/.changeset/wild-seals-look.md new file mode 100644 index 0000000000..3cd854f0e6 --- /dev/null +++ b/.changeset/wild-seals-look.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Added new health check that ensures RPC provides new finalized heads at least every `NoNewFinalizedHeadsThreshold` #added diff --git a/common/client/mock_node_client_test.go b/common/client/mock_node_client_test.go index 5b7abe8212..5643dcde90 100644 --- a/common/client/mock_node_client_test.go +++ b/common/client/mock_node_client_test.go @@ -400,62 +400,6 @@ func (_c *mockNodeClient_IsSyncing_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(c return _c } -// LatestFinalizedBlock provides a mock function with given fields: ctx -func (_m *mockNodeClient[CHAIN_ID, HEAD]) LatestFinalizedBlock(ctx context.Context) (HEAD, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for LatestFinalizedBlock") - } - - var r0 HEAD - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (HEAD, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) HEAD); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(HEAD) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// mockNodeClient_LatestFinalizedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestFinalizedBlock' -type mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID types.ID, HEAD Head] struct { - *mock.Call -} - -// LatestFinalizedBlock is a helper method to define mock.On call -// - ctx context.Context -func (_e *mockNodeClient_Expecter[CHAIN_ID, HEAD]) LatestFinalizedBlock(ctx interface{}) *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD] { - return &mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("LatestFinalizedBlock", ctx)} -} - -func (_c *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD] { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD]) Return(_a0 HEAD, _a1 error) *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD] { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (HEAD, error)) *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD] { - _c.Call.Return(run) - return _c -} - // SetAliveLoopSub provides a mock function with given fields: _a0 func (_m *mockNodeClient[CHAIN_ID, HEAD]) SetAliveLoopSub(_a0 types.Subscription) { _m.Called(_a0) @@ -538,8 +482,8 @@ func (_c *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD]) Run(run func(ctx return _c } -func (_c *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD]) Return(_a0 types.Subscription, _a1 error) *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD] { - _c.Call.Return(_a0, _a1) +func (_c *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD]) Return(s types.Subscription, err error) *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD] { + _c.Call.Return(s, err) return _c } @@ -548,6 +492,140 @@ func (_c *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD]) RunAndReturn(run return _c } +// SubscribeToFinalizedHeads provides a mock function with given fields: _a0 +func (_m *mockNodeClient[CHAIN_ID, HEAD]) 
SubscribeToFinalizedHeads(_a0 context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(_a0) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockNodeClient_SubscribeToFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToFinalizedHeads' +type mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// SubscribeToFinalizedHeads is a helper method to define mock.On call +// - _a0 context.Context +func (_e *mockNodeClient_Expecter[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(_a0 interface{}) *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + return &mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("SubscribeToFinalizedHeads", _a0)} +} + +func (_c *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Run(run func(_a0 context.Context)) *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Return(_a0 <-chan HEAD, _a1 types.Subscription, _a2 error) *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *mockNodeClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockNodeClient_SubscribeToHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToHeads' +type mockNodeClient_SubscribeToHeads_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// SubscribeToHeads is a helper method to 
define mock.On call +// - ctx context.Context +func (_e *mockNodeClient_Expecter[CHAIN_ID, HEAD]) SubscribeToHeads(ctx interface{}) *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + return &mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("SubscribeToHeads", ctx)} +} + +func (_c *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Return(ch <-chan HEAD, sub types.Subscription, err error) *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(ch, sub, err) + return _c +} + +func (_c *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + // SubscribersCount provides a mock function with given fields: func (_m *mockNodeClient[CHAIN_ID, HEAD]) SubscribersCount() int32 { ret := _m.Called() diff --git a/common/client/mock_rpc_test.go b/common/client/mock_rpc_test.go index 36beae901c..00473c6636 100644 --- a/common/client/mock_rpc_test.go +++ b/common/client/mock_rpc_test.go @@ -1508,8 +1508,8 @@ func (_c *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_ return _c } -func (_c *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Return(_a0 types.Subscription, _a1 error) *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { - _c.Call.Return(_a0, _a1) +func (_c *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Return(s types.Subscription, err error) *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(s, err) return _c } @@ -1518,6 +1518,140 @@ func (_c *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_ return _c } +// SubscribeToFinalizedHeads provides a mock function with given fields: _a0 +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeToFinalizedHeads(_a0 context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(_a0) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockRPC_SubscribeToFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToFinalizedHeads' +type 
mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID types.ID, SEQ types.Sequence, ADDR types.Hashable, BLOCK_HASH types.Hashable, TX interface{}, TX_HASH types.Hashable, EVENT interface{}, EVENT_OPS interface{}, TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH], BATCH_ELEM interface{}] struct { + *mock.Call +} + +// SubscribeToFinalizedHeads is a helper method to define mock.On call +// - _a0 context.Context +func (_e *mockRPC_Expecter[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeToFinalizedHeads(_a0 interface{}) *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + return &mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]{Call: _e.mock.On("SubscribeToFinalizedHeads", _a0)} +} + +func (_c *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Run(run func(_a0 context.Context)) *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Return(_a0 <-chan HEAD, _a1 types.Subscription, _a2 error) *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(run) + return _c +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockRPC_SubscribeToHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToHeads' +type mockRPC_SubscribeToHeads_Call[CHAIN_ID types.ID, SEQ types.Sequence, ADDR types.Hashable, BLOCK_HASH types.Hashable, TX interface{}, TX_HASH types.Hashable, EVENT 
interface{}, EVENT_OPS interface{}, TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH], BATCH_ELEM interface{}] struct { + *mock.Call +} + +// SubscribeToHeads is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockRPC_Expecter[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeToHeads(ctx interface{}) *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + return &mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]{Call: _e.mock.On("SubscribeToHeads", ctx)} +} + +func (_c *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Run(run func(ctx context.Context)) *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Return(ch <-chan HEAD, sub types.Subscription, err error) *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(ch, sub, err) + return _c +} + +func (_c *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(run) + return _c +} + // SubscribersCount provides a mock function with given fields: func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribersCount() int32 { ret := _m.Called() diff --git a/common/client/mocks/config.go b/common/client/mocks/config.go index d1007f39f0..95b57cce0c 100644 --- a/common/client/mocks/config.go +++ b/common/client/mocks/config.go @@ -3,10 +3,11 @@ package mocks import "time" type ChainConfig struct { - IsFinalityTagEnabled bool - FinalityDepthVal uint32 - NoNewHeadsThresholdVal time.Duration - FinalizedBlockOffsetVal uint32 + IsFinalityTagEnabled bool + FinalityDepthVal uint32 + NoNewHeadsThresholdVal time.Duration + FinalizedBlockOffsetVal uint32 + NoNewFinalizedHeadsThresholdVal time.Duration } func (t ChainConfig) NodeNoNewHeadsThreshold() time.Duration { @@ -24,3 +25,7 @@ func (t ChainConfig) FinalityTagEnabled() bool { func (t ChainConfig) FinalizedBlockOffset() uint32 { return t.FinalizedBlockOffsetVal } + +func (t ChainConfig) NoNewFinalizedHeadsThreshold() time.Duration { + return t.NoNewFinalizedHeadsThresholdVal +} diff --git a/common/client/models.go b/common/client/models.go index 8b61613766..526bb25c88 100644 --- a/common/client/models.go +++ b/common/client/models.go @@ -1,6 +1,7 @@ package client import ( + "bytes" "fmt" ) @@ -74,3 +75,47 @@ func (n NodeTier) String() string { return fmt.Sprintf("NodeTier(%d)", n) } } + +// syncStatus - defines problems related to RPC's state synchronization. 
Can be used as a bitmask to define multiple issues +type syncStatus int + +const ( + // syncStatusSynced - RPC is fully synced + syncStatusSynced = 0 + // syncStatusNotInSyncWithPool - RPC is lagging behind the highest block observed within the pool of RPCs + syncStatusNotInSyncWithPool syncStatus = 1 << iota + // syncStatusNoNewHead - RPC failed to produce a new head for too long + syncStatusNoNewHead + // syncStatusNoNewFinalizedHead - RPC failed to produce a new finalized head for too long + syncStatusNoNewFinalizedHead + syncStatusLen +) + +func (s syncStatus) String() string { + if s == syncStatusSynced { + return "Synced" + } + var result bytes.Buffer + for i := syncStatusNotInSyncWithPool; i < syncStatusLen; i = i << 1 { + if i&s == 0 { + continue + } + result.WriteString(i.string()) + result.WriteString(",") + } + result.Truncate(result.Len() - 1) + return result.String() +} + +func (s syncStatus) string() string { + switch s { + case syncStatusNotInSyncWithPool: + return "NotInSyncWithRPCPool" + case syncStatusNoNewHead: + return "NoNewHead" + case syncStatusNoNewFinalizedHead: + return "NoNewFinalizedHead" + default: + return fmt.Sprintf("syncStatus(%d)", s) + } +} diff --git a/common/client/models_test.go b/common/client/models_test.go index 2d5dc31b37..a10592c3b6 100644 --- a/common/client/models_test.go +++ b/common/client/models_test.go @@ -3,6 +3,8 @@ package client import ( "strings" "testing" + + "github.com/stretchr/testify/assert" ) func TestSendTxReturnCode_String(t *testing.T) { @@ -14,3 +16,35 @@ func TestSendTxReturnCode_String(t *testing.T) { } } } + +func TestSyncStatus_String(t *testing.T) { + t.Run("All of the statuses have proper string representation", func(t *testing.T) { + for i := syncStatusNotInSyncWithPool; i < syncStatusLen; i <<= 1 { + // ensure that i's string representation is not equal to `syncStatus(%d)` + assert.NotContains(t, i.String(), "syncStatus(") + } + }) + t.Run("Unwraps mask", func(t *testing.T) { + testCases := []struct { + Mask syncStatus + ExpectedStr string + }{ + { + ExpectedStr: "Synced", + }, + { + Mask: syncStatusNotInSyncWithPool | syncStatusNoNewHead, + ExpectedStr: "NotInSyncWithRPCPool,NoNewHead", + }, + { + Mask: syncStatusNotInSyncWithPool | syncStatusNoNewHead | syncStatusNoNewFinalizedHead, + ExpectedStr: "NotInSyncWithRPCPool,NoNewHead,NoNewFinalizedHead", + }, + } + for _, testCase := range testCases { + t.Run(testCase.ExpectedStr, func(t *testing.T) { + assert.Equal(t, testCase.ExpectedStr, testCase.Mask.String()) + }) + } + }) +} diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 4d4ea925fe..c9250a1d62 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -15,6 +15,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/assets" "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" + feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types" "github.com/smartcontractkit/chainlink/v2/common/types" ) @@ -821,6 +822,14 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP return n.RPC().SubscribeNewHead(ctx, channel) } +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SubscribeToHeads(ctx context.Context) (ch <-chan HEAD, sub types.Subscription, err error) { + n, err := c.selectNode() + if err != nil { + return nil, nil, err + } + return n.RPC().SubscribeToHeads(ctx) +} + func (c 
*multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) TokenBalance(ctx context.Context, account ADDR, tokenAddr ADDR) (b *big.Int, err error) { n, err := c.selectNode() if err != nil { diff --git a/common/client/node.go b/common/client/node.go index 5ea31d6596..d6543c772a 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -49,6 +49,7 @@ type NodeConfig interface { type ChainConfig interface { NodeNoNewHeadsThreshold() time.Duration + NoNewFinalizedHeadsThreshold() time.Duration FinalityDepth() uint32 FinalityTagEnabled() bool FinalizedBlockOffset() uint32 diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index 5a5e255443..e58de071fb 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -2,7 +2,6 @@ package client import ( "fmt" - "math/big" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -240,11 +239,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInSync(fn func()) { // declareOutOfSync puts a node into OutOfSync state, disconnecting all current // clients and making it unavailable for use until back in-sync. -func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(isOutOfSync func(num int64, td *big.Int) bool) { +func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(syncIssues syncStatus) { n.transitionToOutOfSync(func() { - n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state) + n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state, "syncIssues", syncIssues) n.wg.Add(1) - go n.outOfSyncLoop(isOutOfSync) + go n.outOfSyncLoop(syncIssues) }) } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 39e17bb497..40d9a9ef6e 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -7,6 +7,8 @@ import ( "math/big" "time" + "github.com/smartcontractkit/chainlink/v2/common/types" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -86,33 +88,37 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } noNewHeadsTimeoutThreshold := n.chainCfg.NodeNoNewHeadsThreshold() + noNewFinalizedBlocksTimeoutThreshold := n.chainCfg.NoNewFinalizedHeadsThreshold() pollFailureThreshold := n.nodePoolCfg.PollFailureThreshold() pollInterval := n.nodePoolCfg.PollInterval() lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) lggr.Tracew("Alive loop starting", "nodeState", n.getCachedState()) - headsC := make(chan HEAD) - sub, err := n.rpc.SubscribeNewHead(ctx, headsC) + headsSub, err := n.registerNewSubscription(ctx, lggr.With("subscriptionType", "heads"), + n.chainCfg.NodeNoNewHeadsThreshold(), n.rpc.SubscribeToHeads) if err != nil { - lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.getCachedState()) + lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.getCachedState(), "err", err) n.declareUnreachable() return } - // TODO: nit fix. 
If multinode switches primary node before we set sub as AliveSub, sub will be closed and we'll - // falsely transition this node to unreachable state - n.rpc.SetAliveLoopSub(sub) - defer sub.Unsubscribe() - - var outOfSyncT *time.Ticker - var outOfSyncTC <-chan time.Time - if noNewHeadsTimeoutThreshold > 0 { - lggr.Debugw("Head liveness checking enabled", "nodeState", n.getCachedState()) - outOfSyncT = time.NewTicker(noNewHeadsTimeoutThreshold) - defer outOfSyncT.Stop() - outOfSyncTC = outOfSyncT.C - } else { - lggr.Debug("Head liveness checking disabled") + + // TODO: will be removed as part of merging effort with BCI-2875 + n.rpc.SetAliveLoopSub(headsSub.sub) + + defer headsSub.Unsubscribe() + + var finalizedHeadsSub headSubscription[HEAD] + if n.chainCfg.FinalityTagEnabled() { + finalizedHeadsSub, err = n.registerNewSubscription(ctx, lggr.With("subscriptionType", "finalizedHeads"), + n.chainCfg.NoNewFinalizedHeadsThreshold(), n.rpc.SubscribeToFinalizedHeads) + if err != nil { + lggr.Errorw("Failed to subscribe to finalized heads", "err", err) + n.declareUnreachable() + return + } + + defer finalizedHeadsSub.Unsubscribe() } var pollCh <-chan time.Time @@ -131,14 +137,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Debug("Polling disabled") } - var pollFinalizedHeadCh <-chan time.Time - if n.chainCfg.FinalityTagEnabled() && n.nodePoolCfg.FinalizedBlockPollInterval() > 0 { - lggr.Debugw("Finalized block polling enabled") - pollT := time.NewTicker(n.nodePoolCfg.FinalizedBlockPollInterval()) - defer pollT.Stop() - pollFinalizedHeadCh = pollT.C - } - localHighestChainInfo, _ := n.rpc.GetInterceptedChainInfo() var pollFailures uint32 @@ -149,7 +147,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { case <-pollCh: promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() lggr.Tracew("Polling for version", "nodeState", n.getCachedState(), "pollFailures", pollFailures) - version, err := func(ctx context.Context) (string, error) { + var version string + version, err = func(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, pollInterval) defer cancel() return n.RPC().ClientVersion(ctx) @@ -177,47 +176,33 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.declareUnreachable() return } - _, ci := n.StateAndLatest() - if outOfSync, liveNodes := n.syncStatus(ci.BlockNumber, ci.TotalDifficulty); outOfSync { + _, latestChainInfo := n.StateAndLatest() + if outOfSync, liveNodes := n.isOutOfSyncWithPool(latestChainInfo); outOfSync { // note: there must be another live node for us to be out of sync - lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", ci.BlockNumber, "totalDifficulty", ci.TotalDifficulty, "nodeState", n.getCachedState()) + lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", latestChainInfo.BlockNumber, "totalDifficulty", latestChainInfo.TotalDifficulty, "nodeState", n.getCachedState()) if liveNodes < 2 { lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue } - n.declareOutOfSync(n.isOutOfSync) + n.declareOutOfSync(syncStatusNotInSyncWithPool) return } - case bh, open := <-headsC: + case bh, open := <-headsSub.Heads: if !open { lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) n.declareUnreachable() return } - promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Tracew("Got head", "head", bh) - if bh.BlockNumber() > localHighestChainInfo.BlockNumber { - 
promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) - lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.getCachedState()) - localHighestChainInfo.BlockNumber = bh.BlockNumber() - } else { - lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.getCachedState()) - } - if outOfSyncT != nil { - outOfSyncT.Reset(noNewHeadsTimeoutThreshold) - } - if !n.chainCfg.FinalityTagEnabled() { - latestFinalizedBN := max(bh.BlockNumber()-int64(n.chainCfg.FinalityDepth()), 0) - if latestFinalizedBN > localHighestChainInfo.FinalizedBlockNumber { - promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) - localHighestChainInfo.FinalizedBlockNumber = latestFinalizedBN - } + + receivedNewHead := n.onNewHead(lggr, &localHighestChainInfo, bh) + if receivedNewHead && noNewHeadsTimeoutThreshold > 0 { + headsSub.ResetTimer(noNewHeadsTimeoutThreshold) } - case err := <-sub.Err(): + case err = <-headsSub.Errors: lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.getCachedState()) n.declareUnreachable() return - case <-outOfSyncTC: + case <-headsSub.NoNewHeads: // We haven't received a head on the channel for at least the // threshold amount of time, mark it broken lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, localHighestChainInfo.BlockNumber), "nodeState", n.getCachedState(), "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) @@ -226,47 +211,151 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) // We don't necessarily want to wait the full timeout to check again, we should // check regularly and log noisily in this state - outOfSyncT.Reset(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)) + headsSub.ResetTimer(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)) continue } } - n.declareOutOfSync(func(num int64, td *big.Int) bool { return num < localHighestChainInfo.BlockNumber }) + n.declareOutOfSync(syncStatusNoNewHead) return - case <-pollFinalizedHeadCh: - latestFinalized, err := func(ctx context.Context) (HEAD, error) { - ctx, cancel := context.WithTimeout(ctx, n.nodePoolCfg.FinalizedBlockPollInterval()) - defer cancel() - return n.RPC().LatestFinalizedBlock(ctx) - }(ctx) - if err != nil { - lggr.Warnw("Failed to fetch latest finalized block", "err", err) - continue + case latestFinalized, open := <-finalizedHeadsSub.Heads: + if !open { + lggr.Errorw("Finalized heads subscription channel unexpectedly closed", "nodeState", n.getCachedState()) + n.declareUnreachable() + return } - if !latestFinalized.IsValid() { - lggr.Warn("Latest finalized block is not valid") - continue + receivedNewHead := n.onNewFinalizedHead(lggr, &localHighestChainInfo, latestFinalized) + if receivedNewHead && noNewFinalizedBlocksTimeoutThreshold > 0 { + finalizedHeadsSub.ResetTimer(noNewFinalizedBlocksTimeoutThreshold) } - - latestFinalizedBN := latestFinalized.BlockNumber() - if latestFinalizedBN > localHighestChainInfo.FinalizedBlockNumber { - promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), 
n.name).Set(float64(latestFinalizedBN)) - localHighestChainInfo.FinalizedBlockNumber = latestFinalizedBN + case <-finalizedHeadsSub.NoNewHeads: + // We haven't received a finalized head on the channel for at least the + // threshold amount of time, mark it broken + lggr.Errorw(fmt.Sprintf("RPC's finalized state is out of sync; no new finalized heads received for %s (last finalized head received was %v)", noNewFinalizedBlocksTimeoutThreshold, localHighestChainInfo.FinalizedBlockNumber), "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber) + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 { + lggr.Criticalf("RPC's finalized state is out of sync; %s %s", msgCannotDisable, msgDegradedState) + // We don't necessarily want to wait the full timeout to check again, we should + // check regularly and log noisily in this state + finalizedHeadsSub.ResetTimer(zombieNodeCheckInterval(noNewFinalizedBlocksTimeoutThreshold)) + continue + } + } + n.declareOutOfSync(syncStatusNoNewFinalizedHead) + return + case err = <-finalizedHeadsSub.Errors: + lggr.Errorw("Finalized heads subscription was terminated", "err", err) + n.declareUnreachable() + return } } } -func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSync(num int64, td *big.Int) (outOfSync bool) { - outOfSync, _ = n.syncStatus(num, td) - return +type headSubscription[HEAD any] struct { + Heads <-chan HEAD + Errors <-chan error + NoNewHeads <-chan time.Time + + noNewHeadsTicker *time.Ticker + sub types.Subscription + cleanUpTasks []func() +} + +func (sub *headSubscription[HEAD]) ResetTimer(duration time.Duration) { + sub.noNewHeadsTicker.Reset(duration) +} + +func (sub *headSubscription[HEAD]) Unsubscribe() { + for _, doCleanUp := range sub.cleanUpTasks { + doCleanUp() + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) registerNewSubscription(ctx context.Context, lggr logger.SugaredLogger, + noNewDataThreshold time.Duration, newSub func(ctx context.Context) (<-chan HEAD, types.Subscription, error)) (headSubscription[HEAD], error) { + result := headSubscription[HEAD]{} + var err error + var sub types.Subscription + result.Heads, sub, err = newSub(ctx) + if err != nil { + return result, err + } + + result.Errors = sub.Err() + lggr.Debug("Successfully subscribed") + + // TODO: will be removed as part of merging effort with BCI-2875 + result.sub = sub + //n.stateMu.Lock() + //n.healthCheckSubs = append(n.healthCheckSubs, sub) + //n.stateMu.Unlock() + + result.cleanUpTasks = append(result.cleanUpTasks, sub.Unsubscribe) + + if noNewDataThreshold > 0 { + lggr.Debugw("Subscription liveness checking enabled") + result.noNewHeadsTicker = time.NewTicker(noNewDataThreshold) + result.NoNewHeads = result.noNewHeadsTicker.C + result.cleanUpTasks = append(result.cleanUpTasks, result.noNewHeadsTicker.Stop) + } else { + lggr.Debug("Subscription liveness checking disabled") + } + + return result, nil +} -// syncStatus returns outOfSync true if num or td is more than SyncThresold behind the best node.
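The headSubscription helper introduced in this hunk is the core mechanism of the change: it bundles a head channel, the subscription's error channel, and a "no new heads" ticker behind a single value, so that aliveLoop and outOfSyncLoop can drive both the regular and the finalized head feeds from one select statement. The sketch below re-states that pattern in a self-contained form; liveSubscription and newLiveSubscription are hypothetical stand-ins for the chainlink generics, not the real API.

package main

import (
	"context"
	"fmt"
	"time"
)

// liveSubscription is a trimmed-down, hypothetical analogue of headSubscription:
// a data channel paired with a "no new data" ticker, so one select loop can both
// consume heads and notice staleness.
type liveSubscription[T any] struct {
	Data      <-chan T
	NoNewData <-chan time.Time

	ticker  *time.Ticker
	cleanup []func()
}

// ResetTimer re-arms the liveness ticker; called whenever fresh data arrives.
func (s *liveSubscription[T]) ResetTimer(d time.Duration) { s.ticker.Reset(d) }

// Unsubscribe runs all registered cleanup tasks, mirroring the cleanUpTasks loop.
func (s *liveSubscription[T]) Unsubscribe() {
	for _, f := range s.cleanup {
		f()
	}
}

// newLiveSubscription mirrors registerNewSubscription's wiring: subscribe first,
// then arm the liveness ticker only when the threshold is positive.
func newLiveSubscription[T any](ctx context.Context, threshold time.Duration,
	subscribe func(context.Context) (<-chan T, func(), error)) (liveSubscription[T], error) {
	var s liveSubscription[T]
	data, unsub, err := subscribe(ctx)
	if err != nil {
		return s, err
	}
	s.Data = data
	s.cleanup = append(s.cleanup, unsub)
	if threshold > 0 {
		s.ticker = time.NewTicker(threshold)
		s.NoNewData = s.ticker.C
		s.cleanup = append(s.cleanup, s.ticker.Stop)
	}
	return s, nil
}

func main() {
	heads := make(chan int)
	sub, err := newLiveSubscription(context.Background(), 50*time.Millisecond,
		func(context.Context) (<-chan int, func(), error) { return heads, func() {}, nil })
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()

	go func() { heads <- 42 }()
	for i := 0; i < 2; i++ {
		select {
		case h := <-sub.Data:
			fmt.Println("new head:", h)
			sub.ResetTimer(50 * time.Millisecond) // fresh data, re-arm the liveness check
		case <-sub.NoNewData:
			fmt.Println("no new heads within threshold; node would be declared out-of-sync")
		}
	}
}

Note how a zero threshold leaves the NoNewData channel nil, and a receive from a nil channel blocks forever: this is exactly how setting NodeNoNewHeadsThreshold or NoNewFinalizedHeadsThreshold to 0 disables the corresponding liveness check in the patch.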
+func (n *node[CHAIN_ID, HEAD, RPC]) onNewFinalizedHead(lggr logger.SugaredLogger, chainInfo *ChainInfo, latestFinalized HEAD) bool { + if !latestFinalized.IsValid() { + lggr.Warn("Latest finalized block is not valid") + return false + } + + latestFinalizedBN := latestFinalized.BlockNumber() + lggr.Tracew("Got latest finalized head", "latestFinalized", latestFinalized) + if latestFinalizedBN <= chainInfo.FinalizedBlockNumber { + lggr.Tracew("Ignoring previously seen finalized block number") + return false + } + + promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) + chainInfo.FinalizedBlockNumber = latestFinalizedBN + return true +} + +func (n *node[CHAIN_ID, HEAD, RPC]) onNewHead(lggr logger.SugaredLogger, chainInfo *ChainInfo, head HEAD) bool { + if !head.IsValid() { + lggr.Warn("Latest head is not valid") + return false + } + + promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Got head", "head", head) + lggr = lggr.With("latestReceivedBlockNumber", chainInfo.BlockNumber, "blockNumber", head.BlockNumber(), "nodeState", n.getCachedState()) + if head.BlockNumber() <= chainInfo.BlockNumber { + lggr.Tracew("Ignoring previously seen block number") + return false + } + + promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(head.BlockNumber())) + chainInfo.BlockNumber = head.BlockNumber() + + if !n.chainCfg.FinalityTagEnabled() { + latestFinalizedBN := max(head.BlockNumber()-int64(n.chainCfg.FinalityDepth()), 0) + if latestFinalizedBN > chainInfo.FinalizedBlockNumber { + promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) + chainInfo.FinalizedBlockNumber = latestFinalizedBN + } + } + + return true +} + +// isOutOfSyncWithPool returns outOfSync true if the local block number or total difficulty is more than SyncThreshold behind the best node. // Always returns outOfSync false for SyncThreshold 0. // liveNodes is only included when outOfSync is true. 
-func (n *node[CHAIN_ID, HEAD, RPC]) syncStatus(num int64, td *big.Int) (outOfSync bool, liveNodes int) { +func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSyncWithPool(localState ChainInfo) (outOfSync bool, liveNodes int) { if n.poolInfoProvider == nil { + n.lfcLog.Warn("skipping sync state against the pool - should only occur in tests") return // skip for tests } threshold := n.nodePoolCfg.SyncThreshold() @@ -278,22 +367,23 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncStatus(num int64, td *big.Int) (outOfSyn mode := n.nodePoolCfg.SelectionMode() switch mode { case NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel: - return num < ci.BlockNumber-int64(threshold), ln + return localState.BlockNumber < ci.BlockNumber-int64(threshold), ln case NodeSelectionModeTotalDifficulty: bigThreshold := big.NewInt(int64(threshold)) - return td.Cmp(bigmath.Sub(ci.TotalDifficulty, bigThreshold)) < 0, ln + return localState.TotalDifficulty.Cmp(bigmath.Sub(ci.TotalDifficulty, bigThreshold)) < 0, ln default: panic("unrecognized NodeSelectionMode: " + mode) } } const ( - msgReceivedBlock = "Received block for RPC node, waiting until back in-sync to mark as live again" - msgInSync = "RPC node back in sync" + msgReceivedBlock = "Received block for RPC node, waiting until back in-sync to mark as live again" + msgReceivedFinalizedBlock = "Received new finalized block for RPC node, waiting until back in-sync to mark as live again" + msgInSync = "RPC node back in sync" ) // outOfSyncLoop takes an OutOfSync node and waits until isOutOfSync returns false to go back to live status -func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td *big.Int) bool) { +func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(syncIssues syncStatus) { defer n.wg.Done() ctx, cancel := n.newCtx() defer cancel() @@ -312,8 +402,9 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td outOfSyncAt := time.Now() - lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) - lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.getCachedState()) + // set logger name to OutOfSync or FinalizedBlockOutOfSync + lggr := logger.Sugared(logger.Named(n.lfcLog, n.getCachedState().String())).With("nodeState", n.getCachedState()) + lggr.Debugw("Trying to revive out-of-sync RPC node") // Need to redial since out-of-sync nodes are automatically disconnected state := n.createVerifiedConn(ctx, lggr) @@ -322,46 +413,118 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return } - lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.getCachedState()) - - ch := make(chan HEAD) - sub, err := n.rpc.SubscribeNewHead(ctx, ch) + noNewHeadsTimeoutThreshold := n.chainCfg.NodeNoNewHeadsThreshold() + headsSub, err := n.registerNewSubscription(ctx, lggr.With("subscriptionType", "heads"), + noNewHeadsTimeoutThreshold, n.rpc.SubscribeToHeads) if err != nil { - lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.getCachedState(), "err", err) + lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "err", err) n.declareUnreachable() return } - defer sub.Unsubscribe() + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node") + defer headsSub.Unsubscribe() + + noNewFinalizedBlocksTimeoutThreshold := n.chainCfg.NoNewFinalizedHeadsThreshold() + var finalizedHeadsSub headSubscription[HEAD] + if n.chainCfg.FinalityTagEnabled() { + finalizedHeadsSub, err = 
n.registerNewSubscription(ctx, lggr.With("subscriptionType", "finalizedHeads"), + noNewFinalizedBlocksTimeoutThreshold, n.rpc.SubscribeToFinalizedHeads) + if err != nil { + lggr.Errorw("Subscribe to finalized heads failed on out-of-sync RPC node", "err", err) + n.declareUnreachable() + return + } + + lggr.Tracew("Successfully subscribed to finalized heads feed on out-of-sync RPC node") + defer finalizedHeadsSub.Unsubscribe() + } + + _, localHighestChainInfo := n.rpc.GetInterceptedChainInfo() for { + if syncIssues == syncStatusSynced { + // back in-sync! flip back into alive loop + lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt))) + n.declareInSync() + return + } + select { case <-ctx.Done(): return - case head, open := <-ch: + case head, open := <-headsSub.Heads: if !open { - lggr.Error("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) n.declareUnreachable() return } - if !isOutOfSync(head.BlockNumber(), head.BlockDifficulty()) { - // back in-sync! flip back into alive loop - lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.getCachedState()) - n.declareInSync() - return + + if !n.onNewHead(lggr, &localHighestChainInfo, head) { + continue + } + + // received a new head - clear NoNewHead flag + syncIssues &= ^syncStatusNoNewHead + if outOfSync, _ := n.isOutOfSyncWithPool(localHighestChainInfo); !outOfSync { + // we caught up with the pool - clear NotInSyncWithPool flag + syncIssues &= ^syncStatusNotInSyncWithPool + } else { + // we've received new head, but lagging behind the pool, add NotInSyncWithPool flag to prevent false transition to alive + syncIssues |= syncStatusNotInSyncWithPool + } + + if noNewHeadsTimeoutThreshold > 0 { + headsSub.ResetTimer(noNewHeadsTimeoutThreshold) } - lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.getCachedState()) - case <-time.After(zombieNodeCheckInterval(n.chainCfg.NodeNoNewHeadsThreshold())): + + lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "syncIssues", syncIssues) + case <-time.After(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)): if n.poolInfoProvider != nil { if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 1 { - lggr.Critical("RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") + lggr.Criticalw("RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state", "syncIssues", syncIssues) n.declareInSync() return } } - case err := <-sub.Err(): - lggr.Errorw("Subscription was terminated", "nodeState", n.getCachedState(), "err", err) + case err := <-headsSub.Errors: + lggr.Errorw("Subscription was terminated", "err", err) + n.declareUnreachable() + return + case <-headsSub.NoNewHeads: + // we are not resetting the timer, as there is no need to add syncStatusNoNewHead until it's removed on new head. + syncIssues |= syncStatusNoNewHead + lggr.Debugw(fmt.Sprintf("No new heads received for %s. 
Node stays out-of-sync due to sync issues: %s", noNewHeadsTimeoutThreshold, syncIssues)) + case latestFinalized, open := <-finalizedHeadsSub.Heads: + if !open { + lggr.Errorw("Finalized heads subscription channel unexpectedly closed") + n.declareUnreachable() + return + } + if !latestFinalized.IsValid() { + lggr.Warn("Latest finalized block is not valid") + continue + } + + receivedNewHead := n.onNewFinalizedHead(lggr, &localHighestChainInfo, latestFinalized) + if !receivedNewHead { + continue + } + + // on new finalized head remove NoNewFinalizedHead flag from the mask + syncIssues &= ^syncStatusNoNewFinalizedHead + if noNewFinalizedBlocksTimeoutThreshold > 0 { + finalizedHeadsSub.ResetTimer(noNewFinalizedBlocksTimeoutThreshold) + } + + lggr.Debugw(msgReceivedFinalizedBlock, "blockNumber", latestFinalized.BlockNumber(), "syncIssues", syncIssues) + case err := <-finalizedHeadsSub.Errors: + lggr.Errorw("Finalized head subscription was terminated", "err", err) n.declareUnreachable() return + case <-finalizedHeadsSub.NoNewHeads: + // we are not resetting the timer, as there is no need to add syncStatusNoNewFinalizedHead until it's removed on new finalized head. + syncIssues |= syncStatusNoNewFinalizedHead + lggr.Debugw(fmt.Sprintf("No new finalized heads received for %s. Node stays out-of-sync due to sync issues: %s", noNewFinalizedBlocksTimeoutThreshold, syncIssues)) } } } diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 863a15a1fa..833bccf7f2 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math/big" + "sync" "sync/atomic" "testing" @@ -49,7 +50,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to rpc") rpc.On("DisconnectAll").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, expectedError).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -74,7 +75,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { close(errChan) sub.On("Err").Return((<-chan error)(errChan)).Once() sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() // disconnects all on transfer to unreachable rpc.On("DisconnectAll").Once() @@ -89,7 +90,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - opts.rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() + opts.rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil) opts.rpc.On("SetAliveLoopSub", sub).Once() return newDialedNode(t, opts) } @@ -105,7 +106,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() node.declareAlive() - tests.AssertLogEventually(t, observedLogs, "Head liveness checking disabled") + tests.AssertLogEventually(t, observedLogs, "Subscription liveness checking disabled") tests.AssertLogEventually(t, observedLogs, "Polling disabled") assert.Equal(t, nodeStateAlive, node.State()) }) @@ -340,18 +341,21 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { 
tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState)) assert.Equal(t, nodeStateAlive, node.State()) }) - t.Run("rpc closed head channel", func(t *testing.T) { - t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + newSub := func(t *testing.T) *mocks.Subscription { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + return sub + } + t.Run("rpc closed head channel", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { close(ch) - }).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + }).Return((<-chan Head)(ch), newSub(t), nil).Once() + rpc.On("SetAliveLoopSub", mock.Anything).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newDialedNode(t, testNodeOpts{ lggr: lggr, @@ -380,10 +384,10 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { const finalityDepth = 10 const expectedBlock = 990 rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { go writeHeads(t, ch, head{BlockNumber: blockNumber - 1}, head{BlockNumber: blockNumber}, head{BlockNumber: blockNumber - 1}) - }).Return(sub, nil).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() name := "node-" + rand.Str(5) node := newDialedNode(t, testNodeOpts{ @@ -403,18 +407,13 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { return float64(expectedBlock) == m.Gauge.GetValue() }) }) - t.Run("Logs warning if failed to get finalized block", func(t *testing.T) { + t.Run("If fails to subscribe to latest finalized blocks, transitions to unreachable ", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - rpc.On("LatestFinalizedBlock", mock.Anything).Return(newMockHead(t), errors.New("failed to get finalized block")) - sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + expectedError := errors.New("failed to subscribe to finalized heads") + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, mocks.NewSubscription(t), expectedError).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) - node := newDialedNode(t, testNodeOpts{ + node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{ finalizedBlockPollInterval: tests.TestInterval, }, @@ -425,26 +424,31 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { lggr: lggr, }) defer func() { assert.NoError(t, node.close()) }() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to 
dial")).Maybe() node.declareAlive() - tests.AssertLogEventually(t, observedLogs, "Failed to fetch latest finalized block") + tests.AssertLogEventually(t, observedLogs, "Failed to subscribe to finalized heads") + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) }) t.Run("Logs warning if latest finalized block is not valid", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + ch := make(chan Head, 1) head := newMockHead(t) head.On("IsValid").Return(false) - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head, nil) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { + ch <- head + }).Return((<-chan Head)(ch), newSub(t), nil).Once() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), newSub(t), nil).Once() + rpc.On("SetAliveLoopSub", mock.Anything).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ - config: testNodeConfig{ - finalizedBlockPollInterval: tests.TestInterval, - }, + config: testNodeConfig{}, chainConfig: clientMocks.ChainConfig{ IsFinalityTagEnabled: true, }, @@ -455,29 +459,17 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertLogEventually(t, observedLogs, "Latest finalized block is not valid") }) - t.Run("If finality tag and finalized block polling are enabled updates latest finalized block metric", func(t *testing.T) { + t.Run("On new finalized block updates corresponding metric", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) const expectedBlock = 1101 const finalityDepth = 10 - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock - 1}.ToMockHead(t), nil).Once() - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock}.ToMockHead(t), nil) - sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) - // ensure that "calculated" finalized head is larger than actual, to ensure we are correctly setting - // the metric - go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) - }).Return(sub, nil).Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - rpc.On("SetAliveLoopSub", sub).Once() name := "node-" + rand.Str(5) - node := newDialedNode(t, testNodeOpts{ - config: testNodeConfig{ - finalizedBlockPollInterval: tests.TestInterval, - }, + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, chainConfig: clientMocks.ChainConfig{ FinalityDepthVal: finalityDepth, IsFinalityTagEnabled: true, @@ -488,6 +480,12 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() node.declareAlive() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + writeHeads(t, ch, head{BlockNumber: expectedBlock - 1}, head{BlockNumber: expectedBlock}, head{BlockNumber: 
expectedBlock - 1}) + }() tests.AssertEventually(t, func() bool { metric, err := promPoolRPCNodeHighestFinalizedBlock.GetMetricWithLabelValues(big.NewInt(1).String(), name) require.NoError(t, err) @@ -496,6 +494,123 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { return float64(expectedBlock) == m.Gauge.GetValue() }) }) + t.Run("If finalized heads channel is closed, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + ch := make(chan Head) + close(ch) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription channel unexpectedly closed") + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) + }) + t.Run("when no new finalized heads received for threshold, transitions to out of sync", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + ch := make(chan Head, 1) + ch <- head{BlockNumber: 10}.ToMockHead(t) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() + lggr, observed := logger.TestObserved(t, zap.DebugLevel) + noNewFinalizedHeadsThreshold := tests.TestInterval + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeadsThreshold, + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + // tries to redial in outOfSync + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateOutOfSync, node.State()) + }).Once() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Maybe() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observed, fmt.Sprintf("RPC's finalized state is out of sync; no new finalized heads received for %s (last finalized head received was 10)", noNewFinalizedHeadsThreshold)) + tests.AssertEventually(t, func() bool { + // right after outOfSync we'll transfer to unreachable due to returned error on Dial + // we check that we were in out of sync state on first Dial call + return node.State() == nodeStateUnreachable + }) + }) + t.Run("when no new finalized heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), newSub(t), nil).Once() + lggr, observed := logger.TestObserved(t, zap.DebugLevel) 
+ noNewFinalizedHeadsThreshold := tests.TestInterval + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeadsThreshold, + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: 20, + TotalDifficulty: big.NewInt(10), + }).Once() + node.SetPoolChainInfoProvider(poolInfo) + node.declareAlive() + tests.AssertLogEventually(t, observed, fmt.Sprintf("RPC's finalized state is out of sync; %s %s", msgCannotDisable, msgDegradedState)) + assert.Equal(t, nodeStateAlive, node.State()) + }) + t.Run("If finalized subscription returns an error, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("DisconnectAll").Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + sub := mocks.NewSubscription(t) + errCh := make(chan error, 1) + errCh <- errors.New("subscription failed") + sub.On("Err").Return((<-chan error)(errCh)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(nil), sub, nil).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + // disconnects all on transfer to unreachable or outOfSync + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription was terminated") + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) + }) } type head struct { @@ -525,9 +640,10 @@ func writeHeads(t *testing.T, ch chan<- Head, heads ...head) { func setupRPCForAliveLoop(t *testing.T, rpc *mockNodeClient[types.ID, Head]) { rpc.On("Dial", mock.Anything).Return(nil).Maybe() aliveSubscription := mocks.NewSubscription(t) - aliveSubscription.On("Err").Return((<-chan error)(nil)).Maybe() + aliveSubscription.On("Err").Return(nil).Maybe() aliveSubscription.On("Unsubscribe").Maybe() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(aliveSubscription, nil).Maybe() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() rpc.On("SetAliveLoopSub", mock.Anything).Maybe() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Maybe() } @@ -544,22 +660,18 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { return node } - stubIsOutOfSync := func(num int64, td *big.Int) bool { - return false - } - t.Run("returns on closed", func(t *testing.T) { t.Parallel() node := newTestNode(t, testNodeOpts{}) node.setState(nodeStateClosed) node.wg.Add(1) - node.outOfSyncLoop(stubIsOutOfSync) + node.outOfSyncLoop(syncStatusNotInSyncWithPool) }) t.Run("on old blocks stays outOfSync and returns on close", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) nodeChainID := types.RandomID() - lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + lggr := logger.Test(t) node := newAliveNode(t, 
testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -569,21 +681,27 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 0}, ChainInfo{BlockNumber: 13}).Once() outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() heads := []head{{BlockNumber: 7}, {BlockNumber: 11}, {BlockNumber: 13}} - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) - go writeHeads(t, ch, heads...) - }).Return(outOfSyncSubscription, nil).Once() + ch := make(chan Head) + var wg sync.WaitGroup + wg.Add(1) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go func() { + defer wg.Done() + writeHeads(t, ch, heads...) + }() + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - node.declareOutOfSync(func(num int64, td *big.Int) bool { - return true - }) - tests.AssertLogCountEventually(t, observedLogs, msgReceivedBlock, len(heads)) + node.declareOutOfSync(syncStatusNoNewHead) + // wait until all heads are consumed + wg.Wait() assert.Equal(t, nodeStateOutOfSync, node.State()) }) t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) { @@ -597,7 +715,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to dial rpc") // might be called again in unreachable loop, so no need to set once rpc.On("Dial", mock.Anything).Return(expectedError) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) @@ -617,7 +735,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to get chain ID") // might be called multiple times rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) @@ -637,7 +755,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Twice() // might be called multiple times rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateInvalidChainID }) @@ -657,7 +775,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) // might be called multiple times rpc.On("IsSyncing", mock.Anything).Return(true, nil) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateSyncing }) @@ -680,7 +798,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() // might be called multiple times rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing")) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() 
== nodeStateUnreachable }) @@ -698,9 +816,9 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() expectedError := errors.New("failed to subscribe") - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, expectedError) + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) @@ -719,15 +837,15 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() sub := mocks.NewSubscription(t) errChan := make(chan error, 1) errChan <- errors.New("subscription was terminate") sub.On("Err").Return((<-chan error)(errChan)) sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable @@ -747,22 +865,22 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { close(ch) - }).Return(sub, nil).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) }) - t.Run("becomes alive if it receives a newer head", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) @@ -782,17 +900,14 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() const highestBlock = 1000 - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) - go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}) - }).Return(outOfSyncSubscription, nil).Once() + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}, head{BlockNumber: highestBlock + 1}) + }).Return((<-chan Head)(ch), 
outOfSyncSubscription, nil).Once() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: highestBlock}, ChainInfo{BlockNumber: highestBlock}) - setupRPCForAliveLoop(t, rpc) - node.declareOutOfSync(func(num int64, td *big.Int) bool { - return num < highestBlock - }) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertLogEventually(t, observedLogs, msgReceivedBlock) tests.AssertLogEventually(t, observedLogs, msgInSync) tests.AssertEventually(t, func() bool { @@ -819,7 +934,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { TotalDifficulty: big.NewInt(200), }) node.SetPoolChainInfoProvider(poolInfo) - rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 0}, ChainInfo{BlockNumber: 0}) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() @@ -827,16 +942,225 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(outOfSyncSubscription, nil).Once() - + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), outOfSyncSubscription, nil).Once() setupRPCForAliveLoop(t, rpc) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertLogEventually(t, observedLogs, "RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") tests.AssertEventually(t, func() bool { return node.State() == nodeStateAlive }) }) + t.Run("Stays out-of-sync if received new head, but lags behind pool", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + NoNewHeadsThresholdVal: tests.TestInterval, + }, + config: testNodeConfig{ + syncThreshold: 1, + selectionMode: NodeSelectionModeHighestHead, + }, + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + const highestBlock = 20 + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: highestBlock * 2, + TotalDifficulty: big.NewInt(200), + }) + node.SetPoolChainInfoProvider(poolInfo) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{BlockNumber: highestBlock}) + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}, head{BlockNumber: highestBlock + 1}) + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, msgReceivedBlock) + tests.AssertLogEventually(t, observedLogs, "No new heads received for") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateOutOfSync + }) + }) + + // 
creates RPC mock with all calls necessary to create a heads subscription that won't produce any events + newRPCWithNoOpHeads := func(t *testing.T, chainID types.ID) *mockNodeClient[types.ID, Head] { + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(chainID, nil).Once() + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + return rpc + } + + t.Run("if fails to subscribe to finalized, becomes unreachable", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(nil), nil, errors.New("failed to subscribe")).Once() + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on subscription termination becomes unreachable", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + sub := mocks.NewSubscription(t) + errChan := make(chan error, 1) + errChan <- errors.New("subscription was terminated") + sub.On("Err").Return((<-chan error)(errChan)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "Finalized head subscription was terminated") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)) + sub.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { + close(ch) + }).Return((<-chan Head)(ch), sub, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription channel unexpectedly closed") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes alive on new finalized block", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr := logger.Test(t) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + NoNewFinalizedHeadsThresholdVal: tests.TestInterval, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + const highestBlock = 13 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{FinalizedBlockNumber: highestBlock}).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + setupRPCForAliveLoop(t, rpc) + + node.declareOutOfSync(syncStatusNoNewFinalizedHead) + heads := []head{{BlockNumber: highestBlock - 1}, {BlockNumber: highestBlock}} + writeHeads(t, ch, heads...) + assert.Equal(t, nodeStateOutOfSync, node.State()) + writeHeads(t, ch, head{BlockNumber: highestBlock + 1}) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("adds finalized block is not increasing flag if there are no new finalized heads for too long", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observed := logger.TestObserved(t, zap.DebugLevel) + const noNewFinalizedHeads = tests.TestInterval + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeads, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + const highestBlock = 13 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{FinalizedBlockNumber: highestBlock}).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + node.declareOutOfSync(syncStatusNotInSyncWithPool) + heads := []head{{BlockNumber: highestBlock - 1}, {BlockNumber: highestBlock}} + writeHeads(t, ch, heads...) + assert.Equal(t, nodeStateOutOfSync, node.State()) + tests.AssertLogEventually(t, observed, fmt.Sprintf("No new finalized heads received for %s. 
Node stays "+ + "out-of-sync due to sync issues: NotInSyncWithRPCPool,NoNewFinalizedHead", noNewFinalizedHeads)) + }) } func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { @@ -1296,11 +1620,11 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) } -func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { +func TestUnit_NodeLifecycle_outOfSyncWithPool(t *testing.T) { t.Parallel() t.Run("skip if nLiveNodes is not configured", func(t *testing.T) { node := newTestNode(t, testNodeOpts{}) - outOfSync, liveNodes := node.syncStatus(0, nil) + outOfSync, liveNodes := node.isOutOfSyncWithPool(ChainInfo{}) assert.Equal(t, false, outOfSync) assert.Equal(t, 0, liveNodes) }) @@ -1308,7 +1632,7 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { node := newTestNode(t, testNodeOpts{}) poolInfo := newMockPoolChainInfoProvider(t) node.SetPoolChainInfoProvider(poolInfo) - outOfSync, liveNodes := node.syncStatus(0, nil) + outOfSync, liveNodes := node.isOutOfSyncWithPool(ChainInfo{}) assert.Equal(t, false, outOfSync) assert.Equal(t, 0, liveNodes) }) @@ -1320,7 +1644,7 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { poolInfo.On("LatestChainInfo").Return(1, ChainInfo{}).Once() node.SetPoolChainInfoProvider(poolInfo) assert.Panics(t, func() { - _, _ = node.syncStatus(0, nil) + _, _ = node.isOutOfSyncWithPool(ChainInfo{}) }) }) t.Run("block height selection mode", func(t *testing.T) { @@ -1371,7 +1695,7 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { for _, td := range []int64{totalDifficulty - syncThreshold - 1, totalDifficulty - syncThreshold, totalDifficulty, totalDifficulty + 1} { for _, testCase := range testCases { t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: total difficulty: %d", testCase.name, selectionMode, td), func(t *testing.T) { - outOfSync, liveNodes := node.syncStatus(testCase.blockNumber, big.NewInt(td)) + outOfSync, liveNodes := node.isOutOfSyncWithPool(ChainInfo{BlockNumber: testCase.blockNumber, TotalDifficulty: big.NewInt(td)}) assert.Equal(t, nodesNum, liveNodes) assert.Equal(t, testCase.outOfSync, outOfSync) }) @@ -1427,7 +1751,7 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { for _, hb := range []int64{highestBlock - syncThreshold - 1, highestBlock - syncThreshold, highestBlock, highestBlock + 1} { for _, testCase := range testCases { t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: highest block: %d", testCase.name, NodeSelectionModeTotalDifficulty, hb), func(t *testing.T) { - outOfSync, liveNodes := node.syncStatus(hb, big.NewInt(testCase.totalDifficulty)) + outOfSync, liveNodes := node.isOutOfSyncWithPool(ChainInfo{BlockNumber: hb, TotalDifficulty: big.NewInt(testCase.totalDifficulty)}) assert.Equal(t, nodesNum, liveNodes) assert.Equal(t, testCase.outOfSync, outOfSync) }) diff --git a/common/client/types.go b/common/client/types.go index b07f57eb8f..c9b6a3580e 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -68,7 +68,7 @@ type NodeClient[ SetAliveLoopSub(types.Subscription) UnsubscribeAllExceptAliveLoop() IsSyncing(ctx context.Context) (bool, error) - LatestFinalizedBlock(ctx context.Context) (HEAD, error) + SubscribeToFinalizedHeads(_ context.Context) (<-chan HEAD, types.Subscription, error) // GetInterceptedChainInfo - returns latest and highest observed by application layer ChainInfo. // latest ChainInfo is the most recent value received within a NodeClient's current lifecycle between Dial and DisconnectAll. // highestUserObservations ChainInfo is the highest ChainInfo observed excluding health checks calls. 
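// [Editor's sketch, not part of this patch] The syncStatus values passed to
// declareOutOfSync in the tests above (syncStatusNoNewHead,
// syncStatusNoNewFinalizedHead, syncStatusNotInSyncWithPool) are defined in
// common/client/models.go, whose hunks are not shown here. Judging by the
// asserted log line "sync issues: NotInSyncWithRPCPool,NoNewFinalizedHead",
// they compose as bit flags rendered as a comma-separated list. The
// reconstruction below is an assumption, not the patch's actual code.
package sketch

import (
	"strings"
	"time"
)

type syncStatus int

const (
	syncStatusSynced syncStatus = 0
	// Each issue gets its own bit so several can be reported at once.
	syncStatusNotInSyncWithPool syncStatus = 1 << iota
	syncStatusNoNewHead
	syncStatusNoNewFinalizedHead
)

func (s syncStatus) String() string {
	if s == syncStatusSynced {
		return "Synced"
	}
	var issues []string
	if s&syncStatusNotInSyncWithPool != 0 {
		issues = append(issues, "NotInSyncWithRPCPool")
	}
	if s&syncStatusNoNewHead != 0 {
		issues = append(issues, "NoNewHead")
	}
	if s&syncStatusNoNewFinalizedHead != 0 {
		issues = append(issues, "NoNewFinalizedHead")
	}
	return strings.Join(issues, ",")
}

// watchFinalizedHeads mirrors the staleness rule the tests assert: every
// finalized head clears the NoNewFinalizedHead bit and re-arms the timer;
// if NoNewFinalizedHeadsThreshold elapses first, the bit is set and the
// combined status is reported. A closed channel is left to the caller,
// which the tests show transitioning the node to unreachable.
func watchFinalizedHeads(heads <-chan struct{}, threshold time.Duration, report func(syncStatus)) {
	status := syncStatusSynced
	timer := time.NewTimer(threshold)
	defer timer.Stop()
	for {
		select {
		case _, open := <-heads:
			if !open {
				return
			}
			status &^= syncStatusNoNewFinalizedHead
			if !timer.Stop() {
				// Drain a fired-but-unread timer without blocking.
				select {
				case <-timer.C:
				default:
				}
			}
			timer.Reset(threshold)
		case <-timer.C:
			status |= syncStatusNoNewFinalizedHead
			report(status)
			timer.Reset(threshold)
		}
	}
}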
@@ -151,7 +151,9 @@ type connection[ ] interface { ChainID(ctx context.Context) (CHAIN_ID, error) Dial(ctx context.Context) error - SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (types.Subscription, error) + SubscribeToHeads(ctx context.Context) (ch <-chan HEAD, sub types.Subscription, err error) + // TODO: remove as part of merge with BCI-2875 + SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (s types.Subscription, err error) } // PoolChainInfoProvider - provides aggregation of nodes pool ChainInfo diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index 33955c1645..a0b89cabbc 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -751,7 +751,7 @@ func newMockRpc(t *testing.T) *mocks.RPCClient { mockRpc.On("Close").Return(nil).Once() mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Once() // node does not always manage to fully setup aliveLoop, so we have to make calls optional to avoid flakes - mockRpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(client.NewMockSubscription(), nil).Maybe() + mockRpc.On("SubscribeToHeads", mock.Anything).Return(nil, client.NewMockSubscription(), nil).Maybe() mockRpc.On("SetAliveLoopSub", mock.Anything).Return().Maybe() return mockRpc } diff --git a/core/chains/evm/client/config_builder.go b/core/chains/evm/client/config_builder.go index 19e0f14fd6..fa702bac11 100644 --- a/core/chains/evm/client/config_builder.go +++ b/core/chains/evm/client/config_builder.go @@ -41,6 +41,8 @@ func NewClientConfigs( finalizedBlockOffset *uint32, enforceRepeatableRead *bool, deathDeclarationDelay time.Duration, + noNewFinalizedHeadsThreshold time.Duration, + finalizedBlockPollInterval time.Duration, ) (commonclient.ChainConfig, evmconfig.NodePool, []*toml.Node, error) { nodes, err := parseNodeConfigs(nodeCfgs) @@ -48,24 +50,26 @@ func NewClientConfigs( return nil, nil, nil, err } nodePool := toml.NodePool{ - SelectionMode: selectionMode, - LeaseDuration: commonconfig.MustNewDuration(leaseDuration), - PollFailureThreshold: pollFailureThreshold, - PollInterval: commonconfig.MustNewDuration(pollInterval), - SyncThreshold: syncThreshold, - NodeIsSyncingEnabled: nodeIsSyncingEnabled, - EnforceRepeatableRead: enforceRepeatableRead, - DeathDeclarationDelay: commonconfig.MustNewDuration(deathDeclarationDelay), + SelectionMode: selectionMode, + LeaseDuration: commonconfig.MustNewDuration(leaseDuration), + PollFailureThreshold: pollFailureThreshold, + PollInterval: commonconfig.MustNewDuration(pollInterval), + SyncThreshold: syncThreshold, + NodeIsSyncingEnabled: nodeIsSyncingEnabled, + EnforceRepeatableRead: enforceRepeatableRead, + DeathDeclarationDelay: commonconfig.MustNewDuration(deathDeclarationDelay), + FinalizedBlockPollInterval: commonconfig.MustNewDuration(finalizedBlockPollInterval), } nodePoolCfg := &evmconfig.NodePoolConfig{C: nodePool} chainConfig := &evmconfig.EVMConfig{ C: &toml.EVMConfig{ Chain: toml.Chain{ - ChainType: chaintype.NewChainTypeConfig(chainType), - FinalityDepth: finalityDepth, - FinalityTagEnabled: finalityTagEnabled, - NoNewHeadsThreshold: commonconfig.MustNewDuration(noNewHeadsThreshold), - FinalizedBlockOffset: finalizedBlockOffset, + ChainType: chaintype.NewChainTypeConfig(chainType), + FinalityDepth: finalityDepth, + FinalityTagEnabled: finalityTagEnabled, + NoNewHeadsThreshold: commonconfig.MustNewDuration(noNewHeadsThreshold), + FinalizedBlockOffset: finalizedBlockOffset, + 
NoNewFinalizedHeadsThreshold: commonconfig.MustNewDuration(noNewFinalizedHeadsThreshold), }, }, } diff --git a/core/chains/evm/client/config_builder_test.go b/core/chains/evm/client/config_builder_test.go index 7c08bf18c1..403c6c2d61 100644 --- a/core/chains/evm/client/config_builder_test.go +++ b/core/chains/evm/client/config_builder_test.go @@ -26,6 +26,7 @@ func TestClientConfigBuilder(t *testing.T) { finalizedBlockOffset := ptr[uint32](16) enforceRepeatableRead := ptr(true) deathDeclarationDelay := time.Second * 3 + noNewFinalizedBlocksThreshold := time.Second nodeConfigs := []client.NodeConfig{ { Name: ptr("foo"), @@ -38,7 +39,7 @@ func TestClientConfigBuilder(t *testing.T) { noNewHeadsThreshold := time.Second chainCfg, nodePool, nodes, err := client.NewClientConfigs(selectionMode, leaseDuration, chainTypeStr, nodeConfigs, pollFailureThreshold, pollInterval, syncThreshold, nodeIsSyncingEnabled, noNewHeadsThreshold, finalityDepth, - finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay) + finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay, noNewFinalizedBlocksThreshold, pollInterval) require.NoError(t, err) // Validate node pool configs @@ -50,6 +51,7 @@ func TestClientConfigBuilder(t *testing.T) { require.Equal(t, *nodeIsSyncingEnabled, nodePool.NodeIsSyncingEnabled()) require.Equal(t, *enforceRepeatableRead, nodePool.EnforceRepeatableRead()) require.Equal(t, deathDeclarationDelay, nodePool.DeathDeclarationDelay()) + require.Equal(t, pollInterval, nodePool.FinalizedBlockPollInterval()) // Validate node configs require.Equal(t, *nodeConfigs[0].Name, *nodes[0].Name) @@ -61,6 +63,7 @@ func TestClientConfigBuilder(t *testing.T) { require.Equal(t, *finalityDepth, chainCfg.FinalityDepth()) require.Equal(t, *finalityTagEnabled, chainCfg.FinalityTagEnabled()) require.Equal(t, *finalizedBlockOffset, chainCfg.FinalizedBlockOffset()) + require.Equal(t, noNewFinalizedBlocksThreshold, chainCfg.NoNewFinalizedHeadsThreshold()) // let combiler tell us, when we do not have sufficient data to create evm client _ = client.NewEvmClient(nodePool, chainCfg, nil, logger.Test(t), big.NewInt(10), nodes, chaintype.ChainType(chainTypeStr)) diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index fd7fa5868a..c2373ee775 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -20,13 +20,13 @@ func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, cli for i, node := range nodes { if node.SendOnly != nil && *node.SendOnly { rpc := NewRPCClient(lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, - commonclient.Secondary) + commonclient.Secondary, cfg.FinalizedBlockPollInterval()) sendonly := commonclient.NewSendOnlyNode(lggr, (url.URL)(*node.HTTPURL), *node.Name, chainID, rpc) sendonlys = append(sendonlys, sendonly) } else { rpc := NewRPCClient(lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), - chainID, commonclient.Primary) + chainID, commonclient.Primary, cfg.FinalizedBlockPollInterval()) primaryNode := commonclient.NewNode(cfg, chainCfg, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, rpc, "EVM") diff --git a/core/chains/evm/client/evm_client_test.go b/core/chains/evm/client/evm_client_test.go index 9ad25f9602..bdfcf42674 100644 --- a/core/chains/evm/client/evm_client_test.go +++ b/core/chains/evm/client/evm_client_test.go @@ -27,6 +27,8 @@ func 
TestNewEvmClient(t *testing.T) { finalizedBlockOffset := ptr[uint32](16) enforceRepeatableRead := ptr(true) deathDeclarationDelay := time.Second * 3 + noNewFinalizedBlocksThreshold := time.Second * 5 + finalizedBlockPollInterval := time.Second * 4 nodeConfigs := []client.NodeConfig{ { Name: ptr("foo"), @@ -38,7 +40,7 @@ func TestNewEvmClient(t *testing.T) { finalityTagEnabled := ptr(true) chainCfg, nodePool, nodes, err := client.NewClientConfigs(selectionMode, leaseDuration, chainTypeStr, nodeConfigs, pollFailureThreshold, pollInterval, syncThreshold, nodeIsSyncingEnabled, noNewHeadsThreshold, finalityDepth, - finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay) + finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay, noNewFinalizedBlocksThreshold, finalizedBlockPollInterval) require.NoError(t, err) client := client.NewEvmClient(nodePool, chainCfg, nil, logger.Test(t), testutils.FixtureChainID, nodes, chaintype.ChainType(chainTypeStr)) diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index e1017a5564..a2a55e1791 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -140,7 +140,7 @@ func NewChainClientWithTestNode( } lggr := logger.Test(t) - rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary) + rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary, 0) n := commonclient.NewNode[*big.Int, *evmtypes.Head, RPCClient]( nodeCfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") @@ -152,7 +152,7 @@ func NewChainClientWithTestNode( return nil, pkgerrors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) } var empty url.URL - rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary) + rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary, 0) s := commonclient.NewSendOnlyNode[*big.Int, RPCClient]( lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) sendonlys = append(sendonlys, s) diff --git a/core/chains/evm/client/mocks/rpc_client.go b/core/chains/evm/client/mocks/rpc_client.go index fa866af29e..06f79efd55 100644 --- a/core/chains/evm/client/mocks/rpc_client.go +++ b/core/chains/evm/client/mocks/rpc_client.go @@ -1883,8 +1883,8 @@ func (_c *RPCClient_SubscribeNewHead_Call) Run(run func(ctx context.Context, cha return _c } -func (_c *RPCClient_SubscribeNewHead_Call) Return(_a0 commontypes.Subscription, _a1 error) *RPCClient_SubscribeNewHead_Call { - _c.Call.Return(_a0, _a1) +func (_c *RPCClient_SubscribeNewHead_Call) Return(s commontypes.Subscription, err error) *RPCClient_SubscribeNewHead_Call { + _c.Call.Return(s, err) return _c } @@ -1893,6 +1893,140 @@ func (_c *RPCClient_SubscribeNewHead_Call) RunAndReturn(run func(context.Context return _c } +// SubscribeToFinalizedHeads provides a mock function with given fields: _a0 +func (_m *RPCClient) SubscribeToFinalizedHeads(_a0 context.Context) (<-chan *types.Head, commontypes.Subscription, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan *types.Head + var r1 commontypes.Subscription + var r2 error + if rf, ok := 
ret.Get(0).(func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.Head); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) commontypes.Subscription); ok { + r1 = rf(_a0) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(commontypes.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// RPCClient_SubscribeToFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToFinalizedHeads' +type RPCClient_SubscribeToFinalizedHeads_Call struct { + *mock.Call +} + +// SubscribeToFinalizedHeads is a helper method to define mock.On call +// - _a0 context.Context +func (_e *RPCClient_Expecter) SubscribeToFinalizedHeads(_a0 interface{}) *RPCClient_SubscribeToFinalizedHeads_Call { + return &RPCClient_SubscribeToFinalizedHeads_Call{Call: _e.mock.On("SubscribeToFinalizedHeads", _a0)} +} + +func (_c *RPCClient_SubscribeToFinalizedHeads_Call) Run(run func(_a0 context.Context)) *RPCClient_SubscribeToFinalizedHeads_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *RPCClient_SubscribeToFinalizedHeads_Call) Return(_a0 <-chan *types.Head, _a1 commontypes.Subscription, _a2 error) *RPCClient_SubscribeToFinalizedHeads_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *RPCClient_SubscribeToFinalizedHeads_Call) RunAndReturn(run func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)) *RPCClient_SubscribeToFinalizedHeads_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *RPCClient) SubscribeToHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 <-chan *types.Head + var r1 commontypes.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.Head); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) commontypes.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(commontypes.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// RPCClient_SubscribeToHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToHeads' +type RPCClient_SubscribeToHeads_Call struct { + *mock.Call +} + +// SubscribeToHeads is a helper method to define mock.On call +// - ctx context.Context +func (_e *RPCClient_Expecter) SubscribeToHeads(ctx interface{}) *RPCClient_SubscribeToHeads_Call { + return &RPCClient_SubscribeToHeads_Call{Call: _e.mock.On("SubscribeToHeads", ctx)} +} + +func (_c *RPCClient_SubscribeToHeads_Call) Run(run func(ctx context.Context)) *RPCClient_SubscribeToHeads_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + 
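// [Editor's sketch, not part of this patch] Typical test wiring for the
// regenerated mock above, assuming mockRpc := mocks.NewRPCClient(t), a test
// context ctx, and the client.NewMockSubscription helper used elsewhere in
// this change. The receive-only conversion matters: mockery matches return
// types exactly, so a plain chan *types.Head must be cast to
// <-chan *types.Head before it is handed to Return — the same
// (<-chan Head)(ch) pattern the updated lifecycle tests use.
//
//	ch := make(chan *types.Head, 1)
//	mockRpc.On("SubscribeToFinalizedHeads", mock.Anything).
//		Return((<-chan *types.Head)(ch), client.NewMockSubscription(), nil).
//		Once()
//	heads, sub, err := mockRpc.SubscribeToFinalizedHeads(ctx)
//	// heads yields whatever the test pushes into ch; sub and err come
//	// straight from the stubbed Return values.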
+func (_c *RPCClient_SubscribeToHeads_Call) Return(ch <-chan *types.Head, sub commontypes.Subscription, err error) *RPCClient_SubscribeToHeads_Call { + _c.Call.Return(ch, sub, err) + return _c +} + +func (_c *RPCClient_SubscribeToHeads_Call) RunAndReturn(run func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)) *RPCClient_SubscribeToHeads_Call { + _c.Call.Return(run) + return _c +} + // SubscribersCount provides a mock function with given fields: func (_m *RPCClient) SubscribersCount() int32 { ret := _m.Called() diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 9ab5fd135b..1a0023227a 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -2,6 +2,7 @@ package client import ( "context" + "errors" "fmt" "math/big" "net/url" @@ -102,6 +103,8 @@ type RPCClient interface { GetInterceptedChainInfo() (latest, highestUserObservations commonclient.ChainInfo) } +const rpcSubscriptionMethodNewHeads = "newHeads" + type rawclient struct { rpc *rpc.Client geth *ethclient.Client @@ -109,11 +112,12 @@ type rawclient struct { } type rpcClient struct { - rpcLog logger.SugaredLogger - name string - id int32 - chainID *big.Int - tier commonclient.NodeTier + rpcLog logger.SugaredLogger + name string + id int32 + chainID *big.Int + tier commonclient.NodeTier + finalizedBlockPollInterval time.Duration ws rawclient http *rawclient @@ -147,6 +151,7 @@ func NewRPCClient( id int32, chainID *big.Int, tier commonclient.NodeTier, + finalizedBlockPollInterval time.Duration, ) RPCClient { r := new(rpcClient) r.name = name @@ -154,6 +159,7 @@ func NewRPCClient( r.chainID = chainID r.tier = tier r.ws.uri = wsuri + r.finalizedBlockPollInterval = finalizedBlockPollInterval if httpuri != nil { r.http = &rawclient{uri: *httpuri} } @@ -403,6 +409,7 @@ func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) err return err } +// TODO: Full transition from SubscribeNewHead to SubscribeToHeads is done in BCI-2875 func (r *rpcClient) SubscribeNewHead(ctx context.Context, channel chan<- *evmtypes.Head) (_ commontypes.Subscription, err error) { ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) defer cancel() @@ -434,6 +441,54 @@ func (r *rpcClient) SubscribeNewHead(ctx context.Context, channel chan<- *evmtyp return subForwarder, nil } +func (r *rpcClient) SubscribeToHeads(ctx context.Context) (ch <-chan *evmtypes.Head, sub commontypes.Subscription, err error) { + ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) + defer cancel() + + args := []interface{}{rpcSubscriptionMethodNewHeads} + start := time.Now() + lggr := r.newRqLggr().With("args", args) + + lggr.Debug("RPC call: evmclient.Client#EthSubscribe") + defer func() { + duration := time.Since(start) + r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") + err = r.wrapWS(err) + }() + + channel := make(chan *evmtypes.Head) + forwarder := newSubForwarder(channel, func(head *evmtypes.Head) *evmtypes.Head { + head.EVMChainID = ubig.New(r.chainID) + r.onNewHead(ctx, chStopInFlight, head) + return head + }, r.wrapRPCClientError) + + err = forwarder.start(ws.rpc.EthSubscribe(ctx, forwarder.srcCh, args...)) + if err != nil { + return nil, nil, err + } + + err = r.registerSub(forwarder, chStopInFlight) + if err != nil { + return nil, nil, err + } + + return channel, forwarder, err +} + +func (r *rpcClient) SubscribeToFinalizedHeads(_ context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { + interval := 
r.finalizedBlockPollInterval + if interval == 0 { + return nil, nil, errors.New("FinalizedBlockPollInterval is 0") + } + timeout := interval + poller, channel := commonclient.NewPoller[*evmtypes.Head](interval, r.LatestFinalizedBlock, timeout, r.rpcLog) + if err := poller.Start(); err != nil { + return nil, nil, err + } + return channel, &poller, nil +} + // GethClient wrappers func (r *rpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *evmtypes.Receipt, err error) { diff --git a/core/chains/evm/client/rpc_client_test.go b/core/chains/evm/client/rpc_client_test.go index 682c435245..9b21aedbea 100644 --- a/core/chains/evm/client/rpc_client_test.go +++ b/core/chains/evm/client/rpc_client_test.go @@ -56,7 +56,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) // set to default values @@ -106,7 +106,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) ch := make(chan *evmtypes.Head) @@ -129,7 +129,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Block's chain ID matched configured", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) ch := make(chan *evmtypes.Head) @@ -146,7 +146,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) @@ -156,7 +156,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Subscription error is properly wrapper", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) sub, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) @@ -184,7 +184,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ 
-201,7 +201,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { return resp }) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) sub, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ -250,7 +250,7 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { } server := createRPCServer() - rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary, 0) require.NoError(t, rpc.Dial(ctx)) defer rpc.Close() server.Head = &evmtypes.Head{Number: 128} diff --git a/core/chains/evm/config/chain_scoped.go b/core/chains/evm/config/chain_scoped.go index db598e3e82..b9b19cdc2c 100644 --- a/core/chains/evm/config/chain_scoped.go +++ b/core/chains/evm/config/chain_scoped.go @@ -183,3 +183,7 @@ func (e *EVMConfig) LogPrunePageSize() uint32 { func (e *EVMConfig) FinalizedBlockOffset() uint32 { return *e.C.FinalizedBlockOffset } + +func (e *EVMConfig) NoNewFinalizedHeadsThreshold() time.Duration { + return e.C.NoNewFinalizedHeadsThreshold.Duration() +} diff --git a/core/chains/evm/config/config.go b/core/chains/evm/config/config.go index 3ecdd9e4b1..3ccdfeea8b 100644 --- a/core/chains/evm/config/config.go +++ b/core/chains/evm/config/config.go @@ -46,6 +46,7 @@ type EVM interface { RPCDefaultBatchSize() uint32 NodeNoNewHeadsThreshold() time.Duration FinalizedBlockOffset() uint32 + NoNewFinalizedHeadsThreshold() time.Duration IsEnabled() bool TOMLString() (string, error) diff --git a/core/chains/evm/config/toml/config.go b/core/chains/evm/config/toml/config.go index 3e35bb4b55..2cb29d9769 100644 --- a/core/chains/evm/config/toml/config.go +++ b/core/chains/evm/config/toml/config.go @@ -338,27 +338,28 @@ func (c *EVMConfig) TOMLString() (string, error) { } type Chain struct { - AutoCreateKey *bool - BlockBackfillDepth *uint32 - BlockBackfillSkip *bool - ChainType *chaintype.ChainTypeConfig - FinalityDepth *uint32 - FinalityTagEnabled *bool - FlagsContractAddress *types.EIP55Address - LinkContractAddress *types.EIP55Address - LogBackfillBatchSize *uint32 - LogPollInterval *commonconfig.Duration - LogKeepBlocksDepth *uint32 - LogPrunePageSize *uint32 - BackupLogPollerBlockDelay *uint64 - MinIncomingConfirmations *uint32 - MinContractPayment *commonassets.Link - NonceAutoSync *bool - NoNewHeadsThreshold *commonconfig.Duration - OperatorFactoryAddress *types.EIP55Address - RPCDefaultBatchSize *uint32 - RPCBlockQueryDelay *uint16 - FinalizedBlockOffset *uint32 + AutoCreateKey *bool + BlockBackfillDepth *uint32 + BlockBackfillSkip *bool + ChainType *chaintype.ChainTypeConfig + FinalityDepth *uint32 + FinalityTagEnabled *bool + FlagsContractAddress *types.EIP55Address + LinkContractAddress *types.EIP55Address + LogBackfillBatchSize *uint32 + LogPollInterval *commonconfig.Duration + LogKeepBlocksDepth *uint32 + LogPrunePageSize *uint32 + BackupLogPollerBlockDelay *uint64 + MinIncomingConfirmations *uint32 + MinContractPayment *commonassets.Link + NonceAutoSync *bool + NoNewHeadsThreshold *commonconfig.Duration + OperatorFactoryAddress *types.EIP55Address + RPCDefaultBatchSize *uint32 + RPCBlockQueryDelay *uint16 + FinalizedBlockOffset *uint32 + NoNewFinalizedHeadsThreshold *commonconfig.Duration Transactions Transactions `toml:",omitempty"` 
BalanceMonitor BalanceMonitor `toml:",omitempty"` diff --git a/core/chains/evm/config/toml/defaults.go b/core/chains/evm/config/toml/defaults.go index 38eef40bf7..c3f087da8c 100644 --- a/core/chains/evm/config/toml/defaults.go +++ b/core/chains/evm/config/toml/defaults.go @@ -165,6 +165,10 @@ func (c *Chain) SetFrom(f *Chain) { c.FinalizedBlockOffset = v } + if v := f.NoNewFinalizedHeadsThreshold; v != nil { + c.NoNewFinalizedHeadsThreshold = v + } + c.Transactions.setFrom(&f.Transactions) c.BalanceMonitor.setFrom(&f.BalanceMonitor) c.GasEstimator.setFrom(&f.GasEstimator) diff --git a/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml b/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml index b8e884a5a1..5ba2e3cdc7 100644 --- a/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml +++ b/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml @@ -7,6 +7,7 @@ MinIncomingConfirmations = 1 NoNewHeadsThreshold = '30s' OCR.ContractConfirmations = 1 RPCBlockQueryDelay = 2 +NoNewFinalizedHeadsThreshold = '1m' [GasEstimator] PriceDefault = '25 gwei' diff --git a/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml b/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml index 183c6ecd63..e7813842b4 100644 --- a/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml @@ -7,6 +7,7 @@ MinIncomingConfirmations = 1 NoNewHeadsThreshold = '30s' OCR.ContractConfirmations = 1 RPCBlockQueryDelay = 2 +NoNewFinalizedHeadsThreshold = '1m' [GasEstimator] PriceDefault = '25 gwei' diff --git a/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml b/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml index 29ca3ed384..10f4c570be 100644 --- a/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml @@ -9,6 +9,7 @@ LinkContractAddress = '0x404460C6A5EdE2D891e8297795264fDe62ADBB75' LogPollInterval = '3s' NoNewHeadsThreshold = '30s' RPCBlockQueryDelay = 2 +NoNewFinalizedHeadsThreshold = '45s' [GasEstimator] PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/BSC_Testnet.toml b/core/chains/evm/config/toml/defaults/BSC_Testnet.toml index 2a074f08e1..b27a877812 100644 --- a/core/chains/evm/config/toml/defaults/BSC_Testnet.toml +++ b/core/chains/evm/config/toml/defaults/BSC_Testnet.toml @@ -9,6 +9,7 @@ LinkContractAddress = '0x84b9B910527Ad5C03A9Ca831909E21e236EA7b06' LogPollInterval = '3s' NoNewHeadsThreshold = '30s' RPCBlockQueryDelay = 2 +NoNewFinalizedHeadsThreshold = '40s' [GasEstimator] PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/Base_Mainnet.toml b/core/chains/evm/config/toml/defaults/Base_Mainnet.toml index d6233e655a..da38182b19 100644 --- a/core/chains/evm/config/toml/defaults/Base_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Base_Mainnet.toml @@ -5,6 +5,7 @@ FinalityTagEnabled = true LogPollInterval = '2s' NoNewHeadsThreshold = '40s' MinIncomingConfirmations = 1 +NoNewFinalizedHeadsThreshold = '15m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Base_Sepolia.toml b/core/chains/evm/config/toml/defaults/Base_Sepolia.toml index 633a838067..92f7717b27 100644 --- a/core/chains/evm/config/toml/defaults/Base_Sepolia.toml +++ b/core/chains/evm/config/toml/defaults/Base_Sepolia.toml @@ -6,6 +6,7 @@ LinkContractAddress = '0xE4aB69C077896252FAFBD49EFD26B5D171A32410' LogPollInterval = '2s' NoNewHeadsThreshold = '40s' MinIncomingConfirmations = 1 
+NoNewFinalizedHeadsThreshold = '12m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml b/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml index b48cb25b32..a494862037 100644 --- a/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml @@ -5,6 +5,7 @@ LogPollInterval = '5s' MinIncomingConfirmations = 1 NoNewHeadsThreshold = '1m' OCR.ContractConfirmations = 1 +NoNewFinalizedHeadsThreshold = '1m' [GasEstimator] PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/Celo_Testnet.toml b/core/chains/evm/config/toml/defaults/Celo_Testnet.toml index d3f595baac..eb43f080b7 100644 --- a/core/chains/evm/config/toml/defaults/Celo_Testnet.toml +++ b/core/chains/evm/config/toml/defaults/Celo_Testnet.toml @@ -5,6 +5,7 @@ LogPollInterval = '5s' MinIncomingConfirmations = 1 NoNewHeadsThreshold = '1m' OCR.ContractConfirmations = 1 +NoNewFinalizedHeadsThreshold = '1m' [GasEstimator] PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml b/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml index 3911fd25df..0bcaf35c64 100644 --- a/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml @@ -3,6 +3,7 @@ LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' MinContractPayment = '0.1 link' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' FinalityTagEnabled = true +NoNewFinalizedHeadsThreshold = '9m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Gnosis_Chiado.toml b/core/chains/evm/config/toml/defaults/Gnosis_Chiado.toml index 1b14da2b54..379377a226 100644 --- a/core/chains/evm/config/toml/defaults/Gnosis_Chiado.toml +++ b/core/chains/evm/config/toml/defaults/Gnosis_Chiado.toml @@ -3,6 +3,7 @@ ChainID = '10200' FinalityDepth = 100 ChainType = 'gnosis' LogPollInterval = '5s' +NoNewFinalizedHeadsThreshold = '2m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Gnosis_Mainnet.toml b/core/chains/evm/config/toml/defaults/Gnosis_Mainnet.toml index 587f0083b7..628646364f 100644 --- a/core/chains/evm/config/toml/defaults/Gnosis_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Gnosis_Mainnet.toml @@ -9,6 +9,7 @@ ChainID = '100' ChainType = 'gnosis' LinkContractAddress = '0xE2e73A1c69ecF83F464EFCE6A5be353a37cA09b2' LogPollInterval = '5s' +NoNewFinalizedHeadsThreshold = '2m' [GasEstimator] PriceDefault = '1 gwei' diff --git a/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml b/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml index 80702219ce..b0f56a49d9 100644 --- a/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml @@ -6,6 +6,7 @@ LinkContractAddress = '0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6' LogPollInterval = '2s' NoNewHeadsThreshold = '40s' MinIncomingConfirmations = 1 +NoNewFinalizedHeadsThreshold = '13m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Optimism_Sepolia.toml b/core/chains/evm/config/toml/defaults/Optimism_Sepolia.toml index 31218d5d7f..1c71aa5dd8 100644 --- a/core/chains/evm/config/toml/defaults/Optimism_Sepolia.toml +++ b/core/chains/evm/config/toml/defaults/Optimism_Sepolia.toml @@ -5,6 +5,7 @@ FinalityTagEnabled = true LogPollInterval = '2s' NoNewHeadsThreshold = '40s' 
MinIncomingConfirmations = 1 +NoNewFinalizedHeadsThreshold = '15m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml b/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml index d2958bc9bd..b05b3053a8 100644 --- a/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml +++ b/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml @@ -5,6 +5,7 @@ MinIncomingConfirmations = 5 NoNewHeadsThreshold = '30s' RPCBlockQueryDelay = 10 RPCDefaultBatchSize = 100 +NoNewFinalizedHeadsThreshold = '12m' [Transactions] MaxQueued = 5000 diff --git a/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml b/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml index b8d68b7f82..bf605cab3c 100644 --- a/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml @@ -10,6 +10,7 @@ NoNewHeadsThreshold = '30s' # Must be set to something large here because Polygon has so many re-orgs that otherwise we are constantly refetching RPCBlockQueryDelay = 10 RPCDefaultBatchSize = 100 +NoNewFinalizedHeadsThreshold = '6m' [Transactions] # Matic nodes under high mempool pressure are liable to drop txes, we need to ensure we keep sending them diff --git a/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml b/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml index 4322842abb..7d3fcc6bc2 100644 --- a/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml @@ -6,6 +6,7 @@ MinIncomingConfirmations = 1 # WeMix emits a block every 1 second, regardless of transactions LogPollInterval = '3s' NoNewHeadsThreshold = '30s' +NoNewFinalizedHeadsThreshold = '40s' [OCR] ContractConfirmations = 1 diff --git a/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml b/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml index 2c3fc606a7..5775097967 100644 --- a/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml +++ b/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml @@ -6,6 +6,7 @@ MinIncomingConfirmations = 1 # WeMix emits a block every 1 second, regardless of transactions LogPollInterval = '3s' NoNewHeadsThreshold = '30s' +NoNewFinalizedHeadsThreshold = '40s' [OCR] ContractConfirmations = 1 diff --git a/core/chains/evm/config/toml/defaults/fallback.toml b/core/chains/evm/config/toml/defaults/fallback.toml index 8bf3392210..e3136323f6 100644 --- a/core/chains/evm/config/toml/defaults/fallback.toml +++ b/core/chains/evm/config/toml/defaults/fallback.toml @@ -16,6 +16,7 @@ NoNewHeadsThreshold = '3m' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0' [Transactions] ForwardersEnabled = false diff --git a/core/config/docs/chains-evm.toml b/core/config/docs/chains-evm.toml index f053be9fa6..9efe0585b2 100644 --- a/core/config/docs/chains-evm.toml +++ b/core/config/docs/chains-evm.toml @@ -98,6 +98,11 @@ RPCBlockQueryDelay = 1 # Default # Block 64 will be treated as finalized by CL Node only when chain's latest finalized block is 65. As chain finalizes blocks in batches of 32, # CL Node has to wait for a whole new batch to be finalized to treat block 64 as finalized. FinalizedBlockOffset = 0 # Default +# NoNewFinalizedHeadsThreshold controls how long to wait for a new finalized block before `NodePool` marks RPC endpoints as +# out-of-sync. Only applicable if `FinalityTagEnabled=true`. +# +# Set to zero to disable.
+NoNewFinalizedHeadsThreshold = '0' # Default [EVM.Transactions] # ForwardersEnabled enables or disables sending transactions through forwarder contracts. diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index 4143bcbd50..121012b5dd 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -554,19 +554,20 @@ func TestConfig_Marshal(t *testing.T) { }, }, - LinkContractAddress: mustAddress("0x538aAaB4ea120b2bC2fe5D296852D948F07D849e"), - LogBackfillBatchSize: ptr[uint32](17), - LogPollInterval: &minute, - LogKeepBlocksDepth: ptr[uint32](100000), - LogPrunePageSize: ptr[uint32](10000), - BackupLogPollerBlockDelay: ptr[uint64](532), - MinContractPayment: commonassets.NewLinkFromJuels(math.MaxInt64), - MinIncomingConfirmations: ptr[uint32](13), - NonceAutoSync: ptr(true), - NoNewHeadsThreshold: &minute, - OperatorFactoryAddress: mustAddress("0xa5B85635Be42F21f94F28034B7DA440EeFF0F418"), - RPCDefaultBatchSize: ptr[uint32](17), - RPCBlockQueryDelay: ptr[uint16](10), + LinkContractAddress: mustAddress("0x538aAaB4ea120b2bC2fe5D296852D948F07D849e"), + LogBackfillBatchSize: ptr[uint32](17), + LogPollInterval: &minute, + LogKeepBlocksDepth: ptr[uint32](100000), + LogPrunePageSize: ptr[uint32](10000), + BackupLogPollerBlockDelay: ptr[uint64](532), + MinContractPayment: commonassets.NewLinkFromJuels(math.MaxInt64), + MinIncomingConfirmations: ptr[uint32](13), + NonceAutoSync: ptr(true), + NoNewHeadsThreshold: &minute, + OperatorFactoryAddress: mustAddress("0xa5B85635Be42F21f94F28034B7DA440EeFF0F418"), + RPCDefaultBatchSize: ptr[uint32](17), + RPCBlockQueryDelay: ptr[uint16](10), + NoNewFinalizedHeadsThreshold: &hour, Transactions: evmcfg.Transactions{ MaxInFlight: ptr[uint32](19), @@ -998,6 +999,7 @@ OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 16 +NoNewFinalizedHeadsThreshold = '1h0m0s' [EVM.Transactions] ForwardersEnabled = true diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index c166657a60..9df2e70508 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -291,6 +291,7 @@ OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 16 +NoNewFinalizedHeadsThreshold = '1h0m0s' [EVM.Transactions] ForwardersEnabled = true diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index 3aedc0dc9b..e560099c2c 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -278,6 +278,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 12 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false @@ -377,6 +378,7 @@ OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [EVM.Transactions] ForwardersEnabled = false @@ -470,6 +472,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '6m0s' [EVM.Transactions] 
ForwardersEnabled = false diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index 506f542cf1..c5547f8698 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -291,6 +291,7 @@ OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '15m0s' [EVM.Transactions] ForwardersEnabled = true diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index faa18fd407..8c063d0e9e 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -278,6 +278,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false @@ -377,6 +378,7 @@ OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [EVM.Transactions] ForwardersEnabled = false @@ -470,6 +472,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '6m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 1e0ebbf948..258adee5b1 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -1793,6 +1793,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [Transactions] ForwardersEnabled = false @@ -1886,6 +1887,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -1979,6 +1981,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2072,6 +2075,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2166,6 +2170,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '13m0s' [Transactions] ForwardersEnabled = false @@ -2259,6 +2264,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2352,6 +2358,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2446,6 +2453,7 @@ OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2539,6 +2547,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '45s' [Transactions] ForwardersEnabled = 
false @@ -2631,6 +2640,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2723,6 +2733,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2908,6 +2919,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '40s' [Transactions] ForwardersEnabled = false @@ -3002,6 +3014,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '2m0s' [Transactions] ForwardersEnabled = false @@ -3095,6 +3108,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3188,6 +3202,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '6m0s' [Transactions] ForwardersEnabled = false @@ -3281,6 +3296,7 @@ NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 15 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3374,6 +3390,7 @@ NoNewHeadsThreshold = '6m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 15 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3467,6 +3484,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3560,6 +3578,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3653,6 +3672,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3746,6 +3766,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3839,6 +3860,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3933,6 +3955,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4026,6 +4049,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4304,6 +4328,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4397,6 +4422,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4490,6 +4516,7 @@ NoNewHeadsThreshold = '6m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 
15 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4583,6 +4610,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '40s' [Transactions] ForwardersEnabled = false @@ -4676,6 +4704,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '40s' [Transactions] ForwardersEnabled = false @@ -4768,6 +4797,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4861,6 +4891,7 @@ NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4954,6 +4985,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5047,6 +5079,7 @@ NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5140,6 +5173,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5326,6 +5360,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5419,6 +5454,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '15m0s' [Transactions] ForwardersEnabled = false @@ -5512,6 +5548,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '2m0s' [Transactions] ForwardersEnabled = false @@ -5606,6 +5643,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5981,6 +6019,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '1m0s' [Transactions] ForwardersEnabled = false @@ -6074,6 +6113,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '1m0s' [Transactions] ForwardersEnabled = false @@ -6167,6 +6207,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '1m0s' [Transactions] ForwardersEnabled = false @@ -6260,6 +6301,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '1m0s' [Transactions] ForwardersEnabled = false @@ -6352,6 +6394,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6444,6 +6487,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = 
false @@ -6536,6 +6580,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6629,6 +6674,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6815,6 +6861,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6907,6 +6954,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '12m0s' [Transactions] ForwardersEnabled = false @@ -7094,6 +7142,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7188,6 +7237,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '12m0s' [Transactions] ForwardersEnabled = false @@ -7282,6 +7332,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7376,6 +7427,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7470,6 +7522,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7563,6 +7616,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7656,6 +7710,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7749,6 +7804,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7842,6 +7898,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '15m0s' [Transactions] ForwardersEnabled = false @@ -8029,6 +8086,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -8122,6 +8180,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -8383,6 +8442,15 @@ The latest finalized block on chain is 64, so block 63 is the latest finalized f Block 64 will be treated as finalized by CL Node only when chain's latest finalized block is 65. As chain finalizes blocks in batches of 32, CL Node has to wait for a whole new batch to be finalized to treat block 64 as finalized. 
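The FinalizedBlockOffset example quoted above reduces to a single subtraction: the node's effective finalized block is the chain's latest finalized block minus the offset. A minimal runnable sketch of that arithmetic (illustrative only; the helper name applyFinalizedBlockOffset is invented and is not part of this patch):

```go
package main

import "fmt"

// applyFinalizedBlockOffset reproduces the documented example: with an
// offset of 1 and the chain's latest finalized block at 64, the node treats
// block 63 as finalized; block 64 only becomes finalized once the chain
// reports 65.
func applyFinalizedBlockOffset(chainLatestFinalized, offset int64) int64 {
	return chainLatestFinalized - offset
}

func main() {
	fmt.Println(applyFinalizedBlockOffset(64, 1)) // 63
	fmt.Println(applyFinalizedBlockOffset(65, 1)) // 64
}
```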
+### NoNewFinalizedHeadsThreshold +```toml +NoNewFinalizedHeadsThreshold = '0' # Default +``` +NoNewFinalizedHeadsThreshold controls how long to wait for a new finalized block before `NodePool` marks RPC endpoints as +out-of-sync. Only applicable if `FinalityTagEnabled=true`. + +Set to zero to disable. + ## EVM.Transactions ```toml [EVM.Transactions] diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar index a3554fd41f..fe522ca1bb 100644 --- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar @@ -334,6 +334,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar index d0f2068c42..5a5ce42d6b 100644 --- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar @@ -334,6 +334,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar index b68e513b7e..12773dc99c 100644 --- a/testdata/scripts/node/validate/disk-based-logging.txtar +++ b/testdata/scripts/node/validate/disk-based-logging.txtar @@ -334,6 +334,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar index 4c449c4929..e709b24d5b 100644 --- a/testdata/scripts/node/validate/invalid.txtar +++ b/testdata/scripts/node/validate/invalid.txtar @@ -324,6 +324,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar index f55cfa0969..9b22da4850 100644 --- a/testdata/scripts/node/validate/valid.txtar +++ b/testdata/scripts/node/validate/valid.txtar @@ -331,6 +331,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false From d496c993565bdf56bbee3bcc7ffa10253e0b6df4 Mon Sep 17 00:00:00 2001 From: Dmytro Haidashenko <34754799+dhaidashenko@users.noreply.github.com> Date: Wed, 31 Jul 2024 20:02:38 +0200 Subject: [PATCH 2/5] BCI-3842 custom timeouts for Hedera (#13876) * allow configuring RPCTimeouts * Custom (30s) timeout for Hedera RPC requests with large payloads (SendTransaction, CallContext, etc.)
* fix linter --- .changeset/mean-brooms-agree.md | 5 ++ core/chains/evm/client/chain_client.go | 3 +- core/chains/evm/client/evm_client.go | 14 ++- core/chains/evm/client/helpers_test.go | 4 +- core/chains/evm/client/rpc_client.go | 81 +++++++++-------- core/chains/evm/client/rpc_client_test.go | 88 +++++++++++++++++-- core/chains/evm/config/chaintype/chaintype.go | 6 +- core/services/chainlink/config_test.go | 4 +- core/services/ocr/contract_tracker.go | 2 +- 9 files changed, 152 insertions(+), 55 deletions(-) create mode 100644 .changeset/mean-brooms-agree.md diff --git a/.changeset/mean-brooms-agree.md b/.changeset/mean-brooms-agree.md new file mode 100644 index 0000000000..0dd2ba7bd3 --- /dev/null +++ b/.changeset/mean-brooms-agree.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Custom (30s) timeout for Hedera RPC requests with large payloads (SendTransaction, CallContext, etc.) #internal diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 7597346914..c39214471c 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -20,7 +20,6 @@ import ( evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ) -const queryTimeout = 10 * time.Second const BALANCE_OF_ADDRESS_FUNCTION_SELECTOR = "0x70a08231" var _ Client = (*chainClient)(nil) @@ -99,7 +98,7 @@ type Client interface { } func ContextWithDefaultTimeout() (ctx context.Context, cancel context.CancelFunc) { - return context.WithTimeout(context.Background(), queryTimeout) + return context.WithTimeout(context.Background(), commonclient.QueryTimeout) } type chainClient struct { diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index c2373ee775..3676808683 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -3,6 +3,7 @@ package client import ( "math/big" "net/url" + "time" "github.com/smartcontractkit/chainlink-common/pkg/logger" @@ -17,16 +18,17 @@ func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, cli var empty url.URL var primaries []commonclient.Node[*big.Int, *evmtypes.Head, RPCClient] var sendonlys []commonclient.SendOnlyNode[*big.Int, RPCClient] + largePayloadRPCTimeout, defaultRPCTimeout := getRPCTimeouts(chainType) for i, node := range nodes { if node.SendOnly != nil && *node.SendOnly { rpc := NewRPCClient(lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, - commonclient.Secondary, cfg.FinalizedBlockPollInterval()) + commonclient.Secondary, cfg.FinalizedBlockPollInterval(), largePayloadRPCTimeout, defaultRPCTimeout) sendonly := commonclient.NewSendOnlyNode(lggr, (url.URL)(*node.HTTPURL), *node.Name, chainID, rpc) sendonlys = append(sendonlys, sendonly) } else { rpc := NewRPCClient(lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), - chainID, commonclient.Primary, cfg.FinalizedBlockPollInterval()) + chainID, commonclient.Primary, cfg.FinalizedBlockPollInterval(), largePayloadRPCTimeout, defaultRPCTimeout) primaryNode := commonclient.NewNode(cfg, chainCfg, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, rpc, "EVM") @@ -37,3 +39,11 @@ func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, cli return NewChainClient(lggr, cfg.SelectionMode(), cfg.LeaseDuration(), chainCfg.NodeNoNewHeadsThreshold(), primaries, sendonlys, chainID, chainType, clientErrors, cfg.DeathDeclarationDelay()) } + +func 
getRPCTimeouts(chainType chaintype.ChainType) (largePayload, defaultTimeout time.Duration) { + if chaintype.ChainHedera == chainType { + return 30 * time.Second, commonclient.QueryTimeout + } + + return commonclient.QueryTimeout, commonclient.QueryTimeout +} diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index a2a55e1791..8caacb4190 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -140,7 +140,7 @@ func NewChainClientWithTestNode( } lggr := logger.Test(t) - rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary, 0) + rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) n := commonclient.NewNode[*big.Int, *evmtypes.Head, RPCClient]( nodeCfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") @@ -152,7 +152,7 @@ func NewChainClientWithTestNode( return nil, pkgerrors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) } var empty url.URL - rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary, 0) + rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) s := commonclient.NewSendOnlyNode[*big.Int, RPCClient]( lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) sendonlys = append(sendonlys, s) diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 1a0023227a..200703dd42 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -117,6 +117,8 @@ type rpcClient struct { id int32 chainID *big.Int tier commonclient.NodeTier + largePayloadRpcTimeout time.Duration + rpcTimeout time.Duration finalizedBlockPollInterval time.Duration ws rawclient @@ -152,8 +154,13 @@ func NewRPCClient( chainID *big.Int, tier commonclient.NodeTier, finalizedBlockPollInterval time.Duration, + largePayloadRpcTimeout time.Duration, + rpcTimeout time.Duration, ) RPCClient { - r := new(rpcClient) + r := &rpcClient{ + largePayloadRpcTimeout: largePayloadRpcTimeout, + rpcTimeout: rpcTimeout, + } r.name = name r.id = id r.chainID = chainID @@ -178,7 +185,7 @@ func NewRPCClient( // Not thread-safe, pure dial. 
func (r *rpcClient) Dial(callerCtx context.Context) error { - ctx, cancel := r.makeQueryCtx(callerCtx) + ctx, cancel := r.makeQueryCtx(callerCtx, r.rpcTimeout) defer cancel() promEVMPoolRPCNodeDials.WithLabelValues(r.chainID.String(), r.name).Inc() @@ -367,7 +374,7 @@ func (r *rpcClient) UnsubscribeAllExceptAliveLoop() { // CallContext implementation func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.largePayloadRpcTimeout) defer cancel() lggr := r.newRqLggr().With( "method", method, @@ -390,7 +397,7 @@ func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method } func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.largePayloadRpcTimeout) defer cancel() lggr := r.newRqLggr().With("nBatchElems", len(b), "batchElems", b) @@ -411,7 +418,7 @@ func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) err // TODO: Full transition from SubscribeNewHead to SubscribeToHeads is done in BCI-2875 func (r *rpcClient) SubscribeNewHead(ctx context.Context, channel chan<- *evmtypes.Head) (_ commontypes.Subscription, err error) { - ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) + ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx, r.rpcTimeout) defer cancel() args := []interface{}{"newHeads"} lggr := r.newRqLggr().With("args", args) @@ -442,7 +449,7 @@ func (r *rpcClient) SubscribeNewHead(ctx context.Context, channel chan<- *evmtyp } func (r *rpcClient) SubscribeToHeads(ctx context.Context) (ch <-chan *evmtypes.Head, sub commontypes.Subscription, err error) { - ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) + ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx, r.rpcTimeout) defer cancel() args := []interface{}{rpcSubscriptionMethodNewHeads} @@ -504,7 +511,7 @@ func (r *rpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) } func (r *rpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("txHash", txHash) @@ -527,7 +534,7 @@ func (r *rpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Ha return } func (r *rpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("txHash", txHash) @@ -551,7 +558,7 @@ func (r *rpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) ( } func (r *rpcClient) HeaderByNumber(ctx context.Context, number *big.Int) (header *types.Header, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("number", number) @@ -572,7 +579,7 @@ func (r *rpcClient) HeaderByNumber(ctx context.Context, number *big.Int) (header } func (r *rpcClient) HeaderByHash(ctx 
context.Context, hash common.Hash) (header *types.Header, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("hash", hash) @@ -604,7 +611,7 @@ func (r *rpcClient) BlockByNumber(ctx context.Context, number *big.Int) (head *e } func (r *rpcClient) blockByNumber(ctx context.Context, number string) (head *evmtypes.Head, err error) { - ctx, cancel, chStopInFlight, ws, http := r.acquireQueryCtx(ctx) + ctx, cancel, chStopInFlight, ws, http := r.acquireQueryCtx(ctx, r.rpcTimeout) defer cancel() const method = "eth_getBlockByNumber" args := []interface{}{number, false} @@ -656,7 +663,7 @@ func (r *rpcClient) BlockByHash(ctx context.Context, hash common.Hash) (head *ev } func (r *rpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (block *types.Block, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("hash", hash) @@ -679,7 +686,7 @@ func (r *rpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (bloc } func (r *rpcClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (block *types.Block, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("number", number) @@ -702,7 +709,7 @@ func (r *rpcClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (blo } func (r *rpcClient) SendTransaction(ctx context.Context, tx *types.Transaction) error { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.largePayloadRpcTimeout) defer cancel() lggr := r.newRqLggr().With("tx", tx) @@ -740,7 +747,7 @@ func (r *rpcClient) SendEmptyTransaction( // PendingSequenceAt returns one higher than the highest nonce from both mempool and mined transactions func (r *rpcClient) PendingSequenceAt(ctx context.Context, account common.Address) (nonce evmtypes.Nonce, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("account", account) @@ -769,7 +776,7 @@ func (r *rpcClient) PendingSequenceAt(ctx context.Context, account common.Addres // mined nonce at the given block number, but it actually returns the total // transaction count which is the highest mined nonce + 1 func (r *rpcClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (nonce evmtypes.Nonce, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber) @@ -795,7 +802,7 @@ func (r *rpcClient) SequenceAt(ctx context.Context, account common.Address, bloc } func (r *rpcClient) PendingCodeAt(ctx context.Context, account common.Address) (code []byte, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("account", account) @@ -818,7 +825,7 @@ func (r *rpcClient) PendingCodeAt(ctx 
context.Context, account common.Address) ( } func (r *rpcClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) (code []byte, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber) @@ -841,7 +848,7 @@ func (r *rpcClient) CodeAt(ctx context.Context, account common.Address, blockNum } func (r *rpcClient) EstimateGas(ctx context.Context, c interface{}) (gas uint64, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.largePayloadRpcTimeout) defer cancel() call := c.(ethereum.CallMsg) lggr := r.newRqLggr().With("call", call) @@ -865,7 +872,7 @@ func (r *rpcClient) EstimateGas(ctx context.Context, c interface{}) (gas uint64, } func (r *rpcClient) SuggestGasPrice(ctx context.Context) (price *big.Int, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr() @@ -888,7 +895,7 @@ func (r *rpcClient) SuggestGasPrice(ctx context.Context) (price *big.Int, err er } func (r *rpcClient) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) (val []byte, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.largePayloadRpcTimeout) defer cancel() lggr := r.newRqLggr().With("callMsg", msg, "blockNumber", blockNumber) message := msg.(ethereum.CallMsg) @@ -916,7 +923,7 @@ func (r *rpcClient) CallContract(ctx context.Context, msg interface{}, blockNumb } func (r *rpcClient) PendingCallContract(ctx context.Context, msg interface{}) (val []byte, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.largePayloadRpcTimeout) defer cancel() lggr := r.newRqLggr().With("callMsg", msg) message := msg.(ethereum.CallMsg) @@ -950,7 +957,7 @@ func (r *rpcClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { } func (r *rpcClient) BlockNumber(ctx context.Context) (height uint64, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr() @@ -973,7 +980,7 @@ func (r *rpcClient) BlockNumber(ctx context.Context) (height uint64, err error) } func (r *rpcClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (balance *big.Int, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("account", account.Hex(), "blockNumber", blockNumber) @@ -1038,7 +1045,7 @@ func (r *rpcClient) FilterEvents(ctx context.Context, q ethereum.FilterQuery) ([ } func (r *rpcClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) (l []types.Log, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("q", q) @@ -1066,7 +1073,7 @@ func (r *rpcClient) ClientVersion(ctx context.Context) (version string, err erro } 
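The hunks above and below all follow one pattern: each RPC method now selects one of the two timeouts stored on rpcClient (largePayloadRpcTimeout for SendTransaction, CallContext, BatchCallContext, EstimateGas, CallContract, and PendingCallContract; rpcTimeout for everything else) and derives its per-request context from that choice. A self-contained sketch of the idea, assuming the values from this patch (the 10s query timeout formerly hard-coded in chain_client.go, and the 30s Hedera large-payload timeout from getRPCTimeouts); the timeoutFor helper is invented for illustration:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const (
	queryTimeout        = 10 * time.Second // the common default, per chain_client.go
	largePayloadTimeout = 30 * time.Second // the Hedera override from getRPCTimeouts
)

// timeoutFor picks the longer timeout for calls that may carry large
// payloads (transaction sends, contract calls) and the common query
// timeout for everything else.
func timeoutFor(largePayload bool) time.Duration {
	if largePayload {
		return largePayloadTimeout
	}
	return queryTimeout
}

func main() {
	// Derive a per-request context the way the patched methods do.
	ctx, cancel := context.WithTimeout(context.Background(), timeoutFor(true))
	defer cancel()
	deadline, _ := ctx.Deadline()
	fmt.Println("request deadline in:", time.Until(deadline).Round(time.Second))
}
```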
func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (_ ethereum.Subscription, err error) { - ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) + ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr().With("q", q) @@ -1092,7 +1099,7 @@ func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQu } func (r *rpcClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr() @@ -1117,7 +1124,7 @@ func (r *rpcClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err // Returns the ChainID according to the geth client. This is useful for functions like verify() // the common node. func (r *rpcClient) ChainID(ctx context.Context) (chainID *big.Int, err error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() @@ -1172,12 +1179,12 @@ func (r *rpcClient) wrapHTTP(err error) error { } // makeLiveQueryCtxAndSafeGetClients wraps makeQueryCtx -func (r *rpcClient) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient) { - ctx, cancel, _, ws, http = r.acquireQueryCtx(parentCtx) +func (r *rpcClient) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context, timeout time.Duration) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient) { + ctx, cancel, _, ws, http = r.acquireQueryCtx(parentCtx, timeout) return } -func (r *rpcClient) acquireQueryCtx(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, +func (r *rpcClient) acquireQueryCtx(parentCtx context.Context, timeout time.Duration) (ctx context.Context, cancel context.CancelFunc, chStopInFlight chan struct{}, ws rawclient, http *rawclient) { // Need to wrap in mutex because state transition can cancel and replace the // context @@ -1189,7 +1196,7 @@ func (r *rpcClient) acquireQueryCtx(parentCtx context.Context) (ctx context.Cont http = &cp } r.stateMu.RUnlock() - ctx, cancel = makeQueryCtx(parentCtx, chStopInFlight) + ctx, cancel = makeQueryCtx(parentCtx, chStopInFlight, timeout) return } @@ -1197,10 +1204,10 @@ func (r *rpcClient) acquireQueryCtx(parentCtx context.Context) (ctx context.Cont // 1. Passed in ctx cancels // 2. Passed in channel is closed // 3. 
Default timeout is reached (queryTimeout) -func makeQueryCtx(ctx context.Context, ch services.StopChan) (context.Context, context.CancelFunc) { +func makeQueryCtx(ctx context.Context, ch services.StopChan, timeout time.Duration) (context.Context, context.CancelFunc) { var chCancel, timeoutCancel context.CancelFunc ctx, chCancel = ch.Ctx(ctx) - ctx, timeoutCancel = context.WithTimeout(ctx, queryTimeout) + ctx, timeoutCancel = context.WithTimeout(ctx, timeout) cancel := func() { chCancel() timeoutCancel() @@ -1208,12 +1215,12 @@ func makeQueryCtx(ctx context.Context, ch services.StopChan) (context.Context, c return ctx, cancel } -func (r *rpcClient) makeQueryCtx(ctx context.Context) (context.Context, context.CancelFunc) { - return makeQueryCtx(ctx, r.getChStopInflight()) +func (r *rpcClient) makeQueryCtx(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + return makeQueryCtx(ctx, r.getChStopInflight(), timeout) } func (r *rpcClient) IsSyncing(ctx context.Context) (bool, error) { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx) + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() lggr := r.newRqLggr() diff --git a/core/chains/evm/client/rpc_client_test.go b/core/chains/evm/client/rpc_client_test.go index 9b21aedbea..d6a11e0d01 100644 --- a/core/chains/evm/client/rpc_client_test.go +++ b/core/chains/evm/client/rpc_client_test.go @@ -3,10 +3,12 @@ package client_test import ( "context" "encoding/json" + "errors" "fmt" "math/big" "net/url" "testing" + "time" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/core/types" @@ -56,7 +58,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) // set to default values @@ -106,7 +108,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) ch := make(chan *evmtypes.Head) @@ -129,7 +131,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Block's chain ID matched configured", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) ch := make(chan *evmtypes.Head) @@ -146,7 +148,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) + rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, 
commonclient.QueryTimeout) require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) @@ -156,7 +158,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Subscription error is properly wrapper", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) sub, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) @@ -184,7 +186,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) + rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ -201,7 +203,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { return resp }) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) sub, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ -250,7 +252,7 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { } server := createRPCServer() - rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary, 0) + rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) require.NoError(t, rpc.Dial(ctx)) defer rpc.Close() server.Head = &evmtypes.Head{Number: 128} @@ -298,3 +300,73 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { assert.Equal(t, int64(0), latest.BlockNumber) assert.Equal(t, int64(0), latest.FinalizedBlockNumber) } + +func TestRpcClientLargePayloadTimeout(t *testing.T) { + t.Parallel() + + testCases := []struct { + Name string + Fn func(ctx context.Context, rpc client.RPCClient) error + }{ + { + Name: "SendTransaction", + Fn: func(ctx context.Context, rpc client.RPCClient) error { + return rpc.SendTransaction(ctx, types.NewTx(&types.LegacyTx{})) + }, + }, + { + Name: "EstimateGas", + Fn: func(ctx context.Context, rpc client.RPCClient) error { + _, err := rpc.EstimateGas(ctx, ethereum.CallMsg{}) + return err + }, + }, + { + Name: "CallContract", + Fn: func(ctx context.Context, rpc client.RPCClient) error { + _, err := rpc.CallContract(ctx, ethereum.CallMsg{}, nil) + return err + }, + }, + { + Name: "CallContext", + Fn: func(ctx context.Context, rpc client.RPCClient) error { + err := rpc.CallContext(ctx, nil, "rpc_call", nil) + return err + }, + }, + { + Name: "BatchCallContext", + Fn: func(ctx context.Context, rpc client.RPCClient) error { + err := rpc.BatchCallContext(ctx, nil) + return err + }, + }, + } + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + // use background context to 
ensure that the DeadlineExceeded is caused by the timeout we've set on the request + // level, instead of one that was set on the test level. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chainId := big.NewInt(123456) + rpcURL := testutils.NewWSServer(t, chainId, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + // block until test is done + <-ctx.Done() + return + }).WSURL() + + // use something unreasonably large for RPC timeout to ensure that we use largePayloadRPCTimeout + const rpcTimeout = time.Hour + const largePayloadRPCTimeout = tests.TestInterval + rpc := client.NewRPCClient(logger.Test(t), *rpcURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, largePayloadRPCTimeout, rpcTimeout) + require.NoError(t, rpc.Dial(ctx)) + defer rpc.Close() + err := testCase.Fn(ctx, rpc) + assert.True(t, errors.Is(err, context.DeadlineExceeded), fmt.Sprintf("Expected DeadlineExceeded error, but got: %v", err)) + }) + } +} diff --git a/core/chains/evm/config/chaintype/chaintype.go b/core/chains/evm/config/chaintype/chaintype.go index 9b845969e4..e8abfc5abb 100644 --- a/core/chains/evm/config/chaintype/chaintype.go +++ b/core/chains/evm/config/chaintype/chaintype.go @@ -19,6 +19,7 @@ const ( ChainXLayer ChainType = "xlayer" ChainZkEvm ChainType = "zkevm" ChainZkSync ChainType = "zksync" + ChainHedera ChainType = "hedera" ) // IsL2 returns true if this chain is a Layer 2 chain. Notably: @@ -35,7 +36,7 @@ func (c ChainType) IsL2() bool { func (c ChainType) IsValid() bool { switch c { - case "", ChainArbitrum, ChainCelo, ChainGnosis, ChainKroma, ChainMetis, ChainOptimismBedrock, ChainScroll, ChainWeMix, ChainXLayer, ChainZkEvm, ChainZkSync: + case "", ChainArbitrum, ChainCelo, ChainGnosis, ChainKroma, ChainMetis, ChainOptimismBedrock, ChainScroll, ChainWeMix, ChainXLayer, ChainZkEvm, ChainZkSync, ChainHedera: return true } return false @@ -65,6 +66,8 @@ func ChainTypeFromSlug(slug string) ChainType { return ChainZkEvm case "zksync": return ChainZkSync + case "hedera": + return ChainHedera default: return ChainType(slug) } @@ -128,4 +131,5 @@ var ErrInvalidChainType = fmt.Errorf("must be one of %s or omitted", strings.Joi string(ChainXLayer), string(ChainZkEvm), string(ChainZkSync), + string(ChainHedera), }, ", ")) diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index 121012b5dd..001dd45065 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -1304,7 +1304,7 @@ func TestConfig_Validate(t *testing.T) { - 1: 10 errors: - ChainType: invalid value (Foo): must not be set with this chain id - Nodes: missing: must have at least one node - - ChainType: invalid value (Foo): must be one of arbitrum, celo, gnosis, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted + - ChainType: invalid value (Foo): must be one of arbitrum, celo, gnosis, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync, hedera or omitted - HeadTracker.HistoryDepth: invalid value (30): must be greater than or equal to FinalizedBlockOffset - GasEstimator.BumpThreshold: invalid value (0): cannot be 0 if auto-purge feature is enabled for Foo - Transactions.AutoPurge.Threshold: missing: needs to be set if auto-purge feature is enabled for Foo @@ -1317,7 +1317,7 @@ func TestConfig_Validate(t *testing.T) { - 2: 5 errors: - ChainType: invalid value (Arbitrum): only "optimismBedrock" can be used with this chain id - Nodes: missing: must have at least one node - - ChainType:
invalid value (Arbitrum): must be one of arbitrum, celo, gnosis, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted + - ChainType: invalid value (Arbitrum): must be one of arbitrum, celo, gnosis, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync, hedera or omitted - FinalityDepth: invalid value (0): must be greater than or equal to 1 - MinIncomingConfirmations: invalid value (0): must be greater than or equal to 1 - 3.Nodes: 5 errors: diff --git a/core/services/ocr/contract_tracker.go b/core/services/ocr/contract_tracker.go index 11dfacd1d1..9546c522a2 100644 --- a/core/services/ocr/contract_tracker.go +++ b/core/services/ocr/contract_tracker.go @@ -399,7 +399,7 @@ func (t *OCRContractTracker) LatestBlockHeight(ctx context.Context) (blockheight // care about the block height; we have no way of getting the L1 block // height anyway return 0, nil - case "", chaintype.ChainArbitrum, chaintype.ChainCelo, chaintype.ChainGnosis, chaintype.ChainKroma, chaintype.ChainOptimismBedrock, chaintype.ChainScroll, chaintype.ChainWeMix, chaintype.ChainXLayer, chaintype.ChainZkEvm, chaintype.ChainZkSync: + case "", chaintype.ChainArbitrum, chaintype.ChainCelo, chaintype.ChainGnosis, chaintype.ChainKroma, chaintype.ChainOptimismBedrock, chaintype.ChainScroll, chaintype.ChainWeMix, chaintype.ChainXLayer, chaintype.ChainZkEvm, chaintype.ChainZkSync, chaintype.ChainHedera: // continue } latestBlockHeight := t.getLatestBlockHeight() From 64a234343602b695cb9e4386131daf47ed17b1e0 Mon Sep 17 00:00:00 2001 From: amit-momin <108959691+amit-momin@users.noreply.github.com> Date: Thu, 1 Aug 2024 08:45:14 -0500 Subject: [PATCH 3/5] Add nonce validation after broadcast for Hedera (#13957) * Added post-broadcast nonce validation for Hedera * Added changeset * Updated chaintype docs for Hedera * Fixed lint errors * Added condition to handle on-chain seq less than tx seq and updated comment --- .changeset/friendly-impalas-sniff.md | 5 + common/txmgr/broadcaster.go | 92 +++++++++++++---- common/txmgr/confirmer.go | 12 ++- common/txmgr/types/tx_store.go | 1 + core/chains/evm/config/chaintype/chaintype.go | 10 +- core/chains/evm/txmgr/broadcaster_test.go | 98 ++++++++++++++++++- core/chains/evm/txmgr/builder.go | 6 +- core/chains/evm/types/types.go | 16 +++ core/config/docs/chains-evm.toml | 2 +- core/services/chainlink/config_test.go | 4 +- core/services/ocr/contract_tracker.go | 2 +- docs/CONFIG.md | 2 +- 12 files changed, 213 insertions(+), 37 deletions(-) create mode 100644 .changeset/friendly-impalas-sniff.md diff --git a/.changeset/friendly-impalas-sniff.md b/.changeset/friendly-impalas-sniff.md new file mode 100644 index 0000000000..8a041a338b --- /dev/null +++ b/.changeset/friendly-impalas-sniff.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +Added nonce validation immediately after broadcast for Hedera #internal diff --git a/common/txmgr/broadcaster.go b/common/txmgr/broadcaster.go index 25b028447f..be3d3ca2f6 100644 --- a/common/txmgr/broadcaster.go +++ b/common/txmgr/broadcaster.go @@ -34,6 +34,13 @@ const ( // TransmitCheckTimeout controls the maximum amount of time that will be // spent on the transmit check. 
TransmitCheckTimeout = 2 * time.Second + + // maxHederaBroadcastRetries is the number of times a transaction broadcast is retried when the sequence fails to increment on Hedera + maxHederaBroadcastRetries = 3 + + // hederaChainType is the string representation of the Hedera chain type + // Temporary solution until the Broadcaster is moved to the EVM code base + hederaChainType = "hedera" ) var ( @@ -114,6 +121,7 @@ type Broadcaster[ sequenceTracker txmgrtypes.SequenceTracker[ADDR, SEQ] resumeCallback ResumeCallback chainID CHAIN_ID + chainType string config txmgrtypes.BroadcasterChainConfig feeConfig txmgrtypes.BroadcasterFeeConfig txConfig txmgrtypes.BroadcasterTransactionsConfig @@ -163,6 +171,7 @@ func NewBroadcaster[ lggr logger.Logger, checkerFactory TransmitCheckerFactory[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], autoSyncSequence bool, + chainType string, ) *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] { lggr = logger.Named(lggr, "Broadcaster") b := &Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]{ @@ -171,6 +180,7 @@ func NewBroadcaster[ client: client, TxAttemptBuilder: txAttemptBuilder, chainID: client.ConfiguredChainID(), + chainType: chainType, config: config, feeConfig: feeConfig, txConfig: txConfig, @@ -411,7 +421,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand return fmt.Errorf("handleAnyInProgressTx failed: %w", err), true } if etx != nil { - if err, retryable := eb.handleInProgressTx(ctx, *etx, etx.TxAttempts[0], etx.CreatedAt); err != nil { + if err, retryable := eb.handleInProgressTx(ctx, *etx, etx.TxAttempts[0], etx.CreatedAt, 0); err != nil { return fmt.Errorf("handleAnyInProgressTx failed: %w", err), retryable } } @@ -464,12 +474,12 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand return fmt.Errorf("processUnstartedTxs failed on UpdateTxUnstartedToInProgress: %w", err), true } - return eb.handleInProgressTx(ctx, *etx, attempt, time.Now()) + return eb.handleInProgressTx(ctx, *etx, attempt, time.Now(), 0) } // There can be at most one in_progress transaction per address. // Here we complete the job that we didn't finish last time.
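For illustration of what these constants and the new chainType field enable: the post-broadcast check (shown in full as validateOnChainSequence further below) boils down to comparing the on-chain transaction count against the broadcasted sequence. A minimal, self-contained Go sketch of just that decision logic follows; classifySequenceCheck and the local sendTxReturnCode constants are hypothetical stand-ins for the real client.SendTxReturnCode values, not part of the patch.

```go
package main

import "fmt"

// Hypothetical stand-ins for the client.SendTxReturnCode values used by the patch.
type sendTxReturnCode int

const (
	successful sendTxReturnCode = iota
	underpriced
	fatal
)

const maxHederaBroadcastRetries = 3

// classifySequenceCheck condenses validateOnChainSequence's branching: compare
// the next expected on-chain sequence against the broadcasted transaction's
// sequence and decide whether to accept, retry with a bumped fee, or give up.
func classifySequenceCheck(nextSeqOnChain, txSeq int64, retryCount int) (sendTxReturnCode, error) {
	switch {
	case nextSeqOnChain == txSeq && retryCount < maxHederaBroadcastRetries:
		// sequence did not advance (commonly an insufficient fee): bump and retry
		return underpriced, nil
	case nextSeqOnChain == txSeq:
		// retries exhausted: mark the transaction fatally errored
		return fatal, fmt.Errorf("failed to broadcast transaction on hedera after %d retries", retryCount)
	case nextSeqOnChain < txSeq:
		// a gap means an earlier sequence was never mined
		return fatal, fmt.Errorf("sequence gap: on-chain %d < broadcasted %d", nextSeqOnChain, txSeq)
	default:
		return successful, nil
	}
}

func main() {
	code, err := classifySequenceCheck(5, 5, 1) // nonce did not advance, first retry
	fmt.Println(code == underpriced, err)       // true <nil>
}
```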
-func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) handleInProgressTx(ctx context.Context, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time) (error, bool) { +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) handleInProgressTx(ctx context.Context, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time, retryCount int) (error, bool) { if etx.State != TxInProgress { return fmt.Errorf("invariant violation: expected transaction %v to be in_progress, it was %s", etx.ID, etx.State), false } @@ -478,6 +488,11 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand lgr.Infow("Sending transaction", "txAttemptID", attempt.ID, "txHash", attempt.Hash, "meta", etx.Meta, "feeLimit", attempt.ChainSpecificFeeLimit, "callerProvidedFeeLimit", etx.FeeLimit, "attempt", attempt, "etx", etx) errType, err := eb.client.SendTransactionReturnCode(ctx, etx, attempt, lgr) + // The validation below is only applicable to Hedera because it has instant finality and a unique sequence behavior + if eb.chainType == hederaChainType { + errType, err = eb.validateOnChainSequence(ctx, lgr, errType, err, etx, retryCount) + } + if errType == client.Fatal || errType == client.TerminallyStuck { eb.SvcErrBuffer.Append(err) etx.Error = null.StringFrom(err.Error()) @@ -538,7 +553,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand eb.sequenceTracker.GenerateNextSequence(etx.FromAddress, *etx.Sequence) return err, true case client.Underpriced: - return eb.tryAgainBumpingGas(ctx, lgr, err, etx, attempt, initialBroadcastAt) + return eb.tryAgainBumpingGas(ctx, lgr, err, etx, attempt, initialBroadcastAt, retryCount+1) case client.InsufficientFunds: // NOTE: This bails out of the entire cycle and essentially "blocks" on // any transaction that gets insufficient_funds. 
This is OK if a @@ -600,6 +615,44 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand } } +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) validateOnChainSequence(ctx context.Context, lgr logger.SugaredLogger, errType client.SendTxReturnCode, err error, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], retryCount int) (client.SendTxReturnCode, error) { + // Only check if sequence was incremented if broadcast was successful, otherwise return the existing err type + if errType != client.Successful { + return errType, err + } + // Transaction sequence cannot be nil here since a sequence is required to broadcast + txSeq := *etx.Sequence + // Retrieve the latest mined sequence from on-chain + nextSeqOnChain, err := eb.client.SequenceAt(ctx, etx.FromAddress, nil) + if err != nil { + return errType, err + } + + // Check that the transaction count has incremented on-chain to include the broadcasted transaction + // Insufficient transaction fee is a common scenario in which the sequence is not incremented by the chain even though we got a successful response + // If the sequence failed to increment and hasn't reached the max retries, return the Underpriced error to try again with a bumped attempt + if nextSeqOnChain.Int64() == txSeq.Int64() && retryCount < maxHederaBroadcastRetries { + return client.Underpriced, nil + } + + // If the transaction reaches the retry limit and fails to get included, mark it as fatally errored + // Some unknown error other than insufficient tx fee could be the cause + if nextSeqOnChain.Int64() == txSeq.Int64() && retryCount >= maxHederaBroadcastRetries { + err := fmt.Errorf("failed to broadcast transaction on %s after %d retries", hederaChainType, retryCount) + lgr.Error(err.Error()) + return client.Fatal, err + } + + // Belts and braces approach to detect and handle sequence gaps if the broadcast is considered successful + if nextSeqOnChain.Int64() < txSeq.Int64() { + err := fmt.Errorf("next expected sequence on-chain (%s) is less than the broadcasted transaction's sequence (%s)", nextSeqOnChain.String(), txSeq.String()) + lgr.Criticalw("Sequence gap has been detected and needs to be filled", "error", err) + return client.Fatal, err + } + + return client.Successful, nil +} + // Finds next transaction in the queue, assigns a sequence, and moves it to "in_progress" state ready for broadcast. // Returns nil if no transactions are in queue func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) nextUnstartedTransactionWithSequence(fromAddress ADDR) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { @@ -622,23 +675,26 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) next return etx, nil } -func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) tryAgainBumpingGas(ctx context.Context, lgr logger.Logger, txError error, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time) (err error, retryable bool) { - logger.With(lgr, - "sendError", txError, - "attemptFee", attempt.TxFee, - "maxGasPriceConfig", eb.feeConfig.MaxFeePrice(), - ).Errorf("attempt fee %v was rejected by the node for being too low. "+ - "Node returned: '%s'. "+ - "Will bump and retry. ACTION REQUIRED: This is a configuration error. "+ - "Consider increasing FeeEstimator.PriceDefault (current value: %s)", - attempt.TxFee, txError.Error(), eb.feeConfig.FeePriceDefault()) +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) tryAgainBumpingGas(ctx context.Context, lgr logger.Logger, txError error, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time, retry int) (err error, retryable bool) { + // This log error is not applicable to Hedera since the action required would not be needed for its gas estimator + if eb.chainType != hederaChainType { + logger.With(lgr, + "sendError", txError, + "attemptFee", attempt.TxFee, + "maxGasPriceConfig", eb.feeConfig.MaxFeePrice(), + ).Errorf("attempt fee %v was rejected by the node for being too low. "+ + "Node returned: '%s'. "+ + "Will bump and retry. ACTION REQUIRED: This is a configuration error. "+ + "Consider increasing FeeEstimator.PriceDefault (current value: %s)", + attempt.TxFee, txError.Error(), eb.feeConfig.FeePriceDefault()) + } replacementAttempt, bumpedFee, bumpedFeeLimit, retryable, err := eb.NewBumpTxAttempt(ctx, etx, attempt, nil, lgr) if err != nil { return fmt.Errorf("tryAgainBumpFee failed: %w", err), retryable } - return eb.saveTryAgainAttempt(ctx, lgr, etx, attempt, replacementAttempt, initialBroadcastAt, bumpedFee, bumpedFeeLimit) + return eb.saveTryAgainAttempt(ctx, lgr, etx, attempt, replacementAttempt, initialBroadcastAt, bumpedFee, bumpedFeeLimit, retry) } func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) tryAgainWithNewEstimation(ctx context.Context, lgr logger.Logger, txError error, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time) (err error, retryable bool) { @@ -655,15 +711,15 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) tryA lgr.Warnw("L2 rejected transaction due to incorrect fee, re-estimated and will try again", "etxID", etx.ID, "err", err, "newGasPrice", fee, "newGasLimit", feeLimit) - return eb.saveTryAgainAttempt(ctx, lgr, etx, attempt, replacementAttempt, initialBroadcastAt, fee, feeLimit) + return eb.saveTryAgainAttempt(ctx, lgr, etx, attempt, replacementAttempt, initialBroadcastAt, fee, feeLimit, 0) } -func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) saveTryAgainAttempt(ctx context.Context, lgr logger.Logger, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], replacementAttempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time, newFee FEE, newFeeLimit uint64) (err error, retyrable bool) { +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) saveTryAgainAttempt(ctx context.Context, lgr logger.Logger, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], replacementAttempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time, newFee FEE, newFeeLimit uint64, retry int) (err error, retryable bool) { if err = eb.txStore.SaveReplacementInProgressAttempt(ctx, attempt, &replacementAttempt); err != nil { return fmt.Errorf("tryAgainWithNewFee failed: %w", err), true } lgr.Debugw("Bumped fee on initial send",
"oldFee", attempt.TxFee.String(), "newFee", newFee.String(), "newFeeLimit", newFeeLimit) - return eb.handleInProgressTx(ctx, etx, replacementAttempt, initialBroadcastAt) + return eb.handleInProgressTx(ctx, etx, replacementAttempt, initialBroadcastAt, retry) } func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) saveFatallyErroredTransaction(lgr logger.Logger, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { diff --git a/common/txmgr/confirmer.go b/common/txmgr/confirmer.go index 0806eeda26..0099f4a2ee 100644 --- a/common/txmgr/confirmer.go +++ b/common/txmgr/confirmer.go @@ -664,11 +664,15 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) bat } if receipt.GetStatus() == 0 { - rpcError, errExtract := ec.client.CallContract(ctx, attempt, receipt.GetBlockNumber()) - if errExtract == nil { - l.Warnw("transaction reverted on-chain", "hash", receipt.GetTxHash(), "rpcError", rpcError.String()) + if receipt.GetRevertReason() != nil { + l.Warnw("transaction reverted on-chain", "hash", receipt.GetTxHash(), "revertReason", *receipt.GetRevertReason()) } else { - l.Warnw("transaction reverted on-chain unable to extract revert reason", "hash", receipt.GetTxHash(), "err", err) + rpcError, errExtract := ec.client.CallContract(ctx, attempt, receipt.GetBlockNumber()) + if errExtract == nil { + l.Warnw("transaction reverted on-chain", "hash", receipt.GetTxHash(), "rpcError", rpcError.String()) + } else { + l.Warnw("transaction reverted on-chain unable to extract revert reason", "hash", receipt.GetTxHash(), "err", err) + } } // This might increment more than once e.g. in case of re-orgs going back and forth we might re-fetch the same receipt promRevertedTxCount.WithLabelValues(ec.chainID.String()).Add(1) diff --git a/common/txmgr/types/tx_store.go b/common/txmgr/types/tx_store.go index 25040ea3bd..875339cfba 100644 --- a/common/txmgr/types/tx_store.go +++ b/common/txmgr/types/tx_store.go @@ -132,4 +132,5 @@ type ChainReceipt[TX_HASH, BLOCK_HASH types.Hashable] interface { GetFeeUsed() uint64 GetTransactionIndex() uint GetBlockHash() BLOCK_HASH + GetRevertReason() *string } diff --git a/core/chains/evm/config/chaintype/chaintype.go b/core/chains/evm/config/chaintype/chaintype.go index e8abfc5abb..623a80f54f 100644 --- a/core/chains/evm/config/chaintype/chaintype.go +++ b/core/chains/evm/config/chaintype/chaintype.go @@ -11,6 +11,7 @@ const ( ChainArbitrum ChainType = "arbitrum" ChainCelo ChainType = "celo" ChainGnosis ChainType = "gnosis" + ChainHedera ChainType = "hedera" ChainKroma ChainType = "kroma" ChainMetis ChainType = "metis" ChainOptimismBedrock ChainType = "optimismBedrock" @@ -19,7 +20,6 @@ const ( ChainXLayer ChainType = "xlayer" ChainZkEvm ChainType = "zkevm" ChainZkSync ChainType = "zksync" - ChainHedera ChainType = "hedera" ) // IsL2 returns true if this chain is a Layer 2 chain. 
Notably: @@ -36,7 +36,7 @@ func (c ChainType) IsL2() bool { func (c ChainType) IsValid() bool { switch c { - case "", ChainArbitrum, ChainCelo, ChainGnosis, ChainKroma, ChainMetis, ChainOptimismBedrock, ChainScroll, ChainWeMix, ChainXLayer, ChainZkEvm, ChainZkSync, ChainHedera: + case "", ChainArbitrum, ChainCelo, ChainGnosis, ChainHedera, ChainKroma, ChainMetis, ChainOptimismBedrock, ChainScroll, ChainWeMix, ChainXLayer, ChainZkEvm, ChainZkSync: return true } return false @@ -50,6 +50,8 @@ func ChainTypeFromSlug(slug string) ChainType { return ChainCelo case "gnosis": return ChainGnosis + case "hedera": + return ChainHedera case "kroma": return ChainKroma case "metis": @@ -66,8 +68,6 @@ func ChainTypeFromSlug(slug string) ChainType { return ChainZkEvm case "zksync": return ChainZkSync - case "hedera": - return ChainHedera default: return ChainType(slug) } @@ -123,6 +123,7 @@ var ErrInvalidChainType = fmt.Errorf("must be one of %s or omitted", strings.Joi string(ChainArbitrum), string(ChainCelo), string(ChainGnosis), + string(ChainHedera), string(ChainKroma), string(ChainMetis), string(ChainOptimismBedrock), @@ -131,5 +132,4 @@ var ErrInvalidChainType = fmt.Errorf("must be one of %s or omitted", strings.Joi string(ChainXLayer), string(ChainZkEvm), string(ChainZkSync), - string(ChainHedera), }, ", ")) diff --git a/core/chains/evm/txmgr/broadcaster_test.go b/core/chains/evm/txmgr/broadcaster_test.go index 084fe4f2f9..cbbe522ba1 100644 --- a/core/chains/evm/txmgr/broadcaster_test.go +++ b/core/chains/evm/txmgr/broadcaster_test.go @@ -34,6 +34,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/chaintype" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" gasmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/keystore" @@ -70,7 +71,7 @@ func NewTestEthBroadcaster( return gas.NewFixedPriceEstimator(config.EVM().GasEstimator(), nil, ge.BlockHistory(), lggr, nil) }, ge.EIP1559DynamicFees(), ge) txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, keyStore, estimator) - ethBroadcaster := txmgrcommon.NewBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient, nil), txmgr.NewEvmTxmConfig(config.EVM()), txmgr.NewEvmTxmFeeConfig(config.EVM().GasEstimator()), config.EVM().Transactions(), gconfig.Database().Listener(), keyStore, txBuilder, nonceTracker, lggr, checkerFactory, nonceAutoSync) + ethBroadcaster := txmgrcommon.NewBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient, nil), txmgr.NewEvmTxmConfig(config.EVM()), txmgr.NewEvmTxmFeeConfig(config.EVM().GasEstimator()), config.EVM().Transactions(), gconfig.Database().Listener(), keyStore, txBuilder, nonceTracker, lggr, checkerFactory, nonceAutoSync, "") // Mark instance as test ethBroadcaster.XXXTestDisableUnstartedTxAutoProcessing() @@ -101,6 +102,7 @@ func TestEthBroadcaster_Lifecycle(t *testing.T) { logger.Test(t), &testCheckerFactory{}, false, + "", ) // Can't close an unstarted instance @@ -159,6 +161,7 @@ func TestEthBroadcaster_LoadNextSequenceMapFailure_StartupSuccess(t *testing.T) logger.Test(t), &testCheckerFactory{}, false, + "", ) // Instance starts without error even if loading next sequence map fails @@ -657,6 +660,7 @@ func 
TestEthBroadcaster_ProcessUnstartedEthTxs_OptimisticLockingOnEthTx(t *testi logger.Test(t), &testCheckerFactory{}, false, + "", ) eb.XXXTestDisableUnstartedTxAutoProcessing() @@ -1176,7 +1180,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) { }, evmcfg.EVM().GasEstimator().EIP1559DynamicFees(), evmcfg.EVM().GasEstimator()) txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), evmcfg.EVM().GasEstimator(), ethKeyStore, estimator) localNextNonce = getLocalNextNonce(t, nonceTracker, fromAddress) - eb2 := txmgr.NewEvmBroadcaster(txStore, txmClient, txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), cfg.Database().Listener(), ethKeyStore, txBuilder, lggr, &testCheckerFactory{}, false) + eb2 := txmgr.NewEvmBroadcaster(txStore, txmClient, txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), cfg.Database().Listener(), ethKeyStore, txBuilder, lggr, &testCheckerFactory{}, false, "") retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress) assert.NoError(t, err) assert.False(t, retryable) @@ -1772,7 +1776,7 @@ func TestEthBroadcaster_SyncNonce(t *testing.T) { kst.On("EnabledAddressesForChain", mock.Anything, testutils.FixtureChainID).Return(addresses, nil).Once() ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() txmClient := txmgr.NewEvmTxmClient(ethClient, nil) - eb := txmgr.NewEvmBroadcaster(txStore, txmClient, evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, txBuilder, lggr, checkerFactory, false) + eb := txmgr.NewEvmBroadcaster(txStore, txmClient, evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, txBuilder, lggr, checkerFactory, false, "") err := eb.Start(ctx) assert.NoError(t, err) @@ -1821,6 +1825,94 @@ func TestEthBroadcaster_NonceTracker_InProgressTx(t *testing.T) { }) } +func TestEthBroadcaster_HederaBroadcastValidation(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db) + ethKeyStore := cltest.NewKeyStore(t, db).Eth() + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + ethClient := testutils.NewEthClientMockWithDefaultChain(t) + lggr, observed := logger.TestObserved(t, zapcore.DebugLevel) + ge := evmcfg.EVM().GasEstimator() + estimator := gas.NewEvmFeeEstimator(lggr, func(lggr logger.Logger) gas.EvmEstimator { + return gas.NewFixedPriceEstimator(evmcfg.EVM().GasEstimator(), nil, ge.BlockHistory(), lggr, nil) + }, ge.EIP1559DynamicFees(), ge) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, ethKeyStore, estimator) + checkerFactory := &txmgr.CheckerFactory{Client: ethClient} + ctx := tests.Context(t) + + t.Run("transaction successfully broadcasted and increments on-chain nonce", func(t *testing.T) { + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + localNonce := uint64(0) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNonce + }), fromAddress).Return(commonclient.Successful, nil).Once() + ethClient.On("SequenceAt", mock.Anything, fromAddress, mock.Anything).Return(evmtypes.Nonce(1), nil).Once() + + mustInsertInProgressEthTxWithAttempt(t, txStore, evmtypes.Nonce(localNonce), fromAddress) + nonceTracker := txmgr.NewNonceTracker(lggr, txStore, 
txmgr.NewEvmTxmClient(ethClient, nil)) + eb := txmgrcommon.NewBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient, nil), txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), cfg.Database().Listener(), ethKeyStore, txBuilder, nonceTracker, lggr, checkerFactory, false, string(chaintype.ChainHedera)) + // Mark instance as test + eb.XXXTestDisableUnstartedTxAutoProcessing() + servicetest.Run(t, eb) + + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.NoError(t, err) + require.False(t, retryable) + }) + + t.Run("transaction successfully broadcasted, failed to increment on-chain nonce, succeeded on bumped retry attempt", func(t *testing.T) { + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + localNonce := uint64(0) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNonce + }), fromAddress).Return(commonclient.Successful, nil).Twice() + ethClient.On("SequenceAt", mock.Anything, fromAddress, mock.Anything).Return(evmtypes.Nonce(0), nil).Once() + ethClient.On("SequenceAt", mock.Anything, fromAddress, mock.Anything).Return(evmtypes.Nonce(1), nil).Once() + + mustInsertInProgressEthTxWithAttempt(t, txStore, evmtypes.Nonce(localNonce), fromAddress) + nonceTracker := txmgr.NewNonceTracker(lggr, txStore, txmgr.NewEvmTxmClient(ethClient, nil)) + eb := txmgrcommon.NewBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient, nil), txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), cfg.Database().Listener(), ethKeyStore, txBuilder, nonceTracker, lggr, checkerFactory, false, string(chaintype.ChainHedera)) + // Mark instance as test + eb.XXXTestDisableUnstartedTxAutoProcessing() + servicetest.Run(t, eb) + + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + tests.AssertLogEventually(t, observed, "Bumped fee on initial send") + require.NoError(t, err) + require.False(t, retryable) + }) + + t.Run("transaction successfully broadcasted, failed to increment on-chain nonce on every retry", func(t *testing.T) { + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + localNonce := uint64(0) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNonce + }), fromAddress).Return(commonclient.Successful, nil).Times(4) + ethClient.On("SequenceAt", mock.Anything, fromAddress, mock.Anything).Return(evmtypes.Nonce(0), nil).Times(4) + + etx := mustInsertInProgressEthTxWithAttempt(t, txStore, evmtypes.Nonce(localNonce), fromAddress) + nonceTracker := txmgr.NewNonceTracker(lggr, txStore, txmgr.NewEvmTxmClient(ethClient, nil)) + eb := txmgrcommon.NewBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient, nil), txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), cfg.Database().Listener(), ethKeyStore, txBuilder, nonceTracker, lggr, checkerFactory, false, string(chaintype.ChainHedera)) + // Mark instance as test + eb.XXXTestDisableUnstartedTxAutoProcessing() + servicetest.Run(t, eb) + + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + tests.AssertLogEventually(t, observed, "Bumped fee on initial send") + require.NoError(t, err) + require.False(t, retryable) + tests.AssertLogEventually(t, observed, "failed to broadcast transaction on hedera after 3 retries") + + etx, err = 
txStore.FindTxWithAttempts(ctx, etx.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxFatalError, etx.State) + require.Error(t, etx.GetError(), "failed to broadcast transaction on hedera after 3 retries") + }) +} + type testCheckerFactory struct { err error } diff --git a/core/chains/evm/txmgr/builder.go b/core/chains/evm/txmgr/builder.go index dcf15a4fa2..8234d55b96 100644 --- a/core/chains/evm/txmgr/builder.go +++ b/core/chains/evm/txmgr/builder.go @@ -10,6 +10,7 @@ import ( txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/chaintype" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/keystore" @@ -49,7 +50,7 @@ func NewTxm( feeCfg := NewEvmTxmFeeConfig(fCfg) // wrap Evm specific config txmClient := NewEvmTxmClient(client, clientErrors) // wrap Evm specific client chainID := txmClient.ConfiguredChainID() - evmBroadcaster := NewEvmBroadcaster(txStore, txmClient, txmCfg, feeCfg, txConfig, listenerConfig, keyStore, txAttemptBuilder, lggr, checker, chainConfig.NonceAutoSync()) + evmBroadcaster := NewEvmBroadcaster(txStore, txmClient, txmCfg, feeCfg, txConfig, listenerConfig, keyStore, txAttemptBuilder, lggr, checker, chainConfig.NonceAutoSync(), chainConfig.ChainType()) evmTracker := NewEvmTracker(txStore, keyStore, chainID, lggr) stuckTxDetector := NewStuckTxDetector(lggr, client.ConfiguredChainID(), chainConfig.ChainType(), fCfg.PriceMax(), txConfig.AutoPurge(), estimator, txStore, client) evmConfirmer := NewEvmConfirmer(txStore, txmClient, txmCfg, feeCfg, txConfig, dbConfig, keyStore, txAttemptBuilder, lggr, stuckTxDetector) @@ -138,7 +139,8 @@ func NewEvmBroadcaster( logger logger.Logger, checkerFactory TransmitCheckerFactory, autoSyncNonce bool, + chainType chaintype.ChainType, ) *Broadcaster { nonceTracker := NewNonceTracker(logger, txStore, client) - return txmgr.NewBroadcaster(txStore, client, chainConfig, feeConfig, txConfig, listenerConfig, keystore, txAttemptBuilder, nonceTracker, logger, checkerFactory, autoSyncNonce) + return txmgr.NewBroadcaster(txStore, client, chainConfig, feeConfig, txConfig, listenerConfig, keystore, txAttemptBuilder, nonceTracker, logger, checkerFactory, autoSyncNonce, string(chainType)) } diff --git a/core/chains/evm/types/types.go b/core/chains/evm/types/types.go index 57a53bce67..c834ffeb86 100644 --- a/core/chains/evm/types/types.go +++ b/core/chains/evm/types/types.go @@ -63,6 +63,7 @@ type Receipt struct { BlockHash common.Hash `json:"blockHash,omitempty"` BlockNumber *big.Int `json:"blockNumber,omitempty"` TransactionIndex uint `json:"transactionIndex"` + RevertReason []byte `json:"revertReason,omitempty"` // Only provided by Hedera } // FromGethReceipt converts a gethTypes.Receipt to a Receipt @@ -86,6 +87,7 @@ func FromGethReceipt(gr *gethTypes.Receipt) *Receipt { gr.BlockHash, gr.BlockNumber, gr.TransactionIndex, + nil, } } @@ -119,6 +121,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { BlockHash common.Hash `json:"blockHash,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` TransactionIndex hexutil.Uint `json:"transactionIndex"` + RevertReason hexutil.Bytes `json:"revertReason,omitempty"` // Only provided by Hedera } var enc Receipt enc.PostState = 
r.PostState @@ -132,6 +135,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { enc.BlockHash = r.BlockHash enc.BlockNumber = (*hexutil.Big)(r.BlockNumber) enc.TransactionIndex = hexutil.Uint(r.TransactionIndex) + enc.RevertReason = r.RevertReason return json.Marshal(&enc) } @@ -149,6 +153,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { BlockHash *common.Hash `json:"blockHash,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` TransactionIndex *hexutil.Uint `json:"transactionIndex"` + RevertReason *hexutil.Bytes `json:"revertReason,omitempty"` // Only provided by Hedera } var dec Receipt if err := json.Unmarshal(input, &dec); err != nil { @@ -185,6 +190,9 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { if dec.TransactionIndex != nil { r.TransactionIndex = uint(*dec.TransactionIndex) } + if dec.RevertReason != nil { + r.RevertReason = *dec.RevertReason + } return nil } @@ -225,6 +233,14 @@ func (r *Receipt) GetBlockHash() common.Hash { return r.BlockHash } +func (r *Receipt) GetRevertReason() *string { + if len(r.RevertReason) == 0 { + return nil + } + revertReason := string(r.RevertReason) + return &revertReason +} + type Confirmations int const ( diff --git a/core/config/docs/chains-evm.toml b/core/config/docs/chains-evm.toml index 9efe0585b2..73ea0ebb35 100644 --- a/core/config/docs/chains-evm.toml +++ b/core/config/docs/chains-evm.toml @@ -14,7 +14,7 @@ BlockBackfillDepth = 10 # Default # BlockBackfillSkip enables skipping of very long backfills. BlockBackfillSkip = false # Default # ChainType is automatically detected from chain ID. Set this to force a certain chain type regardless of chain ID. -# Available types: `arbitrum`, `celo`, `gnosis`, `kroma`, `metis`, `optimismBedrock`, `scroll`, `wemix`, `xlayer`, `zksync` +# Available types: `arbitrum`, `celo`, `gnosis`, `hedera`, `kroma`, `metis`, `optimismBedrock`, `scroll`, `wemix`, `xlayer`, `zksync` ChainType = 'arbitrum' # Example # FinalityDepth is the number of blocks after which an ethereum transaction is considered "final". Note that the default is automatically set based on chain ID, so it should not be necessary to change this under normal operation. 
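As a rough illustration of the Receipt changes above: the new revertReason field rides through JSON as 0x-prefixed hex and surfaces via GetRevertReason as a nil-able string. A hedged, standalone sketch follows; miniReceipt and the sample payload are hypothetical, while hexutil.Bytes is the same go-ethereum type the patch uses for hex (de)serialization.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// miniReceipt is a hypothetical, trimmed-down stand-in for evmtypes.Receipt,
// keeping only the field relevant here.
type miniReceipt struct {
	RevertReason hexutil.Bytes `json:"revertReason,omitempty"`
}

// revertReasonString mirrors GetRevertReason's contract: nil when the chain
// supplied no reason, a string pointer otherwise.
func revertReasonString(r miniReceipt) *string {
	if len(r.RevertReason) == 0 {
		return nil
	}
	s := string(r.RevertReason)
	return &s
}

func main() {
	// 0x6f7574206f6620676173 is "out of gas" in hex; a hypothetical payload.
	raw := `{"revertReason":"0x6f7574206f6620676173"}`
	var rec miniReceipt
	if err := json.Unmarshal([]byte(raw), &rec); err != nil {
		panic(err)
	}
	if reason := revertReasonString(rec); reason != nil {
		fmt.Println("reverted:", *reason) // reverted: out of gas
	}
}
```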
# BlocksConsideredFinal determines how deeply we look back to ensure that transactions are confirmed onto the longest chain diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index 001dd45065..7e91d80cc8 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -1304,7 +1304,7 @@ func TestConfig_Validate(t *testing.T) { - 1: 10 errors: - ChainType: invalid value (Foo): must not be set with this chain id - Nodes: missing: must have at least one node - - ChainType: invalid value (Foo): must be one of arbitrum, celo, gnosis, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync, hedera or omitted + - ChainType: invalid value (Foo): must be one of arbitrum, celo, gnosis, hedera, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted - HeadTracker.HistoryDepth: invalid value (30): must be greater than or equal to FinalizedBlockOffset - GasEstimator.BumpThreshold: invalid value (0): cannot be 0 if auto-purge feature is enabled for Foo - Transactions.AutoPurge.Threshold: missing: needs to be set if auto-purge feature is enabled for Foo @@ -1317,7 +1317,7 @@ func TestConfig_Validate(t *testing.T) { - 2: 5 errors: - ChainType: invalid value (Arbitrum): only "optimismBedrock" can be used with this chain id - Nodes: missing: must have at least one node - - ChainType: invalid value (Arbitrum): must be one of arbitrum, celo, gnosis, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync, hedera or omitted + - ChainType: invalid value (Arbitrum): must be one of arbitrum, celo, gnosis, hedera, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted - FinalityDepth: invalid value (0): must be greater than or equal to 1 - MinIncomingConfirmations: invalid value (0): must be greater than or equal to 1 - 3.Nodes: 5 errors: diff --git a/core/services/ocr/contract_tracker.go b/core/services/ocr/contract_tracker.go index 9546c522a2..6651e4b65d 100644 --- a/core/services/ocr/contract_tracker.go +++ b/core/services/ocr/contract_tracker.go @@ -399,7 +399,7 @@ func (t *OCRContractTracker) LatestBlockHeight(ctx context.Context) (blockheight // care about the block height; we have no way of getting the L1 block // height anyway return 0, nil - case "", chaintype.ChainArbitrum, chaintype.ChainCelo, chaintype.ChainGnosis, chaintype.ChainKroma, chaintype.ChainOptimismBedrock, chaintype.ChainScroll, chaintype.ChainWeMix, chaintype.ChainXLayer, chaintype.ChainZkEvm, chaintype.ChainZkSync, chaintype.ChainHedera: + case "", chaintype.ChainArbitrum, chaintype.ChainCelo, chaintype.ChainGnosis, chaintype.ChainHedera, chaintype.ChainKroma, chaintype.ChainOptimismBedrock, chaintype.ChainScroll, chaintype.ChainWeMix, chaintype.ChainXLayer, chaintype.ChainZkEvm, chaintype.ChainZkSync: // continue } latestBlockHeight := t.getLatestBlockHeight() diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 258adee5b1..b6d6566d44 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -8290,7 +8290,7 @@ BlockBackfillSkip enables skipping of very long backfills. ChainType = 'arbitrum' # Example ``` ChainType is automatically detected from chain ID. Set this to force a certain chain type regardless of chain ID. 
-Available types: `arbitrum`, `celo`, `gnosis`, `kroma`, `metis`, `optimismBedrock`, `scroll`, `wemix`, `xlayer`, `zksync` +Available types: `arbitrum`, `celo`, `gnosis`, `hedera`, `kroma`, `metis`, `optimismBedrock`, `scroll`, `wemix`, `xlayer`, `zksync` ### FinalityDepth ```toml From 611a928b94630d3ca638a366b27eaa58d557c138 Mon Sep 17 00:00:00 2001 From: Dmytro Haidashenko <34754799+dhaidashenko@users.noreply.github.com> Date: Fri, 9 Aug 2024 17:05:10 +0200 Subject: [PATCH 4/5] Custom Astar finality (#14021) * Custom Astar finality * fix merge artifact * fix lint issue * simplify isRequestingFinalizedBlock * avoid iterating through the whole batch again * fix errors wrapping --- .changeset/warm-houses-build.md | 5 + core/chains/evm/client/chain_client.go | 4 + core/chains/evm/client/chain_client_test.go | 4 +- core/chains/evm/client/evm_client.go | 4 +- core/chains/evm/client/helpers_test.go | 4 +- core/chains/evm/client/rpc_client.go | 170 +++++++++++++++--- core/chains/evm/client/rpc_client_test.go | 115 +++++++++++- core/chains/evm/config/chaintype/chaintype.go | 6 +- core/chains/evm/testutils/client.go | 8 +- core/services/chainlink/config_test.go | 4 +- core/services/ocr/contract_tracker.go | 2 +- 11 files changed, 277 insertions(+), 49 deletions(-) create mode 100644 .changeset/warm-houses-build.md diff --git a/.changeset/warm-houses-build.md b/.changeset/warm-houses-build.md new file mode 100644 index 0000000000..6ce6215a88 --- /dev/null +++ b/.changeset/warm-houses-build.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +Added custom finality calculation for Astar #internal diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index c39214471c..c27d294ebf 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -160,9 +160,13 @@ func (c *chainClient) BalanceAt(ctx context.Context, account common.Address, blo return c.multiNode.BalanceAt(ctx, account, blockNumber) } +// BatchCallContext - sends all given requests as a single batch. // Request specific errors for batch calls are returned to the individual BatchElem. // Ensure the same BatchElem slice provided by the caller is passed through the call stack // to ensure the caller has access to the errors. +// Note: some chains (e.g. Astar) have custom finality requests, so even when FinalityTagEnabled=true, finality tag +// might not be properly handled and returned results might have weaker finality guarantees. It's highly recommended +// to use HeadTracker to identify the latest finalized block.
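To make the contract described in the comment above concrete, here is a minimal caller-side sketch, assuming only go-ethereum's rpc package: the error returned by BatchCallContext signals transport-level failure, while per-request failures land on each BatchElem. checkBatch, the endpoint URL, and the chosen methods are illustrative, not part of the patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

// checkBatch shows the two error surfaces of a batch call: the returned error
// covers transport-level failure (no element was answered), while each
// BatchElem carries its own request-specific error.
func checkBatch(ctx context.Context, c *rpc.Client) error {
	var latest, finalized map[string]interface{}
	reqs := []rpc.BatchElem{
		{Method: "eth_getBlockByNumber", Args: []interface{}{"latest", false}, Result: &latest},
		{Method: "eth_getBlockByNumber", Args: []interface{}{"finalized", false}, Result: &finalized},
	}
	if err := c.BatchCallContext(ctx, reqs); err != nil {
		return fmt.Errorf("whole batch failed: %w", err)
	}
	for _, el := range reqs {
		if el.Error != nil {
			// only this element failed; the others may still hold valid results
			fmt.Printf("element %s%v failed: %v\n", el.Method, el.Args, el.Error)
		}
	}
	return nil
}

func main() {
	// hypothetical endpoint; any EVM JSON-RPC node would do
	c, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	if err := checkBatch(context.Background(), c); err != nil {
		log.Fatal(err)
	}
}
```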
func (c *chainClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { return c.multiNode.BatchCallContext(ctx, b) } diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index a0b89cabbc..47041e40e9 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -328,7 +328,7 @@ func TestEthClient_HeaderByNumber(t *testing.T) { `{"difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x1","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`}, {"happy parity", expectedBlockNum, expectedBlockNum.Int64(), nil, `{"author":"0xd1aeb42885a43b72b518182ef893125814811048","difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x1","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":["0xa00f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","0x880ece08ea8c49dfd9"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`}, - {"missing header", 
expectedBlockNum, 0, fmt.Errorf("no live nodes available for chain %s", testutils.FixtureChainID.String()), + {"missing header", expectedBlockNum, 0, fmt.Errorf("RPCClient returned error (eth-primary-rpc-0): not found"), `null`}, } @@ -366,7 +366,7 @@ func TestEthClient_HeaderByNumber(t *testing.T) { ctx, cancel := context.WithTimeout(tests.Context(t), 5*time.Second) result, err := ethClient.HeadByNumber(ctx, expectedBlockNum) if test.error != nil { - require.Error(t, err, test.error) + require.EqualError(t, err, test.error.Error()) } else { require.NoError(t, err) require.Equal(t, expectedBlockHash, result.Hash.Hex()) diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index 3676808683..1fd533d6aa 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -22,13 +22,13 @@ func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, cli for i, node := range nodes { if node.SendOnly != nil && *node.SendOnly { rpc := NewRPCClient(lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, - commonclient.Secondary, cfg.FinalizedBlockPollInterval(), largePayloadRPCTimeout, defaultRPCTimeout) + commonclient.Secondary, cfg.FinalizedBlockPollInterval(), largePayloadRPCTimeout, defaultRPCTimeout, chainType) sendonly := commonclient.NewSendOnlyNode(lggr, (url.URL)(*node.HTTPURL), *node.Name, chainID, rpc) sendonlys = append(sendonlys, sendonly) } else { rpc := NewRPCClient(lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), - chainID, commonclient.Primary, cfg.FinalizedBlockPollInterval(), largePayloadRPCTimeout, defaultRPCTimeout) + chainID, commonclient.Primary, cfg.FinalizedBlockPollInterval(), largePayloadRPCTimeout, defaultRPCTimeout, chainType) primaryNode := commonclient.NewNode(cfg, chainCfg, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, rpc, "EVM") diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 8caacb4190..e996ccc5e4 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -140,7 +140,7 @@ func NewChainClientWithTestNode( } lggr := logger.Test(t) - rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") n := commonclient.NewNode[*big.Int, *evmtypes.Head, RPCClient]( nodeCfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") @@ -152,7 +152,7 @@ func NewChainClientWithTestNode( return nil, pkgerrors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) } var empty url.URL - rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") s := commonclient.NewSendOnlyNode[*big.Int, RPCClient]( lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) sendonlys = append(sendonlys, s) diff --git a/core/chains/evm/client/rpc_client.go 
b/core/chains/evm/client/rpc_client.go index 200703dd42..07aa86fc45 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -2,6 +2,7 @@ package client import ( "context" + "encoding/json" "errors" "fmt" "math/big" @@ -28,6 +29,7 @@ import ( commonclient "github.com/smartcontractkit/chainlink/v2/common/client" commontypes "github.com/smartcontractkit/chainlink/v2/common/types" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/chaintype" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" @@ -120,6 +122,7 @@ type rpcClient struct { largePayloadRpcTimeout time.Duration rpcTimeout time.Duration finalizedBlockPollInterval time.Duration + chainType chaintype.ChainType ws rawclient http *rawclient @@ -156,10 +159,12 @@ func NewRPCClient( finalizedBlockPollInterval time.Duration, largePayloadRpcTimeout time.Duration, rpcTimeout time.Duration, + chainType chaintype.ChainType, ) RPCClient { r := &rpcClient{ largePayloadRpcTimeout: largePayloadRpcTimeout, rpcTimeout: rpcTimeout, + chainType: chainType, } r.name = name r.id = id @@ -396,8 +401,28 @@ func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method return err } -func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { - ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.largePayloadRpcTimeout) +func (r *rpcClient) BatchCallContext(rootCtx context.Context, b []rpc.BatchElem) error { + // Astar's finality tags provide weaker finality guarantees than we require. 
+ // Fetch latest finalized block using Astar's custom requests and populate it after batch request completes + var astarRawLatestFinalizedBlock json.RawMessage + var requestedFinalizedBlock bool + if r.chainType == chaintype.ChainAstar { + for _, el := range b { + if !isRequestingFinalizedBlock(el) { + continue + } + + requestedFinalizedBlock = true + err := r.astarLatestFinalizedBlock(rootCtx, &astarRawLatestFinalizedBlock) + if err != nil { + return fmt.Errorf("failed to get astar latest finalized block: %w", err) + } + + break + } + } + + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(rootCtx, r.largePayloadRpcTimeout) defer cancel() lggr := r.newRqLggr().With("nBatchElems", len(b), "batchElems", b) @@ -412,8 +437,46 @@ func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) err duration := time.Since(start) r.logResult(lggr, err, duration, r.getRPCDomain(), "BatchCallContext") + if err != nil { + return err + } - return err + if r.chainType == chaintype.ChainAstar && requestedFinalizedBlock { + // populate requested finalized block with correct value + for _, el := range b { + if !isRequestingFinalizedBlock(el) { + continue + } + + el.Error = nil + err = json.Unmarshal(astarRawLatestFinalizedBlock, el.Result) + if err != nil { + el.Error = fmt.Errorf("failed to unmarshal astar finalized block into provided struct: %w", err) + } + } + } + + return nil +} + +func isRequestingFinalizedBlock(el rpc.BatchElem) bool { + isGetBlock := el.Method == "eth_getBlockByNumber" && len(el.Args) > 0 + if !isGetBlock { + return false + } + + if el.Args[0] == rpc.FinalizedBlockNumber { + return true + } + + switch arg := el.Args[0].(type) { + case string: + return arg == rpc.FinalizedBlockNumber.String() + case fmt.Stringer: + return arg.String() == rpc.FinalizedBlockNumber.String() + default: + return false + } } // TODO: Full transition from SubscribeNewHead to SubscribeToHeads is done in BCI-2875 @@ -601,17 +664,84 @@ func (r *rpcClient) HeaderByHash(ctx context.Context, hash common.Hash) (header return } -func (r *rpcClient) LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, error) { - return r.blockByNumber(ctx, rpc.FinalizedBlockNumber.String()) +func (r *rpcClient) LatestFinalizedBlock(ctx context.Context) (head *evmtypes.Head, err error) { + // capture chStopInFlight to ensure we are not updating chainInfo with observations related to previous life cycle + ctx, cancel, chStopInFlight, _, _ := r.acquireQueryCtx(ctx, r.rpcTimeout) + defer cancel() + if r.chainType == chaintype.ChainAstar { + // Astar's finality tags provide weaker guarantees. Use their custom request to fetch the latest finalized block + err = r.astarLatestFinalizedBlock(ctx, &head) + } else { + err = r.ethGetBlockByNumber(ctx, rpc.FinalizedBlockNumber.String(), &head) + } + + if err != nil { + return + } + + if head == nil { + err = r.wrapRPCClientError(ethereum.NotFound) + return + } + + head.EVMChainID = ubig.New(r.chainID) + + r.onNewFinalizedHead(ctx, chStopInFlight, head) + return +} + +func (r *rpcClient) astarLatestFinalizedBlock(ctx context.Context, result interface{}) (err error) { + var hashResult string + err = r.CallContext(ctx, &hashResult, "chain_getFinalizedHead") + if err != nil { + return fmt.Errorf("failed to get astar latest finalized hash: %w", err) + } + + var astarHead struct { + Number *hexutil.Big `json:"number"` + } + err = r.CallContext(ctx, &astarHead, "chain_getHeader", hashResult, false) + if err != nil { + return fmt.Errorf("failed to get astar head by hash: %w", err) + } + + if astarHead.Number == nil { + return r.wrapRPCClientError(fmt.Errorf("expected non-empty head number of finalized block")) + } + + err = r.ethGetBlockByNumber(ctx, astarHead.Number.String(), result) + if err != nil { + return fmt.Errorf("failed to get astar finalized block: %w", err) + } + + return nil } func (r *rpcClient) BlockByNumber(ctx context.Context, number *big.Int) (head *evmtypes.Head, err error) { - hex := ToBlockNumArg(number) - return r.blockByNumber(ctx, hex) + ctx, cancel, chStopInFlight, _, _ := r.acquireQueryCtx(ctx, r.rpcTimeout) + defer cancel() + hexNumber := ToBlockNumArg(number) + err = r.ethGetBlockByNumber(ctx, hexNumber, &head) + if err != nil { + return + } + + if head == nil { + err = r.wrapRPCClientError(ethereum.NotFound) + return + } + + head.EVMChainID = ubig.New(r.chainID) + + if hexNumber == rpc.LatestBlockNumber.String() { + r.onNewHead(ctx, chStopInFlight, head) + } + + return } -func (r *rpcClient) blockByNumber(ctx context.Context, number string) (head *evmtypes.Head, err error) { - ctx, cancel, chStopInFlight, ws, http := r.acquireQueryCtx(ctx, r.rpcTimeout) +func (r *rpcClient) ethGetBlockByNumber(ctx context.Context, number string, result interface{}) (err error) { + ctx, cancel, ws, http := r.makeLiveQueryCtxAndSafeGetClients(ctx, r.rpcTimeout) defer cancel() const method = "eth_getBlockByNumber" args := []interface{}{number, false} @@ -623,30 +753,14 @@ func (r *rpcClient) blockByNumber(ctx context.Context, number string) (head *evm lggr.Debug("RPC call: evmclient.Client#CallContext") start := time.Now() if http != nil { - err = r.wrapHTTP(http.rpc.CallContext(ctx, &head, method, args...)) + err = r.wrapHTTP(http.rpc.CallContext(ctx, result, method, args...)) } else { - err = r.wrapWS(ws.rpc.CallContext(ctx, &head, method, args...)) + err = r.wrapWS(ws.rpc.CallContext(ctx, result, method, args...)) } duration := time.Since(start) r.logResult(lggr, err, duration, r.getRPCDomain(), "CallContext") - if err != nil { - return nil, err - } - if head == nil { - err = r.wrapRPCClientError(ethereum.NotFound) - return - } - head.EVMChainID = ubig.New(r.chainID) - - switch number { - case rpc.FinalizedBlockNumber.String(): - r.onNewFinalizedHead(ctx, chStopInFlight, head) - case rpc.LatestBlockNumber.String(): - r.onNewHead(ctx, chStopInFlight, head) - } - - return + return err } func (r *rpcClient) BlockByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { diff --git a/core/chains/evm/client/rpc_client_test.go b/core/chains/evm/client/rpc_client_test.go index d6a11e0d01..1282188099 100644 ---
a/core/chains/evm/client/rpc_client_test.go +++ b/core/chains/evm/client/rpc_client_test.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -23,6 +24,7 @@ import ( commonclient "github.com/smartcontractkit/chainlink/v2/common/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/chaintype" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/testutils" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ) @@ -58,7 +60,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) // set to default values @@ -108,7 +110,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) ch := make(chan *evmtypes.Head) @@ -131,7 +133,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Block's chain ID matched configured", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) ch := make(chan *evmtypes.Head) @@ -148,7 +150,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) @@ -158,7 +160,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Subscription error is properly wrapper", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) 
sub, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) @@ -186,7 +188,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ -203,7 +205,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { return resp }) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) sub, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ -252,7 +254,7 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { } server := createRPCServer() - rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout) + rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, "") require.NoError(t, rpc.Dial(ctx)) defer rpc.Close() server.Head = &evmtypes.Head{Number: 128} @@ -362,7 +364,7 @@ func TestRpcClientLargePayloadTimeout(t *testing.T) { // use something unreasonably large for RPC timeout to ensure that we use largePayloadRPCTimeout const rpcTimeout = time.Hour const largePayloadRPCTimeout = tests.TestInterval - rpc := client.NewRPCClient(logger.Test(t), *rpcURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, largePayloadRPCTimeout, rpcTimeout) + rpc := client.NewRPCClient(logger.Test(t), *rpcURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, largePayloadRPCTimeout, rpcTimeout, "") require.NoError(t, rpc.Dial(ctx)) defer rpc.Close() err := testCase.Fn(ctx, rpc) @@ -370,3 +372,98 @@ } } + +func TestAstarCustomFinality(t *testing.T) { + t.Parallel() + + chainId := big.NewInt(123456) + // create a new server that returns block 4 for Astar custom finality and block 8 for the finality tag.
+ wsURL := testutils.NewWSServer(t, chainId, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "chain_getFinalizedHead": + resp.Result = `"0xf14c499253fd7bbcba142e5dd77dad8b5ad598c1dc414a66bacdd8dae14a6759"` + case "chain_getHeader": + if assert.True(t, params.IsArray()) && assert.Equal(t, "0xf14c499253fd7bbcba142e5dd77dad8b5ad598c1dc414a66bacdd8dae14a6759", params.Array()[0].String()) { + resp.Result = `{"parentHash":"0x1311773bc6b4efc8f438ed1f094524b2a1233baf8a35396f641fcc42a378fc62","number":"0x4","stateRoot":"0x0e4920dc5516b587e1f74a0b65963134523a12cc11478bb314e52895758fbfa2","extrinsicsRoot":"0x5b02446dcab0659eb07d4a38f28f181c1b78a71b2aba207bb0ea1f0f3468e6bd","digest":{"logs":["0x066175726120ad678e0800000000","0x04525053529023158dc8e8fd0180bf26d88233a3d94eed2f4e43480395f0809f28791965e4d34e9b3905","0x0466726f6e88017441e97acf83f555e0deefef86db636bc8a37eb84747603412884e4df4d2280400","0x056175726101018a0a57edf70cc5474323114a47ee1e7f645b8beea5a1560a996416458e89f42bdf4955e24d32b5da54e1bf628aaa7ce4b8c0fa2b95c175a139d88786af12a88c"]}}` + } + case "eth_getBlockByNumber": + assert.True(t, params.IsArray()) + switch params.Array()[0].String() { + case "0x4": + resp.Result = `{"author":"0x5accb3bf9194a5f81b2087d4bd6ac47c62775d49","baseFeePerGas":"0xb576270823","difficulty":"0x0","extraData":"0x","gasLimit":"0xe4e1c0","gasUsed":"0x0","hash":"0x7441e97acf83f555e0deefef86db636bc8a37eb84747603412884e4df4d22804","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x5accb3bf9194a5f81b2087d4bd6ac47c62775d49","nonce":"0x0000000000000000","number":"0x4","parentHash":"0x6ba069c318b692bf2cc0bd7ea070a9382a20c2f52413c10554b57c2e381bf2bb","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x201","stateRoot":"0x17c46d359b9af773312c747f1d20032c67658d9a2923799f00533b73789cf49b","timestamp":"0x66acdc22","totalDifficulty":"0x0","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}` + case "finalized": + resp.Result = 
`{"author":"0x1687736326c9fea17e25fc5287613693c912909c","baseFeePerGas":"0x3b9aca00","difficulty":"0x0","extraData":"0x","gasLimit":"0xe4e1c0","gasUsed":"0x0","hash":"0x62f03413681948b06882e7d9f91c4949bc39ded98d36336ab03faea038ec8e3d","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x1687736326c9fea17e25fc5287613693c912909c","nonce":"0x0000000000000000","number":"0x8","parentHash":"0x43f504afdc639cbb8daf5fd5328a37762164b73f9c70ed54e1928c1fca6d8f23","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x200","stateRoot":"0x0cb938d51ad83bdf401e3f5f7f989e60df64fdea620d394af41a3e72629f7495","timestamp":"0x61bd8d1a","totalDifficulty":"0x0","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}` + default: + assert.Fail(t, fmt.Sprintf("unexpected eth_getBlockByNumber param: %v", params.Array())) + } + default: + assert.Fail(t, fmt.Sprintf("unexpected method: %s", method)) + } + return + }).WSURL() + + const expectedFinalizedBlockNumber = int64(4) + const expectedFinalizedBlockHash = "0x7441e97acf83f555e0deefef86db636bc8a37eb84747603412884e4df4d22804" + rpcClient := client.NewRPCClient(logger.Test(t), *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0, commonclient.QueryTimeout, commonclient.QueryTimeout, chaintype.ChainAstar) + defer rpcClient.Close() + err := rpcClient.Dial(tests.Context(t)) + require.NoError(t, err) + + testCases := []struct { + Name string + GetLatestFinalized func(ctx context.Context) (*evmtypes.Head, error) + }{ + { + Name: "Direct LatestFinalized call", + GetLatestFinalized: func(ctx context.Context) (*evmtypes.Head, error) { + return rpcClient.LatestFinalizedBlock(ctx) + }, + }, + { + Name: "BatchCallContext with Finalized tag as string", + GetLatestFinalized: func(ctx context.Context) (*evmtypes.Head, error) { + result := &evmtypes.Head{} + req := rpc.BatchElem{ + Method: "eth_getBlockByNumber", + Args: []interface{}{rpc.FinalizedBlockNumber.String(), false}, + Result: result, + } + err := rpcClient.BatchCallContext(ctx, []rpc.BatchElem{ + req, + }) + if err != nil { + return nil, err + } + + return result, req.Error + }, + }, + { + Name: "BatchCallContext with Finalized tag as BlockNumber", + GetLatestFinalized: func(ctx context.Context) (*evmtypes.Head, error) { + result := &evmtypes.Head{} + req := rpc.BatchElem{ + Method: "eth_getBlockByNumber", + Args: []interface{}{rpc.FinalizedBlockNumber, false}, + Result: result, + } + err := rpcClient.BatchCallContext(ctx, []rpc.BatchElem{req}) + if err != nil { + return nil, err + } + + return result, req.Error + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + lf, err := testCase.GetLatestFinalized(tests.Context(t)) + require.NoError(t, err) + require.NotNil(t, lf) + assert.Equal(t, expectedFinalizedBlockHash, lf.Hash.String()) + assert.Equal(t, expectedFinalizedBlockNumber, lf.Number) + }) + } +} diff --git 
a/core/chains/evm/config/chaintype/chaintype.go b/core/chains/evm/config/chaintype/chaintype.go index 623a80f54f..07ea620624 100644 --- a/core/chains/evm/config/chaintype/chaintype.go +++ b/core/chains/evm/config/chaintype/chaintype.go @@ -9,6 +9,7 @@ type ChainType string const ( ChainArbitrum ChainType = "arbitrum" + ChainAstar ChainType = "astar" ChainCelo ChainType = "celo" ChainGnosis ChainType = "gnosis" ChainHedera ChainType = "hedera" @@ -36,7 +37,7 @@ func (c ChainType) IsL2() bool { func (c ChainType) IsValid() bool { switch c { - case "", ChainArbitrum, ChainCelo, ChainGnosis, ChainHedera, ChainKroma, ChainMetis, ChainOptimismBedrock, ChainScroll, ChainWeMix, ChainXLayer, ChainZkEvm, ChainZkSync: + case "", ChainArbitrum, ChainAstar, ChainCelo, ChainGnosis, ChainHedera, ChainKroma, ChainMetis, ChainOptimismBedrock, ChainScroll, ChainWeMix, ChainXLayer, ChainZkEvm, ChainZkSync: return true } return false @@ -46,6 +47,8 @@ func ChainTypeFromSlug(slug string) ChainType { switch slug { case "arbitrum": return ChainArbitrum + case "astar": + return ChainAstar case "celo": return ChainCelo case "gnosis": @@ -121,6 +124,7 @@ func (c *ChainTypeConfig) String() string { var ErrInvalidChainType = fmt.Errorf("must be one of %s or omitted", strings.Join([]string{ string(ChainArbitrum), + string(ChainAstar), string(ChainCelo), string(ChainGnosis), string(ChainHedera), diff --git a/core/chains/evm/testutils/client.go b/core/chains/evm/testutils/client.go index 89c97b01e6..1e5523fbff 100644 --- a/core/chains/evm/testutils/client.go +++ b/core/chains/evm/testutils/client.go @@ -148,8 +148,12 @@ func (ts *testWSServer) newWSHandler(chainID *big.Int, callback JSONRPCHandler) ts.t.Log("Received message", string(data)) req := gjson.ParseBytes(data) if !req.IsObject() { - ts.t.Logf("Request must be object: %v", req.Type) - return + if isSingleObjectArray := req.IsArray() && len(req.Array()) == 1; !isSingleObjectArray { + ts.t.Logf("Request must be object: %v", req.Type) + return + } + + req = req.Array()[0] } if e := req.Get("error"); e.Exists() { ts.t.Logf("Received jsonrpc error: %v", e) diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index 7e91d80cc8..e9ed223b9b 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -1304,7 +1304,7 @@ func TestConfig_Validate(t *testing.T) { - 1: 10 errors: - ChainType: invalid value (Foo): must not be set with this chain id - Nodes: missing: must have at least one node - - ChainType: invalid value (Foo): must be one of arbitrum, celo, gnosis, hedera, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted + - ChainType: invalid value (Foo): must be one of arbitrum, astar, celo, gnosis, hedera, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted - HeadTracker.HistoryDepth: invalid value (30): must be greater than or equal to FinalizedBlockOffset - GasEstimator.BumpThreshold: invalid value (0): cannot be 0 if auto-purge feature is enabled for Foo - Transactions.AutoPurge.Threshold: missing: needs to be set if auto-purge feature is enabled for Foo @@ -1317,7 +1317,7 @@ func TestConfig_Validate(t *testing.T) { - 2: 5 errors: - ChainType: invalid value (Arbitrum): only "optimismBedrock" can be used with this chain id - Nodes: missing: must have at least one node - - ChainType: invalid value (Arbitrum): must be one of arbitrum, celo, gnosis, hedera, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted + - 
ChainType: invalid value (Arbitrum): must be one of arbitrum, astar, celo, gnosis, hedera, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted - FinalityDepth: invalid value (0): must be greater than or equal to 1 - MinIncomingConfirmations: invalid value (0): must be greater than or equal to 1 - 3.Nodes: 5 errors: diff --git a/core/services/ocr/contract_tracker.go b/core/services/ocr/contract_tracker.go index 6651e4b65d..d7199874a9 100644 --- a/core/services/ocr/contract_tracker.go +++ b/core/services/ocr/contract_tracker.go @@ -399,7 +399,7 @@ func (t *OCRContractTracker) LatestBlockHeight(ctx context.Context) (blockheight // care about the block height; we have no way of getting the L1 block // height anyway return 0, nil - case "", chaintype.ChainArbitrum, chaintype.ChainCelo, chaintype.ChainGnosis, chaintype.ChainHedera, chaintype.ChainKroma, chaintype.ChainOptimismBedrock, chaintype.ChainScroll, chaintype.ChainWeMix, chaintype.ChainXLayer, chaintype.ChainZkEvm, chaintype.ChainZkSync: + case "", chaintype.ChainArbitrum, chaintype.ChainAstar, chaintype.ChainCelo, chaintype.ChainGnosis, chaintype.ChainHedera, chaintype.ChainKroma, chaintype.ChainOptimismBedrock, chaintype.ChainScroll, chaintype.ChainWeMix, chaintype.ChainXLayer, chaintype.ChainZkEvm, chaintype.ChainZkSync: // continue } latestBlockHeight := t.getLatestBlockHeight() From 655289d65da6ad969b1bc399b26a6f1efd2205aa Mon Sep 17 00:00:00 2001 From: simsonraj Date: Mon, 19 Aug 2024 22:51:44 +0530 Subject: [PATCH 5/5] Docs update --- docs/CONFIG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/CONFIG.md b/docs/CONFIG.md index b6d6566d44..e18abcf811 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -2826,6 +2826,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4142,6 +4143,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4235,6 +4237,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5267,6 +5270,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5738,6 +5742,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5831,6 +5836,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5926,6 +5932,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6768,6 +6775,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7048,6 +7056,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ 
-7992,6 +8001,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false
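
---

Editor's note: for operators adopting these changes, below is a minimal, illustrative per-chain TOML sketch combining the two knobs this patch series introduces: the new `astar` ChainType and a non-zero `NoNewFinalizedHeadsThreshold`. The chain ID, `FinalityTagEnabled` setting, and the `15m` threshold are hypothetical examples, not recommended values; per the defaults added in docs/CONFIG.md above, the `'0s'` default leaves the no-new-finalized-heads check disabled.

```toml
# Hypothetical example config; values are illustrative, not recommended defaults.
[[EVM]]
ChainID = '592'                        # e.g. Astar mainnet (assumed for illustration)
ChainType = 'astar'                    # enables the Astar custom finality lookup added in this patch
FinalityTagEnabled = true              # assumption: finalized-block tags are in use on this chain
NoNewFinalizedHeadsThreshold = '15m'   # flag the RPC as out of sync if no new finalized head
                                       # arrives within this window; '0s' disables the check
```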