diff --git a/core/cmd/app.go b/core/cmd/app.go index 53c96980de4..e45c61d2c4a 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -199,6 +199,7 @@ func NewApp(s *Shell) *cli.App { keysCommand("Solana", NewSolanaKeysClient(s)), keysCommand("StarkNet", NewStarkNetKeysClient(s)), keysCommand("Aptos", NewAptosKeysClient(s)), + keysCommand("Tron", NewTronKeysClient(s)), initVRFKeysSubCmd(s), }, @@ -296,6 +297,7 @@ func NewApp(s *Shell) *cli.App { chainCommand("Solana", SolanaChainClient(s), cli.StringFlag{Name: "id", Usage: "chain ID, options: [mainnet, testnet, devnet, localnet]"}), chainCommand("StarkNet", StarkNetChainClient(s), cli.StringFlag{Name: "id", Usage: "chain ID"}), + chainCommand("Tron", TronChainClient(s), cli.StringFlag{Name: "id", Usage: "chain ID"}), }, }, { @@ -306,6 +308,7 @@ func NewApp(s *Shell) *cli.App { initCosmosNodeSubCmd(s), initSolanaNodeSubCmd(s), initStarkNetNodeSubCmd(s), + initTronNodeSubCmd(s), }, }, { diff --git a/core/cmd/nodes_commands.go b/core/cmd/nodes_commands.go index efee10bb156..a9b1dd7de4d 100644 --- a/core/cmd/nodes_commands.go +++ b/core/cmd/nodes_commands.go @@ -23,6 +23,10 @@ func initSolanaNodeSubCmd(s *Shell) cli.Command { return nodeCommand("Solana", NewSolanaNodeClient(s)) } +func initTronNodeSubCmd(s *Shell) cli.Command { + return nodeCommand("Tron", NewTronNodeClient(s)) +} + // nodeCommand returns a cli.Command with subcommands for the given NodeClient. // A string cli.Flag for "name" is automatically included. func nodeCommand(typ string, client NodeClient) cli.Command { diff --git a/core/cmd/shell.go b/core/cmd/shell.go index c862b936140..38edb9f6006 100644 --- a/core/cmd/shell.go +++ b/core/cmd/shell.go @@ -259,6 +259,13 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G } initOps = append(initOps, chainlink.InitAptos(ctx, relayerFactory, aptosCfg)) } + if cfg.TronEnabled() { + tronCfg := chainlink.TronFactoryConfig{ + Keystore: keyStore.Tron(), + TOMLConfigs: cfg.TronConfigs(), + } + initOps = append(initOps, chainlink.InitTron(ctx, relayerFactory, tronCfg)) + } relayChainInterops, err := chainlink.NewCoreRelayerChainInteroperators(initOps...) if err != nil { diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go index 0a3ab13113f..586018c0ffe 100644 --- a/core/cmd/shell_local.go +++ b/core/cmd/shell_local.go @@ -438,6 +438,9 @@ func (s *Shell) runNode(c *cli.Context) error { if s.Config.AptosEnabled() { enabledChains = append(enabledChains, chaintype.Aptos) } + if s.Config.TronEnabled() { + enabledChains = append(enabledChains, chaintype.Tron) + } err2 := app.GetKeyStore().OCR2().EnsureKeys(rootCtx, enabledChains...) 
if err2 != nil { return errors.Wrap(err2, "failed to ensure ocr key") @@ -473,6 +476,12 @@ func (s *Shell) runNode(c *cli.Context) error { return errors.Wrap(err2, "failed to ensure aptos key") } } + if s.Config.TronEnabled() { + err2 := app.GetKeyStore().Tron().EnsureKey(rootCtx) + if err2 != nil { + return errors.Wrap(err2, "failed to ensure tron key") + } + } err2 := app.GetKeyStore().CSA().EnsureKey(rootCtx) if err2 != nil { diff --git a/core/cmd/shell_test.go b/core/cmd/shell_test.go index a93be2fb9ea..67dc392e4dc 100644 --- a/core/cmd/shell_test.go +++ b/core/cmd/shell_test.go @@ -582,6 +582,106 @@ func TestSetupStarkNetRelayer(t *testing.T) { require.Contains(t, err.Error(), "failed to create StarkNet LOOP command") }) } +func TestSetupTronRelayer(t *testing.T) { + lggr := logger.TestLogger(t) + reg := plugins.NewLoopRegistry(lggr, nil, nil) + ks := mocks.NewTron(t) + // configure 3 chains but only enable 2 => should only be 2 relayers + nEnabledChains := 2 + tConfig := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { + c.Tron = chainlink.RawConfigs{ + chainlink.RawConfig{ + "ChainID": "tron-id-1", + "Enabled": true, + }, + chainlink.RawConfig{ + "ChainID": "tron-id-2", + "Enabled": true, + }, + chainlink.RawConfig{ + "ChainID": "disabled-tron-id-1", + "Enabled": false, + }, + } + }) + + t2Config := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { + c.Tron = chainlink.RawConfigs{ + chainlink.RawConfig{ + "ChainID": "tron-id-3", + "Enabled": true, + }, + } + }) + rf := chainlink.RelayerFactory{ + Logger: lggr, + LoopRegistry: reg, + } + + // not parallel; shared state + t.Run("no plugin", func(t *testing.T) { + relayers, err := rf.NewTron(ks, tConfig.TronConfigs()) + require.NoError(t, err) + require.NotNil(t, relayers) + require.Len(t, relayers, nEnabledChains) + // not using the plugin, so the registry should be empty + require.Len(t, reg.List(), 0) + }) + + t.Run("plugin", func(t *testing.T) { + t.Setenv("CL_TRON_CMD", "phony_tron_cmd") + + relayers, err := rf.NewTron(ks, tConfig.TronConfigs()) + require.NoError(t, err) + require.NotNil(t, relayers) + require.Len(t, relayers, nEnabledChains) + // make sure registry has the plugin + require.Len(t, reg.List(), nEnabledChains) + }) + + // test that duplicate enabled chains produce an error + duplicateConfig := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { + c.Tron = chainlink.RawConfigs{ + chainlink.RawConfig{ + "ChainID": "dupe", + "Enabled": true, + }, + chainlink.RawConfig{ + "ChainID": "dupe", + "Enabled": true, + }, + } + }) + + // not parallel; shared state + t.Run("no plugin, duplicate chains", func(t *testing.T) { + _, err := rf.NewTron(ks, duplicateConfig.TronConfigs()) + require.Error(t, err) + }) + + t.Run("plugin, duplicate chains", func(t *testing.T) { + t.Setenv("CL_TRON_CMD", "phony_tron_cmd") + _, err := rf.NewTron(ks, duplicateConfig.TronConfigs()) + require.Error(t, err) + }) + + t.Run("plugin env parsing fails", func(t *testing.T) { + t.Setenv("CL_TRON_CMD", "phony_tron_cmd") + t.Setenv("CL_TRON_ENV", "fake_path") + + _, err := rf.NewTron(ks, t2Config.TronConfigs()) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to parse Tron env file") + }) + + t.Run("plugin already registered", func(t *testing.T) { + t.Setenv("CL_TRON_CMD", "phony_tron_cmd") + + _, err := rf.NewTron(ks, tConfig.TronConfigs()) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to create 
Tron LOOP command") + }) +} // flagSetApplyFromAction applies the flags from action to the flagSet. // `parentCommand` will filter the app commands and only applies the flags if the command/subcommand has a parent with that name, if left empty no filtering is done diff --git a/core/cmd/tron_chains_commands.go b/core/cmd/tron_chains_commands.go new file mode 100644 index 00000000000..3556456f329 --- /dev/null +++ b/core/cmd/tron_chains_commands.go @@ -0,0 +1,48 @@ +package cmd + +import ( + "strconv" + + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" +) + +// TronChainPresenter implements TableRenderer for a TronChainResource +type TronChainPresenter struct { + presenters.TronChainResource +} + +// ToRow presents the TronChainResource as a slice of strings. +func (p *TronChainPresenter) ToRow() []string { + return []string{p.GetID(), strconv.FormatBool(p.Enabled), p.Config} +} + +// RenderTable implements TableRenderer +// Just renders a single row +func (p TronChainPresenter) RenderTable(rt RendererTable) error { + rows := [][]string{} + rows = append(rows, p.ToRow()) + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +// TronChainPresenters implements TableRenderer for a slice of TronChainPresenters. +type TronChainPresenters []TronChainPresenter + +// RenderTable implements TableRenderer +func (ps TronChainPresenters) RenderTable(rt RendererTable) error { + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +func TronChainClient(s *Shell) ChainClient { + return newChainClient[TronChainPresenters](s, "tron") +} diff --git a/core/cmd/tron_keys_commands.go b/core/cmd/tron_keys_commands.go new file mode 100644 index 00000000000..67b3242e1f5 --- /dev/null +++ b/core/cmd/tron_keys_commands.go @@ -0,0 +1,57 @@ +package cmd + +import ( + "github.com/smartcontractkit/chainlink-common/pkg/utils" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" +) + +type TronKeyPresenter struct { + JAID + presenters.TronKeyResource +} + +// RenderTable implements TableRenderer +func (p TronKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 Tron Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func (p *TronKeyPresenter) ToRow() []string { + row := []string{ + p.ID, + p.PubKey, + } + + return row +} + +type TronKeyPresenters []TronKeyPresenter + +// RenderTable implements TableRenderer +func (ps TronKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 Tron Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func NewTronKeysClient(s *Shell) KeysClient { + return newKeysClient[tronkey.Key, TronKeyPresenter, TronKeyPresenters]("Tron", s) +} diff --git a/core/cmd/tron_keys_commands_test.go b/core/cmd/tron_keys_commands_test.go new file mode 100644 index 00000000000..b76edbf7c2e --- /dev/null +++ b/core/cmd/tron_keys_commands_test.go @@ -0,0 +1,175 @@ +package cmd_test + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "testing" + +
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/smartcontractkit/chainlink-common/pkg/utils" + "github.com/smartcontractkit/chainlink/v2/core/cmd" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" +) + +func TestTronKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = "1" + pubKey = "somepubkey" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.TronKeyPresenter{ + JAID: cmd.JAID{ID: id}, + TronKeyResource: presenters.TronKeyResource{ + JAID: presenters.NewJAID(id), + PubKey: pubKey, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) + + // Render many resources + buffer.Reset() + ps := cmd.TronKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) +} + +func TestShell_TronKeys(t *testing.T) { + app := startNewApplicationV2(t, nil) + ks := app.GetKeyStore().Tron() + cleanup := func() { + ctx := context.Background() + keys, err := ks.GetAll() + require.NoError(t, err) + for _, key := range keys { + require.NoError(t, utils.JustError(ks.Delete(ctx, key.ID()))) + } + requireTronKeyCount(t, app, 0) + } + + t.Run("ListTronKeys", func(tt *testing.T) { + defer cleanup() + ctx := testutils.Context(t) + client, r := app.NewShellAndRenderer() + key, err := app.GetKeyStore().Tron().Create(ctx) + require.NoError(t, err) + requireTronKeyCount(t, app, 1) + assert.Nil(t, cmd.NewTronKeysClient(client).ListKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.TronKeyPresenters) + assert.True(t, key.PublicKeyStr() == keys[0].PubKey) + }) + + t.Run("CreateTronKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + require.NoError(t, cmd.NewTronKeysClient(client).CreateKey(nilContext)) + keys, err := app.GetKeyStore().Tron().GetAll() + require.NoError(t, err) + require.Len(t, keys, 1) + }) + + t.Run("DeleteTronKey", func(tt *testing.T) { + defer cleanup() + ctx := testutils.Context(t) + client, _ := app.NewShellAndRenderer() + key, err := app.GetKeyStore().Tron().Create(ctx) + require.NoError(t, err) + requireTronKeyCount(t, app, 1) + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(cmd.NewTronKeysClient(client).DeleteKey, set, "tron") + + require.NoError(tt, set.Set("yes", "true")) + + strID := key.ID() + err = set.Parse([]string{strID}) + require.NoError(t, err) + c := cli.NewContext(nil, set, nil) + err = cmd.NewTronKeysClient(client).DeleteKey(c) + require.NoError(t, err) + requireTronKeyCount(t, app, 0) + }) + + t.Run("ImportExportTronKey", func(tt *testing.T) { + defer cleanup() + defer deleteKeyExportFile(t) + ctx := testutils.Context(t) + client, _ := app.NewShellAndRenderer() + + _, err := app.GetKeyStore().Tron().Create(ctx) + require.NoError(t, err) + + keys := requireTronKeyCount(t, app, 1) + key := keys[0] + keyName := keyNameForTest(t) + + // Export test invalid id + set := flag.NewFlagSet("test Tron export", 0) + 
flagSetApplyFromAction(cmd.NewTronKeysClient(client).ExportKey, set, "tron") + + require.NoError(tt, set.Parse([]string{"0"})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + err = cmd.NewTronKeysClient(client).ExportKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test Tron export", 0) + flagSetApplyFromAction(cmd.NewTronKeysClient(client).ExportKey, set, "tron") + + require.NoError(tt, set.Parse([]string{fmt.Sprint(key.ID())})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(t, cmd.NewTronKeysClient(client).ExportKey(c)) + require.NoError(t, utils.JustError(os.Stat(keyName))) + + require.NoError(t, utils.JustError(app.GetKeyStore().Tron().Delete(ctx, key.ID()))) + requireTronKeyCount(t, app, 0) + + set = flag.NewFlagSet("test Tron import", 0) + flagSetApplyFromAction(cmd.NewTronKeysClient(client).ImportKey, set, "tron") + + require.NoError(tt, set.Parse([]string{keyName})) + require.NoError(tt, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + c = cli.NewContext(nil, set, nil) + require.NoError(t, cmd.NewTronKeysClient(client).ImportKey(c)) + + requireTronKeyCount(t, app, 1) + }) +} + +func requireTronKeyCount(t *testing.T, app chainlink.Application, length int) []tronkey.Key { + t.Helper() + keys, err := app.GetKeyStore().Tron().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/tron_node_commands.go b/core/cmd/tron_node_commands.go new file mode 100644 index 00000000000..bc174d21cb1 --- /dev/null +++ b/core/cmd/tron_node_commands.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" +) + +// TronNodePresenter implements TableRenderer for a TronNodeResource. +type TronNodePresenter struct { + presenters.TronNodeResource +} + +// ToRow presents the TronNodeResource as a slice of strings. +func (p *TronNodePresenter) ToRow() []string { + return []string{p.Name, p.ChainID, p.State, p.Config} +} + +// RenderTable implements TableRenderer +func (p TronNodePresenter) RenderTable(rt RendererTable) error { + var rows [][]string + rows = append(rows, p.ToRow()) + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +// TronNodePresenters implements TableRenderer for a slice of TronNodePresenter. 
+type TronNodePresenters []TronNodePresenter + +// RenderTable implements TableRenderer +func (ps TronNodePresenters) RenderTable(rt RendererTable) error { + var rows [][]string + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +func NewTronNodeClient(s *Shell) NodeClient { + return newNodeClient[TronNodePresenters](s, "tron") +} diff --git a/core/config/app_config.go b/core/config/app_config.go index 4cb7f1f610c..134838d6bbb 100644 --- a/core/config/app_config.go +++ b/core/config/app_config.go @@ -26,6 +26,7 @@ type AppConfig interface { SolanaEnabled() bool StarkNetEnabled() bool AptosEnabled() bool + TronEnabled() bool Validate() error ValidateDB() error diff --git a/core/config/docs/chains-tron.toml b/core/config/docs/chains-tron.toml new file mode 100644 index 00000000000..226c26366e1 --- /dev/null +++ b/core/config/docs/chains-tron.toml @@ -0,0 +1,13 @@ +[[Tron]] +# ChainID is the Tron chain ID. +ChainID = 'foobar' # Example +# FeederURL is required to get tx metadata (that the RPC can't) +FeederURL = 'http://feeder.url' # Example +# Enabled enables this chain. +Enabled = true # Default + +[[Tron.Nodes]] +# Name is a unique (per-chain) identifier for this node. +Name = 'primary' # Example +# URL is the base HTTP(S) endpoint for this node. +URL = 'http://api.trongrid.io' # Example \ No newline at end of file diff --git a/core/config/env/env.go b/core/config/env/env.go index c34cd7f4f5e..68b79c7575c 100644 --- a/core/config/env/env.go +++ b/core/config/env/env.go @@ -29,6 +29,7 @@ var ( MercuryPlugin = NewPlugin("mercury") SolanaPlugin = NewPlugin("solana") StarknetPlugin = NewPlugin("starknet") + TronPlugin = NewPlugin("tron") // PrometheusDiscoveryHostName is the externally accessible hostname // published by the node in the `/discovery` endpoint. Generally, it is expected to match // the public hostname of node. 
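[Reviewer note, not part of the patch: the docs file core/config/docs/chains-tron.toml above is the reference shape for Tron configuration. As a rough sketch of how those [[Tron]] / [[Tron.Nodes]] blocks surface inside the node, the raw TOML is decoded into the untyped chainlink.RawConfigs / chainlink.RawConfig values that the tests in this change construct directly. The nested map shape used for "Nodes" below is an illustrative assumption only, since core does not type-check these fields.]

package main

import (
	"fmt"

	"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
)

func main() {
	// Mirrors the chainlink.RawConfigs fixtures used in the tests in this diff.
	// ChainID/Enabled/Nodes follow core/config/docs/chains-tron.toml; the nested
	// map shape for Nodes is an illustrative assumption.
	tronCfgs := chainlink.RawConfigs{
		chainlink.RawConfig{
			"ChainID": "foobar",
			"Enabled": true,
			"Nodes": []map[string]any{
				{"Name": "primary", "URL": "http://api.trongrid.io"},
			},
		},
	}
	fmt.Println("tron chains configured:", len(tronCfgs))
}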
diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index f1eb1970c05..a16196cc1fa 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -81,6 +81,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/solkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/starkkey" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/vrfkey" "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" @@ -132,6 +133,7 @@ var ( DefaultSolanaKey = solkey.MustNewInsecure(keystest.NewRandReaderFromSeed(KeyBigIntSeed)) DefaultStarkNetKey = starkkey.MustNewInsecure(keystest.NewRandReaderFromSeed(KeyBigIntSeed)) DefaultAptosKey = aptoskey.MustNewInsecure(keystest.NewRandReaderFromSeed(KeyBigIntSeed)) + DefaultTronKey = tronkey.MustNewInsecure(keystest.NewRandReaderFromSeed(KeyBigIntSeed)) DefaultVRFKey = vrfkey.MustNewV2XXXTestingOnly(big.NewInt(KeyBigIntSeed)) ) @@ -465,6 +467,13 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn } initOps = append(initOps, chainlink.InitAptos(testCtx, relayerFactory, aptosCfg)) } + if cfg.TronEnabled() { + tronCfg := chainlink.TronFactoryConfig{ + Keystore: keyStore.Tron(), + TOMLConfigs: cfg.TronConfigs(), + } + initOps = append(initOps, chainlink.InitTron(testCtx, relayerFactory, tronCfg)) + } relayChainInterops, err := chainlink.NewCoreRelayerChainInteroperators(initOps...) if err != nil { diff --git a/core/services/chainlink/config.go b/core/services/chainlink/config.go index 476e758ccbb..bc60585aef6 100644 --- a/core/services/chainlink/config.go +++ b/core/services/chainlink/config.go @@ -44,6 +44,8 @@ type Config struct { Starknet stkcfg.TOMLConfigs `toml:",omitempty"` Aptos RawConfigs `toml:",omitempty"` + + Tron RawConfigs `toml:",omitempty"` } // RawConfigs is a list of RawConfig. 
@@ -97,6 +99,7 @@ func (c *Config) TOMLString() (string, error) { // warnings aggregates warnings from valueWarnings and deprecationWarnings func (c *Config) warnings() (err error) { deprecationErr := c.deprecationWarnings() + warningErr := c.valueWarnings() err = multierr.Append(deprecationErr, warningErr) _, list := commonconfig.MultiErrorList(err) @@ -187,6 +190,7 @@ func (c *Config) SetFrom(f *Config) (err error) { // the plugin should handle it's own defaults and merging c.Aptos = f.Aptos + c.Tron = f.Tron _, err = commonconfig.MultiErrorList(err) diff --git a/core/services/chainlink/config_general.go b/core/services/chainlink/config_general.go index dd0dc87b59a..e67d92fefc9 100644 --- a/core/services/chainlink/config_general.go +++ b/core/services/chainlink/config_general.go @@ -213,6 +213,10 @@ func (g *generalConfig) AptosConfigs() RawConfigs { return g.c.Aptos } +func (g *generalConfig) TronConfigs() RawConfigs { + return g.c.Tron +} + func (g *generalConfig) Validate() error { return g.validate(g.secrets.Validate) } @@ -358,6 +362,15 @@ func (g *generalConfig) AptosEnabled() bool { return false } +func (g *generalConfig) TronEnabled() bool { + for _, c := range g.c.Tron { + if c.IsEnabled() { + return true + } + } + return false +} + func (g *generalConfig) WebServer() config.WebServer { return &webServerConfig{c: g.c.WebServer, s: g.secrets.WebServer, rootDir: g.RootDir} } diff --git a/core/services/chainlink/config_general_test.go b/core/services/chainlink/config_general_test.go index 29393ee0fdd..3f02b880baf 100644 --- a/core/services/chainlink/config_general_test.go +++ b/core/services/chainlink/config_general_test.go @@ -28,6 +28,7 @@ func TestTOMLGeneralConfig_Defaults(t *testing.T) { assert.False(t, config.CosmosEnabled()) assert.False(t, config.SolanaEnabled()) assert.False(t, config.StarkNetEnabled()) + assert.False(t, config.TronEnabled()) assert.Equal(t, false, config.JobPipeline().ExternalInitiatorsEnabled()) assert.Equal(t, 15*time.Minute, config.WebServer().SessionTimeout().Duration()) } diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index be7c0a08530..b7ff62bb173 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -235,6 +235,11 @@ var ( }, }, }, + Tron: []RawConfig{ + { + "ChainID": "foobar", + }, + }, } ) @@ -797,6 +802,11 @@ func TestConfig_Marshal(t *testing.T) { }, }, } + full.Tron = RawConfigs{ + RawConfig{ + "ChainID": "foobar", + }, + } full.Cosmos = []*coscfg.TOMLConfig{ { ChainID: ptr("Malaga-420"), @@ -1507,7 +1517,8 @@ func TestConfig_Validate(t *testing.T) { - 1: 2 errors: - ChainID: missing: required for all chains - Nodes: missing: must have at least one node - - Aptos.0.Enabled: invalid value (1): expected bool`}, + - Aptos.0.Enabled: invalid value (1): expected bool + - Tron.0.Enabled: invalid value (1): expected bool`}, } { t.Run(tt.name, func(t *testing.T) { var c Config diff --git a/core/services/chainlink/mocks/general_config.go b/core/services/chainlink/mocks/general_config.go index 63a846c6edb..7ea75740c89 100644 --- a/core/services/chainlink/mocks/general_config.go +++ b/core/services/chainlink/mocks/general_config.go @@ -1959,6 +1959,44 @@ func (_c *GeneralConfig_Tracing_Call) RunAndReturn(run func() config.Tracing) *G return _c } +// TronConfigs provides a mock function with given fields: +func (_m *GeneralConfig) TronConfigs() chainlink.RawConfigs { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TronConfigs") + 
} + + var r0 chainlink.RawConfigs + if rf, ok := ret.Get(0).(func() chainlink.RawConfigs); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(chainlink.RawConfigs) + } + } + + return r0 +} + +// TronEnabled provides a mock function with given fields: +func (_m *GeneralConfig) TronEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TronEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // Validate provides a mock function with given fields: func (_m *GeneralConfig) Validate() error { ret := _m.Called() diff --git a/core/services/chainlink/relayer_chain_interoperators.go b/core/services/chainlink/relayer_chain_interoperators.go index 8197b12ec7b..2abfef226ed 100644 --- a/core/services/chainlink/relayer_chain_interoperators.go +++ b/core/services/chainlink/relayer_chain_interoperators.go @@ -202,6 +202,23 @@ func InitAptos(ctx context.Context, factory RelayerFactory, config AptosFactoryC } } +// InitTron is a option for instantiating Tron relayers +func InitTron(ctx context.Context, factory RelayerFactory, config TronFactoryConfig) CoreRelayerChainInitFunc { + return func(op *CoreRelayerChainInteroperators) error { + tronRelayers, err := factory.NewTron(config.Keystore, config.TOMLConfigs) + if err != nil { + return fmt.Errorf("failed to setup Tron relayer: %w", err) + } + + for id, relayer := range tronRelayers { + op.srvs = append(op.srvs, relayer) + op.loopRelayers[id] = relayer + } + + return nil + } +} + // Get a [loop.Relayer] by id func (rs *CoreRelayerChainInteroperators) Get(id types.RelayID) (loop.Relayer, error) { rs.mu.Lock() diff --git a/core/services/chainlink/relayer_chain_interoperators_test.go b/core/services/chainlink/relayer_chain_interoperators_test.go index e83c2881c93..13fe569afe9 100644 --- a/core/services/chainlink/relayer_chain_interoperators_test.go +++ b/core/services/chainlink/relayer_chain_interoperators_test.go @@ -37,6 +37,7 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { solanaChainID1, solanaChainID2 := "solana-id-1", "solana-id-2" starknetChainID1, starknetChainID2 := "starknet-id-1", "starknet-id-2" cosmosChainID1, cosmosChainID2 := "cosmos-id-1", "cosmos-id-2" + tronChainID1, tronChainID2 := "tron-id-1", "tron-id-2" cfg := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { cfg := evmcfg.Defaults(evmChainID1) @@ -167,6 +168,16 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { }, }, } + c.Tron = chainlink.RawConfigs{ + chainlink.RawConfig{ + "ChainID": tronChainID1, + "Enabled": true, + }, + chainlink.RawConfig{ + "ChainID": tronChainID2, + "Enabled": true, + }, + } }) db := pgtest.NewSqlxDB(t) @@ -207,6 +218,10 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { expectedCosmosChainCnt int expectedCosmosNodeCnt int expectedCosmosRelayerIds []types.RelayID + + expectedTronChainCnt int + expectedTronNodeCnt int + expectedTronRelayerIds []types.RelayID }{ {name: "2 evm chains with 3 nodes", @@ -279,6 +294,23 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { expectedRelayerNetworks: map[string]struct{}{relay.NetworkCosmos: {}}, }, + { + name: "2 tron chains with 2 nodes", + initFuncs: []chainlink.CoreRelayerChainInitFunc{ + chainlink.InitTron(testctx, factory, chainlink.TronFactoryConfig{ + Keystore: keyStore.Tron(), + TOMLConfigs: cfg.TronConfigs(), + }), + }, + expectedTronChainCnt: 2, + expectedTronNodeCnt: 2, + 
expectedTronRelayerIds: []types.RelayID{ + {Network: relay.NetworkTron, ChainID: tronChainID1}, + {Network: relay.NetworkTron, ChainID: tronChainID2}, + }, + expectedRelayerNetworks: map[string]struct{}{relay.NetworkTron: {}}, + }, + {name: "all chains", initFuncs: []chainlink.CoreRelayerChainInitFunc{chainlink.InitSolana(testctx, factory, chainlink.SolanaFactoryConfig{ @@ -330,7 +362,18 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { {Network: relay.NetworkCosmos, ChainID: cosmosChainID2}, }, - expectedRelayerNetworks: map[string]struct{}{relay.NetworkEVM: {}, relay.NetworkCosmos: {}, relay.NetworkSolana: {}, relay.NetworkStarkNet: {}}, + expectedTronChainCnt: 2, + expectedTronNodeCnt: 2, + expectedTronRelayerIds: []types.RelayID{ + {Network: relay.NetworkTron, ChainID: tronChainID1}, + {Network: relay.NetworkTron, ChainID: tronChainID2}, + }, + + expectedRelayerNetworks: map[string]struct{}{ + relay.NetworkEVM: {}, relay.NetworkCosmos: {}, + relay.NetworkSolana: {}, relay.NetworkStarkNet: {}, + relay.NetworkTron: {}, + }, }, } for _, tt := range tests { @@ -374,6 +417,8 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { expectedChainCnt, expectedNodeCnt = tt.expectedSolanaChainCnt, tt.expectedSolanaNodeCnt case relay.NetworkStarkNet: expectedChainCnt, expectedNodeCnt = tt.expectedStarknetChainCnt, tt.expectedStarknetNodeCnt + case relay.NetworkTron: + expectedChainCnt, expectedNodeCnt = tt.expectedTronChainCnt, tt.expectedTronNodeCnt case relay.NetworkDummy: expectedChainCnt, expectedNodeCnt = tt.expectedDummyChainCnt, tt.expectedDummyNodeCnt case relay.NetworkAptos: diff --git a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go index bbd9f283add..2bbb1e73175 100644 --- a/core/services/chainlink/relayer_factory.go +++ b/core/services/chainlink/relayer_factory.go @@ -362,3 +362,14 @@ func (r *RelayerFactory) NewLOOPRelayer(name string, network string, plugin env. 
} return relayers, nil } + +type TronFactoryConfig struct { + Keystore keystore.Tron + TOMLConfigs RawConfigs +} + +func (r *RelayerFactory) NewTron(ks keystore.Tron, chainCfgs RawConfigs) (map[types.RelayID]loop.Relayer, error) { + plugin := env.NewPlugin("tron") + loopKs := &keystore.TronLoopKeystore{Tron: ks} + return r.NewLOOPRelayer("Tron", relay.NetworkTron, plugin, loopKs, chainCfgs) +} diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index 35b8903878d..4ff8f3d1eeb 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -547,3 +547,11 @@ ConfirmationPoll = '42s' Name = 'primary' URL = 'http://stark.node' APIKey = 'key' + +[[Tron]] +ChainID = 'foobar' +Enabled = true + +[[Tron.Nodes]] +Name = 'primary' +URL = 'http://api.trongrid.io' \ No newline at end of file diff --git a/core/services/chainlink/testdata/config-invalid.toml b/core/services/chainlink/testdata/config-invalid.toml index ca22e68c22c..31b80f5deef 100644 --- a/core/services/chainlink/testdata/config-invalid.toml +++ b/core/services/chainlink/testdata/config-invalid.toml @@ -162,6 +162,15 @@ APIKey = 'key' [[Aptos]] Enabled = 1 +[[Tron]] +Enabled = 1 + +[[Tron.Nodes]] +Name = 'tron-test' + +[[Tron.Nodes]] +Name = 'tron-test' + [OCR2] Enabled = true diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index 3d82a30e88c..6d7c2ccefdd 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -739,3 +739,10 @@ ConfirmationPoll = '1h0m0s' Name = 'primary' URL = 'http://stark.node' APIKey = 'key' + +[[Tron]] +ChainID = 'foobar' + +[[Tron.Nodes]] +Name = 'primary' +URL = 'http://api.trongrid.io' \ No newline at end of file diff --git a/core/services/chainlink/testdata/config-multi-chain.toml b/core/services/chainlink/testdata/config-multi-chain.toml index e15fd143665..9e7cde39aa4 100644 --- a/core/services/chainlink/testdata/config-multi-chain.toml +++ b/core/services/chainlink/testdata/config-multi-chain.toml @@ -147,3 +147,10 @@ ConfirmationPoll = '1h0m0s' Name = 'primary' URL = 'http://stark.node' APIKey = 'key' + +[[Tron]] +ChainID = 'foobar' + +[[Tron.Nodes]] +Name = 'primary' +URL = 'http://api.trongrid.io' \ No newline at end of file diff --git a/core/services/chainlink/types.go b/core/services/chainlink/types.go index 74ffc5dc66d..53e7d2a9366 100644 --- a/core/services/chainlink/types.go +++ b/core/services/chainlink/types.go @@ -16,6 +16,7 @@ type GeneralConfig interface { SolanaConfigs() solcfg.TOMLConfigs StarknetConfigs() stkcfg.TOMLConfigs AptosConfigs() RawConfigs + TronConfigs() RawConfigs // ConfigTOML returns both the user provided and effective configuration as TOML.
ConfigTOML() (user, effective string) } diff --git a/core/services/job/job_orm_test.go b/core/services/job/job_orm_test.go index 14aab17721d..d11cf08dda4 100644 --- a/core/services/job/job_orm_test.go +++ b/core/services/job/job_orm_test.go @@ -1006,6 +1006,18 @@ func TestORM_ValidateKeyStoreMatch(t *testing.T) { require.NoError(t, err) }) + t.Run(("test Tron key validation"), func(t *testing.T) { + ctx := testutils.Context(t) + jb.OCR2OracleSpec.Relay = relay.NetworkTron + err := job.ValidateKeyStoreMatch(ctx, jb.OCR2OracleSpec, keyStore, "bad key") + require.EqualError(t, err, "no Tron key matching: \"bad key\"") + + tronKey, err := keyStore.Tron().Create(ctx) + require.NoError(t, err) + err = job.ValidateKeyStoreMatch(ctx, jb.OCR2OracleSpec, keyStore, tronKey.ID()) + require.NoError(t, err) + }) + t.Run("test Mercury ETH key validation", func(t *testing.T) { ctx := testutils.Context(t) jb.OCR2OracleSpec.PluginType = types.Mercury diff --git a/core/services/job/orm.go b/core/services/job/orm.go index f306a68d98b..90ff48cc38d 100644 --- a/core/services/job/orm.go +++ b/core/services/job/orm.go @@ -631,6 +631,11 @@ func validateKeyStoreMatchForRelay(ctx context.Context, network string, keyStore if err != nil { return errors.Errorf("no Aptos key matching: %q", key) } + case relay.NetworkTron: + _, err := keyStore.Tron().Get(key) + if err != nil { + return errors.Errorf("no Tron key matching: %q", key) + } } return nil } diff --git a/core/services/keystore/chaintype/chaintype.go b/core/services/keystore/chaintype/chaintype.go index 419dfa2d073..a0055a45ca6 100644 --- a/core/services/keystore/chaintype/chaintype.go +++ b/core/services/keystore/chaintype/chaintype.go @@ -21,6 +21,8 @@ const ( StarkNet ChainType = "starknet" // Aptos for the Aptos chain Aptos ChainType = "aptos" + // Tron for the Tron chain + Tron ChainType = "tron" ) type ChainTypes []ChainType @@ -71,7 +73,7 @@ func (c ChainType) Type() (uint8, error) { } // SupportedChainTypes contain all chains that are supported -var SupportedChainTypes = ChainTypes{EVM, Cosmos, Solana, StarkNet, Aptos} +var SupportedChainTypes = ChainTypes{EVM, Cosmos, Solana, StarkNet, Aptos, Tron} // ErrInvalidChainType is an error to indicate an unsupported chain type var ErrInvalidChainType error diff --git a/core/services/keystore/keys/ocr2key/export.go b/core/services/keystore/keys/ocr2key/export.go index 8fa5ffedfed..eb7fe5f5eb9 100644 --- a/core/services/keystore/keys/ocr2key/export.go +++ b/core/services/keystore/keys/ocr2key/export.go @@ -48,6 +48,8 @@ func FromEncryptedJSON(keyJSON []byte, password string) (KeyBundle, error) { kb = newKeyBundle(new(starkkey.OCR2Key)) case chaintype.Aptos: kb = newKeyBundle(new(aptosKeyring)) + case chaintype.Tron: + kb = newKeyBundle(new(evmKeyring)) default: return nil, chaintype.NewErrInvalidChainType(export.ChainType) } diff --git a/core/services/keystore/keys/ocr2key/export_test.go b/core/services/keystore/keys/ocr2key/export_test.go index b0ffa2db009..fd1e867dfa9 100644 --- a/core/services/keystore/keys/ocr2key/export_test.go +++ b/core/services/keystore/keys/ocr2key/export_test.go @@ -19,6 +19,7 @@ func TestExport(t *testing.T) { {chain: chaintype.Solana}, {chain: chaintype.StarkNet}, {chain: chaintype.Aptos}, + {chain: chaintype.Tron}, } for _, tc := range tt { tc := tc diff --git a/core/services/keystore/keys/ocr2key/key_bundle.go b/core/services/keystore/keys/ocr2key/key_bundle.go index 2c25a159fef..9338426d03e 100644 --- a/core/services/keystore/keys/ocr2key/key_bundle.go +++ 
b/core/services/keystore/keys/ocr2key/key_bundle.go @@ -60,6 +60,8 @@ func New(chainType chaintype.ChainType) (KeyBundle, error) { return newKeyBundleRand(chaintype.StarkNet, starkkey.NewOCR2Key) case chaintype.Aptos: return newKeyBundleRand(chaintype.Aptos, newAptosKeyring) + case chaintype.Tron: + return newKeyBundleRand(chaintype.Tron, newEVMKeyring) } return nil, chaintype.NewErrInvalidChainType(chainType) } @@ -77,6 +79,8 @@ func MustNewInsecure(reader io.Reader, chainType chaintype.ChainType) KeyBundle return mustNewKeyBundleInsecure(chaintype.StarkNet, starkkey.NewOCR2Key, reader) case chaintype.Aptos: return mustNewKeyBundleInsecure(chaintype.Aptos, newAptosKeyring, reader) + case chaintype.Tron: + return mustNewKeyBundleInsecure(chaintype.Tron, newEVMKeyring, reader) } panic(chaintype.NewErrInvalidChainType(chainType)) } @@ -128,6 +132,8 @@ func (raw Raw) Key() (kb KeyBundle) { kb = newKeyBundle(new(starkkey.OCR2Key)) case chaintype.Aptos: kb = newKeyBundle(new(aptosKeyring)) + case chaintype.Tron: + kb = newKeyBundle(new(evmKeyring)) default: return nil } diff --git a/core/services/keystore/keys/tronkey/account.go b/core/services/keystore/keys/tronkey/account.go new file mode 100644 index 00000000000..a48cdf5f81c --- /dev/null +++ b/core/services/keystore/keys/tronkey/account.go @@ -0,0 +1,174 @@ +package tronkey + +import ( + "crypto/ecdsa" + "crypto/sha256" + "encoding/hex" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/mr-tron/base58" +) + +// Extracted from go-tron sdk: https://github.com/fbsobreira/gotron-sdk + +const ( + // HashLength is the expected length of the hash + HashLength = 32 + // AddressLengthBase58 is the expected length of the address in base58format + AddressLengthBase58 = 34 + // Tron Address Prefix + prefixMainnet = 0x41 + // TronBytePrefix is the hex prefix to address + TronBytePrefix = byte(prefixMainnet) + // Tron address should should have 20 bytes + 4 checksum + 1 Prefix + AddressLength = 20 +) + +// Address represents the 21 byte address of an Tron account. +type Address []byte + +// Bytes get bytes from address +func (a Address) Bytes() []byte { + return a[:] +} + +// Hex get bytes from address in string +func (a Address) Hex() string { + return BytesToHexString(a[:]) +} + +// HexToAddress returns Address with byte values of s. +// If s is larger than len(h), s will be cropped from the left. +func HexToAddress(s string) Address { + addr, err := FromHex(s) + if err != nil { + return nil + } + return addr +} + +// Base58ToAddress returns Address with byte values of s. +func Base58ToAddress(s string) (Address, error) { + addr, err := DecodeCheck(s) + if err != nil { + return nil, err + } + return addr, nil +} + +// String implements fmt.Stringer. +// Returns the address as a base58 encoded string. +func (a Address) String() string { + if len(a) == 0 { + return "" + } + + if a[0] == 0 { + return new(big.Int).SetBytes(a.Bytes()).String() + } + return EncodeCheck(a.Bytes()) +} + +// PubkeyToAddress returns address from ecdsa public key +func PubkeyToAddress(p ecdsa.PublicKey) Address { + address := crypto.PubkeyToAddress(p) + + addressTron := make([]byte, 0) + addressTron = append(addressTron, TronBytePrefix) + addressTron = append(addressTron, address.Bytes()...) + return addressTron +} + +// BytesToHexString encodes bytes as a hex string. 
+func BytesToHexString(bytes []byte) string { + encode := make([]byte, len(bytes)*2) + hex.Encode(encode, bytes) + return "0x" + string(encode) +} + +// FromHex returns the bytes represented by the hexadecimal string s. +// s may be prefixed with "0x". +func FromHex(s string) ([]byte, error) { + if Has0xPrefix(s) { + s = s[2:] + } + if len(s)%2 == 1 { + s = "0" + s + } + return HexToBytes(s) +} + +// Has0xPrefix validates str begins with '0x' or '0X'. +func Has0xPrefix(str string) bool { + return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') +} + +// HexToBytes returns the bytes represented by the hexadecimal string str. +func HexToBytes(str string) ([]byte, error) { + return hex.DecodeString(str) +} + +func Encode(input []byte) string { + return base58.Encode(input) +} + +func EncodeCheck(input []byte) string { + h256h0 := sha256.New() + h256h0.Write(input) + h0 := h256h0.Sum(nil) + + h256h1 := sha256.New() + h256h1.Write(h0) + h1 := h256h1.Sum(nil) + + inputCheck := input + inputCheck = append(inputCheck, h1[:4]...) + + return Encode(inputCheck) +} + +func DecodeCheck(input string) ([]byte, error) { + decodeCheck, err := Decode(input) + if err != nil { + return nil, err + } + + if len(decodeCheck) < 4 { + return nil, fmt.Errorf("b58 check error") + } + + // tron address should should have 20 bytes + 4 checksum + 1 Prefix + if len(decodeCheck) != AddressLength+4+1 { + return nil, fmt.Errorf("invalid address length: %d", len(decodeCheck)) + } + + // check prefix + if decodeCheck[0] != prefixMainnet { + return nil, fmt.Errorf("invalid prefix") + } + + decodeData := decodeCheck[:len(decodeCheck)-4] + + h256h0 := sha256.New() + h256h0.Write(decodeData) + h0 := h256h0.Sum(nil) + + h256h1 := sha256.New() + h256h1.Write(h0) + h1 := h256h1.Sum(nil) + + if h1[0] == decodeCheck[len(decodeData)] && + h1[1] == decodeCheck[len(decodeData)+1] && + h1[2] == decodeCheck[len(decodeData)+2] && + h1[3] == decodeCheck[len(decodeData)+3] { + return decodeData, nil + } + + return nil, fmt.Errorf("b58 check error") +} + +func Decode(input string) ([]byte, error) { + return base58.Decode(input) +} diff --git a/core/services/keystore/keys/tronkey/account_test.go b/core/services/keystore/keys/tronkey/account_test.go new file mode 100644 index 00000000000..aa05d92d217 --- /dev/null +++ b/core/services/keystore/keys/tronkey/account_test.go @@ -0,0 +1,142 @@ +package tronkey + +import ( + "bytes" + "regexp" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_DecodeBase58(t *testing.T) { + invalidAddresses := []string{ + "TronEnergyioE1Z3ukeRv38sYkv5Jn55bL", + "TronEnergyioNijNo8g3LF2ABKUAae6D2Z", + "TronEnergyio3ZMcXA5hSjrTxaioKGgqyr", + } + + validAddresses := []string{ + "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t", + "TVj7RNVHy6thbM7BWdSe9G6gXwKhjhdNZS", + "THPvaUhoh2Qn2y9THCZML3H815hhFhn5YC", + } + + for _, addr := range invalidAddresses { + _, err := DecodeCheck(addr) + assert.NotNil(t, err) + } + + for _, addr := range validAddresses { + _, err := DecodeCheck(addr) + assert.Nil(t, err) + } +} + +func TestAddress(t *testing.T) { + t.Run("Valid Addresses", func(t *testing.T) { + validAddresses := []string{ + "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t", + "TVj7RNVHy6thbM7BWdSe9G6gXwKhjhdNZS", + "THPvaUhoh2Qn2y9THCZML3H815hhFhn5YC", + } + + for _, addrStr := range validAddresses { + t.Run(addrStr, func(t *testing.T) { + addr, err := Base58ToAddress(addrStr) + assert.Nil(t, err) + assert.Equal(t, addrStr, addr.String()) + + decoded, err := DecodeCheck(addrStr) + assert.Nil(t, err) 
+ assert.True(t, bytes.Equal(decoded, addr.Bytes())) + }) + } + }) + + t.Run("Invalid Addresses", func(t *testing.T) { + invalidAddresses := []string{ + "TronEnergyioE1Z3ukeRv38sYkv5Jn55bL", + "TronEnergyioNijNo8g3LF2ABKUAae6D2Z", + "TronEnergyio3ZMcXA5hSjrTxaioKGgqyr", + } + + for _, addrStr := range invalidAddresses { + t.Run(addrStr, func(t *testing.T) { + _, err := Base58ToAddress(addrStr) + assert.NotNil(t, err) + + _, err = DecodeCheck(addrStr) + assert.NotNil(t, err) + }) + } + }) + + t.Run("Address Conversion", func(t *testing.T) { + addrStr := "TSvT6Bg3siokv3dbdtt9o4oM1CTXmymGn1" + addr, err := Base58ToAddress(addrStr) + assert.Nil(t, err) + + t.Run("To Bytes", func(t *testing.T) { + bytes := addr.Bytes() + assert.Equal(t, 21, len(bytes)) + }) + + t.Run("To Hex", func(t *testing.T) { + hex := addr.Hex() + assert.True(t, hex[:2] == "0x") + assert.Equal(t, 44, len(hex)) // first 2 bytes are 0x + }) + }) + + t.Run("Address Validity", func(t *testing.T) { + t.Run("Valid Address", func(t *testing.T) { + addr, _ := Base58ToAddress("TSvT6Bg3siokv3dbdtt9o4oM1CTXmymGn1") + assert.True(t, isValid(addr)) + }) + + t.Run("Zero Address", func(t *testing.T) { + addr := Address{} + assert.False(t, isValid(addr)) + }) + }) +} + +// Helper Functions for testing + +// isValid checks if the address is a valid TRON address +func isValid(a Address) bool { + // Check if it's a valid Base58 address + base58Str := a.String() + if isValidBase58Address(base58Str) { + return true + } + + // Check if it's a valid hex address + hexStr := a.Hex() + return isValidHexAddress(strings.TrimPrefix(hexStr, "0x")) +} + +// isValidBase58Address check if a string is a valid Base58 TRON address +func isValidBase58Address(address string) bool { + // Check if the address starts with 'T' and is 34 characters long + if len(address) != 34 || address[0] != 'T' { + return false + } + + // Check if the address contains only valid Base58 characters + validChars := regexp.MustCompile("^[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]+$") + return validChars.MatchString(address) +} + +// isValidHexAddressto check if a string is a valid hex TRON address +func isValidHexAddress(address string) bool { + // Check if the address starts with '41' and is 42 characters long + if len(address) != 42 || address[:2] != "41" { + return false + } + + // Check if the address contains only valid hexadecimal characters + validChars := regexp.MustCompile("^[0-9A-Fa-f]+$") + return validChars.MatchString(address[2:]) // Check the part after '41' +} diff --git a/core/services/keystore/keys/tronkey/export.go b/core/services/keystore/keys/tronkey/export.go new file mode 100644 index 00000000000..7688650c58d --- /dev/null +++ b/core/services/keystore/keys/tronkey/export.go @@ -0,0 +1,46 @@ +package tronkey + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys" + "github.com/smartcontractkit/chainlink/v2/core/utils" +) + +const keyTypeIdentifier = "Tron" + +// FromEncryptedJSON gets key from json and password +func FromEncryptedJSON(keyJSON []byte, password string) (Key, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ keys.EncryptedKeyExport, rawPrivKey []byte) (Key, error) { + return Raw(rawPrivKey).Key(), nil + }, + ) +} + +// ToEncryptedJSON returns encrypted JSON representing key +func (key Key) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) 
{ + return keys.ToEncryptedJSON( + keyTypeIdentifier, + key.Raw(), + key, + password, + scryptParams, + adulteratedPassword, + func(id string, key Key, cryptoJSON keystore.CryptoJSON) keys.EncryptedKeyExport { + return keys.EncryptedKeyExport{ + KeyType: id, + PublicKey: key.PublicKeyStr(), + Crypto: cryptoJSON, + } + }, + ) +} + +func adulteratedPassword(password string) string { + return "tronkey" + password +} diff --git a/core/services/keystore/keys/tronkey/export_test.go b/core/services/keystore/keys/tronkey/export_test.go new file mode 100644 index 00000000000..5e3e605ed34 --- /dev/null +++ b/core/services/keystore/keys/tronkey/export_test.go @@ -0,0 +1,19 @@ +package tronkey + +import ( + "testing" + + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys" +) + +func TestTronKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return New() +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/tronkey/key.go b/core/services/keystore/keys/tronkey/key.go new file mode 100644 index 00000000000..b9ab66ded1f --- /dev/null +++ b/core/services/keystore/keys/tronkey/key.go @@ -0,0 +1,116 @@ +package tronkey + +import ( + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "fmt" + "io" + "math/big" + + "github.com/ethereum/go-ethereum/crypto" +) + +// Tron uses the same elliptic curve cryptography as Ethereum (ECDSA with secp256k1) +var curve = crypto.S256() + +// Raw represents the Tron private key +type Raw []byte + +// Key generates a public-private key pair from the raw private key +func (raw Raw) Key() Key { + var privKey ecdsa.PrivateKey + d := big.NewInt(0).SetBytes(raw) + privKey.PublicKey.Curve = curve + privKey.D = d + privKey.PublicKey.X, privKey.PublicKey.Y = curve.ScalarBaseMult(d.Bytes()) + return Key{ + pubKey: &privKey.PublicKey, + privKey: &privKey, + } +} + +func (raw Raw) String() string { + return "" +} + +func (raw Raw) GoString() string { + return raw.String() +} + +var _ fmt.GoStringer = &Key{} + +type Key struct { + privKey *ecdsa.PrivateKey + pubKey *ecdsa.PublicKey +} + +func New() (Key, error) { + privKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + if err != nil { + return Key{}, err + } + return Key{ + privKey: privKeyECDSA, + pubKey: &privKeyECDSA.PublicKey, + }, nil +} + +// MustNewInsecure return Key if no error +// This insecure function is used for testing purposes only +func MustNewInsecure(reader io.Reader) Key { + key, err := newFrom(reader) + if err != nil { + panic(err) + } + return key +} + +func newFrom(reader io.Reader) (Key, error) { + privKeyECDSA, err := ecdsa.GenerateKey(curve, reader) + if err != nil { + return Key{}, err + } + return Key{ + privKey: privKeyECDSA, + pubKey: &privKeyECDSA.PublicKey, + }, nil +} + +func (key Key) ID() string { + return key.Base58Address() +} + +func (key Key) Raw() Raw { + return key.privKey.D.Bytes() +} + +func (key Key) ToEcdsaPrivKey() *ecdsa.PrivateKey { + return key.privKey +} + +func (key Key) String() string { + return fmt.Sprintf("TronKey{PrivateKey: , Address: %s}", key.Base58Address()) +} + +// GoString wraps String() +func (key Key) GoString() string { + return key.String() +} + +// Sign is used to sign a message +func (key Key) Sign(msg []byte) ([]byte, error) { + return crypto.Sign(msg, key.privKey) +} + +// PublicKeyStr returns the public key as a hexadecimal string 
+func (key Key) PublicKeyStr() string { + pubKeyBytes := crypto.FromECDSAPub(key.pubKey) + return hex.EncodeToString(pubKeyBytes) +} + +// Base58Address returns the Tron address in Base58 format with checksum +func (key Key) Base58Address() string { + address := PubkeyToAddress(*key.pubKey) + return address.String() +} diff --git a/core/services/keystore/keys/tronkey/key_test.go b/core/services/keystore/keys/tronkey/key_test.go new file mode 100644 index 00000000000..d3714228483 --- /dev/null +++ b/core/services/keystore/keys/tronkey/key_test.go @@ -0,0 +1,85 @@ +package tronkey + +import ( + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTronKeyRawPrivateKey(t *testing.T) { + t.Run("Create from raw bytes and check string representation", func(t *testing.T) { + // Generate a private key + privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + require.NoError(t, err, "Failed to generate ECDSA key") + + // Create TronKey from raw bytes + tronKey := Raw(privateKeyECDSA.D.Bytes()) + + // Check string representation + expectedStr := "" + assert.Equal(t, expectedStr, tronKey.String(), "Unexpected string representation") + assert.Equal(t, expectedStr, tronKey.GoString(), "String() and GoString() should return the same value") + }) +} + +func TestTronKeyNewKeyGeneration(t *testing.T) { + t.Run("Generate new key and verify its components", func(t *testing.T) { + // Generate a new key + key, err := New() + require.NoError(t, err, "Failed to generate new TronKey") + + // Verify key components + assert.NotNil(t, key.pubKey, "Public key should not be nil") + assert.NotNil(t, key.privKey, "Private key should not be nil") + }) + + t.Run("Multiple key generations produce unique keys", func(t *testing.T) { + key1, err := New() + require.NoError(t, err, "Failed to generate first key") + + key2, err := New() + require.NoError(t, err, "Failed to generate second key") + + assert.NotEqual(t, key1.privKey, key2.privKey, "Generated private keys should be unique") + assert.NotEqual(t, key1.pubKey, key2.pubKey, "Generated public keys should be unique") + }) +} + +func TestKeyAddress(t *testing.T) { + t.Run("Known private key and expected address", func(t *testing.T) { + // Tests cases from https://developers.tron.network/docs/account + privateKeyHex := "b406adb115b43e103c7b1dc8b5931f63279a5b6b2cf7328638814c43171a2908" + expectedAddress := "TDdcf5iMDkB61oGM27TNak55eVX214thBG" + + privateKeyBytes, err := hex.DecodeString(privateKeyHex) + require.NoError(t, err, "Failed to decode private key hex") + + privateKey, err := crypto.ToECDSA(privateKeyBytes) + require.NoError(t, err, "Failed to convert private key to ECDSA") + + key := Key{ + privKey: privateKey, + pubKey: &privateKey.PublicKey, + } + require.NotNil(t, key.privKey, "Private key is nil") + + address := key.Base58Address() + require.Equal(t, expectedAddress, address, "Generated address does not match expected address") + }) + + t.Run("Generate new key and check address format", func(t *testing.T) { + newKey, err := New() + if err != nil { + t.Fatalf("Failed to generate new key: %v", err) + } + + newAddress := newKey.Base58Address() + isValid := isValidBase58Address(newAddress) + require.True(t, isValid, "Generated address is not valid") + }) +} diff --git a/core/services/keystore/keystoretest.go b/core/services/keystore/keystoretest.go index e179b51bb54..a48415da183 100644 --- 
a/core/services/keystore/keystoretest.go +++ b/core/services/keystore/keystoretest.go @@ -74,6 +74,7 @@ func NewInMemory(ds sqlutil.DataSource, scryptParams utils.ScryptParams, lggr lo solana: newSolanaKeyStore(km), starknet: newStarkNetKeyStore(km), aptos: newAptosKeyStore(km), + tron: newTronKeyStore(km), vrf: newVRFKeyStore(km), } } diff --git a/core/services/keystore/master.go b/core/services/keystore/master.go index 47136f1f2ec..9d708104764 100644 --- a/core/services/keystore/master.go +++ b/core/services/keystore/master.go @@ -20,6 +20,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/solkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/starkkey" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/vrfkey" "github.com/smartcontractkit/chainlink/v2/core/utils" ) @@ -44,6 +45,7 @@ type Master interface { Cosmos() Cosmos StarkNet() StarkNet Aptos() Aptos + Tron() Tron VRF() VRF Unlock(ctx context.Context, password string) error IsEmpty(ctx context.Context) (bool, error) @@ -60,6 +62,7 @@ type master struct { solana *solana starknet *starknet aptos *aptos + tron *tron vrf *vrf } @@ -88,6 +91,7 @@ func newMaster(ds sqlutil.DataSource, scryptParams utils.ScryptParams, lggr logg solana: newSolanaKeyStore(km), starknet: newStarkNetKeyStore(km), aptos: newAptosKeyStore(km), + tron: newTronKeyStore(km), vrf: newVRFKeyStore(km), } } @@ -128,6 +132,10 @@ func (ks *master) Aptos() Aptos { return ks.aptos } +func (ks *master) Tron() Tron { + return ks.tron +} + func (ks *master) VRF() VRF { return ks.vrf } @@ -265,6 +273,8 @@ func GetFieldNameForKey(unknownKey Key) (string, error) { return "StarkNet", nil case aptoskey.Key: return "Aptos", nil + case tronkey.Key: + return "Tron", nil case vrfkey.KeyV2: return "VRF", nil } diff --git a/core/services/keystore/mocks/master.go b/core/services/keystore/mocks/master.go index c027a9c2105..c8b0840542b 100644 --- a/core/services/keystore/mocks/master.go +++ b/core/services/keystore/mocks/master.go @@ -501,6 +501,26 @@ func (_c *Master_StarkNet_Call) RunAndReturn(run func() keystore.StarkNet) *Mast return _c } +// Tron provides a mock function with given fields: +func (_m *Master) Tron() keystore.Tron { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Tron") + } + + var r0 keystore.Tron + if rf, ok := ret.Get(0).(func() keystore.Tron); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.Tron) + } + } + + return r0 +} + // Unlock provides a mock function with given fields: ctx, password func (_m *Master) Unlock(ctx context.Context, password string) error { ret := _m.Called(ctx, password) diff --git a/core/services/keystore/mocks/tron.go b/core/services/keystore/mocks/tron.go new file mode 100644 index 00000000000..ee7d548ea96 --- /dev/null +++ b/core/services/keystore/mocks/tron.go @@ -0,0 +1,268 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + tronkey "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" +) + +// Tron is an autogenerated mock type for the Tron type +type Tron struct { + mock.Mock +} + +// Add provides a mock function with given fields: ctx, key +func (_m *Tron) Add(ctx context.Context, key tronkey.Key) error { + ret := _m.Called(ctx, key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, tronkey.Key) error); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: ctx +func (_m *Tron) Create(ctx context.Context) (tronkey.Key, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 tronkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (tronkey.Key, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) tronkey.Key); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(tronkey.Key) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: ctx, id +func (_m *Tron) Delete(ctx context.Context, id string) (tronkey.Key, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 tronkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (tronkey.Key, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, string) tronkey.Key); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(tronkey.Key) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: ctx +func (_m *Tron) EnsureKey(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for EnsureKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *Tron) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *Tron) Get(id string) (tronkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 tronkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (tronkey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) tronkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(tronkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = 
rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *Tron) GetAll() ([]tronkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []tronkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() ([]tronkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []tronkey.Key); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]tronkey.Key) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: ctx, keyJSON, password +func (_m *Tron) Import(ctx context.Context, keyJSON []byte, password string) (tronkey.Key, error) { + ret := _m.Called(ctx, keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 tronkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, string) (tronkey.Key, error)); ok { + return rf(ctx, keyJSON, password) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, string) tronkey.Key); ok { + r0 = rf(ctx, keyJSON, password) + } else { + r0 = ret.Get(0).(tronkey.Key) + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, string) error); ok { + r1 = rf(ctx, keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Sign provides a mock function with given fields: ctx, id, msg +func (_m *Tron) Sign(ctx context.Context, id string, msg []byte) ([]byte, error) { + ret := _m.Called(ctx, id, msg) + + if len(ret) == 0 { + panic("no return value specified for Sign") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []byte) ([]byte, error)); ok { + return rf(ctx, id, msg) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []byte) []byte); ok { + r0 = rf(ctx, id, msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []byte) error); ok { + r1 = rf(ctx, id, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTron creates a new instance of Tron. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTron(t interface { + mock.TestingT + Cleanup(func()) +}) *Tron { + mock := &Tron{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/models.go b/core/services/keystore/models.go index d5eec6802b9..7d48d558d64 100644 --- a/core/services/keystore/models.go +++ b/core/services/keystore/models.go @@ -20,6 +20,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/solkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/starkkey" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/vrfkey" "github.com/smartcontractkit/chainlink/v2/core/utils" ) @@ -157,6 +158,7 @@ type keyRing struct { Solana map[string]solkey.Key StarkNet map[string]starkkey.Key Aptos map[string]aptoskey.Key + Tron map[string]tronkey.Key VRF map[string]vrfkey.KeyV2 LegacyKeys LegacyKeyStorage } @@ -172,6 +174,7 @@ func newKeyRing() *keyRing { Solana: make(map[string]solkey.Key), StarkNet: make(map[string]starkkey.Key), Aptos: make(map[string]aptoskey.Key), + Tron: make(map[string]tronkey.Key), VRF: make(map[string]vrfkey.KeyV2), } } @@ -233,6 +236,9 @@ func (kr *keyRing) raw() (rawKeys rawKeyRing) { for _, aptoskey := range kr.Aptos { rawKeys.Aptos = append(rawKeys.Aptos, aptoskey.Raw()) } + for _, tronkey := range kr.Tron { + rawKeys.Tron = append(rawKeys.Tron, tronkey.Raw()) + } for _, vrfKey := range kr.VRF { rawKeys.VRF = append(rawKeys.VRF, vrfKey.Raw()) } @@ -277,6 +283,10 @@ func (kr *keyRing) logPubKeys(lggr logger.Logger) { for _, aptosKey := range kr.Aptos { aptosIDs = append(aptosIDs, aptosKey.ID()) } + var tronIDs []string + for _, tronKey := range kr.Tron { + tronIDs = append(tronIDs, tronKey.ID()) + } var vrfIDs []string for _, VRFKey := range kr.VRF { vrfIDs = append(vrfIDs, VRFKey.ID()) @@ -308,6 +318,9 @@ func (kr *keyRing) logPubKeys(lggr logger.Logger) { if len(aptosIDs) > 0 { lggr.Infow(fmt.Sprintf("Unlocked %d Aptos keys", len(aptosIDs)), "keys", aptosIDs) } + if len(tronIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d Tron keys", len(tronIDs)), "keys", tronIDs) + } if len(vrfIDs) > 0 { lggr.Infow(fmt.Sprintf("Unlocked %d VRF keys", len(vrfIDs)), "keys", vrfIDs) } @@ -329,6 +342,7 @@ type rawKeyRing struct { Solana []solkey.Raw StarkNet []starkkey.Raw Aptos []aptoskey.Raw + Tron []tronkey.Raw VRF []vrfkey.Raw LegacyKeys LegacyKeyStorage `json:"-"` } @@ -372,6 +386,10 @@ func (rawKeys rawKeyRing) keys() (*keyRing, error) { aptosKey := rawAptosKey.Key() keyRing.Aptos[aptosKey.ID()] = aptosKey } + for _, rawTronKey := range rawKeys.Tron { + tronKey := rawTronKey.Key() + keyRing.Tron[tronKey.ID()] = tronKey + } for _, rawVRFKey := range rawKeys.VRF { vrfKey := rawVRFKey.Key() keyRing.VRF[vrfKey.ID()] = vrfKey diff --git a/core/services/keystore/models_test.go b/core/services/keystore/models_test.go index 8f7881809ea..ebd69a89af0 100644 --- a/core/services/keystore/models_test.go +++ b/core/services/keystore/models_test.go @@ -16,6 +16,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocrkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/solkey" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" 
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/vrfkey" "github.com/smartcontractkit/chainlink/v2/core/utils" ) @@ -40,6 +41,7 @@ func TestKeyRing_Encrypt_Decrypt(t *testing.T) { sol1, sol2 := solkey.MustNewInsecure(rand.Reader), solkey.MustNewInsecure(rand.Reader) vrf1, vrf2 := vrfkey.MustNewV2XXXTestingOnly(big.NewInt(1)), vrfkey.MustNewV2XXXTestingOnly(big.NewInt(2)) tk1, tk2 := cosmoskey.MustNewInsecure(rand.Reader), cosmoskey.MustNewInsecure(rand.Reader) + uk1, uk2 := tronkey.MustNewInsecure(rand.Reader), tronkey.MustNewInsecure(rand.Reader) originalKeyRingRaw := rawKeyRing{ CSA: []csakey.Raw{csa1.Raw(), csa2.Raw()}, Eth: []ethkey.Raw{eth1.Raw(), eth2.Raw()}, @@ -49,6 +51,7 @@ func TestKeyRing_Encrypt_Decrypt(t *testing.T) { Solana: []solkey.Raw{sol1.Raw(), sol2.Raw()}, VRF: []vrfkey.Raw{vrf1.Raw(), vrf2.Raw()}, Cosmos: []cosmoskey.Raw{tk1.Raw(), tk2.Raw()}, + Tron: []tronkey.Raw{uk1.Raw(), uk2.Raw()}, } originalKeyRing, kerr := originalKeyRingRaw.keys() require.NoError(t, kerr) @@ -62,6 +65,10 @@ func TestKeyRing_Encrypt_Decrypt(t *testing.T) { require.Equal(t, 2, len(decryptedKeyRing.Cosmos)) require.Equal(t, originalKeyRing.Cosmos[tk1.ID()].PublicKey(), decryptedKeyRing.Cosmos[tk1.ID()].PublicKey()) require.Equal(t, originalKeyRing.Cosmos[tk2.ID()].PublicKey(), decryptedKeyRing.Cosmos[tk2.ID()].PublicKey()) + // compare tron keys + require.Equal(t, 2, len(decryptedKeyRing.Tron)) + require.Equal(t, originalKeyRing.Tron[tk1.ID()].Base58Address(), decryptedKeyRing.Tron[tk1.ID()].Base58Address()) + require.Equal(t, originalKeyRing.Tron[tk2.ID()].Base58Address(), decryptedKeyRing.Tron[tk2.ID()].Base58Address()) // compare csa keys require.Equal(t, 2, len(decryptedKeyRing.CSA)) require.Equal(t, originalKeyRing.CSA[csa1.ID()].PublicKey, decryptedKeyRing.CSA[csa1.ID()].PublicKey) diff --git a/core/services/keystore/tron.go b/core/services/keystore/tron.go new file mode 100644 index 00000000000..7969cacf1a9 --- /dev/null +++ b/core/services/keystore/tron.go @@ -0,0 +1,189 @@ +package keystore + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-common/pkg/loop" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" +) + +//go:generate mockery --quiet --name Tron --output ./mocks/ --case=underscore --filename tron.go + +type Tron interface { + Get(id string) (tronkey.Key, error) + GetAll() ([]tronkey.Key, error) + Create(ctx context.Context) (tronkey.Key, error) + Add(ctx context.Context, key tronkey.Key) error + Delete(ctx context.Context, id string) (tronkey.Key, error) + Import(ctx context.Context, keyJSON []byte, password string) (tronkey.Key, error) + Export(id string, password string) ([]byte, error) + EnsureKey(ctx context.Context) error + Sign(ctx context.Context, id string, msg []byte) (signature []byte, err error) +} + +type tron struct { + *keyManager +} + +var _ Tron = &tron{} + +func newTronKeyStore(km *keyManager) *tron { + return &tron{ + km, + } +} + +func (ks *tron) Get(id string) (tronkey.Key, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return tronkey.Key{}, ErrLocked + } + return ks.getByID(id) +} + +func (ks *tron) GetAll() (keys []tronkey.Key, _ error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + for _, key := range ks.keyRing.Tron { + keys = append(keys, key) + } + return keys, nil +} + +func (ks *tron) Create(ctx context.Context) (tronkey.Key, error) { + ks.lock.Lock() + defer 
ks.lock.Unlock() + if ks.isLocked() { + return tronkey.Key{}, ErrLocked + } + key, err := tronkey.New() + if err != nil { + return tronkey.Key{}, err + } + return key, ks.safeAddKey(ctx, key) +} + +func (ks *tron) Add(ctx context.Context, key tronkey.Key) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if _, found := ks.keyRing.Tron[key.ID()]; found { + return fmt.Errorf("key with ID %s already exists", key.ID()) + } + return ks.safeAddKey(ctx, key) +} + +func (ks *tron) Delete(ctx context.Context, id string) (tronkey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return tronkey.Key{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return tronkey.Key{}, err + } + err = ks.safeRemoveKey(ctx, key) + return key, err +} + +func (ks *tron) Import(ctx context.Context, keyJSON []byte, password string) (tronkey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return tronkey.Key{}, ErrLocked + } + key, err := tronkey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return tronkey.Key{}, errors.Wrap(err, "TronKeyStore#ImportKey failed to decrypt key") + } + if _, found := ks.keyRing.Tron[key.ID()]; found { + return tronkey.Key{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(ctx, key) +} + +func (ks *tron) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, ks.scryptParams) +} + +func (ks *tron) EnsureKey(ctx context.Context) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + + if len(ks.keyRing.Tron) > 0 { + return nil + } + + key, err := tronkey.New() + if err != nil { + return err + } + + ks.logger.Infof("Created Tron key with ID %s", key.ID()) + + return ks.safeAddKey(ctx, key) +} + +func (ks *tron) getByID(id string) (tronkey.Key, error) { + key, found := ks.keyRing.Tron[id] + if !found { + return tronkey.Key{}, KeyNotFoundError{ID: id, KeyType: "Tron"} + } + return key, nil +} + +func (ks *tron) Sign(_ context.Context, id string, msg []byte) (signature []byte, err error) { + k, err := ks.Get(id) + if err != nil { + return nil, err + } + // loopp spec requires passing nil hash to check existence of id + if msg == nil { + return nil, nil + } + return k.Sign(msg) +} + +// TronLoopKeystore implements the [github.com/smartcontractkit/chainlink-common/pkg/loop.Keystore] interface and +// handles signing for Tron messages. 
+type TronLoopKeystore struct { + Tron +} + +var _ loop.Keystore = &TronLoopKeystore{} + +func (lk *TronLoopKeystore) Accounts(ctx context.Context) ([]string, error) { + keys, err := lk.GetAll() + if err != nil { + return nil, err + } + + accounts := []string{} + for _, k := range keys { + accounts = append(accounts, k.PublicKeyStr()) + } + + return accounts, nil +} diff --git a/core/services/keystore/tron_test.go b/core/services/keystore/tron_test.go new file mode 100644 index 00000000000..f98da43001d --- /dev/null +++ b/core/services/keystore/tron_test.go @@ -0,0 +1,241 @@ +package keystore_test + +import ( + "context" + "crypto/sha256" + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" +) + +func Test_TronKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + keyStore := keystore.ExposedNewMaster(t, db) + require.NoError(t, keyStore.Unlock(testutils.Context(t), cltest.Password)) + ks := keyStore.Tron() + reset := func() { + ctx := context.Background() // Executed on cleanup + require.NoError(t, utils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(ctx, cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + ctx := testutils.Context(t) + key, err := ks.Create(ctx) + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + ctx := testutils.Context(t) + key, err := ks.Create(ctx) + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Export("non-existent", cltest.Password) + assert.Error(t, err) + _, err = ks.Delete(ctx, key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(ctx, exportJSON, cltest.Password) + require.NoError(t, err) + _, err = ks.Import(ctx, exportJSON, cltest.Password) + assert.Error(t, err) + _, err = ks.Import(ctx, []byte(""), cltest.Password) + assert.Error(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + ctx := testutils.Context(t) + newKey, err := tronkey.New() + require.NoError(t, err) + err = ks.Add(ctx, newKey) + require.NoError(t, err) + err = ks.Add(ctx, newKey) + assert.Error(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + _, err = ks.Delete(ctx, newKey.ID()) + require.NoError(t, err) + _, err = 
ks.Delete(ctx, newKey.ID()) + assert.Error(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + }) + + t.Run("ensures key", func(t *testing.T) { + defer reset() + ctx := testutils.Context(t) + err := ks.EnsureKey(ctx) + assert.NoError(t, err) + + err = ks.EnsureKey(ctx) + assert.NoError(t, err) + + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) + + t.Run("sign tx", func(t *testing.T) { + defer reset() + ctx := testutils.Context(t) + newKey, err := tronkey.New() + require.NoError(t, err) + require.NoError(t, ks.Add(ctx, newKey)) + + // sign unknown ID + _, err = ks.Sign(testutils.Context(t), "not-real", nil) + assert.Error(t, err) + + // sign known key + + // Create a mock transaction + mockTx := createMockTronTransaction(newKey.PublicKeyStr(), "TJRabPrwbZy45sbavfcjinPJC18kjpRTv8", 1000000) + serializedTx, err := serializeMockTransaction(mockTx) + require.NoError(t, err) + + hash := sha256.Sum256(serializedTx) + txHash := hash[:] + sig, err := ks.Sign(testutils.Context(t), newKey.ID(), txHash) + require.NoError(t, err) + + directSig, err := newKey.Sign(txHash) + require.NoError(t, err) + + // signatures should match using keystore sign or key sign + assert.Equal(t, directSig, sig) + }) +} + +// MockTronTransaction represents a mock TRON transaction +// This is based on https://developers.tron.network/docs/tron-protocol-transaction +type MockTronTransaction struct { + RawData struct { + Contract []struct { + Parameter struct { + Value struct { + Amount int64 `json:"amount"` + OwnerAddress string `json:"owner_address"` + ToAddress string `json:"to_address"` + } `json:"value"` + TypeURL string `json:"type_url"` + } `json:"parameter"` + Type string `json:"type"` + } `json:"contract"` + RefBlockBytes string `json:"ref_block_bytes"` + RefBlockHash string `json:"ref_block_hash"` + Expiration int64 `json:"expiration"` + Timestamp int64 `json:"timestamp"` + FeeLimit int64 `json:"fee_limit"` + } `json:"raw_data"` + Signature []string `json:"signature"` + TxID string `json:"txID"` +} + +// CreateMockTronTransaction generates a mock TRON transaction for testing +func createMockTronTransaction(ownerAddress, toAddress string, amount int64) MockTronTransaction { + return MockTronTransaction{ + RawData: struct { + Contract []struct { + Parameter struct { + Value struct { + Amount int64 `json:"amount"` + OwnerAddress string `json:"owner_address"` + ToAddress string `json:"to_address"` + } `json:"value"` + TypeURL string `json:"type_url"` + } `json:"parameter"` + Type string `json:"type"` + } `json:"contract"` + RefBlockBytes string `json:"ref_block_bytes"` + RefBlockHash string `json:"ref_block_hash"` + Expiration int64 `json:"expiration"` + Timestamp int64 `json:"timestamp"` + FeeLimit int64 `json:"fee_limit"` + }{ + Contract: []struct { + Parameter struct { + Value struct { + Amount int64 `json:"amount"` + OwnerAddress string `json:"owner_address"` + ToAddress string `json:"to_address"` + } `json:"value"` + TypeURL string `json:"type_url"` + } `json:"parameter"` + Type string `json:"type"` + }{ + { + Parameter: struct { + Value struct { + Amount int64 `json:"amount"` + OwnerAddress string `json:"owner_address"` + ToAddress string `json:"to_address"` + } `json:"value"` + TypeURL string `json:"type_url"` + }{ + Value: struct { + Amount int64 `json:"amount"` + OwnerAddress string `json:"owner_address"` + ToAddress string `json:"to_address"` + }{ + Amount: amount, + 
OwnerAddress: ownerAddress, + ToAddress: toAddress, + }, + TypeURL: "type.googleapis.com/protocol.TransferContract", + }, + Type: "TransferContract", + }, + }, + RefBlockBytes: "1234", + RefBlockHash: "abcdef0123456789", + Expiration: time.Now().Unix() + 60*60, + Timestamp: time.Now().Unix(), + FeeLimit: 10000000, + }, + } +} + +func serializeMockTransaction(tx MockTronTransaction) ([]byte, error) { + return json.Marshal(tx) +} diff --git a/core/services/relay/relay.go b/core/services/relay/relay.go index 913923a9b2f..3211ee984db 100644 --- a/core/services/relay/relay.go +++ b/core/services/relay/relay.go @@ -14,6 +14,7 @@ const ( NetworkSolana = "solana" NetworkStarkNet = "starknet" NetworkAptos = "aptos" + NetworkTron = "tron" NetworkDummy = "dummy" ) @@ -24,6 +25,7 @@ var SupportedNetworks = map[string]struct{}{ NetworkSolana: {}, NetworkStarkNet: {}, NetworkAptos: {}, + NetworkTron: {}, NetworkDummy: {}, } diff --git a/core/web/auth/auth_test.go b/core/web/auth/auth_test.go index 25479409545..df869a8b1a3 100644 --- a/core/web/auth/auth_test.go +++ b/core/web/auth/auth_test.go @@ -276,14 +276,17 @@ var routesRolesMap = [...]routeRules{ {"GET", "/v2/keys/cosmos", true, true, true}, {"GET", "/v2/keys/starknet", true, true, true}, {"GET", "/v2/keys/aptos", true, true, true}, + {"GET", "/v2/keys/tron", true, true, true}, {"POST", "/v2/keys/solana", false, false, true}, {"POST", "/v2/keys/cosmos", false, false, true}, {"POST", "/v2/keys/starknet", false, false, true}, {"POST", "/v2/keys/aptos", false, false, true}, + {"POST", "/v2/keys/tron", false, false, true}, {"DELETE", "/v2/keys/solana/MOCK", false, false, false}, {"DELETE", "/v2/keys/cosmos/MOCK", false, false, false}, {"DELETE", "/v2/keys/starknet/MOCK", false, false, false}, {"DELETE", "/v2/keys/aptos/MOCK", false, false, false}, + {"DELETE", "/v2/keys/tron/MOCK", false, false, false}, {"POST", "/v2/keys/solana/import", false, false, false}, {"POST", "/v2/keys/cosmos/import", false, false, false}, {"POST", "/v2/keys/starknet/import", false, false, false}, @@ -292,6 +295,7 @@ var routesRolesMap = [...]routeRules{ {"POST", "/v2/keys/cosmos/export/MOCK", false, false, false}, {"POST", "/v2/keys/starknet/export/MOCK", false, false, false}, {"POST", "/v2/keys/aptos/export/MOCK", false, false, false}, + {"POST", "/v2/keys/tron/export/MOCK", false, false, false}, {"GET", "/v2/keys/vrf", true, true, true}, {"POST", "/v2/keys/vrf", false, false, true}, {"DELETE", "/v2/keys/vrf/MOCK", false, false, false}, diff --git a/core/web/presenters/node_test.go b/core/web/presenters/node_test.go index 34210a52166..02acf45f9fb 100644 --- a/core/web/presenters/node_test.go +++ b/core/web/presenters/node_test.go @@ -16,7 +16,7 @@ func TestNodeResource(t *testing.T) { var r interface{} state := "test" cfg := "cfg" - testCases := []string{"solana", "cosmos", "starknet"} + testCases := []string{"solana", "cosmos", "starknet", "tron"} for _, tc := range testCases { chainID := fmt.Sprintf("%s chain ID", tc) nodeName := fmt.Sprintf("%s_node", tc) @@ -62,6 +62,16 @@ func TestNodeResource(t *testing.T) { }) r = starknetNodeResource nodeResource = starknetNodeResource.NodeResource + case "tron": + tronNodeResource := NewTronNodeResource( + types.NodeStatus{ + ChainID: chainID, + Name: nodeName, + Config: cfg, + State: state, + }) + r = tronNodeResource + nodeResource = tronNodeResource.NodeResource default: t.Fail() } diff --git a/core/web/presenters/tron_chain.go b/core/web/presenters/tron_chain.go new file mode 100644 index 00000000000..7ab6109bd39 --- /dev/null 
+++ b/core/web/presenters/tron_chain.go @@ -0,0 +1,45 @@ +package presenters + +import ( + "github.com/smartcontractkit/chainlink-common/pkg/types" +) + +// TronChainResource is an Tron chain JSONAPI resource. +type TronChainResource struct { + ChainResource +} + +// GetName implements the api2go EntityNamer interface +func (r TronChainResource) GetName() string { + return "tron_chain" +} + +// NewTronChainResource returns a new TronChainResource for chain. +func NewTronChainResource(chain types.ChainStatus) TronChainResource { + return TronChainResource{ChainResource{ + JAID: NewJAID(chain.ID), + Config: chain.Config, + Enabled: chain.Enabled, + }} +} + +// TronNodeResource is a Tron node JSONAPI resource. +type TronNodeResource struct { + NodeResource +} + +// GetName implements the api2go EntityNamer interface +func (r TronNodeResource) GetName() string { + return "tron_node" +} + +// NewTronNodeResource returns a new TronNodeResource for node. +func NewTronNodeResource(node types.NodeStatus) TronNodeResource { + return TronNodeResource{NodeResource{ + JAID: NewPrefixedJAID(node.Name, node.ChainID), + ChainID: node.ChainID, + Name: node.Name, + State: node.State, + Config: node.Config, + }} +} diff --git a/core/web/presenters/tron_key.go b/core/web/presenters/tron_key.go new file mode 100644 index 00000000000..abe74ed7f41 --- /dev/null +++ b/core/web/presenters/tron_key.go @@ -0,0 +1,34 @@ +package presenters + +import ( + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" +) + +// TronKeyResource represents a Tron key JSONAPI resource. +type TronKeyResource struct { + JAID + PubKey string `json:"publicKey"` +} + +// GetName implements the api2go EntityNamer interface +func (TronKeyResource) GetName() string { + return "encryptedTronKeys" +} + +func NewTronKeyResource(key tronkey.Key) *TronKeyResource { + r := &TronKeyResource{ + JAID: JAID{ID: key.ID()}, + PubKey: key.PublicKeyStr(), + } + + return r +} + +func NewTronKeyResources(keys []tronkey.Key) []TronKeyResource { + rs := []TronKeyResource{} + for _, key := range keys { + rs = append(rs, *NewTronKeyResource(key)) + } + + return rs +} diff --git a/core/web/resolver/ocr2_keys.go b/core/web/resolver/ocr2_keys.go index d04ebbd0e2f..0ca2735518d 100644 --- a/core/web/resolver/ocr2_keys.go +++ b/core/web/resolver/ocr2_keys.go @@ -27,6 +27,8 @@ const ( OCR2ChainTypeStarkNet = "STARKNET" // OCRChainTypeAptos defines OCR Aptos Chain Type OCRChainTypeAptos = "APTOS" + // OCR2ChainTypeTron defines OCR2 Tron Tron Type + OCR2ChainTypeTron = "TRON" ) // ToOCR2ChainType turns a valid string into a OCR2ChainType @@ -42,6 +44,8 @@ func ToOCR2ChainType(s string) (OCR2ChainType, error) { return OCR2ChainTypeStarkNet, nil case string(chaintype.Aptos): return OCRChainTypeAptos, nil + case string(chaintype.Tron): + return OCR2ChainTypeTron, nil default: return "", errors.New("unknown ocr2 chain type") } @@ -60,6 +64,8 @@ func FromOCR2ChainType(ct OCR2ChainType) string { return string(chaintype.StarkNet) case OCRChainTypeAptos: return string(chaintype.Aptos) + case OCR2ChainTypeTron: + return string(chaintype.Tron) default: return strings.ToLower(string(ct)) } diff --git a/core/web/resolver/ocr2_keys_test.go b/core/web/resolver/ocr2_keys_test.go index 033d22799b1..e131aa0b5f5 100644 --- a/core/web/resolver/ocr2_keys_test.go +++ b/core/web/resolver/ocr2_keys_test.go @@ -42,6 +42,7 @@ func TestResolver_GetOCR2KeyBundles(t *testing.T) { ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "solana"), 
ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "starknet"), ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "aptos"), + ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "tron"), } expectedBundles := []map[string]interface{}{} for _, k := range fakeKeys { diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index 6f8a6d5e777..d9779425276 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -546,3 +546,11 @@ ConfirmationPoll = '42s' Name = 'primary' URL = 'http://stark.node' APIKey = 'key' + +[[Tron]] +ChainID = 'foobar' +Enabled = true + +[[Tron.Nodes]] +Name = 'primary' +URL = 'https://api.trongrid.io' diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index 612f2eaf390..f4daacb038b 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -739,3 +739,11 @@ ConfirmationPoll = '1h0m0s' Name = 'primary' URL = 'http://stark.node' APIKey = 'key' + +[[Tron]] +ChainID = 'foobar' +Enabled = true + +[[Tron.Nodes]] +Name = 'primary' +URL = 'https://api.trongrid.io' \ No newline at end of file diff --git a/core/web/resolver/testdata/config-multi-chain.toml b/core/web/resolver/testdata/config-multi-chain.toml index a305332b5ff..728fd41df8e 100644 --- a/core/web/resolver/testdata/config-multi-chain.toml +++ b/core/web/resolver/testdata/config-multi-chain.toml @@ -155,3 +155,11 @@ ConfirmationPoll = '1h0m0s' Name = 'primary' URL = 'http://stark.node' APIKey = 'key' + +[[Tron]] +ChainID = 'foobar' +Enabled = true + +[[Tron.Nodes]] +Name = 'primary' +URL = 'https://api.trongrid.io' diff --git a/core/web/router.go b/core/web/router.go index 6e96b47981b..0f81ed807dc 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -351,6 +351,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { {"cosmos", NewCosmosKeysController(app)}, {"starknet", NewStarkNetKeysController(app)}, {"aptos", NewAptosKeysController(app)}, + {"tron", NewTronKeysController(app)}, } { authv2.GET("/keys/"+keys.path, keys.kc.Index) authv2.POST("/keys/"+keys.path, auth.RequiresEditRole(keys.kc.Create)) @@ -398,6 +399,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { {"solana", NewSolanaChainsController(app)}, {"starknet", NewStarkNetChainsController(app)}, {"cosmos", NewCosmosChainsController(app)}, + {"tron", NewTronChainsController(app)}, } { chains.GET(chain.path, paginatedRequest(chain.cc.Index)) chains.GET(chain.path+"/:ID", chain.cc.Show) @@ -412,6 +414,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { {"solana", NewSolanaNodesController(app)}, {"starknet", NewStarkNetNodesController(app)}, {"cosmos", NewCosmosNodesController(app)}, + {"tron", NewTronNodesController(app)}, } { if chain.path == "evm" { // TODO still EVM only . 
Archive ticket: story/26276/multi-chain-type-ui-node-chain-configuration diff --git a/core/web/schema/type/ocr2_keys.graphql b/core/web/schema/type/ocr2_keys.graphql index c25148c686a..89125d86b54 100644 --- a/core/web/schema/type/ocr2_keys.graphql +++ b/core/web/schema/type/ocr2_keys.graphql @@ -4,6 +4,7 @@ enum OCR2ChainType { SOLANA STARKNET APTOS + TRON } type OCR2KeyBundle { diff --git a/core/web/tron_chains_controller.go b/core/web/tron_chains_controller.go new file mode 100644 index 00000000000..4993f8f7a51 --- /dev/null +++ b/core/web/tron_chains_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + "github.com/smartcontractkit/chainlink/v2/core/services/relay" + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" +) + +func NewTronChainsController(app chainlink.Application) ChainsController { + return newChainsController( + relay.NetworkTron, + app.GetRelayers().List(chainlink.FilterRelayersByType(relay.NetworkTron)), + ErrTronNotEnabled, + presenters.NewTronChainResource, + app.GetLogger(), + app.GetAuditLogger()) +} diff --git a/core/web/tron_keys_controller.go b/core/web/tron_keys_controller.go new file mode 100644 index 00000000000..e9ac2e0252e --- /dev/null +++ b/core/web/tron_keys_controller.go @@ -0,0 +1,12 @@ +package web + +import ( + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/tronkey" + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" +) + +func NewTronKeysController(app chainlink.Application) KeysController { + return NewKeysController[tronkey.Key, presenters.TronKeyResource](app.GetKeyStore().Tron(), app.GetLogger(), app.GetAuditLogger(), + "tronKey", presenters.NewTronKeyResource, presenters.NewTronKeyResources) +} diff --git a/core/web/tron_keys_controller_test.go b/core/web/tron_keys_controller_test.go new file mode 100644 index 00000000000..b4839a88207 --- /dev/null +++ b/core/web/tron_keys_controller_test.go @@ -0,0 +1,107 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/smartcontractkit/chainlink-common/pkg/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore" + "github.com/smartcontractkit/chainlink/v2/core/web" + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTronKeysController_Index_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupTronKeysControllerTests(t) + keys, _ := keyStore.Tron().GetAll() + + response, cleanup := client.Get("/v2/keys/tron") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := []presenters.TronKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + assert.NoError(t, err) + + require.Len(t, resources, len(keys)) + + assert.Equal(t, keys[0].ID(), resources[0].ID) + assert.Equal(t, keys[0].PublicKeyStr(), resources[0].PubKey) +} + +func TestTronKeysController_Create_HappyPath(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + keyStore := app.GetKeyStore() + + response, cleanup := client.Post("/v2/keys/tron", nil) + t.Cleanup(cleanup) 
+ cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ := keyStore.Tron().GetAll() + require.Len(t, keys, 1) + + resource := presenters.TronKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + assert.Equal(t, keys[0].ID(), resource.ID) + assert.Equal(t, keys[0].PublicKeyStr(), resource.PubKey) + + _, err = keyStore.Tron().Get(resource.ID) + require.NoError(t, err) +} + +func TestTronKeysController_Delete_NonExistentTronKeyID(t *testing.T) { + t.Parallel() + + client, _ := setupTronKeysControllerTests(t) + + nonExistentTronKeyID := "foobar" + response, cleanup := client.Delete("/v2/keys/tron/" + nonExistentTronKeyID) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestTronKeysController_Delete_HappyPath(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + client, keyStore := setupTronKeysControllerTests(t) + + keys, _ := keyStore.Tron().GetAll() + initialLength := len(keys) + key, _ := keyStore.Tron().Create(ctx) + + response, cleanup := client.Delete(fmt.Sprintf("/v2/keys/tron/%s", key.ID())) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Error(t, utils.JustError(keyStore.Tron().Get(key.ID()))) + + keys, _ = keyStore.Tron().GetAll() + assert.Equal(t, initialLength, len(keys)) +} + +func setupTronKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { + t.Helper() + ctx := testutils.Context(t) + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(ctx)) + require.NoError(t, app.KeyStore.OCR().Add(ctx, cltest.DefaultOCRKey)) + require.NoError(t, app.KeyStore.Tron().Add(ctx, cltest.DefaultTronKey)) + + client := app.NewHTTPClient(nil) + + return client, app.GetKeyStore() +} diff --git a/core/web/tron_nodes_controller.go b/core/web/tron_nodes_controller.go new file mode 100644 index 00000000000..ba65cd5ffc3 --- /dev/null +++ b/core/web/tron_nodes_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + "github.com/smartcontractkit/chainlink/v2/core/services/relay" + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" +) + +// ErrTronNotEnabled is returned when Starknet.Enabled is not true. +var ErrTronNotEnabled = errChainDisabled{name: "Tron", tomlKey: "Tron.Enabled"} + +func NewTronNodesController(app chainlink.Application) NodesController { + scopedNodeStatuser := NewNetworkScopedNodeStatuser(app.GetRelayers(), relay.NetworkTron) + + return newNodesController[presenters.TronNodeResource]( + scopedNodeStatuser, ErrTronNotEnabled, presenters.NewTronNodeResource, app.GetAuditLogger()) +} diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 67818325b37..f07e4b6d6ac 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -1,4 +1,4 @@ -[//]: # (Documentation generated from docs/*.toml - DO NOT EDIT.) +[//]: # "Documentation generated from docs/*.toml - DO NOT EDIT." This document describes the TOML format for configuration. @@ -19,33 +19,41 @@ HTTPURL = 'https://foo.bar' # Required ``` ## Global + ```toml InsecureFastScrypt = false # Default RootDir = '~/.chainlink' # Default ShutdownGracePeriod = '5s' # Default ``` - ### InsecureFastScrypt + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml InsecureFastScrypt = false # Default ``` + InsecureFastScrypt causes all key stores to encrypt using "fast" scrypt params instead. 
This is insecure and only useful for local testing. DO NOT ENABLE THIS IN PRODUCTION. ### RootDir + ```toml RootDir = '~/.chainlink' # Default ``` + RootDir is the Chainlink node's root directory. This is the default directory for logging, database backups, cookies, and other misc Chainlink node files. Chainlink nodes will always ensure this directory has 700 permissions because it might contain sensitive data. ### ShutdownGracePeriod + ```toml ShutdownGracePeriod = '5s' # Default ``` + ShutdownGracePeriod is the maximum time allowed to shut down gracefully. If exceeded, the node will terminate immediately to avoid being SIGKILLed. ## Feature + ```toml [Feature] FeedsManager = true # Default @@ -55,23 +63,28 @@ CCIP = true # Default MultiFeedsManagers = false # Default ``` - ### FeedsManager + ```toml FeedsManager = true # Default ``` + FeedsManager enables the feeds manager service. ### LogPoller + ```toml LogPoller = false # Default ``` + LogPoller enables the log poller, an experimental approach to processing logs, required if also using Evm.UseForwarders or OCR2. ### UICSAKeys + ```toml UICSAKeys = false # Default ``` + UICSAKeys enables CSA Keys in the UI. ### CCIP @@ -87,6 +100,7 @@ MultiFeedsManagers = false # Default MultiFeedsManagers enables support for multiple feeds manager connections. ## Database + ```toml [Database] DefaultIdleInTxSessionTimeout = '1h' # Default @@ -98,54 +112,68 @@ MaxOpenConns = 100 # Default MigrateOnStartup = true # Default ``` - ### DefaultIdleInTxSessionTimeout + ```toml DefaultIdleInTxSessionTimeout = '1h' # Default ``` + DefaultIdleInTxSessionTimeout is the maximum time allowed for a transaction to be open and idle before timing out. See Postgres `idle_in_transaction_session_timeout` for more details. ### DefaultLockTimeout + ```toml DefaultLockTimeout = '15s' # Default ``` + DefaultLockTimeout is the maximum time allowed to wait for database lock of any kind before timing out. See Postgres `lock_timeout` for more details. ### DefaultQueryTimeout + ```toml DefaultQueryTimeout = '10s' # Default ``` + DefaultQueryTimeout is the maximum time allowed for standard queries before timing out. ### LogQueries + ```toml LogQueries = false # Default ``` + LogQueries tells the Chainlink node to log database queries made using the default logger. SQL statements will be logged at `debug` level. Not all statements can be logged. The best way to get a true log of all SQL statements is to enable SQL statement logging on Postgres. ### MaxIdleConns + ```toml MaxIdleConns = 10 # Default ``` + MaxIdleConns configures the maximum number of idle database connections that the Chainlink node will keep open. Think of this as the baseline number of database connections per Chainlink node instance. Increasing this number can help to improve performance under database-heavy workloads. Postgres has connection limits, so you must use caution when increasing this value. If you are running several instances of a Chainlink node or another application on a single database server, you might run out of Postgres connection slots if you raise this value too high. ### MaxOpenConns + ```toml MaxOpenConns = 100 # Default ``` + MaxOpenConns configures the maximum number of database connections that a Chainlink node will have open at any one time. Think of this as the maximum burst upper bound limit of database connections per Chainlink node instance. Increasing this number can help to improve performance under database-heavy workloads. 
Postgres has connection limits, so you must use caution when increasing this value. If you are running several instances of a Chainlink node or another application on a single database server, you might run out of Postgres connection slots if you raise this value too high. ### MigrateOnStartup + ```toml MigrateOnStartup = true # Default ``` + MigrateOnStartup controls whether a Chainlink node will attempt to automatically migrate the database on boot. If you want more control over your database migration process, set this variable to `false` and manually migrate the database using the CLI `migrate` command instead. ## Database.Backup + ```toml [Database.Backup] Mode = 'none' # Default @@ -153,14 +181,17 @@ Dir = 'test/backup/dir' # Example OnVersionUpgrade = true # Default Frequency = '1h' # Default ``` + As a best practice, take regular database backups in case of accidental data loss. This best practice is especially important when you upgrade your Chainlink node to a new version. Chainlink nodes support automated database backups to make this process easier. NOTE: Dumps can cause high load and massive database latencies, which will negatively impact the normal functioning of the Chainlink node. For this reason, it is recommended to set a `URL` and point it to a read replica if you enable automatic backups. ### Mode + ```toml Mode = 'none' # Default ``` + Mode sets the type of automatic database backup, which can be one of _none_, `lite`, or `full`. If enabled, the Chainlink node will always dump a backup on every boot before running migrations. Additionally, it will automatically take database backups that overwrite the backup file for the given version at regular intervals if `Frequency` is set to a non-zero interval. _none_ - Disables backups. @@ -170,61 +201,79 @@ _none_ - Disables backups. It will write to a file like `'Dir'/backup/cl_backup_.dump`. There is one backup dump file per version of the Chainlink node. If you upgrade the node, it will keep the backup taken right before the upgrade migration so you can restore to an older version if necessary. ### Dir + ```toml Dir = 'test/backup/dir' # Example ``` + Dir sets the directory to use for saving the backup file. Use this if you want to save the backup file in a directory other than the default ROOT directory. ### OnVersionUpgrade + ```toml OnVersionUpgrade = true # Default ``` + OnVersionUpgrade enables automatic backups of the database before running migrations, when you are upgrading to a new version. ### Frequency + ```toml Frequency = '1h' # Default ``` + Frequency sets the interval for database dumps, if set to a positive duration and `Mode` is not _none_. Set to `0` to disable periodic backups. ## Database.Listener + :warning: **_ADVANCED_**: _Do not change these settings unless you know what you are doing._ + ```toml [Database.Listener] MaxReconnectDuration = '10m' # Default MinReconnectInterval = '1m' # Default FallbackPollInterval = '30s' # Default ``` + These settings control the postgres event listener. ### MaxReconnectDuration + ```toml MaxReconnectDuration = '10m' # Default ``` + MaxReconnectDuration is the maximum duration to wait between reconnect attempts. ### MinReconnectInterval + ```toml MinReconnectInterval = '1m' # Default ``` -MinReconnectInterval controls the duration to wait before trying to re-establish the database connection after connection loss. After each consecutive failure this interval is doubled, until MaxReconnectInterval is reached. 
Successfully completing the connection establishment procedure resets the interval back to MinReconnectInterval. + +MinReconnectInterval controls the duration to wait before trying to re-establish the database connection after connection loss. After each consecutive failure this interval is doubled, until MaxReconnectInterval is reached. Successfully completing the connection establishment procedure resets the interval back to MinReconnectInterval. ### FallbackPollInterval + ```toml FallbackPollInterval = '30s' # Default ``` + FallbackPollInterval controls how often clients should manually poll as a fallback in case the postgres event was missed/dropped. ## Database.Lock + :warning: **_ADVANCED_**: _Do not change these settings unless you know what you are doing._ + ```toml [Database.Lock] Enabled = true # Default LeaseDuration = '10s' # Default LeaseRefreshInterval = '1s' # Default ``` + Ideally, you should use a container orchestration system like [Kubernetes](https://kubernetes.io/) to ensure that only one Chainlink node instance can ever use a specific Postgres database. However, some node operators do not have the technical capacity to do this. Common use cases run multiple Chainlink node instances in failover mode as recommended by our official documentation. The first instance takes a lock on the database and subsequent instances will wait trying to take this lock in case the first instance fails. - If your nodes or applications hold locks open for several hours or days, Postgres is unable to complete internal cleanup tasks. The Postgres maintainers explicitly discourage holding locks open for long periods of time. @@ -236,24 +285,31 @@ Because of the complications with advisory locks, Chainlink nodes with v2.0 and - If node A comes back, it attempts to take out a lease, realizes that the database has been leased to another process, and exits the entire application immediately. ### Enabled + ```toml Enabled = true # Default ``` + Enabled enables the database lock. ### LeaseDuration + ```toml LeaseDuration = '10s' # Default ``` + LeaseDuration is how long the lease lock will last before expiring. ### LeaseRefreshInterval + ```toml LeaseRefreshInterval = '1s' # Default ``` + LeaseRefreshInterval determines how often to refresh the lease lock. Also controls how often a standby node will check to see if it can grab the lease. ## TelemetryIngress + ```toml [TelemetryIngress] UniConn = false # Default @@ -265,50 +321,64 @@ SendTimeout = '10s' # Default UseBatchSend = true # Default ``` - ### UniConn + ```toml UniConn = false # Default ``` + UniConn toggles which ws connection style is used. ### Logging + ```toml Logging = false # Default ``` + Logging toggles verbose logging of the raw telemetry messages being sent. ### BufferSize + ```toml BufferSize = 100 # Default ``` + BufferSize is the number of telemetry messages to buffer before dropping new ones. ### MaxBatchSize + ```toml MaxBatchSize = 50 # Default ``` + MaxBatchSize is the maximum number of messages to batch into one telemetry request. ### SendInterval + ```toml SendInterval = '500ms' # Default ``` + SendInterval determines how often batched telemetry is sent to the ingress server. ### SendTimeout + ```toml SendTimeout = '10s' # Default ``` + SendTimeout is the max duration to wait for the request to complete when sending batch telemetry. ### UseBatchSend + ```toml UseBatchSend = true # Default ``` + UseBatchSend toggles sending telemetry to the ingress server using the batch client. 
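The lease behaviour described under Database.Lock above amounts to a small refresh loop: claim the lease for `LeaseDuration`, renew it every `LeaseRefreshInterval`, and exit as soon as another client owns it. The Go sketch below is purely illustrative of that loop; the `lease_lock` table, its columns, and the SQL are assumptions for the example, not the node's actual schema.

```go
package leaselock

import (
	"context"
	"database/sql"
	"fmt"
	"time"
)

const (
	leaseDuration        = 10 * time.Second // Database.Lock.LeaseDuration
	leaseRefreshInterval = 1 * time.Second  // Database.Lock.LeaseRefreshInterval
)

// tryAcquire claims (or renews) the single lease row when it is free, expired,
// or already owned by clientID. It assumes the table holds exactly one row.
func tryAcquire(ctx context.Context, db *sql.DB, clientID string) (bool, error) {
	res, err := db.ExecContext(ctx, `
		UPDATE lease_lock
		   SET client_id = $1, expires_at = now() + make_interval(secs => $2)
		 WHERE client_id = $1 OR expires_at < now()`,
		clientID, leaseDuration.Seconds())
	if err != nil {
		return false, err
	}
	n, err := res.RowsAffected()
	return n == 1, err
}

// holdLease renews the lease every leaseRefreshInterval until the context is
// cancelled or another client takes the lease, in which case it returns an
// error so the caller can shut the application down immediately.
func holdLease(ctx context.Context, db *sql.DB, clientID string) error {
	ticker := time.NewTicker(leaseRefreshInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			ok, err := tryAcquire(ctx, db, clientID)
			if err != nil {
				return err
			}
			if !ok {
				return fmt.Errorf("database lease lost to another client")
			}
		}
	}
}
```

A standby instance runs the same `tryAcquire` poll and only proceeds once the active node's lease has expired, which is the failover hand-off the Database.Lock section describes.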
## TelemetryIngress.Endpoints + ```toml [[TelemetryIngress.Endpoints]] # Example Network = 'EVM' # Example @@ -317,32 +387,40 @@ ServerPubKey = 'test-pub-key-111551111-evm' # Example URL = 'localhost-111551111-evm:9000' # Example ``` - ### Network + ```toml Network = 'EVM' # Example ``` + Network aka EVM, Solana, Starknet ### ChainID + ```toml ChainID = '111551111' # Example ``` + ChainID of the network ### ServerPubKey + ```toml ServerPubKey = 'test-pub-key-111551111-evm' # Example ``` + ServerPubKey is the public key of the telemetry server. ### URL + ```toml URL = 'localhost-111551111-evm:9000' # Example ``` + URL is where to send telemetry. ## AuditLogger + ```toml [AuditLogger] Enabled = false # Default @@ -351,32 +429,40 @@ JsonWrapperKey = 'event' # Example Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] # Example ``` - ### Enabled + ```toml Enabled = false # Default ``` + Enabled determines if this logger should be configured at all ### ForwardToUrl + ```toml ForwardToUrl = 'http://localhost:9898' # Example ``` + ForwardToUrl is where you want to forward logs to ### JsonWrapperKey + ```toml JsonWrapperKey = 'event' # Example ``` + JsonWrapperKey if set wraps the map of data under another single key to make parsing easier ### Headers + ```toml Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] # Example ``` + Headers is the set of headers you wish to pass along with each request ## Log + ```toml [Log] Level = 'info' # Default @@ -384,14 +470,16 @@ JSONConsole = false # Default UnixTS = false # Default ``` - ### Level + ```toml Level = 'info' # Default ``` + Level determines both what is printed on the screen and what is written to the log file. The available levels are: + - "debug": Useful for forensic debugging of issues. - "info": High-level informational messages. (default) - "warn": A mild error occurred that might require non-urgent action. Check these warnings semi-regularly to see if any of them require attention. These warnings usually happen due to factors outside of the control of the node operator. Examples: Unexpected responses from a remote API or misleading networking errors. @@ -401,20 +489,25 @@ The available levels are: - "fatal": The node encountered an unrecoverable problem and had to exit. ### JSONConsole + ```toml JSONConsole = false # Default ``` + JSONConsole enables JSON logging. Otherwise, the log is saved in a human-friendly console format. ### UnixTS + ```toml UnixTS = false # Default ``` + UnixTS enables legacy unix timestamps. Previous versions of Chainlink nodes wrote JSON logs with a unix timestamp. As of v1.1.0 and up, the default has changed to use ISO8601 timestamps for better readability. ## Log.File + ```toml [Log.File] Dir = '/my/log/directory' # Example @@ -423,17 +516,20 @@ MaxAgeDays = 0 # Default MaxBackups = 1 # Default ``` - ### Dir + ```toml Dir = '/my/log/directory' # Example ``` + Dir sets the log directory. By default, Chainlink nodes write log data to `$ROOT/log.jsonl`. ### MaxSize + ```toml MaxSize = '5120mb' # Default ``` + MaxSize determines the log file's max size in megabytes before file rotation. Having this not set will disable logging to disk. If your disk doesn't have enough disk space, the logging will pause and the application will log errors until space is available again. Values must have suffixes with a unit like: `5120mb` (5,120 megabytes). If no unit suffix is provided, the value defaults to `b` (bytes). 
The list of valid unit suffixes are: @@ -445,18 +541,23 @@ Values must have suffixes with a unit like: `5120mb` (5,120 megabytes). If no un - tb (terabytes) ### MaxAgeDays + ```toml MaxAgeDays = 0 # Default ``` + MaxAgeDays determines the log file's max age in days before file rotation. Keeping this config with the default value will not remove log files based on age. ### MaxBackups + ```toml MaxBackups = 1 # Default ``` + MaxBackups determines the maximum number of old log files to retain. Keeping this config with the default value retains all old log files. The `MaxAgeDays` variable can still cause them to get deleted. ## WebServer + ```toml [WebServer] AuthenticationMethod = 'local' # Default @@ -473,17 +574,20 @@ StartTimeout = '15s' # Default ListenIP = '0.0.0.0' # Default ``` - ### AuthenticationMethod + ```toml AuthenticationMethod = 'local' # Default ``` + AuthenticationMethod defines which pluggable auth interface to use for user login and role assumption. Options include 'local' and 'ldap'. See docs for more details ### AllowOrigins + ```toml AllowOrigins = 'http://localhost:3000,http://localhost:6688' # Default ``` + AllowOrigins controls the URLs Chainlink nodes emit in the `Allow-Origins` header of its API responses. The setting can be a comma-separated list with no spaces. You might experience CORS issues if this is not set correctly. You should set this to the external URL that you use to access the Chainlink UI. @@ -491,69 +595,91 @@ You should set this to the external URL that you use to access the Chainlink UI. You can set `AllowOrigins = '*'` to allow the UI to work from any URL, but it is recommended for security reasons to make it explicit instead. ### BridgeCacheTTL + ```toml BridgeCacheTTL = '0s' # Default ``` + BridgeCacheTTL controls the cache TTL for all bridge tasks to use old values in newer observations in case of intermittent failure. It's disabled by default. ### BridgeResponseURL + ```toml BridgeResponseURL = 'https://my-chainlink-node.example.com:6688' # Example ``` + BridgeResponseURL defines the URL for bridges to send a response to. This _must_ be set when using async external adapters. Usually this will be the same as the URL/IP and port you use to connect to the Chainlink UI. ### HTTPWriteTimeout + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml HTTPWriteTimeout = '10s' # Default ``` + HTTPWriteTimeout controls how long the Chainlink node's API server can hold a socket open for writing a response to an HTTP request. Sometimes, this must be increased for pprof. ### HTTPPort + ```toml HTTPPort = 6688 # Default ``` + HTTPPort is the port used for the Chainlink Node API, [CLI](/docs/configuration-variables/#cli-client), and GUI. ### SecureCookies + ```toml SecureCookies = true # Default ``` + SecureCookies requires the use of secure cookies for authentication. Set to false to enable standard HTTP requests along with `TLSPort = 0`. ### SessionTimeout + ```toml SessionTimeout = '15m' # Default ``` + SessionTimeout determines the amount of idle time to elapse before session cookies expire. This signs out GUI users from their sessions. ### SessionReaperExpiration + ```toml SessionReaperExpiration = '240h' # Default ``` + SessionReaperExpiration represents how long an API session lasts before expiring and requiring a new login. ### HTTPMaxSize + ```toml HTTPMaxSize = '32768b' # Default ``` + HTTPMaxSize defines the maximum size for HTTP requests and responses made by the node server. 
### StartTimeout + ```toml StartTimeout = '15s' # Default ``` + StartTimeout defines the maximum amount of time the node will wait for a server to start. ### ListenIP + ```toml ListenIP = '0.0.0.0' # Default ``` + ListenIP specifies the IP to bind the HTTP server to ## WebServer.LDAP + ```toml [WebServer.LDAP] ServerTLS = true # Default @@ -574,112 +700,148 @@ UserAPITokenDuration = '240h0m0s' # Default UpstreamSyncInterval = '0s' # Default UpstreamSyncRateLimit = '2m0s' # Default ``` + Optional LDAP config if WebServer.AuthenticationMethod is set to 'ldap' LDAP queries are all parameterized to support custom LDAP 'dn', 'cn', and attributes ### ServerTLS + ```toml ServerTLS = true # Default ``` + ServerTLS defines the option to require the secure ldaps ### SessionTimeout + ```toml SessionTimeout = '15m0s' # Default ``` + SessionTimeout determines the amount of idle time to elapse before session cookies expire. This signs out GUI users from their sessions. ### QueryTimeout + ```toml QueryTimeout = '2m0s' # Default ``` + QueryTimeout defines how long queries should wait before timing out, defined in seconds ### BaseUserAttr + ```toml BaseUserAttr = 'uid' # Default ``` + BaseUserAttr defines the base attribute used to populate LDAP queries such as "uid=$", default is example ### BaseDN + ```toml BaseDN = 'dc=custom,dc=example,dc=com' # Example ``` + BaseDN defines the base LDAP 'dn' search filter to apply to every LDAP query, replace example,com with the appropriate LDAP server's structure ### UsersDN + ```toml UsersDN = 'ou=users' # Default ``` + UsersDN defines the 'dn' query to use when querying for the 'users' 'ou' group ### GroupsDN + ```toml GroupsDN = 'ou=groups' # Default ``` + GroupsDN defines the 'dn' query to use when querying for the 'groups' 'ou' group ### ActiveAttribute + ```toml ActiveAttribute = '' # Default ``` + ActiveAttribute is an optional user field to check truthiness for if a user is valid/active. This is only required if the LDAP provider lists inactive users as members of groups ### ActiveAttributeAllowedValue + ```toml ActiveAttributeAllowedValue = '' # Default ``` + ActiveAttributeAllowedValue is the value to check against for the above optional user attribute ### AdminUserGroupCN + ```toml AdminUserGroupCN = 'NodeAdmins' # Default ``` + AdminUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Admin' role ### EditUserGroupCN + ```toml EditUserGroupCN = 'NodeEditors' # Default ``` + EditUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Edit' role ### RunUserGroupCN + ```toml RunUserGroupCN = 'NodeRunners' # Default ``` + RunUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Run' role ### ReadUserGroupCN + ```toml ReadUserGroupCN = 'NodeReadOnly' # Default ``` + ReadUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Read' role ### UserApiTokenEnabled + ```toml UserApiTokenEnabled = false # Default ``` + UserApiTokenEnabled enables the users to issue API tokens with the same access of their role ### UserAPITokenDuration + ```toml UserAPITokenDuration = '240h0m0s' # Default ``` + UserAPITokenDuration is the duration of time an API token is active for before expiring ### UpstreamSyncInterval + ```toml UpstreamSyncInterval = '0s' # Default ``` + UpstreamSyncInterval is the interval at which the background LDAP sync task will be called. A '0s' value disables the background sync being run on an interval. 
This check is already performed during login/logout actions, all sessions and API tokens stored in the local ldap tables are updated to match the remote server ### UpstreamSyncRateLimit + ```toml UpstreamSyncRateLimit = '2m0s' # Default ``` + UpstreamSyncRateLimit defines a duration to limit the number of query/API calls to the upstream LDAP provider. It prevents the sync functionality from being called multiple times within the defined duration ## WebServer.RateLimit + ```toml [WebServer.RateLimit] Authenticated = 1000 # Default @@ -688,52 +850,66 @@ Unauthenticated = 5 # Default UnauthenticatedPeriod = '20s' # Default ``` - ### Authenticated + ```toml Authenticated = 1000 # Default ``` + Authenticated defines the threshold to which authenticated requests get limited. More than this many authenticated requests per `AuthenticatedRateLimitPeriod` will be rejected. ### AuthenticatedPeriod + ```toml AuthenticatedPeriod = '1m' # Default ``` + AuthenticatedPeriod defines the period to which authenticated requests get limited. ### Unauthenticated + ```toml Unauthenticated = 5 # Default ``` + Unauthenticated defines the threshold to which authenticated requests get limited. More than this many unauthenticated requests per `UnAuthenticatedRateLimitPeriod` will be rejected. ### UnauthenticatedPeriod + ```toml UnauthenticatedPeriod = '20s' # Default ``` + UnauthenticatedPeriod defines the period to which unauthenticated requests get limited. ## WebServer.MFA + ```toml [WebServer.MFA] RPID = 'localhost' # Example RPOrigin = 'http://localhost:6688/' # Example ``` + The Operator UI frontend supports enabling Multi Factor Authentication via Webauthn per account. When enabled, logging in will require the account password and a hardware or OS security key such as Yubikey. To enroll, log in to the operator UI and click the circle purple profile button at the top right and then click **Register MFA Token**. Tap your hardware security key or use the OS public key management feature to enroll a key. Next time you log in, this key will be required to authenticate. ### RPID + ```toml RPID = 'localhost' # Example ``` + RPID is the FQDN of where the Operator UI is served. When serving locally, the value should be `localhost`. ### RPOrigin + ```toml RPOrigin = 'http://localhost:6688/' # Example ``` + RPOrigin is the origin URL where WebAuthn requests initiate, including scheme and port. When serving locally, the value should be `http://localhost:6688/`. ## WebServer.TLS + ```toml [WebServer.TLS] CertPath = '~/.cl/certs' # Example @@ -743,45 +919,59 @@ HTTPSPort = 6689 # Default ForceRedirect = false # Default ListenIP = '0.0.0.0' # Default ``` + The TLS settings apply only if you want to enable TLS security on your Chainlink node. ### CertPath + ```toml CertPath = '~/.cl/certs' # Example ``` + CertPath is the location of the TLS certificate file. ### Host + ```toml Host = 'tls-host' # Example ``` + Host is the hostname configured for TLS to be used by the Chainlink node. This is useful if you configured a domain name specific for your Chainlink node. ### KeyPath + ```toml KeyPath = '/home/$USER/.chainlink/tls/server.key' # Example ``` + KeyPath is the location of the TLS private key file. ### HTTPSPort + ```toml HTTPSPort = 6689 # Default ``` + HTTPSPort is the port used for HTTPS connections. Set this to `0` to disable HTTPS. Disabling HTTPS also relieves Chainlink nodes of the requirement for a TLS certificate. 
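As an illustration of how these keys fit together, a `[WebServer.TLS]` block that serves the API over HTTPS might look like the sketch below, reusing the example paths and hostname shown above (substitute your own certificate, key, and host):

```toml
[WebServer.TLS]
Host = 'tls-host'                                 # hostname configured for TLS
CertPath = '~/.cl/certs'                          # location of the TLS certificate file
KeyPath = '/home/$USER/.chainlink/tls/server.key' # location of the TLS private key file
HTTPSPort = 6689                                  # Default; set to 0 to disable HTTPS
```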
### ForceRedirect + ```toml ForceRedirect = false # Default ``` + ForceRedirect forces TLS redirect for unencrypted connections. ### ListenIP + ```toml ListenIP = '0.0.0.0' # Default ``` + ListenIP specifies the IP to bind the HTTPS server to ## JobPipeline + ```toml [JobPipeline] ExternalInitiatorsEnabled = false # Default @@ -793,23 +983,28 @@ ResultWriteQueueDepth = 100 # Default VerboseLogging = true # Default ``` - ### ExternalInitiatorsEnabled + ```toml ExternalInitiatorsEnabled = false # Default ``` + ExternalInitiatorsEnabled enables the External Initiator feature. If disabled, `webhook` jobs can ONLY be initiated by a logged-in user. If enabled, `webhook` jobs can be initiated by a whitelisted external initiator. ### MaxRunDuration + ```toml MaxRunDuration = '10m' # Default ``` + MaxRunDuration is the maximum time allowed for a single job run. If it takes longer, it will exit early and be marked errored. If set to zero, disables the time limit completely. ### MaxSuccessfulRuns + ```toml MaxSuccessfulRuns = 10000 # Default ``` + MaxSuccessfulRuns caps the number of completed successful runs per pipeline spec in the database. You can set it to zero as a performance optimisation; this will avoid saving any successful run. @@ -818,30 +1013,39 @@ Note this is not a hard cap, it can drift slightly larger than this but not by more than 5% or so. ### ReaperInterval + ```toml ReaperInterval = '1h' # Default ``` + ReaperInterval controls how often the job pipeline reaper will run to delete completed jobs older than ReaperThreshold, in order to keep database size manageable. Set to `0` to disable the periodic reaper. ### ReaperThreshold + ```toml ReaperThreshold = '24h' # Default ``` + ReaperThreshold determines the age limit for job runs. Completed job runs older than this will be automatically purged from the database. ### ResultWriteQueueDepth + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml ResultWriteQueueDepth = 100 # Default ``` + ResultWriteQueueDepth controls how many writes will be buffered before subsequent writes are dropped, for jobs that write results asynchronously for performance reasons, such as OCR. ### VerboseLogging + ```toml VerboseLogging = true # Default ``` + VerboseLogging enables detailed logging of pipeline execution steps. This can be useful for debugging failed runs without relying on the UI or database. @@ -849,47 +1053,57 @@ or database. You may disable if this results in excessive log volume. ## JobPipeline.HTTPRequest + ```toml [JobPipeline.HTTPRequest] DefaultTimeout = '15s' # Default MaxSize = '32768' # Default ``` - ### DefaultTimeout + ```toml DefaultTimeout = '15s' # Default ``` + DefaultTimeout defines the default timeout for HTTP requests made by `http` and `bridge` adapters. ### MaxSize + ```toml MaxSize = '32768' # Default ``` + MaxSize defines the maximum size for HTTP requests and responses made by `http` and `bridge` adapters. ## FluxMonitor + ```toml [FluxMonitor] DefaultTransactionQueueDepth = 1 # Default SimulateTransactions = false # Default ``` - ### DefaultTransactionQueueDepth + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml DefaultTransactionQueueDepth = 1 # Default ``` + DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in Flux Monitor. Set to 0 to use `SendEvery` strategy instead. 
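For example, and only if the advanced warning above applies to you, switching Flux Monitor from `DropOldestStrategy` to the `SendEvery` strategy is a matter of zeroing the queue depth:

```toml
[FluxMonitor]
DefaultTransactionQueueDepth = 0 # 0 selects the SendEvery strategy instead of DropOldestStrategy
```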
### SimulateTransactions + ```toml SimulateTransactions = false # Default ``` + SimulateTransactions enables transaction simulation for Flux Monitor. ## OCR2 + ```toml [OCR2] Enabled = false # Default @@ -907,17 +1121,20 @@ SimulateTransactions = false # Default TraceLogging = false # Default ``` - ### Enabled + ```toml Enabled = false # Default ``` + Enabled enables OCR2 jobs. ### ContractConfirmations + ```toml ContractConfirmations = 3 # Default ``` + ContractConfirmations is the number of block confirmations to wait for before enacting an on-chain configuration change. This value doesn't need to be very high (in particular, it does not need to protect against malicious re-orgs). @@ -948,79 +1165,102 @@ Contract config confirmations: 1 CONFIRMED ### BlockchainTimeout + ```toml BlockchainTimeout = '20s' # Default ``` + BlockchainTimeout is the timeout for blockchain queries (mediated through ContractConfigTracker and ContractTransmitter). (This is necessary because an oracle's operations are serialized, so blocking forever on a chain interaction would break the oracle.) ### ContractPollInterval + ```toml ContractPollInterval = '1m' # Default ``` + ContractPollInterval is the polling interval at which ContractConfigTracker is queried for updated on-chain configurations. Recommended values are between fifteen seconds and two minutes. ### ContractSubscribeInterval + ```toml ContractSubscribeInterval = '2m' # Default ``` + ContractSubscribeInterval is the interval at which we try to establish a subscription on ContractConfigTracker if one doesn't exist. Recommended values are between two and five minutes. ### ContractTransmitterTransmitTimeout + ```toml ContractTransmitterTransmitTimeout = '10s' # Default ``` + ContractTransmitterTransmitTimeout is the timeout for ContractTransmitter.Transmit calls. ### DatabaseTimeout + ```toml DatabaseTimeout = '10s' # Default ``` + DatabaseTimeout is the timeout for database interactions. (This is necessary because an oracle's operations are serialized, so blocking forever on an observation would break the oracle.) ### KeyBundleID + ```toml KeyBundleID = '7a5f66bbe6594259325bf2b4f5b1a9c900000000000000000000000000000000' # Example ``` + KeyBundleID is a sha256 hexadecimal hash identifier. ### CaptureEATelemetry + ```toml CaptureEATelemetry = false # Default ``` + CaptureEATelemetry toggles collecting extra information from External Adapters ### CaptureAutomationCustomTelemetry + ```toml CaptureAutomationCustomTelemetry = true # Default ``` + CaptureAutomationCustomTelemetry toggles collecting automation-specific telemetry ### DefaultTransactionQueueDepth + ```toml DefaultTransactionQueueDepth = 1 # Default ``` + DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in OCR2. Set to 0 to use `SendEvery` strategy instead. ### SimulateTransactions + ```toml SimulateTransactions = false # Default ``` + SimulateTransactions enables transaction simulation for OCR2. ### TraceLogging + ```toml TraceLogging = false # Default ``` + TraceLogging enables trace level logging. ## OCR + ```toml [OCR] Enabled = false # Default @@ -1035,84 +1275,109 @@ TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e' # Example CaptureEATelemetry = false # Default TraceLogging = false # Default ``` + This section applies only if you are running off-chain reporting jobs. ### Enabled + ```toml Enabled = false # Default ``` + Enabled enables OCR jobs.
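Tying the `[OCR]` keys in this section together, a minimal sketch that enables OCR jobs and sets the fallback key bundle and transmitter looks like the following (both values are the documentation's examples, not real keys):

```toml
[OCR]
Enabled = true
KeyBundleID = 'acdd42797a8b921b2910497badc5000600000000000000000000000000000000' # example bundle ID
TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e'                # example sending address
```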
### ObservationTimeout + ```toml ObservationTimeout = '5s' # Default ``` + ObservationTimeout is the timeout for making observations using the DataSource.Observe method. (This is necessary because an oracle's operations are serialized, so blocking forever on an observation would break the oracle.) ### BlockchainTimeout + ```toml BlockchainTimeout = '20s' # Default ``` + BlockchainTimeout is the timeout for blockchain queries (mediated through ContractConfigTracker and ContractTransmitter). (This is necessary because an oracle's operations are serialized, so blocking forever on a chain interaction would break the oracle.) ### ContractPollInterval + ```toml ContractPollInterval = '1m' # Default ``` + ContractPollInterval is the polling interval at which ContractConfigTracker is queried for updated on-chain configurations. Recommended values are between fifteen seconds and two minutes. ### ContractSubscribeInterval + ```toml ContractSubscribeInterval = '2m' # Default ``` + ContractSubscribeInterval is the interval at which we try to establish a subscription on ContractConfigTracker if one doesn't exist. Recommended values are between two and five minutes. ### DefaultTransactionQueueDepth + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml DefaultTransactionQueueDepth = 1 # Default ``` + DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in OCR. Set to 0 to use `SendEvery` strategy instead. ### KeyBundleID + ```toml KeyBundleID = 'acdd42797a8b921b2910497badc5000600000000000000000000000000000000' # Example ``` + KeyBundleID is the default key bundle ID to use for OCR jobs. If you have an OCR job that does not explicitly specify a key bundle ID, it will fall back to this value. ### SimulateTransactions + ```toml SimulateTransactions = false # Default ``` + SimulateTransactions enables transaction simulation for OCR. ### TransmitterAddress + ```toml TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e' # Example ``` + TransmitterAddress is the default sending address to use for OCR. If you have an OCR job that does not explicitly specify a transmitter address, it will fall back to this value. ### CaptureEATelemetry + ```toml CaptureEATelemetry = false # Default ``` + CaptureEATelemetry toggles collecting extra information from External Adapters ### TraceLogging + ```toml TraceLogging = false # Default ``` + TraceLogging enables trace level logging. ## P2P + ```toml [P2P] IncomingMessageBufferSize = 10 # Default @@ -1120,21 +1385,26 @@ OutgoingMessageBufferSize = 10 # Default PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' # Example TraceLogging = false # Default ``` + P2P has a versioned networking stack. Currently only `[P2P.V2]` is supported. All nodes in the OCR network should share the same networking stack. ### IncomingMessageBufferSize + ```toml IncomingMessageBufferSize = 10 # Default ``` + IncomingMessageBufferSize is the per-remote number of incoming messages to buffer. Any additional messages received on top of those already in the queue will be dropped. ### OutgoingMessageBufferSize + ```toml OutgoingMessageBufferSize = 10 # Default ``` + OutgoingMessageBufferSize is the per-remote number of outgoing messages to buffer. Any additional messages sent on top of those already in the queue will displace the oldest.
@@ -1143,18 +1413,23 @@ IncomingMessageBufferSize to give the remote enough space to process them all in case we regained connection and now send a bunch at once ### PeerID + ```toml PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' # Example ``` + PeerID is the default peer ID to use for OCR jobs. If unspecified, uses the first available peer ID. ### TraceLogging + ```toml TraceLogging = false # Default ``` + TraceLogging enables trace level logging. ## P2P.V2 + ```toml [P2P.V2] Enabled = true # Default @@ -1165,26 +1440,31 @@ DeltaReconcile = '1m' # Default ListenAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example ``` - ### Enabled + ```toml Enabled = true # Default ``` + Enabled enables P2P V2. Note: V1.Enabled is true by default, so it must be set false in order to run V2 only. ### AnnounceAddresses + ```toml AnnounceAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example ``` + AnnounceAddresses is the addresses the peer will advertise on the network in `host:port` form as accepted by the TCP version of Go’s `net.Dial`. The addresses should be reachable by other nodes on the network. When attempting to connect to another node, a node will attempt to dial all of the other node’s AnnounceAddresses in round-robin fashion. ### DefaultBootstrappers + ```toml DefaultBootstrappers = ['12D3KooWMHMRLQkgPbFSYHwD3NBuwtS1AmxhvKVUrcfyaGDASR4U@1.2.3.4:9999', '12D3KooWM55u5Swtpw9r8aFLQHEtw7HR4t44GdNs654ej5gRs2Dh@example.com:1234'] # Example ``` + DefaultBootstrappers is the default bootstrapper peers for libocr's v2 networking stack. Oracle nodes typically only know each other’s PeerIDs, but not their hostnames, IP addresses, or ports. @@ -1195,25 +1475,32 @@ received from its DefaultBootstrappers or other discovered nodes. To facilitate nodes will regularly broadcast signed announcements containing their PeerID and AnnounceAddresses. ### DeltaDial + ```toml DeltaDial = '15s' # Default ``` + DeltaDial controls how far apart Dial attempts are ### DeltaReconcile + ```toml DeltaReconcile = '1m' # Default ``` + DeltaReconcile controls how often a Reconcile message is sent to every peer. ### ListenAddresses + ```toml ListenAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example ``` + ListenAddresses is the addresses the peer will listen to on the network in `host:port` form as accepted by `net.Listen()`, but the host and port must be fully specified and cannot be empty. You can specify `0.0.0.0` (IPv4) or `::` (IPv6) to listen on all interfaces, but that is not recommended. ## Capabilities.ExternalRegistry + ```toml [Capabilities.ExternalRegistry] Address = '0x0' # Example @@ -1221,23 +1508,28 @@ NetworkID = 'evm' # Default ChainID = '1' # Default ``` - ### Address + ```toml Address = '0x0' # Example ``` + Address is the address for the capabilities registry contract. ### NetworkID + ```toml NetworkID = 'evm' # Default ``` + NetworkID identifies the target network where the remote registry is located. ### ChainID + ```toml ChainID = '1' # Default ``` + ChainID identifies the target chain id where the remote registry is located. ## Capabilities.Dispatcher @@ -1295,6 +1587,7 @@ PerSenderBurst = 50 # Default PerSenderBurst is the per-sender burst limit for the dispatcher. 
## Capabilities.Peering + ```toml [Capabilities.Peering] IncomingMessageBufferSize = 10 # Default @@ -1303,19 +1596,22 @@ PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' # Example TraceLogging = false # Default ``` - ### IncomingMessageBufferSize + ```toml IncomingMessageBufferSize = 10 # Default ``` + IncomingMessageBufferSize is the per-remote number of incoming messages to buffer. Any additional messages received on top of those already in the queue will be dropped. ### OutgoingMessageBufferSize + ```toml OutgoingMessageBufferSize = 10 # Default ``` + OutgoingMessageBufferSize is the per-remote number of outgoing messages to buffer. Any additional messages send on top of those already in the queue will displace the oldest. @@ -1324,18 +1620,23 @@ IncomingMessageBufferSize to give the remote enough space to process them all in case we regained connection and now send a bunch at once ### PeerID + ```toml PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' # Example ``` + PeerID is the default peer ID to use for OCR jobs. If unspecified, uses the first available peer ID. ### TraceLogging + ```toml TraceLogging = false # Default ``` + TraceLogging enables trace level logging. ## Capabilities.Peering.V2 + ```toml [Capabilities.Peering.V2] Enabled = false # Default @@ -1346,25 +1647,30 @@ DeltaReconcile = '1m' # Default ListenAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example ``` - ### Enabled + ```toml Enabled = false # Default ``` + Enabled enables P2P V2. ### AnnounceAddresses + ```toml AnnounceAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example ``` + AnnounceAddresses is the addresses the peer will advertise on the network in `host:port` form as accepted by the TCP version of Go’s `net.Dial`. The addresses should be reachable by other nodes on the network. When attempting to connect to another node, a node will attempt to dial all of the other node’s AnnounceAddresses in round-robin fashion. ### DefaultBootstrappers + ```toml DefaultBootstrappers = ['12D3KooWMHMRLQkgPbFSYHwD3NBuwtS1AmxhvKVUrcfyaGDASR4U@1.2.3.4:9999', '12D3KooWM55u5Swtpw9r8aFLQHEtw7HR4t44GdNs654ej5gRs2Dh@example.com:1234'] # Example ``` + DefaultBootstrappers is the default bootstrapper peers for libocr's v2 networking stack. Oracle nodes typically only know each other’s PeerIDs, but not their hostnames, IP addresses, or ports. @@ -1375,21 +1681,27 @@ received from its DefaultBootstrappers or other discovered nodes. To facilitate nodes will regularly broadcast signed announcements containing their PeerID and AnnounceAddresses. ### DeltaDial + ```toml DeltaDial = '15s' # Default ``` + DeltaDial controls how far apart Dial attempts are ### DeltaReconcile + ```toml DeltaReconcile = '1m' # Default ``` + DeltaReconcile controls how often a Reconcile message is sent to every peer. ### ListenAddresses + ```toml ListenAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example ``` + ListenAddresses is the addresses the peer will listen to on the network in `host:port` form as accepted by `net.Listen()`, but the host and port must be fully specified and cannot be empty. You can specify `0.0.0.0` (IPv4) or `::` (IPv6) to listen on all interfaces, but that is not recommended. 
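Purely as an illustration, a `[Capabilities.Peering.V2]` block that turns peering on and reuses the example addresses above could look like this (the addresses and bootstrapper ID are the documentation's example values, not real endpoints):

```toml
[Capabilities.Peering.V2]
Enabled = true
ListenAddresses = ['1.2.3.4:9999']   # address this peer listens on
AnnounceAddresses = ['1.2.3.4:9999'] # address advertised to other peers
DefaultBootstrappers = ['12D3KooWMHMRLQkgPbFSYHwD3NBuwtS1AmxhvKVUrcfyaGDASR4U@1.2.3.4:9999']
```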
@@ -1462,6 +1774,7 @@ URL = 'wss://localhost:8081/node' # Example URL of the Gateway ## Keeper + ```toml [Keeper] DefaultTransactionQueueDepth = 1 # Default @@ -1472,49 +1785,66 @@ MaxGracePeriod = 100 # Default TurnLookBack = 1_000 # Default ``` - ### DefaultTransactionQueueDepth + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml DefaultTransactionQueueDepth = 1 # Default ``` + DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in Keeper. Set to 0 to use `SendEvery` strategy instead. ### GasPriceBufferPercent + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml GasPriceBufferPercent = 20 # Default ``` + GasPriceBufferPercent specifies the percentage to add to the gas price used for checking whether to perform an upkeep. Only applies in legacy mode (EIP-1559 off). ### GasTipCapBufferPercent + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml GasTipCapBufferPercent = 20 # Default ``` + GasTipCapBufferPercent specifies the percentage to add to the gas price used for checking whether to perform an upkeep. Only applies in EIP-1559 mode. ### BaseFeeBufferPercent + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml BaseFeeBufferPercent = 20 # Default ``` + BaseFeeBufferPercent specifies the percentage to add to the base fee used for checking whether to perform an upkeep. Applies only in EIP-1559 mode. ### MaxGracePeriod + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml MaxGracePeriod = 100 # Default ``` + MaxGracePeriod is the maximum number of blocks that a keeper will wait after performing an upkeep before it resumes checking that upkeep ### TurnLookBack + ```toml TurnLookBack = 1_000 # Default ``` + TurnLookBack is the number of blocks in the past to look back when getting a block for a turn. ## Keeper.Registry + ```toml [Keeper.Registry] CheckGasOverhead = 200_000 # Default @@ -1524,43 +1854,58 @@ MaxPerformDataSize = 5_000 # Default SyncUpkeepQueueSize = 10 # Default ``` - ### CheckGasOverhead + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml CheckGasOverhead = 200_000 # Default ``` + CheckGasOverhead is the amount of extra gas to provide checkUpkeep() calls to account for the gas consumed by the keeper registry. ### PerformGasOverhead + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml PerformGasOverhead = 300_000 # Default ``` + PerformGasOverhead is the amount of extra gas to provide performUpkeep() calls to account for the gas consumed by the keeper registry ### SyncInterval + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml SyncInterval = '30m' # Default ``` + SyncInterval is the interval in which the RegistrySynchronizer performs a full sync of the keeper registry contract it is tracking. ### MaxPerformDataSize + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml MaxPerformDataSize = 5_000 # Default ``` + MaxPerformDataSize is the max size of perform data. 
### SyncUpkeepQueueSize + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml SyncUpkeepQueueSize = 10 # Default ``` + SyncUpkeepQueueSize represents the maximum number of upkeeps that can be synced in parallel. ## AutoPprof + ```toml [AutoPprof] Enabled = false # Default @@ -1576,103 +1921,133 @@ MutexProfileFraction = 1 # Default MemThreshold = '4gb' # Default GoroutineThreshold = 5000 # Default ``` + The Chainlink node is equipped with an internal "nurse" service that can perform automatic `pprof` profiling when the certain resource thresholds are exceeded, such as memory and goroutine count. These profiles are saved to disk to facilitate fine-grained debugging of performance-related issues. In general, if you notice that your node has begun to accumulate profiles, forward them to the Chainlink team. To learn more about these profiles, read the [Profiling Go programs with pprof](https://jvns.ca/blog/2017/09/24/profiling-go-with-pprof/) guide. ### Enabled + ```toml Enabled = false # Default ``` + Enabled enables the automatic profiling service. ### ProfileRoot + ```toml ProfileRoot = 'prof/root' # Example ``` + ProfileRoot sets the location on disk where pprof profiles will be stored. Defaults to `RootDir`. ### PollInterval + ```toml PollInterval = '10s' # Default ``` + PollInterval is the interval at which the node's resources are checked. ### GatherDuration + ```toml GatherDuration = '10s' # Default ``` + GatherDuration is the duration for which profiles are gathered when profiling starts. ### GatherTraceDuration + ```toml GatherTraceDuration = '5s' # Default ``` + GatherTraceDuration is the duration for which traces are gathered when profiling is kicked off. This is separately configurable because traces are significantly larger than other types of profiles. ### MaxProfileSize + ```toml MaxProfileSize = '100mb' # Default ``` + MaxProfileSize is the maximum amount of disk space that profiles may consume before profiling is disabled. ### CPUProfileRate + ```toml CPUProfileRate = 1 # Default ``` + CPUProfileRate sets the rate for CPU profiling. See https://pkg.go.dev/runtime#SetCPUProfileRate. ### MemProfileRate + ```toml MemProfileRate = 1 # Default ``` + MemProfileRate sets the rate for memory profiling. See https://pkg.go.dev/runtime#pkg-variables. ### BlockProfileRate + ```toml BlockProfileRate = 1 # Default ``` + BlockProfileRate sets the fraction of blocking events for goroutine profiling. See https://pkg.go.dev/runtime#SetBlockProfileRate. ### MutexProfileFraction + ```toml MutexProfileFraction = 1 # Default ``` + MutexProfileFraction sets the fraction of contention events for mutex profiling. See https://pkg.go.dev/runtime#SetMutexProfileFraction. ### MemThreshold + ```toml MemThreshold = '4gb' # Default ``` + MemThreshold sets the maximum amount of memory the node can actively consume before profiling begins. ### GoroutineThreshold + ```toml GoroutineThreshold = 5000 # Default ``` + GoroutineThreshold is the maximum number of actively-running goroutines the node can spawn before profiling begins. ## Pyroscope + ```toml [Pyroscope] ServerAddress = 'http://localhost:4040' # Example Environment = 'mainnet' # Default ``` - ### ServerAddress + ```toml ServerAddress = 'http://localhost:4040' # Example ``` + ServerAddress sets the address that will receive the profile logs. It enables the profiling service. 
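For instance, pointing the node at a Pyroscope server only requires setting the address; the sketch below reuses the example values from this section (`Environment`, described next, tags the uploaded profiles):

```toml
[Pyroscope]
ServerAddress = 'http://localhost:4040' # setting this enables the profiling service
Environment = 'mainnet'                 # Default
```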
### Environment + ```toml Environment = 'mainnet' # Default ``` + Environment sets the target environment tag that profiles will be added to. ## Sentry + ```toml [Sentry] Debug = false # Default @@ -1681,33 +2056,42 @@ Environment = 'my-custom-env' # Example Release = 'v1.2.3' # Example ``` - ### Debug + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml Debug = false # Default ``` + Debug enables printing of Sentry SDK debug messages. ### DSN + ```toml DSN = 'sentry-dsn' # Example ``` + DSN is the data source name where events will be sent. Sentry is completely disabled if this is left blank. ### Environment + ```toml Environment = 'my-custom-env' # Example ``` + Environment overrides the Sentry environment to the given value. Otherwise autodetects between dev/prod. ### Release + ```toml Release = 'v1.2.3' # Example ``` + Release overrides the Sentry release to the given value. Otherwise uses the compiled-in version number. ## Insecure + ```toml [Insecure] DevWebServer = false # Default @@ -1715,34 +2099,45 @@ OCRDevelopmentMode = false # Default InfiniteDepthQueries = false # Default DisableRateLimiting = false # Default ``` + Insecure config family is only allowed in development builds. ### DevWebServer + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml DevWebServer = false # Default ``` + DevWebServer skips secure configuration for webserver AllowedHosts, SSL, etc. ### OCRDevelopmentMode + ```toml OCRDevelopmentMode = false # Default ``` + OCRDevelopmentMode runs OCR in development mode. ### InfiniteDepthQueries + ```toml InfiniteDepthQueries = false # Default ``` + InfiniteDepthQueries skips graphql query depth limit checks. ### DisableRateLimiting + ```toml DisableRateLimiting = false # Default ``` + DisableRateLimiting skips ratelimiting on asset requests. ## Tracing + ```toml [Tracing] Enabled = false # Default @@ -1753,57 +2148,73 @@ Mode = 'tls' # Default TLSCertPath = '/path/to/cert.pem' # Example ``` - ### Enabled + ```toml Enabled = false # Default ``` + Enabled turns trace collection on or off. Turning it on requires an OTEL Tracing Collector. ### CollectorTarget + ```toml CollectorTarget = 'localhost:4317' # Example ``` + CollectorTarget is the logical address of the OTEL Tracing Collector. ### NodeID + ```toml NodeID = 'NodeID' # Example ``` + NodeID is a unique name for this node relative to any other node traces are collected for. ### SamplingRatio + ```toml SamplingRatio = 1.0 # Example ``` + SamplingRatio is the ratio of traces to sample for this node. ### Mode + ```toml Mode = 'tls' # Default ``` + Mode is a string value. `tls` or `unencrypted` are the only values allowed. If set to `unencrypted`, `TLSCertPath` can be unset, meaning traces will be sent over plaintext to the collector. ### TLSCertPath + ```toml TLSCertPath = '/path/to/cert.pem' # Example ``` + TLSCertPath is the file path to the TLS certificate used for secure communication with an OTEL Tracing Collector. ## Tracing.Attributes + ```toml [Tracing.Attributes] env = 'test' # Example ``` + Tracing.Attributes are user specified key-value pairs to associate in the context of the traces ### env + ```toml env = 'test' # Example ``` + env is an example user specified key-value pair ## Mercury + ```toml [Mercury] VerboseLogging = false # Default @@ -1819,18 +2230,22 @@ can be expensive since they may serialize large structs, so they are disabled by default.
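If you do want the extra detail while debugging, verbose Mercury logging can be enabled explicitly (expect noticeably larger log volume, as noted above):

```toml
[Mercury]
VerboseLogging = true # Default is false
```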
## Mercury.Cache + ```toml [Mercury.Cache] LatestReportTTL = "1s" # Default MaxStaleAge = "1h" # Default LatestReportDeadline = "5s" # Default ``` + Mercury.Cache controls settings for the price retrieval cache querying a mercury server ### LatestReportTTL + ```toml LatestReportTTL = "1s" # Default ``` + LatestReportTTL controls how "stale" we will allow a price to be, e.g. if set to 1s, a new price will always be fetched if the last result was from 1 second ago or older. @@ -1841,9 +2256,11 @@ price that was queried from now-LatestReportTTL or before. Setting to zero disables caching entirely. ### MaxStaleAge + ```toml MaxStaleAge = "1h" # Default ``` + MaxStaleAge is the maximum amount of time that a value can be stale before it is deleted from the cache (a form of garbage collection). @@ -1851,37 +2268,47 @@ This should generally be set to something much larger than LatestReportTTL. Setting to zero disables garbage collection. ### LatestReportDeadline + ```toml LatestReportDeadline = "5s" # Default ``` + LatestReportDeadline controls how long to wait for a response from the mercury server before retrying. Setting this to zero will wait indefinitely. ## Mercury.TLS + ```toml [Mercury.TLS] CertFile = "/path/to/client/certs.pem" # Example ``` + Mercury.TLS controls client settings for when the node talks to traditional web servers or load balancers. ### CertFile + ```toml CertFile = "/path/to/client/certs.pem" # Example ``` + CertFile is the path to a PEM file of trusted root certificate authority certificates ## Mercury.Transmitter + ```toml [Mercury.Transmitter] TransmitQueueMaxSize = 10_000 # Default TransmitTimeout = "5s" # Default ``` + Mercury.Transmitter controls settings for the mercury transmitter ### TransmitQueueMaxSize + ```toml TransmitQueueMaxSize = 10_000 # Default ``` + TransmitQueueMaxSize controls the size of the transmit queue. This is scoped per OCR instance. If the queue is full, the transmitter will start dropping the oldest messages in order to make space. @@ -1890,9 +2317,11 @@ This is useful if the mercury server goes offline and the node operator needs to buffer transmissions. ### TransmitTimeout + ```toml TransmitTimeout = "5s" # Default ``` + TransmitTimeout controls how long the transmitter will wait for a response when sending a message to the mercury server, before aborting and considering the transmission to be failed. @@ -1956,6 +2385,7 @@ foo = "bar" # Example foo is an example resource attribute ## EVM + EVM defaults depend on ChainID:
Ethereum Mainnet (1)

@@ -8791,49 +9221,63 @@ GasLimitDefault = 400000

- ### ChainID + ```toml ChainID = '1' # Example ``` + ChainID is the EVM chain ID. Mandatory. ### Enabled + ```toml Enabled = true # Default ``` + Enabled enables this chain. ### AutoCreateKey + ```toml AutoCreateKey = true # Default ``` + AutoCreateKey, if set to true, will ensure that there is always at least one transmit key for the given chain. ### BlockBackfillDepth + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml BlockBackfillDepth = 10 # Default ``` + BlockBackfillDepth specifies the number of blocks before the current HEAD that the log broadcaster will try to re-consume logs from. ### BlockBackfillSkip + ```toml BlockBackfillSkip = false # Default ``` + BlockBackfillSkip enables skipping of very long backfills. ### ChainType + ```toml ChainType = 'arbitrum' # Example ``` + ChainType is automatically detected from chain ID. Set this to force a certain chain type regardless of chain ID. Available types: `arbitrum`, `celo`, `gnosis`, `hedera`, `kroma`, `metis`, `optimismBedrock`, `scroll`, `wemix`, `xlayer`, `zksync` ### FinalityDepth + ```toml FinalityDepth = 50 # Default ``` + FinalityDepth is the number of blocks after which an ethereum transaction is considered "final". Note that the default is automatically set based on chain ID, so it should not be necessary to change this under normal operation. BlocksConsideredFinal determines how deeply we look back to ensure that transactions are confirmed onto the longest chain There is not a large performance penalty to setting this relatively high (on the order of hundreds) @@ -8856,105 +9300,142 @@ A re-org occurs at height 46 starting at block 41, transaction is marked for reb A re-org occurs at height 47 starting at block 41, transaction is NOT marked for rebroadcast ### FinalityTagEnabled + ```toml FinalityTagEnabled = false # Default ``` + FinalityTagEnabled means that the chain supports the finalized block tag when querying for a block. If FinalityTagEnabled is set to true for a chain, then FinalityDepth field is ignored. Finality for a block is solely defined by the finality related tags provided by the chain's RPC API. This is a placeholder and hasn't been implemented yet. ### FlagsContractAddress + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml FlagsContractAddress = '0xae4E781a6218A8031764928E88d457937A954fC3' # Example ``` + FlagsContractAddress can optionally point to a [Flags contract](../contracts/src/v0.8/Flags.sol). If set, the node will lookup that contract for each job that supports flags contracts (currently OCR and FM jobs are supported). If the job's contractAddress is set as hibernating in the FlagsContractAddress address, it overrides the standard update parameters (such as heartbeat/threshold). ### LinkContractAddress + ```toml LinkContractAddress = '0x538aAaB4ea120b2bC2fe5D296852D948F07D849e' # Example ``` + LinkContractAddress is the canonical ERC-677 LINK token contract address on the given chain. Note that this is usually autodetected from chain ID. ### LogBackfillBatchSize + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml LogBackfillBatchSize = 1000 # Default ``` + LogBackfillBatchSize sets the batch size for calling FilterLogs when we backfill missing logs. 
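Stepping back from the individual knobs, a per-chain entry usually only needs the identity fields covered at the top of this section; everything else falls back to the chain's built-in defaults. A minimal sketch, assuming the `[[EVM]]` array-of-tables form used for per-chain configuration (the same double-bracket convention as `[[EVM.KeySpecific]]` later in this document):

```toml
[[EVM]]
ChainID = '1'        # mandatory EVM chain ID
Enabled = true       # Default
AutoCreateKey = true # Default; ensures at least one transmit key exists for the chain
```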
### LogPollInterval + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml LogPollInterval = '15s' # Default ``` + LogPollInterval works in conjunction with Feature.LogPoller. Controls how frequently the log poller polls for logs. Defaults to the block production rate. ### LogKeepBlocksDepth + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml LogKeepBlocksDepth = 100000 # Default ``` + LogKeepBlocksDepth works in conjunction with Feature.LogPoller. Controls how many blocks the poller will keep, must be greater than FinalityDepth+1. ### LogPrunePageSize + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml LogPrunePageSize = 0 # Default ``` + LogPrunePageSize defines size of the page for pruning logs. Controls how many logs/blocks (at most) are deleted in a single prune tick. Default value 0 means no paging, delete everything at once. ### BackupLogPollerBlockDelay + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml BackupLogPollerBlockDelay = 100 # Default ``` + BackupLogPollerBlockDelay works in conjunction with Feature.LogPoller. Controls the block delay of Backup LogPoller, affecting how far behind the latest finalized block it starts and how often it runs. BackupLogPollerDelay=0 will disable Backup LogPoller (_not recommended for production environment_). ### MinContractPayment + ```toml MinContractPayment = '10000000000000 juels' # Default ``` + MinContractPayment is the minimum payment in LINK required to execute a direct request job. This can be overridden on a per-job basis. ### MinIncomingConfirmations + ```toml MinIncomingConfirmations = 3 # Default ``` + MinIncomingConfirmations is the minimum required confirmations before a log event will be consumed. ### NonceAutoSync + ```toml NonceAutoSync = true # Default ``` + NonceAutoSync enables automatic nonce syncing on startup. Chainlink nodes will automatically try to sync its local nonce with the remote chain on startup and fast forward if necessary. This is almost always safe but can be disabled in exceptional cases by setting this value to false. ### NoNewHeadsThreshold + ```toml NoNewHeadsThreshold = '3m' # Default ``` + NoNewHeadsThreshold controls how long to wait after receiving no new heads before `NodePool` marks rpc endpoints as out-of-sync, and `HeadTracker` logs warnings. Set to zero to disable out-of-sync checking. ### OperatorFactoryAddress + ```toml OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' # Example ``` + OperatorFactoryAddress is the address of the canonical operator forwarder contract on the given chain. Note that this is usually autodetected from chain ID. ### RPCDefaultBatchSize + ```toml RPCDefaultBatchSize = 250 # Default ``` + RPCDefaultBatchSize is the default batch size for batched RPC calls. ### RPCBlockQueryDelay + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml RPCBlockQueryDelay = 1 # Default ``` + RPCBlockQueryDelay controls the number of blocks to trail behind head in the block history estimator and balance monitor. For example, if this is set to 3, and we receive block 10, block history estimator will fetch block 7. @@ -8964,9 +9445,11 @@ available from the connected node via RPC, due to race conditions in the code of "zero" blocks that are missing transactions. 
### FinalizedBlockOffset + ```toml FinalizedBlockOffset = 0 # Default ``` + FinalizedBlockOffset defines the number of blocks by which the latest finalized block will be shifted/delayed. For example, suppose RPC returns block 100 as the latest finalized. In that case, the CL Node will treat block `100 - FinalizedBlockOffset` as the latest finalized block and `latest - FinalityDepth - FinalizedBlockOffset` in case of `FinalityTagEnabled = false.` With `EnforceRepeatableRead = true,` RPC is considered healthy only if its most recent finalized block is larger or equal to the highest finalized block observed by the CL Node minus `FinalizedBlockOffset.` @@ -8995,6 +9478,7 @@ out-of-sync. Only applicable if `FinalityTagEnabled=true` Set to zero to disable. ## EVM.Transactions + ```toml [EVM.Transactions] ForwardersEnabled = false # Default @@ -9005,17 +9489,20 @@ ReaperThreshold = '168h' # Default ResendAfterThreshold = '1m' # Default ``` - ### ForwardersEnabled + ```toml ForwardersEnabled = false # Default ``` + ForwardersEnabled enables or disables sending transactions through forwarder contracts. ### MaxInFlight + ```toml MaxInFlight = 16 # Default ``` + MaxInFlight controls how many transactions are allowed to be "in-flight" i.e. broadcast but unconfirmed at any one time. You can consider this a form of transaction throttling. The default is set conservatively at 16 because this is a pessimistic minimum that both geth and parity will hold without evicting local transactions. If your node is falling behind and you need higher throughput, you can increase this setting, but you MUST make sure that your ETH node is configured properly otherwise you can get nonce gapped and your node will get stuck. @@ -9023,9 +9510,11 @@ The default is set conservatively at 16 because this is a pessimistic minimum th 0 value disables the limit. Use with caution. ### MaxQueued + ```toml MaxQueued = 250 # Default ``` + MaxQueued is the maximum number of unbroadcast transactions per key that are allowed to be enqueued before jobs will start failing and rejecting send of any further transactions. This represents a sanity limit and generally indicates a problem with your ETH node (transactions are not getting mined). Do NOT blindly increase this value thinking it will fix things if you start hitting this limit because transactions are not getting mined; you will instead only make things worse. @@ -9035,24 +9524,31 @@ In deployments with very high burst rates, or on chains with large re-orgs, you 0 value disables any limit on queue size. Use with caution. ### ReaperInterval + ```toml ReaperInterval = '1h' # Default ``` + ReaperInterval controls how often the EthTx reaper will run. ### ReaperThreshold + ```toml ReaperThreshold = '168h' # Default ``` + ReaperThreshold indicates how old an EthTx ought to be before it can be reaped. ### ResendAfterThreshold + ```toml ResendAfterThreshold = '1m' # Default ``` + ResendAfterThreshold controls how long to wait before re-broadcasting a transaction that has not yet been confirmed. ## EVM.Transactions.AutoPurge + ```toml [EVM.Transactions.AutoPurge] Enabled = false # Default @@ -9061,45 +9557,55 @@ Threshold = 5 # Example MinAttempts = 3 # Example ``` - ### Enabled + ```toml Enabled = false # Default ``` + Enabled enables or disables automatically purging transactions that have been identified as terminally stuck (will never be included on-chain). This feature is only expected to be used by ZK chains.
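As a sketch, enabling auto-purge on a chain without a custom detection API combines the keys from the block above (the threshold values are the documentation's examples):

```toml
[EVM.Transactions.AutoPurge]
Enabled = true
Threshold = 5   # blocks a transaction may stay unconfirmed before it is evaluated as terminally stuck
MinAttempts = 3 # minimum broadcast attempts before a transaction is evaluated further
```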
### DetectionApiUrl + ```toml DetectionApiUrl = 'https://example.api.io' # Example ``` + DetectionApiUrl configures the base url of a custom endpoint used to identify terminally stuck transactions. ### Threshold + ```toml Threshold = 5 # Example ``` + Threshold configures the number of blocks a transaction has to remain unconfirmed before it is evaluated for being terminally stuck. This threshold is only applied if there is no custom API to identify stuck transactions provided by the chain. ### MinAttempts + ```toml MinAttempts = 3 # Example ``` + MinAttempts configures the minimum number of broadcasted attempts a transaction has to have before it is evaluated further for being terminally stuck. This threshold is only applied if there is no custom API to identify stuck transactions provided by the chain. Ensure the gas estimator configs take more bump attempts before reaching the configured max gas price. ## EVM.BalanceMonitor + ```toml [EVM.BalanceMonitor] Enabled = true # Default ``` - ### Enabled + ```toml Enabled = true # Default ``` + Enabled balance monitoring for all keys. ## EVM.GasEstimator + ```toml [EVM.GasEstimator] Mode = 'BlockHistory' # Default @@ -9121,11 +9627,12 @@ TipCapDefault = '1 wei' # Default TipCapMin = '1 wei' # Default ``` - ### Mode + ```toml Mode = 'BlockHistory' # Default ``` + Mode controls what type of gas estimator is used. - `FixedPrice` uses static configured values for gas price (can be set via API call). @@ -9141,9 +9648,11 @@ You can also use your own estimator for gas price by selecting the `FixedPrice` An important point to note is that the Chainlink node does _not_ ship with built-in support for go-ethereum's `estimateGas` call. This is for several reasons, including security and reliability. We have found empirically that it is not generally safe to rely on the remote ETH node's idea of what gas price should be. ### PriceDefault + ```toml PriceDefault = '20 gwei' # Default ``` + PriceDefault is the default gas price to use when submitting transactions to the blockchain. Will be overridden by the built-in `BlockHistoryEstimator` if enabled, and might be increased if gas bumping is enabled. (Only applies to legacy transactions) @@ -9151,18 +9660,22 @@ PriceDefault is the default gas price to use when submitting transactions to the Can be used with the `chainlink setgasprice` to be updated while the node is still running. ### PriceMax + ```toml PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' # Default ``` + PriceMax is the maximum gas price. Chainlink nodes will never pay more than this for a transaction. This applies to both legacy and EIP1559 transactions. Note that it is impossible to disable the maximum limit. Setting this value to zero will prevent paying anything for any transaction (which can be useful in some rare cases). -Most chains by default have the maximum set to 2**256-1 Wei which is the maximum allowed gas price on EVM-compatible chains, and is so large it may as well be unlimited. +Most chains by default have the maximum set to 2\*\*256-1 Wei which is the maximum allowed gas price on EVM-compatible chains, and is so large it may as well be unlimited. ### PriceMin + ```toml PriceMin = '1 gwei' # Default ``` + PriceMin is the minimum gas price. Chainlink nodes will never pay less than this for a transaction. 
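To make the mode switch concrete, here is an illustrative sketch that pins the estimator to static prices instead of the default `BlockHistory` mode; `PriceDefault`, covered next, supplies the static legacy gas price in that case:

```toml
[EVM.GasEstimator]
Mode = 'FixedPrice'      # default is 'BlockHistory'
PriceDefault = '20 gwei' # static gas price used when the FixedPrice estimator is active (legacy transactions)
```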
(Only applies to legacy transactions) @@ -9179,30 +9692,38 @@ Mode = 'FixedPrice' ``` ### LimitDefault + ```toml LimitDefault = 500_000 # Default ``` + LimitDefault sets the default gas limit for outgoing transactions. This should not need to be changed in most cases. Some job types, such as Keeper jobs, might set their own gas limit unrelated to this value. ### LimitMax + ```toml LimitMax = 500_000 # Default ``` + LimitMax sets a maximum for _estimated_ gas limits. This currently only applies to `Arbitrum` `GasEstimatorMode`. ### LimitMultiplier + ```toml LimitMultiplier = '1.0' # Default ``` + LimitMultiplier is the factor by which a transaction's GasLimit is multiplied before transmission. So if the value is 1.1, and the GasLimit for a transaction is 10, 10% will be added before transmission. This factor is always applied, so includes L2 transactions which use a default gas limit of 1 and is also applied to `LimitDefault`. ### LimitTransfer + ```toml LimitTransfer = 21_000 # Default ``` + LimitTransfer is the gas limit used for an ordinary ETH transfer. ### EstimateLimit @@ -9212,35 +9733,45 @@ EstimateLimit = false # Default EstimateLimit enables estimating gas limits for transactions. This feature respects the gas limit provided during transaction creation as an upper bound. ### BumpMin + ```toml BumpMin = '5 gwei' # Default ``` + BumpMin is the minimum fixed amount of wei by which gas is bumped on each transaction attempt. ### BumpPercent + ```toml BumpPercent = 20 # Default ``` + BumpPercent is the percentage by which to bump gas on a transaction that has exceeded `BumpThreshold`. The larger of `BumpPercent` and `BumpMin` is taken for gas bumps. The `SuggestedPriceEstimator` adds the larger of `BumpPercent` and `BumpMin` on top of the price provided by the RPC when bumping a transaction's gas. ### BumpThreshold + ```toml BumpThreshold = 3 # Default ``` + BumpThreshold is the number of blocks to wait for a transaction stuck in the mempool before automatically bumping the gas price. Set to 0 to disable gas bumping completely. ### BumpTxDepth + ```toml BumpTxDepth = 16 # Example ``` + BumpTxDepth is the number of transactions to gas bump starting from oldest. Set to 0 for no limit (i.e. bump all). Cannot be greater than EVM.Transactions.MaxInFlight. If not set, defaults to EVM.Transactions.MaxInFlight. ### EIP1559DynamicFees + ```toml EIP1559DynamicFees = false # Default ``` + EIP1559DynamicFees forces EIP-1559 transaction mode. Enabling EIP-1559 mode can help reduce gas costs on chains that support it. This is supported only on official Ethereum mainnet and testnets. It is not recommended to enable this setting on Polygon because the EIP-1559 fee market appears to be broken on all Polygon chains and EIP-1559 transactions are less likely to be included than legacy transactions. #### Technical details @@ -9258,10 +9789,12 @@ In EIP-1559 mode, the total price for the transaction is the minimum of base fee Chainlink's implementation of EIP-1559 works as follows: If you are using FixedPriceEstimator: + - With gas bumping disabled, it will submit all transactions with `feecap=PriceMax` and `tipcap=GasTipCapDefault` - With gas bumping enabled, it will submit all transactions initially with `feecap=GasFeeCapDefault` and `tipcap=GasTipCapDefault`.
If you are using BlockHistoryEstimator (default for most chains): + - With gas bumping disabled, it will submit all transactions with `feecap=PriceMax` and `tipcap=` - With gas bumping enabled (default for most chains) it will submit all transactions initially with `feecap = ( current block base fee * (1.125 ^ N) + tipcap )` where N is configurable by setting `EVM.GasEstimator.BlockHistory.EIP1559FeeCapBufferBlocks` but defaults to `gas bump threshold+1` and `tipcap=` @@ -9287,23 +9820,29 @@ In EIP-1559 mode, the following changes occur to how configuration works: - `Keeper.GasPriceBufferPercent` is ignored in EIP-1559 mode and `Keeper.GasTipCapBufferPercent` is used instead ### FeeCapDefault + ```toml FeeCapDefault = '100 gwei' # Default ``` + FeeCapDefault controls the fixed initial fee cap, if EIP1559 mode is enabled and `FixedPrice` gas estimator is used. ### TipCapDefault + ```toml TipCapDefault = '1 wei' # Default ``` + TipCapDefault is the default gas tip to use when submitting transactions to the blockchain. Will be overridden by the built-in `BlockHistoryEstimator` if enabled, and might be increased if gas bumping is enabled. (Only applies to EIP-1559 transactions) ### TipCapMin + ```toml TipCapMin = '1 wei' # Default ``` + TipCapMin is the minimum gas tip to use when submitting transactions to the blockchain. (Only applies to EIP-1559 transactions) @@ -9336,6 +9875,7 @@ CustomGasPriceCalldata = '' # Default CustomGasPriceCalldata is optional and can be set to call a custom gas price function at the given OracleAddress. ## EVM.GasEstimator.LimitJobType + ```toml [EVM.GasEstimator.LimitJobType] OCR = 100_000 # Example @@ -9346,44 +9886,56 @@ FM = 100_000 # Example Keeper = 100_000 # Example ``` - ### OCR + ```toml OCR = 100_000 # Example ``` + OCR overrides LimitDefault for OCR jobs. ### OCR2 + ```toml OCR2 = 100_000 # Example ``` + OCR2 overrides LimitDefault for OCR2 jobs. ### DR + ```toml DR = 100_000 # Example ``` + DR overrides LimitDefault for Direct Request jobs. ### VRF + ```toml VRF = 100_000 # Example ``` + VRF overrides LimitDefault for VRF jobs. ### FM + ```toml FM = 100_000 # Example ``` + FM overrides LimitDefault for Flux Monitor jobs. ### Keeper + ```toml Keeper = 100_000 # Example ``` + Keeper overrides LimitDefault for Keeper jobs. ## EVM.GasEstimator.BlockHistory + ```toml [EVM.GasEstimator.BlockHistory] BatchSize = 25 # Default @@ -9393,52 +9945,66 @@ CheckInclusionPercentile = 90 # Default EIP1559FeeCapBufferBlocks = 13 # Example TransactionPercentile = 60 # Default ``` + These settings allow you to configure how your node calculates gas prices when using the block history estimator. In most cases, leaving these values at their defaults should give good results. ### BatchSize + ```toml BatchSize = 25 # Default ``` + BatchSize sets the maximum number of blocks to fetch in one batch in the block history estimator. If the `BatchSize` variable is set to 0, it defaults to `EVM.RPCDefaultBatchSize`. ### BlockHistorySize + ```toml BlockHistorySize = 8 # Default ``` + BlockHistorySize controls the number of past blocks to keep in memory to use as a basis for calculating a percentile gas price. ### CheckInclusionBlocks + ```toml CheckInclusionBlocks = 12 # Default ``` + CheckInclusionBlocks is the number of recent blocks to use to detect if there is a transaction propagation/connectivity issue, and to prevent bumping in these cases. This can help avoid the situation where RPC nodes are not propagating transactions for some non-price-related reason (e.g.
go-ethereum bug, networking issue etc) and bumping gas would not help. Set to zero to disable connectivity checking completely. ### CheckInclusionPercentile + ```toml CheckInclusionPercentile = 90 # Default ``` + CheckInclusionPercentile controls the percentile that a transaction must have been higher than for all the blocks in the inclusion check window in order to register as a connectivity issue. For example, if CheckInclusionBlocks=12 and CheckInclusionPercentile=90 then further bumping will be prevented for any transaction with any attempt that has a higher price than the 90th percentile for the most recent 12 blocks. ### EIP1559FeeCapBufferBlocks + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml EIP1559FeeCapBufferBlocks = 13 # Example ``` + EIP1559FeeCapBufferBlocks controls the buffer blocks to add to the current base fee when sending a transaction. By default, the gas bumping threshold + 1 block is used. (Only applies to EIP-1559 transactions) ### TransactionPercentile + ```toml TransactionPercentile = 60 # Default ``` + TransactionPercentile specifies gas price to choose. E.g. if the block history contains four transactions with gas prices `[100, 200, 300, 400]` then picking 25 for this number will give a value of 200. If the calculated gas price is higher than `GasPriceDefault` then the higher price will be used as the base price for new transactions. Must be in range 0-100. @@ -9470,6 +10036,7 @@ the timeout. The estimator is already adding a buffer to account for a potential the prices and end up in stale values. ## EVM.HeadTracker + ```toml [EVM.HeadTracker] HistoryDepth = 100 # Default @@ -9479,47 +10046,59 @@ FinalityTagBypass = true # Default MaxAllowedFinalityDepth = 10000 # Default PersistenceEnabled = true # Default ``` + The head tracker continually listens for new heads from the chain. In addition to these settings, it log warnings if `EVM.NoNewHeadsThreshold` is exceeded without any new blocks being emitted. ### HistoryDepth + ```toml HistoryDepth = 100 # Default ``` + HistoryDepth tracks the top N blocks on top of the latest finalized block to keep in the `heads` database table. -Note that this can easily result in MORE than `N + finality depth` records since in the case of re-orgs we keep multiple heads for a particular block height. +Note that this can easily result in MORE than `N + finality depth` records since in the case of re-orgs we keep multiple heads for a particular block height. This number should be at least as large as `FinalityDepth`. There may be a small performance penalty to setting this to something very large (10,000+) ### MaxBufferSize + ```toml MaxBufferSize = 3 # Default ``` + MaxBufferSize is the maximum number of heads that may be buffered in front of the head tracker before older heads start to be dropped. You may think of it as something like the maximum permittable "lag" for the head tracker before we start dropping heads to keep up. ### SamplingInterval + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml SamplingInterval = '1s' # Default ``` + SamplingInterval means that head tracker callbacks will at maximum be made once in every window of this duration. This is a performance optimisation for fast chains. Set to 0 to disable sampling entirely. 
### FinalityTagBypass + ```toml FinalityTagBypass = true # Default ``` + FinalityTagBypass disables FinalityTag support in HeadTracker and makes it track blocks up to FinalityDepth from the most recent head. It should only be used on chains with an extremely large actual finality depth (the number of blocks between the most recent head and the latest finalized block). Has no effect if `FinalityTagsEnabled` = false ### MaxAllowedFinalityDepth + ```toml MaxAllowedFinalityDepth = 10000 # Default ``` + MaxAllowedFinalityDepth - defines maximum number of blocks between the most recent head and the latest finalized block. If actual finality depth exceeds this number, HeadTracker aborts backfill and returns an error. Has no effect if `FinalityTagsEnabled` = false @@ -9534,26 +10113,31 @@ On chains with fast finality, the persistence layer does not improve the chain's NOTE: persistence should not be disabled for products that use LogBroadcaster, as it might lead to missed on-chain events. ## EVM.KeySpecific + ```toml [[EVM.KeySpecific]] Key = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example GasEstimator.PriceMax = '79 gwei' # Example ``` - ### Key + ```toml Key = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example ``` + Key is the account to apply these settings to ### PriceMax + ```toml GasEstimator.PriceMax = '79 gwei' # Example ``` + GasEstimator.PriceMax overrides the maximum gas price for this key. See EVM.GasEstimator.PriceMax. ## EVM.NodePool + ```toml [EVM.NodePool] PollFailureThreshold = 5 # Default @@ -9567,49 +10151,61 @@ EnforceRepeatableRead = false # Default DeathDeclarationDelay = '10s' # Default NewHeadsPollInterval = '0s' # Default ``` + The node pool manages multiple RPC endpoints. In addition to these settings, `EVM.NoNewHeadsThreshold` controls how long to wait after receiving no new heads before marking the node as out-of-sync. ### PollFailureThreshold + ```toml PollFailureThreshold = 5 # Default ``` + PollFailureThreshold indicates how many consecutive polls must fail in order to mark a node as unreachable. Set to zero to disable poll checking. ### PollInterval + ```toml PollInterval = '10s' # Default ``` + PollInterval controls how often to poll the node to check for liveness. Set to zero to disable poll checking. ### SelectionMode + ```toml SelectionMode = 'HighestHead' # Default ``` + SelectionMode controls node selection strategy: + - HighestHead: use the node with the highest head number - RoundRobin: rotate through nodes, per-request - PriorityLevel: use the node with the smallest order number - TotalDifficulty: use the node with the greatest total difficulty ### SyncThreshold + ```toml SyncThreshold = 5 # Default ``` + SyncThreshold controls how far a node may lag behind the best node before being marked out-of-sync. Depending on `SelectionMode`, this represents a difference in the number of blocks (`HighestHead`, `RoundRobin`, `PriorityLevel`), or total difficulty (`TotalDifficulty`). Set to 0 to disable this check. ### LeaseDuration + ```toml LeaseDuration = '0s' # Default ``` + LeaseDuration is the minimum duration that the selected "best" node (as defined by SelectionMode) will be used, before switching to a better one if available. It also controls how often the lease check is done. Setting this to a low value (under 1m) might cause RPC to switch too aggressively. 
@@ -9618,9 +10214,11 @@ Recommended value is over 5m Set to '0s' to disable ### NodeIsSyncingEnabled + ```toml NodeIsSyncingEnabled = false # Default ``` + NodeIsSyncingEnabled is a flag that enables `syncing` health check on each reconnection to an RPC. Node transitions and remains in `Syncing` state while RPC signals this state (In case of Ethereum `eth_syncing` returns anything other than false). All of the requests to node in state `Syncing` are rejected. @@ -9628,9 +10226,11 @@ All of the requests to node in state `Syncing` are rejected. Set true to enable this check ### FinalizedBlockPollInterval + ```toml FinalizedBlockPollInterval = '5s' # Default ``` + FinalizedBlockPollInterval controls how often to poll RPC for new finalized blocks. The finalized block is only used to report to the `pool_rpc_node_highest_finalized_block` metric. We plan to use it in RPCs health assessment in the future. @@ -9640,9 +10240,11 @@ reported based on latest block and finality depth. Set to 0 to disable. ### EnforceRepeatableRead + ```toml EnforceRepeatableRead = false # Default ``` + EnforceRepeatableRead defines if Core should only use RPCs whose most recently finalized block is greater or equal to `highest finalized block - FinalizedBlockOffset`. In other words, exclude RPCs lagging on latest finalized block. @@ -9650,9 +10252,11 @@ block. Set false to disable ### DeathDeclarationDelay + ```toml DeathDeclarationDelay = '10s' # Default ``` + DeathDeclarationDelay defines the minimum duration an RPC must be in an unhealthy state before producing an error log message. Larger values might be helpful to reduce the noisiness of health checks like `EnforceRepeatableRead = true`, which might falsely trigger a declaration of `FinalizedBlockOutOfSync` due to insignificant network delays in broadcasting of the finalized state among RPCs. @@ -9667,7 +10271,9 @@ NewHeadsPollInterval define an interval for polling new block periodically using Set to 0 to disable. ## EVM.NodePool.Errors + :warning: **_ADVANCED_**: _Do not change these settings unless you know what you are doing._ + ```toml [EVM.NodePool.Errors] NonceTooLow = '(: |^)nonce too low' # Example @@ -9686,90 +10292,119 @@ Fatal = '(: |^)fatal' # Example ServiceUnavailable = '(: |^)service unavailable' # Example TooManyResults = '(: |^)too many results' # Example ``` + Errors enable the node to provide custom regex patterns to match against error messages from RPCs. ### NonceTooLow + ```toml NonceTooLow = '(: |^)nonce too low' # Example ``` + NonceTooLow is a regex pattern to match against nonce too low errors. ### NonceTooHigh + ```toml NonceTooHigh = '(: |^)nonce too high' # Example ``` + NonceTooHigh is a regex pattern to match against nonce too high errors. ### ReplacementTransactionUnderpriced + ```toml ReplacementTransactionUnderpriced = '(: |^)replacement transaction underpriced' # Example ``` + ReplacementTransactionUnderpriced is a regex pattern to match against replacement transaction underpriced errors. ### LimitReached + ```toml LimitReached = '(: |^)limit reached' # Example ``` + LimitReached is a regex pattern to match against limit reached errors. ### TransactionAlreadyInMempool + ```toml TransactionAlreadyInMempool = '(: |^)transaction already in mempool' # Example ``` + TransactionAlreadyInMempool is a regex pattern to match against transaction already in mempool errors.
### TerminallyUnderpriced + ```toml TerminallyUnderpriced = '(: |^)terminally underpriced' # Example ``` + TerminallyUnderpriced is a regex pattern to match against terminally underpriced errors. ### InsufficientEth + ```toml InsufficientEth = '(: |^)insufficient eth' # Example ``` + InsufficientEth is a regex pattern to match against insufficient eth errors. ### TxFeeExceedsCap + ```toml TxFeeExceedsCap = '(: |^)tx fee exceeds cap' # Example ``` + TxFeeExceedsCap is a regex pattern to match against tx fee exceeds cap errors. ### L2FeeTooLow + ```toml L2FeeTooLow = '(: |^)l2 fee too low' # Example ``` + L2FeeTooLow is a regex pattern to match against l2 fee too low errors. ### L2FeeTooHigh + ```toml L2FeeTooHigh = '(: |^)l2 fee too high' # Example ``` + L2FeeTooHigh is a regex pattern to match against l2 fee too high errors. ### L2Full + ```toml L2Full = '(: |^)l2 full' # Example ``` + L2Full is a regex pattern to match against l2 full errors. ### TransactionAlreadyMined + ```toml TransactionAlreadyMined = '(: |^)transaction already mined' # Example ``` + TransactionAlreadyMined is a regex pattern to match against transaction already mined errors. ### Fatal + ```toml Fatal = '(: |^)fatal' # Example ``` + Fatal is a regex pattern to match against fatal errors. ### ServiceUnavailable + ```toml ServiceUnavailable = '(: |^)service unavailable' # Example ``` + ServiceUnavailable is a regex pattern to match against service unavailable errors. ### TooManyResults @@ -9779,6 +10414,7 @@ TooManyResults = '(: |^)too many results' # Example TooManyResults is a regex pattern to match an eth_getLogs error indicating the result set is too large to return. ## EVM.OCR + ```toml [EVM.OCR] ContractConfirmations = 4 # Default @@ -9789,48 +10425,62 @@ DeltaCJitterOverride = "1h" # Default ObservationGracePeriod = '1s' # Default ``` - ### ContractConfirmations + ```toml ContractConfirmations = 4 # Default ``` + ContractConfirmations sets `OCR.ContractConfirmations` for this EVM chain. ### ContractTransmitterTransmitTimeout + ```toml ContractTransmitterTransmitTimeout = '10s' # Default ``` + ContractTransmitterTransmitTimeout sets `OCR.ContractTransmitterTransmitTimeout` for this EVM chain. ### DatabaseTimeout + ```toml DatabaseTimeout = '10s' # Default ``` + DatabaseTimeout sets `OCR.DatabaseTimeout` for this EVM chain. ### DeltaCOverride + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml DeltaCOverride = "168h" # Default ``` + DeltaCOverride (and `DeltaCJitterOverride`) determine the config override DeltaC. DeltaC is the maximum age of the latest report in the contract. If the maximum age is exceeded, a new report will be created by the report generation protocol. ### DeltaCJitterOverride + :warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ + ```toml DeltaCJitterOverride = "1h" # Default ``` + DeltaCJitterOverride is the range for jitter to add to `DeltaCOverride`. ### ObservationGracePeriod + ```toml ObservationGracePeriod = '1s' # Default ``` + ObservationGracePeriod sets `OCR.ObservationGracePeriod` for this EVM chain. ## EVM.Nodes + ```toml [[EVM.Nodes]] Name = 'foo' # Example @@ -9840,51 +10490,67 @@ SendOnly = false # Default Order = 100 # Default ``` - ### Name + ```toml Name = 'foo' # Example ``` + Name is a unique (per-chain) identifier for this node. ### WSURL + ```toml WSURL = 'wss://web.socket/test' # Example ``` + WSURL is the WS(S) endpoint for this node. Required for primary nodes when `LogBroadcasterEnabled` is `true` ### HTTPURL + ```toml HTTPURL = 'https://foo.web' # Example ``` + HTTPURL is the HTTP(S) endpoint for this node. Required for all nodes. ### SendOnly + ```toml SendOnly = false # Default ``` + SendOnly limits usage to sending transaction broadcasts only. With this enabled, only HTTPURL is required, and WSURL is not used. ### Order + ```toml Order = 100 # Default ``` + Order of the node in the pool. Takes effect if `SelectionMode` is `PriorityLevel`, or is used as a tie-breaker for `HighestHead` and `TotalDifficulty` ## EVM.OCR2.Automation + ```toml [EVM.OCR2.Automation] GasLimit = 5400000 # Default ``` - ### GasLimit + ```toml GasLimit = 5400000 # Default ``` + GasLimit controls the gas limit for transmit transactions from the ocr2automation job. ## EVM.Workflow + ```toml [EVM.Workflow] FromAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example @@ -9892,17 +10558,20 @@ ForwarderAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example GasLimitDefault = 400_000 # Default ``` - ### FromAddress + ```toml FromAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example ``` + FromAddress is the address of the transmitter key to use for workflow writes. ### ForwarderAddress + ```toml ForwarderAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example ``` + ForwarderAddress is the keystone forwarder contract address on chain. ### GasLimitDefault @@ -9912,6 +10581,7 @@ GasLimitDefault = 400_000 # Default GasLimitDefault is the default gas limit for workflow transactions. ## Cosmos + ```toml [[Cosmos]] ChainID = 'Malaga-420' # Example @@ -9929,106 +10599,136 @@ OCR2CacheTTL = '1m' # Default TxMsgTimeout = '10m' # Default ``` - ### ChainID + ```toml ChainID = 'Malaga-420' # Example ``` + ChainID is the Cosmos chain ID. Mandatory. ### Enabled + ```toml Enabled = true # Default ``` + Enabled enables this chain. ### Bech32Prefix + ```toml Bech32Prefix = 'wasm' # Default ``` + Bech32Prefix is the human-readable prefix for addresses on this Cosmos chain. See https://docs.cosmos.network/v0.47/spec/addresses/bech32. ### BlockRate + ```toml BlockRate = '6s' # Default ``` + BlockRate is the average time between blocks. ### BlocksUntilTxTimeout + ```toml BlocksUntilTxTimeout = 30 # Default ``` + BlocksUntilTxTimeout is the number of blocks to wait before giving up on the tx getting confirmed. ### ConfirmPollPeriod + ```toml ConfirmPollPeriod = '1s' # Default ``` + ConfirmPollPeriod sets how often to check for tx confirmation. ### FallbackGasPrice + ```toml FallbackGasPrice = '0.015' # Default ``` + FallbackGasPrice sets a fallback gas price to use when the estimator is not available. ### GasToken + ```toml GasToken = 'ucosm' # Default ``` + GasToken is the token denomination which is being used to pay gas fees on this chain. ### GasLimitMultiplier + ```toml GasLimitMultiplier = '1.5' # Default ``` + GasLimitMultiplier scales the estimated gas limit. ### MaxMsgsPerBatch + ```toml MaxMsgsPerBatch = 100 # Default ``` + MaxMsgsPerBatch limits the number of messages per transaction batch. ### OCR2CachePollPeriod + ```toml OCR2CachePollPeriod = '4s' # Default ``` + OCR2CachePollPeriod is the rate to poll for the OCR2 state cache. ### OCR2CacheTTL + ```toml OCR2CacheTTL = '1m' # Default ``` + OCR2CacheTTL is the stale OCR2 cache deadline.
### TxMsgTimeout + ```toml TxMsgTimeout = '10m' # Default ``` + TxMsgTimeout is the maximum age for resending transaction before they expire. ## Cosmos.Nodes + ```toml [[Cosmos.Nodes]] Name = 'primary' # Example TendermintURL = 'http://tender.mint' # Example ``` - ### Name + ```toml Name = 'primary' # Example ``` + Name is a unique (per-chain) identifier for this node. ### TendermintURL + ```toml TendermintURL = 'http://tender.mint' # Example ``` + TendermintURL is the HTTP(S) tendermint endpoint for this node. ## Solana + ```toml [[Solana]] ChainID = 'mainnet' # Example @@ -10052,114 +10752,149 @@ BlockHistoryPollPeriod = '5s' # Default ComputeUnitLimitDefault = 200_000 # Default ``` - ### ChainID + ```toml ChainID = 'mainnet' # Example ``` + ChainID is the Solana chain ID. Must be one of: mainnet, testnet, devnet, localnet. Mandatory. ### Enabled + ```toml Enabled = false # Default ``` + Enabled enables this chain. ### BalancePollPeriod + ```toml BalancePollPeriod = '5s' # Default ``` + BalancePollPeriod is the rate to poll for SOL balance and update Prometheus metrics. ### ConfirmPollPeriod + ```toml ConfirmPollPeriod = '500ms' # Default ``` + ConfirmPollPeriod is the rate to poll for signature confirmation. ### OCR2CachePollPeriod + ```toml OCR2CachePollPeriod = '1s' # Default ``` + OCR2CachePollPeriod is the rate to poll for the OCR2 state cache. ### OCR2CacheTTL + ```toml OCR2CacheTTL = '1m' # Default ``` + OCR2CacheTTL is the stale OCR2 cache deadline. ### TxTimeout + ```toml TxTimeout = '1m' # Default ``` + TxTimeout is the timeout for sending txes to an RPC endpoint. ### TxRetryTimeout + ```toml TxRetryTimeout = '10s' # Default ``` + TxRetryTimeout is the duration for tx manager to attempt rebroadcasting to RPC, before giving up. ### TxConfirmTimeout + ```toml TxConfirmTimeout = '30s' # Default ``` + TxConfirmTimeout is the duration to wait when confirming a tx signature, before discarding as unconfirmed. ### SkipPreflight + ```toml SkipPreflight = true # Default ``` + SkipPreflight enables or disables preflight checks when sending txs. ### Commitment + ```toml Commitment = 'confirmed' # Default ``` + Commitment is the confirmation level for solana state and transactions. ([documentation](https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment)) ### MaxRetries + ```toml MaxRetries = 0 # Default ``` + MaxRetries is the maximum number of times the RPC node will automatically rebroadcast a tx. The default is 0 for custom txm rebroadcasting method, set to -1 to use the RPC node's default retry strategy. 
### FeeEstimatorMode + ```toml FeeEstimatorMode = 'fixed' # Default ``` + FeeEstimatorMode is the method used to determine the base fee. ### ComputeUnitPriceMax + ```toml ComputeUnitPriceMax = 1000 # Default ``` + ComputeUnitPriceMax is the maximum price per compute unit that a transaction can be bumped to. ### ComputeUnitPriceMin + ```toml ComputeUnitPriceMin = 0 # Default ``` + ComputeUnitPriceMin is the minimum price per compute unit that a transaction can have. ### ComputeUnitPriceDefault + ```toml ComputeUnitPriceDefault = 0 # Default ``` + ComputeUnitPriceDefault is the default price per compute unit, and the starting base fee when FeeEstimatorMode = 'fixed'. ### FeeBumpPeriod + ```toml FeeBumpPeriod = '3s' # Default ``` + FeeBumpPeriod is the amount of time before a tx is retried with a fee bump. ### BlockHistoryPollPeriod + ```toml BlockHistoryPollPeriod = '5s' # Default ``` + BlockHistoryPollPeriod is the rate to poll for blocks in the block history fee estimator. ### ComputeUnitLimitDefault @@ -10280,6 +11015,7 @@ FinalizedBlockOffset = 0 # Default FinalizedBlockOffset is the offset from the finalized block to use for finality tags. ## Solana.Nodes + ```toml [[Solana.Nodes]] Name = 'primary' # Example @@ -10287,17 +11023,20 @@ URL = 'http://solana.web' # Example SendOnly = false # Default ``` - ### Name + ```toml Name = 'primary' # Example ``` + Name is a unique (per-chain) identifier for this node. ### URL + ```toml URL = 'http://solana.web' # Example ``` + URL is the HTTP(S) endpoint for this node. ### SendOnly @@ -10307,6 +11046,7 @@ SendOnly = false # Default SendOnly is a multinode config that only sends transactions to a node and does not read state. ## Starknet + ```toml [[Starknet]] ChainID = 'foobar' # Example @@ -10319,56 +11059,72 @@ TxTimeout = '10s' # Default ConfirmationPoll = '5s' # Default ``` - ### ChainID + ```toml ChainID = 'foobar' # Example ``` + ChainID is the Starknet chain ID. ### FeederURL + ```toml FeederURL = 'http://feeder.url' # Example ``` + FeederURL is required to get tx metadata (that the RPC can't). ### Enabled + ```toml Enabled = true # Default ``` + Enabled enables this chain. ### OCR2CachePollPeriod + ```toml OCR2CachePollPeriod = '5s' # Default ``` + OCR2CachePollPeriod is the rate to poll for the OCR2 state cache. ### OCR2CacheTTL + ```toml OCR2CacheTTL = '1m' # Default ``` + OCR2CacheTTL is the stale OCR2 cache deadline. ### RequestTimeout + ```toml RequestTimeout = '10s' # Default ``` + RequestTimeout is the RPC client timeout. ### TxTimeout + ```toml TxTimeout = '10s' # Default ``` + TxTimeout is the timeout for sending txes to an RPC endpoint. ### ConfirmationPoll + ```toml ConfirmationPoll = '5s' # Default ``` + ConfirmationPoll is how often the confirmer checks for tx inclusion on chain. ## Starknet.Nodes + ```toml [[Starknet.Nodes]] Name = 'primary' # Example @@ -10376,22 +11132,121 @@ URL = 'http://stark.node' # Example APIKey = 'key' # Example ``` - ### Name + ```toml Name = 'primary' # Example ``` + Name is a unique (per-chain) identifier for this node. ### URL + ```toml URL = 'http://stark.node' # Example ``` + URL is the base HTTP(S) endpoint for this node.
### APIKey + ```toml APIKey = 'key' # Example ``` + APIKey Header is optional and only required for Nethermind RPCs. + +## Tron + +```toml +[[Tron]] +ChainID = 'foobar' # Example +Enabled = true # Default +OCR2CachePollPeriod = '5s' # Default +OCR2CacheTTL = '1m' # Default +RequestTimeout = '10s' # Default +TxTimeout = '10s' # Default +ConfirmationPoll = '5s' # Default +``` + +### ChainID + +```toml +ChainID = 'foobar' # Example +``` + +ChainID is the Tron chain ID. + +### Enabled + +```toml +Enabled = true # Default +``` + +Enabled enables this chain. + +### OCR2CachePollPeriod + +```toml +OCR2CachePollPeriod = '5s' # Default +``` + +OCR2CachePollPeriod is the rate to poll for the OCR2 state cache. + +### OCR2CacheTTL + +```toml +OCR2CacheTTL = '1m' # Default +``` + +OCR2CacheTTL is the stale OCR2 cache deadline. + +### RequestTimeout + +```toml +RequestTimeout = '10s' # Default +``` + +RequestTimeout is the RPC client timeout. + +### TxTimeout + +```toml +TxTimeout = '10s' # Default +``` + +TxTimeout is the timeout for sending txes to an RPC endpoint. + +### ConfirmationPoll + +```toml +ConfirmationPoll = '5s' # Default +``` + +ConfirmationPoll is how often the confirmer checks for tx inclusion on chain. + +## Tron.Nodes + +```toml +[[Tron.Nodes]] +Name = 'primary' # Example +URL = 'http://api.trongrid.io' # Example +``` + +### Name + +```toml +Name = 'primary' # Example +``` + +Name is a unique (per-chain) identifier for this node. + +### URL + +```toml +URL = 'http://api.trongrid.io' # Example +``` + +URL is the base HTTP(S) endpoint for this node. diff --git a/integration-tests/client/chainlink.go b/integration-tests/client/chainlink.go index ebfd97c48b4..9626e7a6bba 100644 --- a/integration-tests/client/chainlink.go +++ b/integration-tests/client/chainlink.go @@ -1005,6 +1005,34 @@ func (c *ChainlinkClient) CreateStarkNetNode(node *StarkNetNodeAttributes) (*Sta return &response, resp.RawResponse, err } +// CreateTronChain creates a tron chain +func (c *ChainlinkClient) CreateTronChain(chain *TronChainAttributes) (*TronChainCreate, *http.Response, error) { + response := TronChainCreate{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("Chain ID", chain.ChainID).Msg("Creating Tron Chain") + resp, err := c.APIClient.R(). + SetBody(chain). + SetResult(&response). + Post("/v2/chains/tron") + if err != nil { + return nil, nil, err + } + return &response, resp.RawResponse, err +} + +// CreateTronNode creates a tron node +func (c *ChainlinkClient) CreateTronNode(node *TronNodeAttributes) (*TronNodeCreate, *http.Response, error) { + response := TronNodeCreate{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("Name", node.Name).Msg("Creating Tron Node") + resp, err := c.APIClient.R(). + SetBody(node). + SetResult(&response).
+ Post("/v2/nodes/tron") + if err != nil { + return nil, nil, err + } + return &response, resp.RawResponse, err +} + // InternalIP retrieves the inter-cluster IP of the Chainlink node, for use with inter-node communications func (c *ChainlinkClient) InternalIP() string { return c.Config.InternalIP diff --git a/integration-tests/client/chainlink_models.go b/integration-tests/client/chainlink_models.go index 86e9f75902d..d10658c671c 100644 --- a/integration-tests/client/chainlink_models.go +++ b/integration-tests/client/chainlink_models.go @@ -520,6 +520,48 @@ type StarkNetNodeCreate struct { Data StarkNetNode `json:"data"` } +type TronChainConfig struct { + OCR2CachePollPeriod null.String + OCR2CacheTTL null.String + RequestTimeout null.String + TxTimeout null.Bool + TxSendFrequency null.String + TxMaxBatchSize null.String +} + +// TronChainAttributes is the model that represents the tron chain +type TronChainAttributes struct { + ChainID string `json:"chainID"` + Config TronChainConfig `json:"config"` +} + +// TronChain is the model that represents the tron chain when read +type TronChain struct { + Attributes TronChainAttributes `json:"attributes"` +} + +// TronChainCreate is the model that represents the tron chain when created +type TronChainCreate struct { + Data TronChain `json:"data"` +} + +// TronNodeAttributes is the model that represents the tron node +type TronNodeAttributes struct { + Name string `json:"name"` + ChainID string `json:"chainId"` + Url string `json:"url"` +} + +// TronNode is the model that represents the tron node when read +type TronNode struct { + Attributes TronNodeAttributes `json:"attributes"` +} + +// TronNodeCreate is the model that represents the tron node when created +type TronNodeCreate struct { + Data TronNode `json:"data"` +} + // SpecForm is the form used when creating a v2 job spec, containing the TOML of the v2 job type SpecForm struct { TOML string `json:"toml"` diff --git a/testdata/scripts/chains/help.txtar b/testdata/scripts/chains/help.txtar index 83a342925e1..d4ef2378755 100644 --- a/testdata/scripts/chains/help.txtar +++ b/testdata/scripts/chains/help.txtar @@ -13,6 +13,7 @@ COMMANDS: cosmos Commands for handling Cosmos chains solana Commands for handling Solana chains starknet Commands for handling StarkNet chains + tron Commands for handling Tron chains OPTIONS: --help, -h show help diff --git a/testdata/scripts/chains/tron/help.txtar b/testdata/scripts/chains/tron/help.txtar new file mode 100644 index 00000000000..0b23bdd530e --- /dev/null +++ b/testdata/scripts/chains/tron/help.txtar @@ -0,0 +1,16 @@ +exec chainlink chains tron --help +cmp stdout out.txt + +-- out.txt -- +NAME: + chainlink chains tron - Commands for handling Tron chains + +USAGE: + chainlink chains tron command [command options] [arguments...] + +COMMANDS: + list List all existing Tron chains + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/chains/tron/list/help.txtar b/testdata/scripts/chains/tron/list/help.txtar new file mode 100644 index 00000000000..eeef9a2728b --- /dev/null +++ b/testdata/scripts/chains/tron/list/help.txtar @@ -0,0 +1,9 @@ +exec chainlink chains tron list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + chainlink chains tron list - List all existing Solana chains + +USAGE: + chainlink chains tron list [arguments...] 
diff --git a/testdata/scripts/health/multi-chain.txtar b/testdata/scripts/health/multi-chain.txtar index 5d1c25d18fd..2ca511a0eaf 100644 --- a/testdata/scripts/health/multi-chain.txtar +++ b/testdata/scripts/health/multi-chain.txtar @@ -67,6 +67,13 @@ ChainID = 'Baz' Name = 'primary' URL = 'http://stark.node' +[[Tron]] +ChainID = 'TronId' + +[[Tron.Nodes]] +Name = 'primary' +URL = 'http://api.trongrid.io' + -- out.txt -- ok Cosmos.Foo.Chain ok Cosmos.Foo.Relayer @@ -100,6 +107,7 @@ ok StarkNet.Baz.Chain ok StarkNet.Baz.Relayer ok StarkNet.Baz.Txm ok TelemetryManager +ok Tron.TronId -- out-unhealthy.txt -- ! EVM.1.HeadTracker.HeadListener @@ -378,6 +386,15 @@ ok TelemetryManager "output": "" } }, + { + "type": "checks", + "id": "Tron.TronId", + "attributes": { + "name": "Tron.TronId", + "status": "passing", + "output": "" + } + }, { "type": "checks", "id": "TelemetryManager", diff --git a/testdata/scripts/help-all/help-all.txtar b/testdata/scripts/help-all/help-all.txtar index 372b149bd19..0b8d1975be0 100644 --- a/testdata/scripts/help-all/help-all.txtar +++ b/testdata/scripts/help-all/help-all.txtar @@ -32,6 +32,8 @@ chains solana # Commands for handling Solana chains chains solana list # List all existing Solana chains chains starknet # Commands for handling StarkNet chains chains starknet list # List all existing StarkNet chains +chains tron # Commands for handling Tron chains +chains tron list # List all existing Tron chains config # Commands for the node's configuration config loglevel # Set log level config logsql # Enable/disable SQL statement logging @@ -109,6 +111,12 @@ keys starknet delete # Delete StarkNet key if present keys starknet export # Export StarkNet key to keyfile keys starknet import # Import StarkNet key from keyfile keys starknet list # List the StarkNet keys +keys tron # Remote commands for administering the node's Tron keys +keys tron create # Create a Tron key +keys tron delete # Delete Tron key if present +keys tron export # Export Tron key to keyfile +keys tron import # Import Tron key from keyfile +keys tron list # List the Tron keys keys vrf # Remote commands for administering the node's vrf keys keys vrf create # Create a VRF key keys vrf delete # Archive or delete VRF key from memory and the database, if present. Note that jobs referencing the removed key will also be removed. @@ -140,6 +148,8 @@ nodes solana # Commands for handling Solana node configuration nodes solana list # List all existing Solana nodes nodes starknet # Commands for handling StarkNet node configuration nodes starknet list # List all existing StarkNet nodes +nodes tron # Commands for handling Tron node configuration +nodes tron list # List all existing Tron nodes txs # Commands for handling transactions txs cosmos # Commands for handling Cosmos transactions txs cosmos create # Send of from node Cosmos account to destination . 
diff --git a/testdata/scripts/keys/help.txtar b/testdata/scripts/keys/help.txtar index 83253d6906d..7b3ce35bd82 100644 --- a/testdata/scripts/keys/help.txtar +++ b/testdata/scripts/keys/help.txtar @@ -18,8 +18,8 @@ COMMANDS: solana Remote commands for administering the node's Solana keys starknet Remote commands for administering the node's StarkNet keys aptos Remote commands for administering the node's Aptos keys + tron Remote commands for administering the node's Tron keys vrf Remote commands for administering the node's vrf keys OPTIONS: --help, -h show help - diff --git a/testdata/scripts/keys/tron/help.txtar b/testdata/scripts/keys/tron/help.txtar new file mode 100644 index 00000000000..6e0b8bf31a2 --- /dev/null +++ b/testdata/scripts/keys/tron/help.txtar @@ -0,0 +1,20 @@ +exec chainlink keys tron --help +cmp stdout out.txt + +-- out.txt -- +NAME: + chainlink keys tron - Remote commands for administering the node's Tron keys + +USAGE: + chainlink keys tron command [command options] [arguments...] + +COMMANDS: + create Create a Tron key + import Import Tron key from keyfile + export Export Tron key to keyfile + delete Delete Tron key if present + list List the Tron keys + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/node/validate/invalid-duplicates.txtar b/testdata/scripts/node/validate/invalid-duplicates.txtar index 84e6c23aa71..416b5d78c53 100644 --- a/testdata/scripts/node/validate/invalid-duplicates.txtar +++ b/testdata/scripts/node/validate/invalid-duplicates.txtar @@ -63,6 +63,19 @@ URL = 'http://stark.node' Name = 'primary' URL = 'http://stark.node' +[[Tron]] +ChainID = '1' + +[[Tron]] +ChainID = '1' + +[[Tron.Nodes]] +Name = 'fake' +URL = 'https://foo.bar' + +[[Tron.Nodes]] +Name = 'fake' +URL = 'https://foo.bar' -- secrets.toml -- [Database] @@ -92,3 +105,7 @@ Error running app: invalid configuration: 4 errors: - 1.ChainID: invalid value (foobar): duplicate - must be unique - 1.Nodes.1.Name: invalid value (primary): duplicate - must be unique - 1.Nodes.1.URL: invalid value (http://stark.node): duplicate - must be unique + - Tron: 3 errors: + - 1.ChainID: invalid value (1): duplicate - must be unique + - 1.Nodes.1.Name: invalid value (fake): duplicate - must be unique + - 1.Nodes.1.URL: invalid value (https://foo.bar): duplicate - must be unique diff --git a/testdata/scripts/nodes/help.txtar b/testdata/scripts/nodes/help.txtar index 8a8f31f4166..2a9754a3fea 100644 --- a/testdata/scripts/nodes/help.txtar +++ b/testdata/scripts/nodes/help.txtar @@ -13,6 +13,7 @@ COMMANDS: cosmos Commands for handling Cosmos node configuration solana Commands for handling Solana node configuration starknet Commands for handling StarkNet node configuration + tron Commands for handling Tron node configuration OPTIONS: --help, -h show help diff --git a/testdata/scripts/nodes/tron/help.txtar b/testdata/scripts/nodes/tron/help.txtar new file mode 100644 index 00000000000..28a8ca07bd9 --- /dev/null +++ b/testdata/scripts/nodes/tron/help.txtar @@ -0,0 +1,16 @@ +exec chainlink nodes tron --help +cmp stdout out.txt + +-- out.txt -- +NAME: + chainlink nodes tron - Commands for handling Tron node configuration + +USAGE: + chainlink nodes tron command [command options] [arguments...] 
+ +COMMANDS: + list List all existing Tron nodes + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/nodes/tron/list/help.txtar b/testdata/scripts/nodes/tron/list/help.txtar new file mode 100644 index 00000000000..bbed67b9429 --- /dev/null +++ b/testdata/scripts/nodes/tron/list/help.txtar @@ -0,0 +1,9 @@ +exec chainlink nodes tron list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + chainlink nodes tron list - List all existing Tron nodes + +USAGE: + chainlink nodes tron list [arguments...]