integration-tests/load: fix linter issues
jmank88 committed Jan 21, 2025
1 parent 344d591 commit 5d152ff
Showing 15 changed files with 86 additions and 95 deletions.
22 changes: 10 additions & 12 deletions integration-tests/load/automationv2_1/automationv2_1_test.go
@@ -194,7 +194,7 @@ Load Config:
testNetwork := networks.MustGetSelectedNetworkConfig(loadedTestConfig.Network)[0]
testType := "load"
loadDuration := time.Duration(*loadedTestConfig.Automation.General.Duration) * time.Second
automationDefaultLinkFunds := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(10000))) //10000 LINK
automationDefaultLinkFunds := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(10000))) // 10000 LINK

nsLabels, err := environment.GetRequiredChainLinkNamespaceLabels(string(tc.Automation), testType)
require.NoError(t, err, "Error creating required chain.link labels for namespace")
@@ -253,7 +253,6 @@ Load Config:
dbSpec = map[string]interface{}{"stateful": true}
default:
// minimum:

}

if *loadedTestConfig.Pyroscope.Enabled {
@@ -383,7 +382,7 @@ Load Config:
loadConfigs = append(loadConfigs, deploymentData.LoadConfigs...)
}

require.Equal(t, expectedTotalUpkeepCount, len(consumerContracts), "Incorrect number of consumer/trigger contracts deployed")
require.Len(t, consumerContracts, expectedTotalUpkeepCount, "Incorrect number of consumer/trigger contracts deployed")

for i, consumerContract := range consumerContracts {
logTriggerConfigStruct := ac.IAutomationV21PlusCommonLogTriggerConfig{
@@ -429,16 +428,16 @@ Load Config:
upkeepConfigs = append(upkeepConfigs, upkeepConfig)
}

require.Equal(t, expectedTotalUpkeepCount, len(upkeepConfigs), "Incorrect number of upkeep configs created")
require.Len(t, upkeepConfigs, expectedTotalUpkeepCount, "Incorrect number of upkeep configs created")
registrationTxHashes, err := a.RegisterUpkeeps(upkeepConfigs, maxDeploymentConcurrency)
require.NoError(t, err, "Error registering upkeeps")

upkeepIds, err := a.ConfirmUpkeepsRegistered(registrationTxHashes, maxDeploymentConcurrency)
upkeepIDs, err := a.ConfirmUpkeepsRegistered(registrationTxHashes, maxDeploymentConcurrency)
require.NoError(t, err, "Error confirming upkeeps registered")
require.Equal(t, expectedTotalUpkeepCount, len(upkeepIds), "Incorrect number of upkeeps registered")
require.Len(t, upkeepIDs, expectedTotalUpkeepCount, "Incorrect number of upkeeps registered")

l.Info().Msg("Successfully registered all Automation Upkeeps")
l.Info().Interface("Upkeep IDs", upkeepIds).Msg("Upkeeps Registered")
l.Info().Interface("Upkeep IDs", upkeepIDs).Msg("Upkeeps Registered")
l.Info().Str("STARTUP_WAIT_TIME", StartupWaitTime.String()).Msg("Waiting for plugin to start")
time.Sleep(StartupWaitTime)

@@ -458,7 +457,7 @@ Load Config:
NumberOfSpamMatchingEvents: int64(*loadConfigs[i].NumberOfSpamMatchingEvents),
NumberOfSpamNonMatchingEvents: int64(*loadConfigs[i].NumberOfSpamNonMatchingEvents),
}
numberOfEventsEmittedPerSec = numberOfEventsEmittedPerSec + int64(*loadConfigs[i].NumberOfEvents)
numberOfEventsEmittedPerSec += int64(*loadConfigs[i].NumberOfEvents)
configs = append(configs, c)
}

@@ -550,7 +549,7 @@ Load Config:
ToBlock: big.NewInt(0).SetUint64(fromBlock + batchSize),
Topics: [][]common.Hash{{consumerABI.Events["PerformingUpkeep"].ID}},
}
err = fmt.Errorf("initial error") // to ensure our for loop runs at least once
err = errors.New("initial error") // to ensure our for loop runs at least once
for err != nil {
var (
logsInBatch []types.Log
@@ -615,7 +614,7 @@ Load Config:
ToBlock: big.NewInt(0).SetUint64(fromBlock + batchSize),
Topics: [][]common.Hash{{emitterABI.Events["Log4"].ID}, {bytes1}, {bytes1}},
}
err = fmt.Errorf("initial error") // to ensure our for loop runs at least once
err = errors.New("initial error") // to ensure our for loop runs at least once
for err != nil {
var (
logsInBatch []types.Log
@@ -641,7 +640,7 @@ Load Config:
logs = append(logs, logsInBatch...)
}
}
numberOfEventsEmitted = numberOfEventsEmitted + int64(len(logs))
numberOfEventsEmitted += int64(len(logs))
}

l.Info().Int64("Number of Events Emitted", numberOfEventsEmitted).Msg("Number of Events Emitted")
@@ -753,5 +752,4 @@ Test Duration: %s`
}
}
})

}
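
For reference (not part of the commit): the recurring patterns in this test file are require.Len instead of comparing a length with require.Equal, errors.New instead of fmt.Errorf for constant messages, and compound assignment. A minimal standalone sketch of all three, with illustrative names only:

package automationexample

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLintPatterns(t *testing.T) {
	upkeepIDs := []string{"a", "b", "c"}
	expectedTotalUpkeepCount := 3

	// require.Len replaces require.Equal(t, expected, len(slice)) and prints
	// the slice contents when the assertion fails.
	require.Len(t, upkeepIDs, expectedTotalUpkeepCount, "Incorrect number of upkeeps registered")

	// errors.New replaces fmt.Errorf when the message contains no format verbs.
	err := errors.New("initial error")
	require.Error(t, err)

	// Compound assignment replaces x = x + y.
	var numberOfEventsEmitted int64
	numberOfEventsEmitted += int64(len(upkeepIDs))
	require.Equal(t, int64(3), numberOfEventsEmitted)
}
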
4 changes: 2 additions & 2 deletions integration-tests/load/automationv2_1/gun.go
@@ -44,14 +44,14 @@ func generateCallData(int1 int64, int2 int64, count int64) []byte {

func NewLogTriggerUser(
logger zerolog.Logger,
TriggerConfigs []LogTriggerConfig,
triggerConfigs []LogTriggerConfig,
client *seth.Client,
multicallAddress string,
) *LogTriggerGun {
var data [][]byte
var addresses []string

for _, c := range TriggerConfigs {
for _, c := range triggerConfigs {
if c.NumberOfEvents > 0 {
d := generateCallData(1, 1, c.NumberOfEvents)
data = append(data, d)
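
The rename above (TriggerConfigs → triggerConfigs) follows Go's convention that function parameters, like any local identifier, use lowerCamelCase. A hypothetical stripped-down version of the same shape:

package gunexample

// LogTriggerConfig is a stand-in for the real load-test config type.
type LogTriggerConfig struct {
	NumberOfEvents int64
}

// countActiveConfigs mirrors the loop in NewLogTriggerUser; the parameter is
// lowerCamelCase rather than the exported-style TriggerConfigs.
func countActiveConfigs(triggerConfigs []LogTriggerConfig) int {
	active := 0
	for _, c := range triggerConfigs {
		if c.NumberOfEvents > 0 {
			active++
		}
	}
	return active
}
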
16 changes: 8 additions & 8 deletions integration-tests/load/automationv2_1/helpers.go
@@ -30,18 +30,18 @@ func sendSlackNotification(header string, l zerolog.Logger, config *tc.TestConfi

headerText := ":chainlink-keepers: Automation Load Test " + header

grafanaUrl, err := config.GetGrafanaBaseURL()
grafanaURL, err := config.GetGrafanaBaseURL()
if err != nil {
return "", err
}

dashboardUrl, err := config.GetGrafanaDashboardURL()
dashboardURL, err := config.GetGrafanaDashboardURL()
if err != nil {
return "", err
}

formattedDashboardUrl := fmt.Sprintf("%s%s?orgId=1&from=%s&to=%s&var-namespace=%s&var-number_of_nodes=%s", grafanaUrl, dashboardUrl, startingTime, endingTime, namespace, numberOfNodes)
l.Info().Str("Dashboard", formattedDashboardUrl).Msg("Dashboard URL")
formattedDashboardURL := fmt.Sprintf("%s%s?orgId=1&from=%s&to=%s&var-namespace=%s&var-number_of_nodes=%s", grafanaURL, dashboardURL, startingTime, endingTime, namespace, numberOfNodes)
l.Info().Str("Dashboard", formattedDashboardURL).Msg("Dashboard URL")

var notificationBlocks []slack.Block

@@ -54,15 +54,15 @@ func sendSlackNotification(header string, l zerolog.Logger, config *tc.TestConfi
pyroscopeServer := *config.Pyroscope.ServerUrl
pyroscopeEnvironment := *config.Pyroscope.Environment

formattedPyroscopeUrl := fmt.Sprintf("%s/?query=chainlink-node.cpu{Environment=\"%s\"}&from=%s&to=%s", pyroscopeServer, pyroscopeEnvironment, startingTime, endingTime)
l.Info().Str("Pyroscope", formattedPyroscopeUrl).Msg("Dashboard URL")
formattedPyroscopeURL := fmt.Sprintf("%s/?query=chainlink-node.cpu{Environment=\"%s\"}&from=%s&to=%s", pyroscopeServer, pyroscopeEnvironment, startingTime, endingTime)
l.Info().Str("Pyroscope", formattedPyroscopeURL).Msg("Dashboard URL")
notificationBlocks = append(notificationBlocks, slack.NewSectionBlock(slack.NewTextBlockObject("mrkdwn",
fmt.Sprintf("<%s|Pyroscope>",
formattedPyroscopeUrl), false, true), nil, nil))
formattedPyroscopeURL), false, true), nil, nil))
}
notificationBlocks = append(notificationBlocks, slack.NewSectionBlock(slack.NewTextBlockObject("mrkdwn",
fmt.Sprintf("<%s|Test Dashboard> \nNotifying <@%s>",
formattedDashboardUrl, reportModel.SlackUserID), false, true), nil, nil))
formattedDashboardURL, reportModel.SlackUserID), false, true), nil, nil))

if len(extraBlocks) > 0 {
notificationBlocks = append(notificationBlocks, extraBlocks...)
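
The Url → URL renames in this file follow the common Go initialism convention (URL, ID, HTTP keep a uniform case). A small hypothetical example of the naming style only:

package helpersexample

import "fmt"

// formatDashboardURL illustrates the naming convention; the query parameters
// are copied loosely from the diff and are not authoritative.
func formatDashboardURL(grafanaURL, dashboardURL, namespace string) string {
	return fmt.Sprintf("%s%s?orgId=1&var-namespace=%s", grafanaURL, dashboardURL, namespace)
}
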
3 changes: 2 additions & 1 deletion integration-tests/load/functions/gateway.go
@@ -7,6 +7,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math"
"time"
@@ -117,7 +118,7 @@ func UploadS4Secrets(rc *resty.Client, s4Cfg *S4SecretsCfg) (uint8, uint64, erro
log.Debug().Interface("Result", result).Msg("S4 secrets_set response result")
for _, nodeResponse := range result.Result.Body.Payload.NodeResponses {
if !nodeResponse.Body.Payload.Success {
return 0, 0, fmt.Errorf("node response was not successful")
return 0, 0, errors.New("node response was not successful")
}
}
if envelope.SlotID > math.MaxUint8 {
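
Besides swapping fmt.Errorf for errors.New, the hunk at the top of this file adds "errors" to the import block — the replacement only compiles once the package is imported. A minimal sketch with a hypothetical helper name:

package gatewayexample

import "errors"

// checkNodeResponse is a hypothetical stand-in for the success check in
// UploadS4Secrets.
func checkNodeResponse(success bool) error {
	if !success {
		// errors.New requires the "errors" import; "fmt" stays only if other
		// formatting in the file still needs it.
		return errors.New("node response was not successful")
	}
	return nil
}
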
4 changes: 2 additions & 2 deletions integration-tests/load/functions/gateway_gun.go
@@ -69,7 +69,7 @@ func callSecretsSet(m *GatewaySecretsSetGun) *wasp.Response {
}
network := m.Cfg.GetNetworkConfig().SelectedNetworks[0]
if len(m.Cfg.GetNetworkConfig().WalletKeys[network]) < 1 {
panic(fmt.Sprintf("no wallet keys found for %s", network))
panic("no wallet keys found for " + network)
}

cfg := m.Cfg.GetFunctionsConfig()
@@ -103,7 +103,7 @@ func callSecretsList(m *GatewaySecretsSetGun) *wasp.Response {
expiration := int64(60 * 60 * 1000)
network := m.Cfg.GetNetworkConfig().SelectedNetworks[0]
if len(m.Cfg.GetNetworkConfig().WalletKeys[network]) < 1 {
panic(fmt.Sprintf("no wallet keys found for %s", network))
panic("no wallet keys found for " + network)
}
cfg := m.Cfg.GetFunctionsConfig()
if err := ListS4Secrets(m.Resty, &S4SecretsCfg{
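
The two panics above drop fmt.Sprintf in favour of plain concatenation, since only a single %s was being substituted. Roughly, with a made-up helper:

package gatewaygunexample

// requireWalletKeys mirrors the guard in callSecretsSet and callSecretsList.
func requireWalletKeys(walletKeys map[string][]string, network string) {
	if len(walletKeys[network]) < 1 {
		// "no wallet keys found for " + network replaces
		// fmt.Sprintf("no wallet keys found for %s", network).
		panic("no wallet keys found for " + network)
	}
}
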
28 changes: 14 additions & 14 deletions integration-tests/load/functions/request_gun.go
@@ -20,8 +20,8 @@ type SingleFunctionCallGun struct {
slotID uint8
slotVersion uint64
args []string
subscriptionId uint64
jobId [32]byte
subscriptionID uint64
jobID [32]byte
}

func NewSingleFunctionCallGun(
@@ -32,8 +32,8 @@ func NewSingleFunctionCallGun(
slotID uint8,
slotVersion uint64,
args []string,
subscriptionId uint64,
jobId [32]byte,
subscriptionID uint64,
jobID [32]byte,
) *SingleFunctionCallGun {
return &SingleFunctionCallGun{
ft: ft,
@@ -43,8 +43,8 @@
slotID: slotID,
slotVersion: slotVersion,
args: args,
subscriptionId: subscriptionId,
jobId: jobId,
subscriptionID: subscriptionID,
jobID: jobID,
}
}

@@ -55,8 +55,8 @@ func (m *SingleFunctionCallGun) callReal() *wasp.Response {
m.slotID,
m.slotVersion,
m.args,
m.subscriptionId,
m.jobId,
m.subscriptionID,
m.jobID,
)
if err != nil {
return &wasp.Response{Error: err.Error(), Failed: true}
@@ -71,23 +71,23 @@ func (m *SingleFunctionCallGun) callWithSecrets() *wasp.Response {
m.slotID,
m.slotVersion,
m.args,
m.subscriptionId,
m.jobId,
m.subscriptionID,
m.jobID,
)
if err != nil {
return &wasp.Response{Error: err.Error(), Failed: true}
}
return &wasp.Response{}
}

func (m *SingleFunctionCallGun) callWithHttp() *wasp.Response {
func (m *SingleFunctionCallGun) callWithHTTP() *wasp.Response {
err := m.ft.LoadTestClient.SendRequest(
m.times,
m.source,
[]byte{},
m.args,
m.subscriptionId,
m.jobId,
m.subscriptionID,
m.jobID,
)
if err != nil {
return &wasp.Response{Error: err.Error(), Failed: true}
@@ -101,7 +101,7 @@ func (m *SingleFunctionCallGun) Call(_ *wasp.Generator) *wasp.Response {
case ModeSecretsOnlyPayload:
return m.callWithSecrets()
case ModeHTTPPayload:
return m.callWithHttp()
return m.callWithHTTP()
case ModeReal:
return m.callReal()
default:
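
As in the other files, the Id/Http → ID/HTTP renames are applied consistently to struct fields, constructor parameters, and the method together with its call site in the mode switch. A compressed, hypothetical illustration:

package requestgunexample

// Response stands in for wasp.Response.
type Response struct{ Failed bool }

type caller struct {
	subscriptionID uint64   // was subscriptionId
	jobID          [32]byte // was jobId
	mode           string
}

// callWithHTTP was callWithHttp; the rename must also be applied where the
// method is dispatched below.
func (c *caller) callWithHTTP() *Response { return &Response{} }

func (c *caller) call() *Response {
	switch c.mode {
	case "http":
		return c.callWithHTTP()
	default:
		return &Response{Failed: true}
	}
}
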
4 changes: 2 additions & 2 deletions integration-tests/load/ocr/vu.go
@@ -2,7 +2,7 @@ package ocr

import (
"context"
"fmt"
"strconv"
"sync/atomic"
"time"

@@ -85,7 +85,7 @@ func (m *VU) Setup(_ *wasp.Generator) error {
if err != nil {
return err
}
err = actions.CreateOCRJobs(ocrInstances, m.bootstrapNode, m.workerNodes, 5, m.msClient, fmt.Sprint(m.seth.ChainID))
err = actions.CreateOCRJobs(ocrInstances, m.bootstrapNode, m.workerNodes, 5, m.msClient, strconv.FormatInt(m.seth.ChainID, 10))
if err != nil {
return err
}
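
Here fmt.Sprint(m.seth.ChainID) becomes strconv.FormatInt(..., 10), which states the base explicitly and avoids fmt's reflection-based formatting; the "fmt" import is swapped for "strconv" accordingly. A standalone example with a placeholder chain ID:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var chainID int64 = 1337 // placeholder value, not from the test

	// strconv.FormatInt(chainID, 10) replaces fmt.Sprint(chainID).
	fmt.Println(strconv.FormatInt(chainID, 10))
}
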
10 changes: 5 additions & 5 deletions integration-tests/load/vrfv2/gun.go
@@ -59,7 +59,7 @@ func (m *BHSTestGun) Call(_ *wasp.Generator) *wasp.Response {
*m.testConfig.General.RandomnessRequestCountPerRequestDeviation,
seth_utils.AvailableSethKeyNum(m.sethClient),
)
//todo - might need to store randRequestBlockNumber and blockhash to verify that it was stored in BHS contract at the end of the test
// todo - might need to store randRequestBlockNumber and blockhash to verify that it was stored in BHS contract at the end of the test
if err != nil {
return &wasp.Response{Error: err.Error(), Failed: true}
}
@@ -95,17 +95,17 @@ func NewSingleHashGun(

// Call implements example gun call, assertions on response bodies should be done here
func (m *SingleHashGun) Call(_ *wasp.Generator) *wasp.Response {
//todo - should work with multiple consumers and consumers having different keyhashes and wallets
// todo - should work with multiple consumers and consumers having different keyhashes and wallets

vrfv2Config := m.testConfig.General
//randomly increase/decrease randomness request count per TX
// randomly increase/decrease randomness request count per TX
randomnessRequestCountPerRequest := deviateValue(*vrfv2Config.RandomnessRequestCountPerRequest, *vrfv2Config.RandomnessRequestCountPerRequestDeviation)
_, _, err := vrfv2.RequestRandomnessAndWaitForFulfillment(
m.logger,
//the same consumer is used for all requests and in all subs
// the same consumer is used for all requests and in all subs
m.contracts.VRFV2Consumers[0],
m.contracts.CoordinatorV2,
//randomly pick a subID from pool of subIDs
// randomly pick a subID from pool of subIDs
m.subIDs[randInRange(0, len(m.subIDs)-1)],
&vrfcommon.VRFKeyData{KeyHash: m.keyHash},
*vrfv2Config.MinimumConfirmations,
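
Every change in this file is comment formatting: a single space after the // marker, the style that checks such as gocritic's commentFormatting enforce. Purely as an illustration, with a hypothetical helper:

package vrfv2gunexample

// deviatedCount exists only to carry the comment examples below.
func deviatedCount(base, deviation uint16) uint16 {
	//bad: no space after the comment marker (typically flagged)
	// good: a single space follows "//"
	return base + deviation
}
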
30 changes: 13 additions & 17 deletions integration-tests/load/vrfv2/vrfv2_test.go
@@ -81,13 +81,11 @@ func TestVRFV2Performance(t *testing.T) {
l.Info().
Str("Network Name", sethClient.Cfg.Network.Name).
Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.")
} else {
if *vrfv2Config.General.CancelSubsAfterTestRun {
// wait for all txs to be mined in order to avoid nonce issues
time.Sleep(10 * time.Second)
//cancel subs and return funds to sub owner
vrfv2.CancelSubsAndReturnFunds(testcontext.Get(t), vrfContracts, sethClient.MustGetRootKeyAddress().Hex(), subIDsForCancellingAfterTest, l)
}
} else if *vrfv2Config.General.CancelSubsAfterTestRun {
// wait for all txs to be mined in order to avoid nonce issues
time.Sleep(10 * time.Second)
// cancel subs and return funds to sub owner
vrfv2.CancelSubsAndReturnFunds(testcontext.Get(t), vrfContracts, sethClient.MustGetRootKeyAddress().Hex(), subIDsForCancellingAfterTest, l)
}
if !*vrfv2Config.General.UseExistingEnv {
if err := testEnv.Cleanup(test_env.CleanupOpts{}); err != nil {
@@ -167,7 +165,7 @@ func TestVRFV2Performance(t *testing.T) {

var wg sync.WaitGroup
wg.Add(1)
//todo - timeout should be configurable depending on the perf test type
// todo - timeout should be configurable depending on the perf test type
requestCount, fulfilmentCount, err := vrfcommon.WaitForRequestCountEqualToFulfilmentCount(testcontext.Get(t), vrfContracts.VRFV2Consumers[0], 2*time.Minute, &wg)
require.NoError(t, err)
wg.Wait()
@@ -224,11 +222,9 @@ func TestVRFV2BHSPerformance(t *testing.T) {
l.Info().
Str("Network Name", sethClient.Cfg.Network.Name).
Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.")
} else {
if *vrfv2Config.General.CancelSubsAfterTestRun {
//cancel subs and return funds to sub owner
vrfv2.CancelSubsAndReturnFunds(testcontext.Get(t), vrfContracts, sethClient.MustGetRootKeyAddress().Hex(), subIDsForCancellingAfterTest, l)
}
} else if *vrfv2Config.General.CancelSubsAfterTestRun {
// cancel subs and return funds to sub owner
vrfv2.CancelSubsAndReturnFunds(testcontext.Get(t), vrfContracts, sethClient.MustGetRootKeyAddress().Hex(), subIDsForCancellingAfterTest, l)
}
if !*vrfv2Config.General.UseExistingEnv {
if err := testEnv.Cleanup(test_env.CleanupOpts{}); err != nil {
@@ -253,7 +249,7 @@ func TestVRFV2BHSPerformance(t *testing.T) {

t.Run("vrfv2 and bhs performance test", func(t *testing.T) {
configCopy := testConfig.MustCopy().(tc.TestConfig)
//Underfund Subscription
// Underfund Subscription
configCopy.VRFv2.General.SubscriptionFundingAmountLink = ptr.Ptr(float64(0))

underfundedSubIDs, consumers, err := vrfv2.SetupSubsAndConsumersForExistingEnv(
@@ -308,7 +304,7 @@ func TestVRFV2BHSPerformance(t *testing.T) {

var wgBlockNumberTobe sync.WaitGroup
wgBlockNumberTobe.Add(1)
//Wait at least 256 blocks
// Wait at least 256 blocks
latestBlockNumber, err := sethClient.Client.BlockNumber(testcontext.Get(t))
require.NoError(t, err, "error getting latest block number")
_, err = actions.WaitForBlockNumberToBe(
@@ -362,10 +358,10 @@ func teardown(
testType string,
testConfig *tc.TestConfig,
) {
//send final results to Loki
// send final results to Loki
metrics := GetLoadTestMetrics(testcontext.Get(t), consumer)
SendMetricsToLoki(metrics, lc, updatedLabels)
//set report data for Slack notification
// set report data for Slack notification
testReporter.SetReportData(
testType,
testreporters.VRFLoadTestMetrics{
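
The larger rewrites in this file collapse an else { if cond { ... } } block into else if, alongside the same comment-spacing fixes. A hedged sketch of the control-flow change, with placeholder names and messages:

package vrfv2testexample

import "fmt"

func returnFundsAfterTest(simulatedNetwork, cancelSubsAfterTestRun bool) {
	if simulatedNetwork {
		fmt.Println("simulated network: skipping fund return")
	} else if cancelSubsAfterTestRun {
		// previously: else { if cancelSubsAfterTestRun { ... } }
		fmt.Println("cancelling subs and returning funds to sub owner")
	}
}
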
2 changes: 1 addition & 1 deletion integration-tests/load/vrfv2/vrfv2cmd/dashboard.go
@@ -14,7 +14,7 @@ import (
)

func main() {
//TODO switch to TOML too?
// TODO switch to TOML too?
lokiDS := os.Getenv("DATA_SOURCE_NAME")
d, err := db.NewDashboard(nil,
[]dashboard.Option{