[chore]: enable perfsprint linter for receivers #36197

Merged
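This PR applies the rewrites suggested by perfsprint, a golangci-lint linter that flags fmt calls where a cheaper, allocation-lighter alternative exists. Three patterns recur throughout the diff below; a minimal sketch of each (package and function names here are illustrative, not from the PR):

package demo

import (
	"errors"
	"strconv"
)

// fmt.Sprintf("namespace/%s", name) -> plain concatenation: skips
// format-string parsing and boxing name into an interface value.
func namespaceCommand(name string) string {
	return "namespace/" + name
}

// fmt.Errorf("mount point not existed") -> errors.New: fmt.Errorf is
// only needed when there is actually something to format or wrap.
var errNoMountPoint = errors.New("mount point not existed")

// fmt.Sprintf("%d", n) -> strconv.Itoa: the direct int-to-string conversion.
func contentLength(n int) string {
	return strconv.Itoa(n)
}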
3 changes: 1 addition & 2 deletions receiver/aerospikereceiver/client.go
@@ -5,7 +5,6 @@ package aerospikereceiver // import "github.com/open-telemetry/opentelemetry-col

import (
"crypto/tls"
"fmt"
"strings"
"sync"
"time"
@@ -266,7 +265,7 @@ func allNamespaceInfo(n cluster.Node, policy *as.InfoPolicy) (metricsMap, error)

commands := make([]string, len(names))
for i, name := range names {
- commands[i] = fmt.Sprintf("namespace/%s", name)
+ commands[i] = "namespace/" + name
}

res, err = n.RequestInfo(policy, commands...)
72 changes: 36 additions & 36 deletions receiver/apachesparkreceiver/scraper.go
@@ -126,122 +126,122 @@ func (s *sparkScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
}

func (s *sparkScraper) recordCluster(clusterStats *models.ClusterProperties, now pcommon.Timestamp, appID string, appName string) {
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.disk.diskSpaceUsed_MB", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.disk.diskSpaceUsed_MB"]; ok {
s.mb.RecordSparkDriverBlockManagerDiskUsageDataPoint(now, int64(stat.Value))
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.memory.offHeapMemUsed_MB", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.memory.offHeapMemUsed_MB"]; ok {
s.mb.RecordSparkDriverBlockManagerMemoryUsageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap, metadata.AttributeStateUsed)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.memory.onHeapMemUsed_MB", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.memory.onHeapMemUsed_MB"]; ok {
s.mb.RecordSparkDriverBlockManagerMemoryUsageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap, metadata.AttributeStateUsed)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.memory.remainingOffHeapMem_MB", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.memory.remainingOffHeapMem_MB"]; ok {
s.mb.RecordSparkDriverBlockManagerMemoryUsageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap, metadata.AttributeStateFree)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.memory.remainingOnHeapMem_MB", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.memory.remainingOnHeapMem_MB"]; ok {
s.mb.RecordSparkDriverBlockManagerMemoryUsageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap, metadata.AttributeStateFree)
}

- if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.fileCacheHits", appID)]; ok {
+ if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.fileCacheHits"]; ok {
s.mb.RecordSparkDriverHiveExternalCatalogFileCacheHitsDataPoint(now, stat.Count)
}
- if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.filesDiscovered", appID)]; ok {
+ if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.filesDiscovered"]; ok {
s.mb.RecordSparkDriverHiveExternalCatalogFilesDiscoveredDataPoint(now, stat.Count)
}
- if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.hiveClientCalls", appID)]; ok {
+ if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.hiveClientCalls"]; ok {
s.mb.RecordSparkDriverHiveExternalCatalogHiveClientCallsDataPoint(now, stat.Count)
}
- if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.parallelListingJobCount", appID)]; ok {
+ if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.parallelListingJobCount"]; ok {
s.mb.RecordSparkDriverHiveExternalCatalogParallelListingJobsDataPoint(now, stat.Count)
}
- if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.partitionsFetched", appID)]; ok {
+ if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.partitionsFetched"]; ok {
s.mb.RecordSparkDriverHiveExternalCatalogPartitionsFetchedDataPoint(now, stat.Count)
}

- if stat, ok := clusterStats.Histograms[fmt.Sprintf("%s.driver.CodeGenerator.compilationTime", appID)]; ok {
+ if stat, ok := clusterStats.Histograms[appID+".driver.CodeGenerator.compilationTime"]; ok {
s.mb.RecordSparkDriverCodeGeneratorCompilationCountDataPoint(now, stat.Count)
s.mb.RecordSparkDriverCodeGeneratorCompilationAverageTimeDataPoint(now, stat.Mean)
}
- if stat, ok := clusterStats.Histograms[fmt.Sprintf("%s.driver.CodeGenerator.generatedClassSize", appID)]; ok {
+ if stat, ok := clusterStats.Histograms[appID+".driver.CodeGenerator.generatedClassSize"]; ok {
s.mb.RecordSparkDriverCodeGeneratorGeneratedClassCountDataPoint(now, stat.Count)
s.mb.RecordSparkDriverCodeGeneratorGeneratedClassAverageSizeDataPoint(now, stat.Mean)
}
- if stat, ok := clusterStats.Histograms[fmt.Sprintf("%s.driver.CodeGenerator.generatedMethodSize", appID)]; ok {
+ if stat, ok := clusterStats.Histograms[appID+".driver.CodeGenerator.generatedMethodSize"]; ok {
s.mb.RecordSparkDriverCodeGeneratorGeneratedMethodCountDataPoint(now, stat.Count)
s.mb.RecordSparkDriverCodeGeneratorGeneratedMethodAverageSizeDataPoint(now, stat.Mean)
}
- if stat, ok := clusterStats.Histograms[fmt.Sprintf("%s.driver.CodeGenerator.sourceCodeSize", appID)]; ok {
+ if stat, ok := clusterStats.Histograms[appID+".driver.CodeGenerator.sourceCodeSize"]; ok {
s.mb.RecordSparkDriverCodeGeneratorSourceCodeOperationsDataPoint(now, stat.Count)
s.mb.RecordSparkDriverCodeGeneratorSourceCodeAverageSizeDataPoint(now, stat.Mean)
}

- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.job.activeJobs", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.job.activeJobs"]; ok {
s.mb.RecordSparkDriverDagSchedulerJobActiveDataPoint(now, int64(stat.Value))
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.job.allJobs", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.job.allJobs"]; ok {
s.mb.RecordSparkDriverDagSchedulerJobCountDataPoint(now, int64(stat.Value))
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.stage.failedStages", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.stage.failedStages"]; ok {
s.mb.RecordSparkDriverDagSchedulerStageFailedDataPoint(now, int64(stat.Value))
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.stage.runningStages", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.stage.runningStages"]; ok {
s.mb.RecordSparkDriverDagSchedulerStageCountDataPoint(now, int64(stat.Value), metadata.AttributeSchedulerStatusRunning)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.stage.waitingStages", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.stage.waitingStages"]; ok {
s.mb.RecordSparkDriverDagSchedulerStageCountDataPoint(now, int64(stat.Value), metadata.AttributeSchedulerStatusWaiting)
}

- if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.LiveListenerBus.numEventsPosted", appID)]; ok {
+ if stat, ok := clusterStats.Counters[appID+".driver.LiveListenerBus.numEventsPosted"]; ok {
s.mb.RecordSparkDriverLiveListenerBusPostedDataPoint(now, stat.Count)
}
- if stat, ok := clusterStats.Timers[fmt.Sprintf("%s.driver.LiveListenerBus.queue.appStatus.listenerProcessingTime", appID)]; ok {
+ if stat, ok := clusterStats.Timers[appID+".driver.LiveListenerBus.queue.appStatus.listenerProcessingTime"]; ok {
s.mb.RecordSparkDriverLiveListenerBusProcessingTimeAverageDataPoint(now, stat.Mean)
}
- if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.LiveListenerBus.queue.appStatus.numDroppedEvents", appID)]; ok {
+ if stat, ok := clusterStats.Counters[appID+".driver.LiveListenerBus.queue.appStatus.numDroppedEvents"]; ok {
s.mb.RecordSparkDriverLiveListenerBusDroppedDataPoint(now, stat.Count)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.LiveListenerBus.queue.appStatus.size", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.LiveListenerBus.queue.appStatus.size"]; ok {
s.mb.RecordSparkDriverLiveListenerBusQueueSizeDataPoint(now, int64(stat.Value))
}

- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.JVMCPU.jvmCpuTime", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.JVMCPU.jvmCpuTime"]; ok {
s.mb.RecordSparkDriverJvmCPUTimeDataPoint(now, int64(stat.Value))
}

- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.JVMOffHeapMemory", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.JVMOffHeapMemory"]; ok {
s.mb.RecordSparkDriverExecutorMemoryJvmDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.JVMHeapMemory", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.JVMHeapMemory"]; ok {
s.mb.RecordSparkDriverExecutorMemoryJvmDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.OffHeapExecutionMemory", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.OffHeapExecutionMemory"]; ok {
s.mb.RecordSparkDriverExecutorMemoryExecutionDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.OnHeapExecutionMemory", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.OnHeapExecutionMemory"]; ok {
s.mb.RecordSparkDriverExecutorMemoryExecutionDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.OffHeapStorageMemory", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.OffHeapStorageMemory"]; ok {
s.mb.RecordSparkDriverExecutorMemoryStorageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.OnHeapStorageMemory", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.OnHeapStorageMemory"]; ok {
s.mb.RecordSparkDriverExecutorMemoryStorageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.DirectPoolMemory", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.DirectPoolMemory"]; ok {
s.mb.RecordSparkDriverExecutorMemoryPoolDataPoint(now, int64(stat.Value), metadata.AttributePoolMemoryTypeDirect)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MappedPoolMemory", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MappedPoolMemory"]; ok {
s.mb.RecordSparkDriverExecutorMemoryPoolDataPoint(now, int64(stat.Value), metadata.AttributePoolMemoryTypeMapped)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MinorGCCount", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MinorGCCount"]; ok {
s.mb.RecordSparkDriverExecutorGcOperationsDataPoint(now, int64(stat.Value), metadata.AttributeGcTypeMinor)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MajorGCCount", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MajorGCCount"]; ok {
s.mb.RecordSparkDriverExecutorGcOperationsDataPoint(now, int64(stat.Value), metadata.AttributeGcTypeMajor)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MinorGCTime", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MinorGCTime"]; ok {
s.mb.RecordSparkDriverExecutorGcTimeDataPoint(now, int64(stat.Value), metadata.AttributeGcTypeMinor)
}
- if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MajorGCTime", appID)]; ok {
+ if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MajorGCTime"]; ok {
s.mb.RecordSparkDriverExecutorGcTimeDataPoint(now, int64(stat.Value), metadata.AttributeGcTypeMajor)
}

@@ -164,10 +164,10 @@ func getMetricKey(metric *CAdvisorMetric) string {
switch metricType {
case ci.TypeInstance:
// merge cpu, memory, net metric for type Instance
- metricKey = fmt.Sprintf("metricType:%s", ci.TypeInstance)
+ metricKey = "metricType:" + ci.TypeInstance
case ci.TypeNode:
// merge cpu, memory, net metric for type Node
- metricKey = fmt.Sprintf("metricType:%s", ci.TypeNode)
+ metricKey = "metricType:" + ci.TypeNode
case ci.TypePod:
// merge cpu, memory, net metric for type Pod
metricKey = fmt.Sprintf("metricType:%s,podId:%s", ci.TypePod, metric.GetTags()[ci.PodIDKey])
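Note the contrast in the hunk above: the two single-verb Sprintf calls become concatenation, while the two-verb metricType/podId format on the last line stays on fmt.Sprintf — perfsprint only rewrites calls that a single concatenation or conversion can express. A self-contained illustration (names here are hypothetical):

package demo

import "fmt"

func metricKeys(metricType, podID string) (simple, compound string) {
	// Single %s: rewritten to concatenation.
	simple = "metricType:" + metricType
	// Two verbs: perfsprint leaves fmt.Sprintf in place.
	compound = fmt.Sprintf("metricType:%s,podId:%s", metricType, podID)
	return simple, compound
}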
@@ -6,6 +6,7 @@ package ecsinfo // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"bufio"
"context"
"errors"
"fmt"
"log"
"math"
@@ -227,7 +228,7 @@ func getCGroupMountPoint(mountConfigPath string) (string, error) {
return filepath.Dir(fields[4]), nil
}
}
return "", fmt.Errorf("mount point not existed")
return "", errors.New("mount point not existed")
}

func getCGroupPathForTask(cgroupMount, controller, taskID, clusterName string) (string, error) {
@@ -260,7 +260,7 @@ func (k *K8sAPIServer) startLeaderElection(ctx context.Context, lock resourceloc
RetryPeriod: 5 * time.Second,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
- k.logger.Info(fmt.Sprintf("k8sapiserver OnStartedLeading: %s", k.nodeName))
+ k.logger.Info("k8sapiserver OnStartedLeading: " + k.nodeName)
// we're notified when we start
k.mu.Lock()
k.leading = true
@@ -292,7 +292,7 @@ func (k *K8sAPIServer) startLeaderElection(ctx context.Context, lock resourceloc
}
},
OnStoppedLeading: func() {
- k.logger.Info(fmt.Sprintf("k8sapiserver OnStoppedLeading: %s", k.nodeName))
+ k.logger.Info("k8sapiserver OnStoppedLeading: " + k.nodeName)
// we can do cleanup here, or after the RunOrDie method returns
k.mu.Lock()
defer k.mu.Unlock()
@@ -302,14 +302,14 @@ func (k *K8sAPIServer) startLeaderElection(ctx context.Context, lock resourceloc
k.k8sClient.ShutdownPodClient()
},
OnNewLeader: func(identity string) {
- k.logger.Info(fmt.Sprintf("k8sapiserver Switch New Leader: %s", identity))
+ k.logger.Info("k8sapiserver Switch New Leader: " + identity)
},
},
})

select {
case <-ctx.Done(): // when leader election ends, the channel ctx.Done() will be closed
- k.logger.Info(fmt.Sprintf("k8sapiserver shutdown Leader Election: %s", k.nodeName))
+ k.logger.Info("k8sapiserver shutdown Leader Election: " + k.nodeName)
return
default:
}
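An aside on the logger call sites in this file: they use zap, so a structured field would avoid assembling the message string at all when the entry is filtered out by level. A hypothetical alternative shape, not what this PR does (it only swaps Sprintf for concatenation):

package demo

import "go.uber.org/zap"

func logLeading(logger *zap.Logger, nodeName string) {
	// zap encodes fields only for entries that pass the level check,
	// so no message string is built for dropped logs.
	logger.Info("k8sapiserver OnStartedLeading", zap.String("nodeName", nodeName))
}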
@@ -217,7 +217,7 @@ func (p *PodStore) Decorate(ctx context.Context, metric CIMetric, kubernetesBlob
p.addPodOwnersAndPodName(metric, &entry.pod, kubernetesBlob)
addLabels(&entry.pod, kubernetesBlob)
} else {
- p.logger.Warn(fmt.Sprintf("no pod information is found in podstore for pod %s", podKey))
+ p.logger.Warn("no pod information is found in podstore for pod " + podKey)
return false
}
}
@@ -262,7 +262,7 @@ func (p *PodStore) refreshInternal(now time.Time, podList []corev1.Pod) {
pod := podList[i]
podKey := createPodKeyFromMetaData(&pod)
if podKey == "" {
- p.logger.Warn(fmt.Sprintf("podKey is unavailable, refresh pod store for pod %s", pod.Name))
+ p.logger.Warn("podKey is unavailable, refresh pod store for pod " + pod.Name)
continue
}
if pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed {
@@ -4,7 +4,7 @@
package awsecscontainermetrics

import (
"fmt"
"errors"
"os"
"testing"

@@ -28,7 +28,7 @@ func (f testRestClient) GetResponse(path string) ([]byte, error) {
}

if f.fail {
- return []byte{}, fmt.Errorf("failed")
+ return []byte{}, errors.New("failed")
}
if f.invalidJSON {
return []byte("wrong-json-body"), nil
3 changes: 1 addition & 2 deletions receiver/awsecscontainermetricsreceiver/receiver_test.go
@@ -6,7 +6,6 @@ package awsecscontainermetricsreceiver
import (
"context"
"errors"
"fmt"
"os"
"testing"

@@ -96,7 +95,7 @@ type invalidFakeClient struct {
}

func (f invalidFakeClient) GetResponse(_ string) ([]byte, error) {
- return nil, fmt.Errorf("intentional error")
+ return nil, errors.New("intentional error")
}

func TestCollectDataFromEndpointWithEndpointError(t *testing.T) {
3 changes: 1 addition & 2 deletions receiver/awsfirehosereceiver/config_test.go
@@ -4,7 +4,6 @@
package awsfirehosereceiver

import (
"fmt"
"path/filepath"
"testing"

@@ -23,7 +22,7 @@ func TestLoadConfig(t *testing.T) {
"cwmetrics", "cwlogs", "otlp_v1", "invalid",
} {
t.Run(configType, func(t *testing.T) {
- fileName := fmt.Sprintf("%s_config.yaml", configType)
+ fileName := configType + "_config.yaml"
cm, err := confmaptest.LoadConf(filepath.Join("testdata", fileName))
require.NoError(t, err)

@@ -4,7 +4,7 @@
package unmarshalertest

import (
"fmt"
"errors"
"testing"

"github.com/stretchr/testify/require"
@@ -31,7 +31,7 @@ func TestNewWithLogs(t *testing.T) {
}

func TestNewErrLogs(t *testing.T) {
- wantErr := fmt.Errorf("test error")
+ wantErr := errors.New("test error")
unmarshaler := NewErrLogs(wantErr)
got, err := unmarshaler.Unmarshal(nil)
require.Error(t, err)
@@ -4,7 +4,7 @@
package unmarshalertest

import (
"fmt"
"errors"
"testing"

"github.com/stretchr/testify/require"
@@ -31,7 +31,7 @@ func TestNewWithMetrics(t *testing.T) {
}

func TestNewErrMetrics(t *testing.T) {
- wantErr := fmt.Errorf("test error")
+ wantErr := errors.New("test error")
unmarshaler := NewErrMetrics(wantErr)
got, err := unmarshaler.Unmarshal(nil)
require.Error(t, err)
3 changes: 2 additions & 1 deletion receiver/awsfirehosereceiver/receiver.go
@@ -12,6 +12,7 @@ import (
"io"
"net"
"net/http"
"strconv"
"sync"
"time"

@@ -282,7 +283,7 @@ func (fmr *firehoseReceiver) sendResponse(w http.ResponseWriter, requestID strin
}
payload, _ := json.Marshal(body)
w.Header().Set(headerContentType, "application/json")
- w.Header().Set(headerContentLength, fmt.Sprintf("%d", len(payload)))
+ w.Header().Set(headerContentLength, strconv.Itoa(len(payload)))
w.WriteHeader(statusCode)
if _, err = w.Write(payload); err != nil {
fmr.settings.Logger.Error("Failed to send response", zap.Error(err))
3 changes: 2 additions & 1 deletion receiver/awsfirehosereceiver/receiver_test.go
@@ -13,6 +13,7 @@ import (
"net"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"time"

@@ -190,7 +191,7 @@ func TestFirehoseRequest(t *testing.T) {

request := httptest.NewRequest(http.MethodPost, "/", requestBody)
request.Header.Set(headerContentType, "application/json")
- request.Header.Set(headerContentLength, fmt.Sprintf("%d", requestBody.Len()))
+ request.Header.Set(headerContentLength, strconv.Itoa(requestBody.Len()))
request.Header.Set(headerFirehoseRequestID, testFirehoseRequestID)
request.Header.Set(headerFirehoseAccessKey, testFirehoseAccessKey)
if testCase.headers != nil {
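For anyone who wants to spot-check the linter's rationale, a minimal benchmark sketch (a hypothetical test file, not part of this PR) comparing the two integer-formatting forms touched in receiver.go:

package demo

import (
	"fmt"
	"strconv"
	"testing"
)

// Run with: go test -bench=. — strconv.Itoa typically runs several
// times faster than fmt.Sprintf("%d", ...) and allocates less.
func BenchmarkFmtSprintf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = fmt.Sprintf("%d", i)
	}
}

func BenchmarkStrconvItoa(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = strconv.Itoa(i)
	}
}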