Enable errcheck linter #4462

Merged: 4 commits, Nov 23, 2021
Changes from 2 commits
5 changes: 2 additions & 3 deletions .golangci.yml
@@ -96,9 +96,8 @@ linters-settings:
- kilometres

linters:
disable:
- errcheck
enable:
- errcheck
- exportloopref
- gocritic
- gofmt
@@ -138,7 +137,7 @@ issues:
# The list of ids of default excludes to include or disable. By default it's empty.
# See the list of default excludes here https://golangci-lint.run/usage/configuration.
include:
- EXC0001
# - EXC0001 - errcheck checks that are not usually checked
Member Author: We discussed this on #2881 (comment)

- EXC0002
- EXC0003
- EXC0004
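
For context, EXC0001 is golangci-lint's default errcheck exclusion covering calls whose errors are conventionally ignored (roughly Close, Flush, the print family, and os.Setenv). Leaving it commented out of the include list keeps that exclusion active, so enabling errcheck only reports other unchecked errors. A minimal sketch of the distinction, not taken from this repository (the names below are hypothetical):

```go
package example

import (
	"fmt"
	"os"
)

// shutdowner is a hypothetical stand-in for a component with a fallible Shutdown.
type shutdowner interface {
	Shutdown() error
}

func illustrate(s shutdowner, f *os.File) {
	// Still tolerated: EXC0001 keeps excluding errcheck findings for calls like
	// Close and the print family, so these remain unflagged.
	defer f.Close()
	fmt.Println("done")

	// Reported: with errcheck enabled, an ignored error from an ordinary call
	// is an issue unless it is handled or explicitly discarded.
	//   s.Shutdown()     // flagged
	_ = s.Shutdown() // explicit discard satisfies the linter
}
```
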
12 changes: 6 additions & 6 deletions config/configgrpc/configgrpc_test.go
@@ -40,7 +40,7 @@ import (
func TestDefaultGrpcClientSettings(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

gcs := &GRPCClientSettings{
TLSSetting: configtls.TLSClientSetting{
@@ -55,7 +55,7 @@ func TestDefaultGrpcClientSettings(t *testing.T) {
func TestAllGrpcClientSettings(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

gcs := &GRPCClientSettings{
Headers: map[string]string{
@@ -160,7 +160,7 @@ func TestGrpcServerAuthSettings(t *testing.T) {
func TestGRPCClientSettingsError(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

tests := []struct {
settings GRPCClientSettings
@@ -251,7 +251,7 @@ func TestGRPCClientSettingsError(t *testing.T) {
func TestUseSecure(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

gcs := &GRPCClientSettings{
Headers: nil,
@@ -371,7 +371,7 @@ func TestGetGRPCCompressionKey(t *testing.T) {
func TestHttpReception(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

tests := []struct {
name string
@@ -526,7 +526,7 @@ func TestReceiveOnUnixDomainSocket(t *testing.T) {
}
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

socketName := tempSocketName(t)
gss := &GRPCServerSettings{
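
The recurring change in the test files wraps the deferred Shutdown call in a closure so that its error is asserted, and asserted at the right time: writing defer require.NoError(t, tt.Shutdown(ctx)) directly would evaluate Shutdown immediately, since defer evaluates its arguments eagerly, and only postpone the assertion. A small illustrative sketch of the pattern, with a hypothetical component interface standing in for the telemetry object used in these tests:

```go
package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

// shutdownable is a hypothetical stand-in for the components shut down in these tests.
type shutdownable interface {
	Shutdown(ctx context.Context) error
}

func deferShutdown(t *testing.T, c shutdownable) {
	// Before this PR the error was silently dropped, and the naive fix
	// "defer require.NoError(t, c.Shutdown(context.Background()))" would run
	// Shutdown immediately rather than at function exit.
	// defer c.Shutdown(context.Background())

	// Pattern used throughout this PR: defer a closure, assert the error inside it.
	defer func() { require.NoError(t, c.Shutdown(context.Background())) }()
}
```
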
3 changes: 2 additions & 1 deletion config/configmap.go
@@ -116,7 +116,8 @@ func (l *Map) Set(key string, value interface{}) {
// koanf doesn't offer a direct setting mechanism so merging is required.
merged := koanf.New(KeyDelimiter)
_ = merged.Load(confmap.Provider(map[string]interface{}{key: value}, KeyDelimiter), nil)
l.k.Merge(merged)
// Ignore error for now; it only fails with strict merge when the types are different
_ = l.k.Merge(merged)
Member Author: Should koanf decide to return an error for a different reason, we would have no way to report that Set failed, so maybe we should return the error here instead?

Contributor: It seems there are at least a couple of calls in this func that could trigger an error, with Load just above this line; I would be in favour of returning an error from Set.

Member Author (@mx-psi, Nov 22, 2021): Opened #4467 for this; I want to keep this PR for non-breaking changes.

}

// IsSet checks to see if the key has been set in any of the data locations.
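
A sketch of the follow-up discussed above and tracked in #4467 (hypothetical, not part of this PR): Set could propagate the Load and Merge errors instead of discarding them, at the cost of a breaking signature change. It reuses the types and imports of the surrounding file:

```go
// Hypothetical follow-up, not part of this change: Set returns an error.
func (l *Map) Set(key string, value interface{}) error {
	// koanf doesn't offer a direct setting mechanism so merging is required.
	merged := koanf.New(KeyDelimiter)
	if err := merged.Load(confmap.Provider(map[string]interface{}{key: value}, KeyDelimiter), nil); err != nil {
		return err
	}
	return l.k.Merge(merged)
}
```
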
6 changes: 3 additions & 3 deletions exporter/exporterhelper/internal/persistent_queue_test.go
@@ -52,7 +52,7 @@ func TestPersistentQueue_Capacity(t *testing.T) {

for i := 0; i < 100; i++ {
ext := createStorageExtension(path)
defer ext.Shutdown(context.Background())
defer func() { require.NoError(t, ext.Shutdown(context.Background())) }()

wq := createTestQueue(ext, 5)
require.Equal(t, 0, wq.Size())
@@ -85,7 +85,7 @@ func TestPersistentQueue_Close(t *testing.T) {
defer os.RemoveAll(path)

ext := createStorageExtension(path)
defer ext.Shutdown(context.Background())
defer func() { require.NoError(t, ext.Shutdown(context.Background())) }()

wq := createTestQueue(ext, 1001)
traces := newTraces(1, 10)
@@ -147,7 +147,7 @@ func TestPersistentQueue_ConsumersProducers(t *testing.T) {

defer os.RemoveAll(path)
defer tq.Stop()
defer ext.Shutdown(context.Background())
defer func() { require.NoError(t, ext.Shutdown(context.Background())) }()

numMessagesConsumed := int32(0)
tq.StartConsumers(c.numConsumers, func(item interface{}) {
4 changes: 2 additions & 2 deletions exporter/exporterhelper/internal/persistent_storage_test.go
@@ -216,7 +216,7 @@ func TestPersistentStorage_RepeatPutCloseReadClose(t *testing.T) {
ext := createStorageExtension(path)
wq := createTestQueue(ext, 5000)
require.Equal(t, 0, wq.Size())
ext.Shutdown(context.Background())
require.NoError(t, ext.Shutdown(context.Background()))
}

func TestPersistentStorage_EmptyRequest(t *testing.T) {
@@ -279,7 +279,7 @@ func BenchmarkPersistentStorage_TraceSpans(b *testing.B) {
req := ps.get()
require.NotNil(bb, req)
}
ext.Shutdown(context.Background())
require.NoError(b, ext.Shutdown(context.Background()))
})
}
}
7 changes: 4 additions & 3 deletions exporter/exporterhelper/logs_test.go
@@ -127,7 +127,7 @@ func TestLogsExporter_WithRecordLogs_ReturnError(t *testing.T) {
func TestLogsExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

rCfg := DefaultRetrySettings()
qCfg := DefaultQueueSettings()
@@ -141,7 +141,8 @@ func TestLogsExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
md := testdata.GenerateLogsTwoLogRecordsSameResourceOneDifferent()
const numBatches = 7
for i := 0; i < numBatches; i++ {
te.ConsumeLogs(context.Background(), md)
// errors are checked in the checkExporterEnqueueFailedLogsStats function below.
_ = te.ConsumeLogs(context.Background(), md)
}

// 2 batched must be in queue, and 5 batches (15 log records) rejected due to queue overflow
@@ -207,7 +208,7 @@ func newPushLogsData(retError error) consumerhelper.ConsumeLogsFunc {
func checkRecordedMetricsForLogsExporter(t *testing.T, le component.LogsExporter, wantError error) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

ld := testdata.GenerateLogsTwoLogRecordsSameResource()
const numBatches = 7
7 changes: 4 additions & 3 deletions exporter/exporterhelper/metrics_test.go
@@ -126,7 +126,7 @@ func TestMetricsExporter_WithRecordMetrics_ReturnError(t *testing.T) {
func TestMetricsExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

rCfg := DefaultRetrySettings()
qCfg := DefaultQueueSettings()
@@ -140,7 +140,8 @@ func TestMetricsExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
md := testdata.GenerateMetricsOneMetric()
const numBatches = 7
for i := 0; i < numBatches; i++ {
te.ConsumeMetrics(context.Background(), md)
// errors are checked in the checkExporterEnqueueFailedMetricsStats function below.
_ = te.ConsumeMetrics(context.Background(), md)
}

// 2 batched must be in queue, and 10 metric points rejected due to queue overflow
@@ -208,7 +209,7 @@ func newPushMetricsData(retError error) consumerhelper.ConsumeMetricsFunc {
func checkRecordedMetricsForMetricsExporter(t *testing.T, me component.MetricsExporter, wantError error) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

md := testdata.GenerateMetricsTwoMetrics()
const numBatches = 7
2 changes: 1 addition & 1 deletion exporter/exporterhelper/obsreport_test.go
@@ -31,7 +31,7 @@ import (
func TestExportEnqueueFailure(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

exporter := config.NewComponentID("fakeExporter")

2 changes: 1 addition & 1 deletion exporter/exporterhelper/queued_retry_test.go
@@ -302,7 +302,7 @@ func TestQueuedRetry_DropOnFull(t *testing.T) {
func TestQueuedRetryHappyPath(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

qCfg := DefaultQueueSettings()
rCfg := DefaultRetrySettings()
7 changes: 4 additions & 3 deletions exporter/exporterhelper/traces_test.go
@@ -124,7 +124,7 @@ func TestTracesExporter_WithRecordMetrics_ReturnError(t *testing.T) {
func TestTracesExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

rCfg := DefaultRetrySettings()
qCfg := DefaultQueueSettings()
@@ -138,7 +138,8 @@ func TestTracesExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
td := testdata.GenerateTracesTwoSpansSameResource()
const numBatches = 7
for i := 0; i < numBatches; i++ {
te.ConsumeTraces(context.Background(), td)
// errors are checked in the checkExporterEnqueueFailedTracesStats function below.
_ = te.ConsumeTraces(context.Background(), td)
}

// 2 batched must be in queue, and 5 batches (10 spans) rejected due to queue overflow
@@ -208,7 +209,7 @@ func newTraceDataPusher(retError error) consumerhelper.ConsumeTracesFunc {
func checkRecordedMetricsForTracesExporter(t *testing.T, te component.TracesExporter, wantError error) {
tt, err := obsreporttest.SetupTelemetry()
require.NoError(t, err)
defer tt.Shutdown(context.Background())
defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()

td := testdata.GenerateTracesTwoSpansSameResource()
const numBatches = 7
27 changes: 18 additions & 9 deletions obsreport/obsreport_processor.go
@@ -65,7 +65,8 @@ func NewProcessor(cfg ProcessorSettings) *Processor {
// TracesAccepted reports that the trace data was accepted.
func (por *Processor) TracesAccepted(ctx context.Context, numSpans int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedSpans.M(int64(numSpans)),
@@ -78,7 +79,8 @@ func (por *Processor) TracesAccepted(ctx context.Context, numSpans int) {
// TracesRefused reports that the trace data was refused.
func (por *Processor) TracesRefused(ctx context.Context, numSpans int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedSpans.M(0),
@@ -91,7 +93,8 @@ func (por *Processor) TracesRefused(ctx context.Context, numSpans int) {
// TracesDropped reports that the trace data was dropped.
func (por *Processor) TracesDropped(ctx context.Context, numSpans int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedSpans.M(0),
@@ -104,7 +107,8 @@ func (por *Processor) TracesDropped(ctx context.Context, numSpans int) {
// MetricsAccepted reports that the metrics were accepted.
func (por *Processor) MetricsAccepted(ctx context.Context, numPoints int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedMetricPoints.M(int64(numPoints)),
@@ -117,7 +121,8 @@ func (por *Processor) MetricsAccepted(ctx context.Context, numPoints int) {
// MetricsRefused reports that the metrics were refused.
func (por *Processor) MetricsRefused(ctx context.Context, numPoints int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedMetricPoints.M(0),
@@ -130,7 +135,8 @@ func (por *Processor) MetricsRefused(ctx context.Context, numPoints int) {
// MetricsDropped reports that the metrics were dropped.
func (por *Processor) MetricsDropped(ctx context.Context, numPoints int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedMetricPoints.M(0),
@@ -143,7 +149,8 @@ func (por *Processor) MetricsDropped(ctx context.Context, numPoints int) {
// LogsAccepted reports that the logs were accepted.
func (por *Processor) LogsAccepted(ctx context.Context, numRecords int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedLogRecords.M(int64(numRecords)),
@@ -156,7 +163,8 @@ func (por *Processor) LogsAccepted(ctx context.Context, numRecords int) {
// LogsRefused reports that the logs were refused.
func (por *Processor) LogsRefused(ctx context.Context, numRecords int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedLogRecords.M(0),
@@ -169,7 +177,8 @@ func (por *Processor) LogsRefused(ctx context.Context, numRecords int) {
// LogsDropped reports that the logs were dropped.
func (por *Processor) LogsDropped(ctx context.Context, numRecords int) {
if por.level != configtelemetry.LevelNone {
stats.RecordWithTags(
// ignore the error for now; should not happen
_ = stats.RecordWithTags(
ctx,
por.mutators,
obsmetrics.ProcessorAcceptedLogRecords.M(0),
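
The obsreport changes repeat the same explicit-discard pattern nine times; stats.RecordWithTags only returns an error when a tag mutator cannot be applied, which should not happen with the mutators built in NewProcessor. A hypothetical refactor, not part of this PR, could state that intent once in a small helper:

```go
// Hypothetical helper, not part of this PR: record measurements with the
// processor's mutators and document the intent to ignore the error in one place.
func (por *Processor) recordWithTags(ctx context.Context, ms ...stats.Measurement) {
	// RecordWithTags only fails when a tag mutator cannot be applied, which
	// should not happen with the static mutators configured in NewProcessor.
	_ = stats.RecordWithTags(ctx, por.mutators, ms...)
}
```
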