From 080f1a8c4963bab652ab93153552b4ac1510b6da Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Thu, 28 Jan 2021 11:17:51 +0300 Subject: [PATCH 1/9] Add ScrapeErrors struct to simplify errors usage Add ScrapeErrors that contains a slice with errors and that has methods to simplify adding new PartialScrapeErrors and regular generic errors. Use new methods to refactor errors appends in receiver/hostmetricsreceiver. Use ScrapeErrors.Combine() in component/componenterror to not create cycling dependencies in consumererrors package. --- consumer/consumererror/combineerrors_test.go | 4 +- consumer/consumererror/scrapeerrors.go | 75 +++++++++ consumer/consumererror/scrapeerrors_test.go | 145 ++++++++++++++++++ .../filesystemscraper/filesystem_scraper.go | 7 +- .../scraper/networkscraper/network_scraper.go | 13 +- .../pagingscraper/paging_scraper_others.go | 13 +- .../pagingscraper/paging_scraper_windows.go | 17 +- .../scraper/processscraper/process_scraper.go | 24 +-- receiver/scraperhelper/errors.go | 58 ------- receiver/scraperhelper/errors_test.go | 88 ----------- receiver/scraperhelper/scrapercontroller.go | 6 +- 11 files changed, 260 insertions(+), 190 deletions(-) create mode 100644 consumer/consumererror/scrapeerrors.go create mode 100644 consumer/consumererror/scrapeerrors_test.go delete mode 100644 receiver/scraperhelper/errors.go delete mode 100644 receiver/scraperhelper/errors_test.go diff --git a/consumer/consumererror/combineerrors_test.go b/consumer/consumererror/combineerrors_test.go index 404b853f342..c465376477f 100644 --- a/consumer/consumererror/combineerrors_test.go +++ b/consumer/consumererror/combineerrors_test.go @@ -1,10 +1,10 @@ -// Copyright The OpenTelemetry Authors +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, diff --git a/consumer/consumererror/scrapeerrors.go b/consumer/consumererror/scrapeerrors.go new file mode 100644 index 00000000000..d687a9600c8 --- /dev/null +++ b/consumer/consumererror/scrapeerrors.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumererror + +import ( + "fmt" + "strings" +) + +// ScrapeErrors contains multiple PartialScrapeErrors and can also contain generic errors. +type ScrapeErrors struct { + errs []error + scrapeErrsCount int +} + +// Add adds a PartialScrapeError with the provided failed count and error. +func (s *ScrapeErrors) Add(failed int, err error) { + s.errs = append(s.errs, NewPartialScrapeError(err, failed)) + s.scrapeErrsCount++ +} + +// Addf adds a PartialScrapeError with the provided failed count and arguments to format an error. 
+func (s *ScrapeErrors) Addf(failed int, format string, a ...interface{}) { + s.errs = append(s.errs, NewPartialScrapeError(fmt.Errorf(format, a...), failed)) + s.scrapeErrsCount++ +} + +// Add adds a regular generic error. +func (s *ScrapeErrors) AddRegular(err error) { + s.errs = append(s.errs, err) +} + +// Add adds a regular generic error from the provided format specifier. +func (s *ScrapeErrors) AddRegularf(format string, a ...interface{}) { + s.errs = append(s.errs, fmt.Errorf(format, a...)) +} + +// Combine converts a slice of errors into one error. +// It will return a PartialScrapeError if at least one error in the slice is a PartialScrapeError. +func (s *ScrapeErrors) Combine() error { + if s.scrapeErrsCount == 0 { + return CombineErrors(s.errs) + } + + errMsgs := make([]string, 0, len(s.errs)) + failedScrapeCount := 0 + for _, err := range s.errs { + if partialError, isPartial := err.(PartialScrapeError); isPartial { + failedScrapeCount += partialError.Failed + } + + errMsgs = append(errMsgs, err.Error()) + } + + var err error + if len(s.errs) == 1 { + err = s.errs[0] + } else { + err = fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) + } + + return NewPartialScrapeError(err, failedScrapeCount) +} diff --git a/consumer/consumererror/scrapeerrors_test.go b/consumer/consumererror/scrapeerrors_test.go new file mode 100644 index 00000000000..044377047fb --- /dev/null +++ b/consumer/consumererror/scrapeerrors_test.go @@ -0,0 +1,145 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package consumererror + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestScrapeErrorsAdd(t *testing.T) { + err1 := errors.New("err 1") + err2 := errors.New("err 2") + expected := []error{ + PartialScrapeError{error: err1, Failed: 1}, + PartialScrapeError{error: err2, Failed: 10}, + } + + var errs ScrapeErrors + errs.Add(1, err1) + errs.Add(10, err2) + assert.Equal(t, expected, errs.errs) +} + +func TestScrapeErrorsAddf(t *testing.T) { + err1 := errors.New("err 10") + err2 := errors.New("err 20") + expected := []error{ + PartialScrapeError{error: fmt.Errorf("err: %s", err1), Failed: 20}, + PartialScrapeError{error: fmt.Errorf("err %s: %w", "2", err2), Failed: 2}, + } + + var errs ScrapeErrors + errs.Addf(20, "err: %s", err1) + errs.Addf(2, "err %s: %w", "2", err2) + assert.Equal(t, expected, errs.errs) +} + +func TestScrapeErrorsAddRegular(t *testing.T) { + err1 := errors.New("err a") + err2 := errors.New("err b") + expected := []error{err1, err2} + + var errs ScrapeErrors + errs.AddRegular(err1) + errs.AddRegular(err2) + assert.Equal(t, expected, errs.errs) +} + +func TestScrapeErrorsAddRegularf(t *testing.T) { + err1 := errors.New("err aa") + err2 := errors.New("err bb") + expected := []error{ + fmt.Errorf("err: %s", err1), + fmt.Errorf("err %s: %w", "bb", err2), + } + + var errs ScrapeErrors + errs.AddRegularf("err: %s", err1) + errs.AddRegularf("err %s: %w", "bb", err2) + assert.Equal(t, expected, errs.errs) +} + +func TestScrapeErrorsCombine(t *testing.T) { + testCases := []struct { + errs func() ScrapeErrors + expectedErr string + expectedFailedCount int + expectNil bool + expectedScrape bool + }{ + { + errs: func() ScrapeErrors { + var errs ScrapeErrors + return errs + }, + expectNil: true, + }, + { + errs: func() ScrapeErrors { + var errs ScrapeErrors + errs.Add(10, errors.New("bad scrapes")) + errs.Addf(1, "err: %s", errors.New("bad scrape")) + return errs + }, + expectedErr: "[bad scrapes; err: bad scrape]", + expectedFailedCount: 11, + expectedScrape: true, + }, + { + errs: func() ScrapeErrors { + var errs ScrapeErrors + errs.AddRegular(errors.New("bad regular")) + errs.AddRegularf("err: %s", errors.New("bad reg")) + return errs + }, + expectedErr: "[bad regular; err: bad reg]", + }, + { + errs: func() ScrapeErrors { + var errs ScrapeErrors + errs.Add(2, errors.New("bad two scrapes")) + errs.Addf(10, "%d scrapes failed: %s", 10, errors.New("bad things happened")) + errs.AddRegular(errors.New("bad event")) + errs.AddRegularf("event: %s", errors.New("something happened")) + return errs + }, + expectedErr: "[bad two scrapes; 10 scrapes failed: bad things happened; bad event; event: something happened]", + expectedFailedCount: 12, + expectedScrape: true, + }, + } + + for _, tc := range testCases { + scrapeErrs := tc.errs() + if (scrapeErrs.Combine() == nil) != tc.expectNil { + t.Errorf("%+v.Combine() == nil? Got: %t. Want: %t", scrapeErrs, scrapeErrs.Combine() == nil, tc.expectNil) + } + if scrapeErrs.Combine() != nil && tc.expectedErr != scrapeErrs.Combine().Error() { + t.Errorf("%+v.Combine() = %q. Want: %q", scrapeErrs, scrapeErrs.Combine(), tc.expectedErr) + } + if tc.expectedScrape { + partialScrapeErr, ok := scrapeErrs.Combine().(PartialScrapeError) + if !ok { + t.Errorf("%+v.Combine() = %q. Want: PartialScrapeError", scrapeErrs, scrapeErrs.Combine()) + } else if tc.expectedFailedCount != partialScrapeErr.Failed { + t.Errorf("%+v.Combine().Failed. Got %d Failed count. 
Want: %d", scrapeErrs, partialScrapeErr.Failed, tc.expectedFailedCount) + } + } + } +} diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go index 0287ee074ba..a3b14e9f883 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -25,7 +25,6 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -71,7 +70,7 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { return metrics, consumererror.NewPartialScrapeError(err, metricsLen) } - var errors []error + var errors consumererror.ScrapeErrors usages := make([]*deviceUsage, 0, len(partitions)) for _, partition := range partitions { if !s.fsFilter.includePartition(partition) { @@ -79,7 +78,7 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { } usage, usageErr := s.usage(partition.Mountpoint) if usageErr != nil { - errors = append(errors, consumererror.NewPartialScrapeError(usageErr, 0)) + errors.Add(0, usageErr) continue } @@ -92,7 +91,7 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { appendSystemSpecificMetrics(metrics, 1, now, usages) } - err = scraperhelper.CombineScrapeErrors(errors) + err = errors.Combine() if err != nil && len(usages) == 0 { partialErr := err.(consumererror.PartialScrapeError) partialErr.Failed = metricsLen diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go index a981504d206..a25febf339e 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go @@ -28,7 +28,6 @@ import ( "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -85,19 +84,19 @@ func (s *scraper) start(context.Context, component.Host) error { func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { metrics := pdata.NewMetricSlice() - var errors []error + var errors consumererror.ScrapeErrors err := s.scrapeAndAppendNetworkCounterMetrics(metrics, s.startTime) if err != nil { - errors = append(errors, err) + errors.Add(networkMetricsLen, err) } err = s.scrapeAndAppendNetworkConnectionsMetric(metrics) if err != nil { - errors = append(errors, err) + errors.Add(connectionsMetricsLen, err) } - return metrics, scraperhelper.CombineScrapeErrors(errors) + return metrics, errors.Combine() } func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics pdata.MetricSlice, startTime pdata.TimestampUnixNano) error { @@ -106,7 +105,7 @@ func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics pdata.MetricSlice // get total stats only ioCounters, err := s.ioCounters( /*perNetworkInterfaceController=*/ true) if err != nil { - return consumererror.NewPartialScrapeError(err, networkMetricsLen) + return err } // filter network interfaces 
by name @@ -182,7 +181,7 @@ func (s *scraper) scrapeAndAppendNetworkConnectionsMetric(metrics pdata.MetricSl connections, err := s.connections("tcp") if err != nil { - return consumererror.NewPartialScrapeError(err, connectionsMetricsLen) + return err } tcpConnectionStatusCounts := getTCPConnectionStatusCounts(connections) diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go index 3f50cbdb247..82bad8ef6d1 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go @@ -28,7 +28,6 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -65,26 +64,26 @@ func (s *scraper) start(context.Context, component.Host) error { func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { metrics := pdata.NewMetricSlice() - var errors []error + var errors consumererror.ScrapeErrors err := s.scrapeAndAppendPagingUsageMetric(metrics) if err != nil { - errors = append(errors, err) + errors.Add(pagingUsageMetricsLen, err) } err = s.scrapeAndAppendPagingMetrics(metrics) if err != nil { - errors = append(errors, err) + errors.Add(pagingMetricsLen, err) } - return metrics, scraperhelper.CombineScrapeErrors(errors) + return metrics, errors.Combine() } func (s *scraper) scrapeAndAppendPagingUsageMetric(metrics pdata.MetricSlice) error { now := internal.TimeToUnixNano(time.Now()) vmem, err := s.virtualMemory() if err != nil { - return consumererror.NewPartialScrapeError(err, pagingUsageMetricsLen) + return err } idx := metrics.Len() @@ -114,7 +113,7 @@ func (s *scraper) scrapeAndAppendPagingMetrics(metrics pdata.MetricSlice) error now := internal.TimeToUnixNano(time.Now()) swap, err := s.swapMemory() if err != nil { - return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + return err } idx := metrics.Len() diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go index 572a5400cae..ca9e1999ef7 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go @@ -29,7 +29,6 @@ import ( "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -82,26 +81,26 @@ func (s *scraper) start(context.Context, component.Host) error { func (s *scraper) scrape(context.Context) (pdata.MetricSlice, error) { metrics := pdata.NewMetricSlice() - var errors []error + var errors consumererror.ScrapeErrors err := s.scrapeAndAppendPagingUsageMetric(metrics) if err != nil { - errors = append(errors, err) + errors.Add(pagingUsageMetricsLen, err) } err = s.scrapeAndAppendPagingOperationsMetric(metrics) if err != nil { - errors = append(errors, err) + errors.Add(pagingMetricsLen, err) } - return metrics, scraperhelper.CombineScrapeErrors(errors) + return 
metrics, errors.Combine() } func (s *scraper) scrapeAndAppendPagingUsageMetric(metrics pdata.MetricSlice) error { now := internal.TimeToUnixNano(time.Now()) pageFiles, err := s.pageFileStats() if err != nil { - return consumererror.NewPartialScrapeError(err, pagingUsageMetricsLen) + return err } idx := metrics.Len() @@ -137,17 +136,17 @@ func (s *scraper) scrapeAndAppendPagingOperationsMetric(metrics pdata.MetricSlic counters, err := s.perfCounterScraper.Scrape() if err != nil { - return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + return err } memoryObject, err := counters.GetObject(memory) if err != nil { - return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + return err } memoryCounterValues, err := memoryObject.GetValues(pageReadsPerSec, pageWritesPerSec) if err != nil { - return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + return err } if len(memoryCounterValues) > 0 { diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go index 94994ff5532..29680806c64 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -29,7 +29,6 @@ import ( "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -88,7 +87,7 @@ func (s *scraper) start(context.Context, component.Host) error { func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) { rms := pdata.NewResourceMetricsSlice() - var errs []error + var errs consumererror.ScrapeErrors metadata, err := s.getProcessMetadata() if err != nil { @@ -96,7 +95,7 @@ func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) return rms, err } - errs = append(errs, err) + errs.AddRegular(err) } rms.Resize(len(metadata)) @@ -111,19 +110,19 @@ func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) now := internal.TimeToUnixNano(time.Now()) if err = scrapeAndAppendCPUTimeMetric(metrics, s.startTime, now, md.handle); err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading cpu times for process %q (pid %v): %w", md.executable.name, md.pid, err), cpuMetricsLen)) + errs.Addf(cpuMetricsLen, "error reading cpu times for process %q (pid %v): %w", md.executable.name, md.pid, err) } if err = scrapeAndAppendMemoryUsageMetrics(metrics, now, md.handle); err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading memory info for process %q (pid %v): %w", md.executable.name, md.pid, err), memoryMetricsLen)) + errs.Addf(memoryMetricsLen, "error reading memory info for process %q (pid %v): %w", md.executable.name, md.pid, err) } if err = scrapeAndAppendDiskIOMetric(metrics, s.startTime, now, md.handle); err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading disk usage for process %q (pid %v): %w", md.executable.name, md.pid, err), diskMetricsLen)) + errs.Addf(diskMetricsLen, "error reading disk usage for process %q (pid %v): %w", md.executable.name, md.pid, err) } } - return rms, scraperhelper.CombineScrapeErrors(errs) + return rms, errs.Combine() } // getProcessMetadata returns a slice of 
processMetadata, including handles, @@ -136,7 +135,8 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { return nil, err } - var errs []error + var errs consumererror.ScrapeErrors + metadata := make([]*processMetadata, 0, handles.Len()) for i := 0; i < handles.Len(); i++ { pid := handles.Pid(i) @@ -144,7 +144,7 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { executable, err := getProcessExecutable(handle) if err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading process name for pid %v: %w", pid, err), 1)) + errs.Addf(1, "error reading process name for pid %v: %w", pid, err) continue } @@ -156,12 +156,12 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { command, err := getProcessCommand(handle) if err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading command for process %q (pid %v): %w", executable.name, pid, err), 0)) + errs.Addf(0, "error reading command for process %q (pid %v): %w", executable.name, pid, err) } username, err := handle.Username() if err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading username for process %q (pid %v): %w", executable.name, pid, err), 0)) + errs.Addf(0, "error reading username for process %q (pid %v): %w", executable.name, pid, err) } md := &processMetadata{ @@ -175,7 +175,7 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { metadata = append(metadata, md) } - return metadata, scraperhelper.CombineScrapeErrors(errs) + return metadata, errs.Combine() } func scrapeAndAppendCPUTimeMetric(metrics pdata.MetricSlice, startTime, now pdata.TimestampUnixNano, handle processHandle) error { diff --git a/receiver/scraperhelper/errors.go b/receiver/scraperhelper/errors.go deleted file mode 100644 index ba11e127fee..00000000000 --- a/receiver/scraperhelper/errors.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scraperhelper - -import ( - "errors" - "fmt" - "strings" - - "go.opentelemetry.io/collector/consumer/consumererror" -) - -// CombineScrapeErrors converts a list of errors into one error. 
-func CombineScrapeErrors(errs []error) error { - partialScrapeErr := false - for _, err := range errs { - var partialError consumererror.PartialScrapeError - if errors.As(err, &partialError) { - partialScrapeErr = true - break - } - } - - if !partialScrapeErr { - return consumererror.CombineErrors(errs) - } - - errMsgs := make([]string, 0, len(errs)) - failedScrapeCount := 0 - for _, err := range errs { - if partialError, isPartial := err.(consumererror.PartialScrapeError); isPartial { - failedScrapeCount += partialError.Failed - } - - errMsgs = append(errMsgs, err.Error()) - } - - var err error - if len(errs) == 1 { - err = errs[0] - } else { - err = fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) - } - - return consumererror.NewPartialScrapeError(err, failedScrapeCount) -} diff --git a/receiver/scraperhelper/errors_test.go b/receiver/scraperhelper/errors_test.go deleted file mode 100644 index e5def5a7c44..00000000000 --- a/receiver/scraperhelper/errors_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scraperhelper - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "go.opentelemetry.io/collector/consumer/consumererror" -) - -func TestCombineScrapeErrors(t *testing.T) { - testCases := []struct { - errors []error - expected string - expectNil bool - expectedPartialScrapeErr bool - expectedFailedScrapeCount int - }{ - { - errors: []error{}, - expectNil: true, - }, - { - errors: []error{ - fmt.Errorf("foo"), - }, - expected: "foo", - }, - { - errors: []error{ - fmt.Errorf("foo"), - fmt.Errorf("bar"), - }, - expected: "[foo; bar]", - }, - { - errors: []error{ - fmt.Errorf("foo"), - fmt.Errorf("bar"), - consumererror.NewPartialScrapeError(fmt.Errorf("partial"), 0)}, - expected: "[foo; bar; partial]", - expectedPartialScrapeErr: true, - expectedFailedScrapeCount: 0, - }, - { - errors: []error{ - fmt.Errorf("foo"), - fmt.Errorf("bar"), - consumererror.NewPartialScrapeError(fmt.Errorf("partial 1"), 2), - consumererror.NewPartialScrapeError(fmt.Errorf("partial 2"), 3)}, - expected: "[foo; bar; partial 1; partial 2]", - expectedPartialScrapeErr: true, - expectedFailedScrapeCount: 5, - }, - } - - for _, tc := range testCases { - got := CombineScrapeErrors(tc.errors) - - if tc.expectNil { - assert.NoError(t, got, tc.expected) - } else { - assert.EqualError(t, got, tc.expected) - } - - partialErr, isPartial := got.(consumererror.PartialScrapeError) - assert.Equal(t, tc.expectedPartialScrapeErr, isPartial) - - if tc.expectedPartialScrapeErr && isPartial { - assert.Equal(t, tc.expectedFailedScrapeCount, partialErr.Failed) - } - } -} diff --git a/receiver/scraperhelper/scrapercontroller.go b/receiver/scraperhelper/scrapercontroller.go index 866c27368fe..9920d5c91a7 100644 --- a/receiver/scraperhelper/scrapercontroller.go +++ b/receiver/scraperhelper/scrapercontroller.go @@ -260,11 +260,11 @@ func (mms *multiMetricScraper) Scrape(ctx context.Context, receiverName string) 
ilms.Resize(1) ilm := ilms.At(0) - var errs []error + var errs consumererror.ScrapeErrors for _, scraper := range mms.scrapers { metrics, err := scraper.Scrape(ctx, receiverName) if err != nil { - errs = append(errs, err) + errs.AddRegular(err) if !consumererror.IsPartialScrapeError(err) { continue } @@ -272,5 +272,5 @@ func (mms *multiMetricScraper) Scrape(ctx context.Context, receiverName string) metrics.MoveAndAppendTo(ilm.Metrics()) } - return rms, CombineScrapeErrors(errs) + return rms, errs.Combine() } From f6c7477c3d60d14e0e6bd9cd8dcb8eae8ae1092f Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Thu, 4 Feb 2021 11:32:45 +0300 Subject: [PATCH 2/9] Component error: do not change CombineErrors yet Leave componenterror.CombineErrors unchanged to not introduce too many changes in a single PR. Add a note about the necessity of componenterror.CombineErrors deprecation. --- component/componenterror/errors.go | 26 ++++++++- component/componenterror/errors_test.go | 70 +++++++++++++++++++++++++ consumer/consumererror/combineerrors.go | 2 + 3 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 component/componenterror/errors_test.go diff --git a/component/componenterror/errors.go b/component/componenterror/errors.go index ecc8c6a1f1f..c4b7f4ddfbe 100644 --- a/component/componenterror/errors.go +++ b/component/componenterror/errors.go @@ -18,6 +18,8 @@ package componenterror import ( "errors" + "fmt" + "strings" "go.opentelemetry.io/collector/consumer/consumererror" ) @@ -36,5 +38,27 @@ var ( // CombineErrors converts a list of errors into one error. // Deprecated: use consumererror.CombineErrors instead. func CombineErrors(errs []error) error { - return consumererror.CombineErrors(errs) + numErrors := len(errs) + if numErrors == 0 { + // No errors + return nil + } + + if numErrors == 1 { + return errs[0] + } + + errMsgs := make([]string, 0, numErrors) + permanent := false + for _, err := range errs { + if !permanent && consumererror.IsPermanent(err) { + permanent = true + } + errMsgs = append(errMsgs, err.Error()) + } + err := fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) + if permanent { + err = consumererror.Permanent(err) + } + return err } diff --git a/component/componenterror/errors_test.go b/component/componenterror/errors_test.go new file mode 100644 index 00000000000..c0c7b8e7bd8 --- /dev/null +++ b/component/componenterror/errors_test.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package componenterror_test + +import ( + "fmt" + "testing" + + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer/consumererror" +) + +func TestCombineErrors(t *testing.T) { + testCases := []struct { + errors []error + expected string + expectNil bool + expectedPermanent bool + }{ + { + errors: []error{}, + expectNil: true, + }, + { + errors: []error{ + fmt.Errorf("foo"), + }, + expected: "foo", + }, + { + errors: []error{ + fmt.Errorf("foo"), + fmt.Errorf("bar"), + }, + expected: "[foo; bar]", + }, + { + errors: []error{ + fmt.Errorf("foo"), + fmt.Errorf("bar"), + consumererror.Permanent(fmt.Errorf("permanent"))}, + expected: "Permanent error: [foo; bar; Permanent error: permanent]", + }, + } + + for _, tc := range testCases { + got := componenterror.CombineErrors(tc.errors) + if (got == nil) != tc.expectNil { + t.Errorf("CombineErrors(%v) == nil? Got: %t. Want: %t", tc.errors, got == nil, tc.expectNil) + } + if got != nil && tc.expected != got.Error() { + t.Errorf("CombineErrors(%v) = %q. Want: %q", tc.errors, got, tc.expected) + } + if tc.expectedPermanent && !consumererror.IsPermanent(got) { + t.Errorf("CombineErrors(%v) = %q. Want: consumererror.permanent", tc.errors, got) + } + } +} diff --git a/consumer/consumererror/combineerrors.go b/consumer/consumererror/combineerrors.go index 1010c07d60c..54bbf86f5af 100644 --- a/consumer/consumererror/combineerrors.go +++ b/consumer/consumererror/combineerrors.go @@ -20,6 +20,8 @@ import ( ) // CombineErrors converts a list of errors into one error. +// +// TODO: deprecate componenterror.CombineErrors in favor of this function. func CombineErrors(errs []error) error { numErrors := len(errs) if numErrors == 0 { From fc2a7e56bd8ed6961c63deee05e712af6267567f Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Sun, 7 Feb 2021 22:59:10 +0300 Subject: [PATCH 3/9] ScrapeErrors: do not iterate over all errors Rename scrapeErrsCount field to failedScrapeCount. Add failed count in every Add, Addf call of ScrapeErrors and use this info to call CombineErrors directly instead of iterating over all errors in ScrapeErrors. Fix filesystemscraper Scrape function to not cast error into PartialScrapeError since it will cause panics after combining ScrapeErrors. Use NewPartialScrapeError instead. --- consumer/consumererror/scrapeerrors.go | 30 ++++--------------- .../filesystemscraper/filesystem_scraper.go | 4 +-- 2 files changed, 7 insertions(+), 27 deletions(-) diff --git a/consumer/consumererror/scrapeerrors.go b/consumer/consumererror/scrapeerrors.go index d687a9600c8..e5dfa473a7b 100644 --- a/consumer/consumererror/scrapeerrors.go +++ b/consumer/consumererror/scrapeerrors.go @@ -16,25 +16,24 @@ package consumererror import ( "fmt" - "strings" ) // ScrapeErrors contains multiple PartialScrapeErrors and can also contain generic errors. type ScrapeErrors struct { - errs []error - scrapeErrsCount int + errs []error + failedScrapeCount int } // Add adds a PartialScrapeError with the provided failed count and error. func (s *ScrapeErrors) Add(failed int, err error) { s.errs = append(s.errs, NewPartialScrapeError(err, failed)) - s.scrapeErrsCount++ + s.failedScrapeCount += failed } // Addf adds a PartialScrapeError with the provided failed count and arguments to format an error. 
func (s *ScrapeErrors) Addf(failed int, format string, a ...interface{}) { s.errs = append(s.errs, NewPartialScrapeError(fmt.Errorf(format, a...), failed)) - s.scrapeErrsCount++ + s.failedScrapeCount += failed } // Add adds a regular generic error. @@ -50,26 +49,9 @@ func (s *ScrapeErrors) AddRegularf(format string, a ...interface{}) { // Combine converts a slice of errors into one error. // It will return a PartialScrapeError if at least one error in the slice is a PartialScrapeError. func (s *ScrapeErrors) Combine() error { - if s.scrapeErrsCount == 0 { + if s.failedScrapeCount == 0 { return CombineErrors(s.errs) } - errMsgs := make([]string, 0, len(s.errs)) - failedScrapeCount := 0 - for _, err := range s.errs { - if partialError, isPartial := err.(PartialScrapeError); isPartial { - failedScrapeCount += partialError.Failed - } - - errMsgs = append(errMsgs, err.Error()) - } - - var err error - if len(s.errs) == 1 { - err = s.errs[0] - } else { - err = fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) - } - - return NewPartialScrapeError(err, failedScrapeCount) + return NewPartialScrapeError(CombineErrors(s.errs), s.failedScrapeCount) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go index a3b14e9f883..8c53213fbee 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -93,9 +93,7 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { err = errors.Combine() if err != nil && len(usages) == 0 { - partialErr := err.(consumererror.PartialScrapeError) - partialErr.Failed = metricsLen - err = partialErr + err = consumererror.NewPartialScrapeError(err, metricsLen) } return metrics, err From 8e036fa543f5f1fb61c4dad0f35f1870e31ce16e Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Sun, 7 Feb 2021 23:42:27 +0300 Subject: [PATCH 4/9] ScrapeErrors: fix hostmetricsreceiver Scrape err Cast getProcessMetadata() error into consumererror.PartialScrapeError to get the valid scrape errors count. --- .../internal/scraper/processscraper/process_scraper.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go index 29680806c64..975a8a9272a 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -91,11 +91,12 @@ func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) metadata, err := s.getProcessMetadata() if err != nil { - if !consumererror.IsPartialScrapeError(err) { + partialErr, isPartial := err.(consumererror.PartialScrapeError) + if !isPartial { return rms, err } - errs.AddRegular(err) + errs.Add(partialErr.Failed, partialErr) } rms.Resize(len(metadata)) From 66c1b484ce2f2ce6d8b7b22189f18c7d52c39b1f Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Mon, 8 Feb 2021 00:02:59 +0300 Subject: [PATCH 5/9] ScrapeErrors: fix multiMetricScraper.Scrape method Cast Scrape() error into consumererror.PartialScrapeError to get the valid scrape errors count and to not skip combined errors. 
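In other words, the loop body in multiMetricScraper.Scrape becomes roughly
the following sketch (the exact change is in the diff below; the comments
are explanatory only):

    metrics, err := scraper.Scrape(ctx, receiverName)
    if err != nil {
        // Keep the failed-metrics count when the scraper reported a
        // partial scrape error.
        partialErr, isPartial := err.(consumererror.PartialScrapeError)
        if isPartial {
            errs.Add(partialErr.Failed, partialErr)
        }
        // Skip the metrics of any scraper whose Scrape call failed.
        continue
    }
    metrics.MoveAndAppendTo(ilm.Metrics())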
--- receiver/scraperhelper/scrapercontroller.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/receiver/scraperhelper/scrapercontroller.go b/receiver/scraperhelper/scrapercontroller.go index 9920d5c91a7..220eba49b3b 100644 --- a/receiver/scraperhelper/scrapercontroller.go +++ b/receiver/scraperhelper/scrapercontroller.go @@ -264,10 +264,12 @@ func (mms *multiMetricScraper) Scrape(ctx context.Context, receiverName string) for _, scraper := range mms.scrapers { metrics, err := scraper.Scrape(ctx, receiverName) if err != nil { - errs.AddRegular(err) - if !consumererror.IsPartialScrapeError(err) { - continue + partialErr, isPartial := err.(consumererror.PartialScrapeError) + if isPartial { + errs.Add(partialErr.Failed, partialErr) } + + continue } metrics.MoveAndAppendTo(ilm.Metrics()) From 9c2251e18a23ac6a91326937e1d98b654856e205 Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Mon, 8 Feb 2021 13:33:32 +0300 Subject: [PATCH 6/9] ScrapeErrors: check if slice contains partial err Fix `Combine()` method to check if errs slice contains `PartialScrapeError` since we can have scrape errors with 0 failed metrics so checking only `failedScrapeCount` field is not enough. --- consumer/consumererror/scrapeerrors.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/consumer/consumererror/scrapeerrors.go b/consumer/consumererror/scrapeerrors.go index e5dfa473a7b..4b7e640da05 100644 --- a/consumer/consumererror/scrapeerrors.go +++ b/consumer/consumererror/scrapeerrors.go @@ -49,7 +49,14 @@ func (s *ScrapeErrors) AddRegularf(format string, a ...interface{}) { // Combine converts a slice of errors into one error. // It will return a PartialScrapeError if at least one error in the slice is a PartialScrapeError. func (s *ScrapeErrors) Combine() error { - if s.failedScrapeCount == 0 { + partialScrapeErr := false + for _, err := range s.errs { + if IsPartialScrapeError(err) { + partialScrapeErr = true + } + } + + if !partialScrapeErr { return CombineErrors(s.errs) } From da0f95e6cc2adc7b915e02f12fb94537d39caaf5 Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Sun, 14 Feb 2021 21:13:34 +0300 Subject: [PATCH 7/9] Fix changing comments in combineerrors_test.go Do not change comments in consumer/consumererror/combineerrors_test.go. --- consumer/consumererror/combineerrors_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consumer/consumererror/combineerrors_test.go b/consumer/consumererror/combineerrors_test.go index c465376477f..404b853f342 100644 --- a/consumer/consumererror/combineerrors_test.go +++ b/consumer/consumererror/combineerrors_test.go @@ -1,10 +1,10 @@ -// Copyright The OpenTelemetry Authors +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, From 3f920e2a602e82d965ab41c6139808199121cd93 Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Sun, 14 Feb 2021 21:19:13 +0300 Subject: [PATCH 8/9] Component error: apply 5cc0707e changes Apply 5cc0707e to component/componenterror. 
--- component/componenterror/errors.go | 26 +-------- component/componenterror/errors_test.go | 70 ------------------------- consumer/consumererror/combineerrors.go | 2 - 3 files changed, 1 insertion(+), 97 deletions(-) delete mode 100644 component/componenterror/errors_test.go diff --git a/component/componenterror/errors.go b/component/componenterror/errors.go index c4b7f4ddfbe..ecc8c6a1f1f 100644 --- a/component/componenterror/errors.go +++ b/component/componenterror/errors.go @@ -18,8 +18,6 @@ package componenterror import ( "errors" - "fmt" - "strings" "go.opentelemetry.io/collector/consumer/consumererror" ) @@ -38,27 +36,5 @@ var ( // CombineErrors converts a list of errors into one error. // Deprecated: use consumererror.CombineErrors instead. func CombineErrors(errs []error) error { - numErrors := len(errs) - if numErrors == 0 { - // No errors - return nil - } - - if numErrors == 1 { - return errs[0] - } - - errMsgs := make([]string, 0, numErrors) - permanent := false - for _, err := range errs { - if !permanent && consumererror.IsPermanent(err) { - permanent = true - } - errMsgs = append(errMsgs, err.Error()) - } - err := fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) - if permanent { - err = consumererror.Permanent(err) - } - return err + return consumererror.CombineErrors(errs) } diff --git a/component/componenterror/errors_test.go b/component/componenterror/errors_test.go deleted file mode 100644 index c0c7b8e7bd8..00000000000 --- a/component/componenterror/errors_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package componenterror_test - -import ( - "fmt" - "testing" - - "go.opentelemetry.io/collector/component/componenterror" - "go.opentelemetry.io/collector/consumer/consumererror" -) - -func TestCombineErrors(t *testing.T) { - testCases := []struct { - errors []error - expected string - expectNil bool - expectedPermanent bool - }{ - { - errors: []error{}, - expectNil: true, - }, - { - errors: []error{ - fmt.Errorf("foo"), - }, - expected: "foo", - }, - { - errors: []error{ - fmt.Errorf("foo"), - fmt.Errorf("bar"), - }, - expected: "[foo; bar]", - }, - { - errors: []error{ - fmt.Errorf("foo"), - fmt.Errorf("bar"), - consumererror.Permanent(fmt.Errorf("permanent"))}, - expected: "Permanent error: [foo; bar; Permanent error: permanent]", - }, - } - - for _, tc := range testCases { - got := componenterror.CombineErrors(tc.errors) - if (got == nil) != tc.expectNil { - t.Errorf("CombineErrors(%v) == nil? Got: %t. Want: %t", tc.errors, got == nil, tc.expectNil) - } - if got != nil && tc.expected != got.Error() { - t.Errorf("CombineErrors(%v) = %q. Want: %q", tc.errors, got, tc.expected) - } - if tc.expectedPermanent && !consumererror.IsPermanent(got) { - t.Errorf("CombineErrors(%v) = %q. 
Want: consumererror.permanent", tc.errors, got) - } - } -} diff --git a/consumer/consumererror/combineerrors.go b/consumer/consumererror/combineerrors.go index 54bbf86f5af..1010c07d60c 100644 --- a/consumer/consumererror/combineerrors.go +++ b/consumer/consumererror/combineerrors.go @@ -20,8 +20,6 @@ import ( ) // CombineErrors converts a list of errors into one error. -// -// TODO: deprecate componenterror.CombineErrors in favor of this function. func CombineErrors(errs []error) error { numErrors := len(errs) if numErrors == 0 { From 0c4c32914e40473f7e907969c0d8ea0ae25ec1b6 Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Wed, 17 Feb 2021 20:05:15 +0300 Subject: [PATCH 9/9] ScrapeErrors: rename and simplify methods Use only Add and AddPartial methods. --- consumer/consumererror/scrapeerrors.go | 21 +------ consumer/consumererror/scrapeerrors_test.go | 56 +++++-------------- .../filesystemscraper/filesystem_scraper.go | 2 +- .../scraper/networkscraper/network_scraper.go | 4 +- .../pagingscraper/paging_scraper_others.go | 4 +- .../pagingscraper/paging_scraper_windows.go | 4 +- .../scraper/processscraper/process_scraper.go | 14 ++--- receiver/scraperhelper/scrapercontroller.go | 2 +- 8 files changed, 32 insertions(+), 75 deletions(-) diff --git a/consumer/consumererror/scrapeerrors.go b/consumer/consumererror/scrapeerrors.go index 4b7e640da05..1bd0b6de12a 100644 --- a/consumer/consumererror/scrapeerrors.go +++ b/consumer/consumererror/scrapeerrors.go @@ -14,10 +14,6 @@ package consumererror -import ( - "fmt" -) - // ScrapeErrors contains multiple PartialScrapeErrors and can also contain generic errors. type ScrapeErrors struct { errs []error @@ -25,27 +21,16 @@ type ScrapeErrors struct { } // Add adds a PartialScrapeError with the provided failed count and error. -func (s *ScrapeErrors) Add(failed int, err error) { +func (s *ScrapeErrors) AddPartial(failed int, err error) { s.errs = append(s.errs, NewPartialScrapeError(err, failed)) s.failedScrapeCount += failed } -// Addf adds a PartialScrapeError with the provided failed count and arguments to format an error. -func (s *ScrapeErrors) Addf(failed int, format string, a ...interface{}) { - s.errs = append(s.errs, NewPartialScrapeError(fmt.Errorf(format, a...), failed)) - s.failedScrapeCount += failed -} - -// Add adds a regular generic error. -func (s *ScrapeErrors) AddRegular(err error) { +// Add adds a regular error. +func (s *ScrapeErrors) Add(err error) { s.errs = append(s.errs, err) } -// Add adds a regular generic error from the provided format specifier. -func (s *ScrapeErrors) AddRegularf(format string, a ...interface{}) { - s.errs = append(s.errs, fmt.Errorf(format, a...)) -} - // Combine converts a slice of errors into one error. // It will return a PartialScrapeError if at least one error in the slice is a PartialScrapeError. 
func (s *ScrapeErrors) Combine() error { diff --git a/consumer/consumererror/scrapeerrors_test.go b/consumer/consumererror/scrapeerrors_test.go index 044377047fb..dd3c3df945e 100644 --- a/consumer/consumererror/scrapeerrors_test.go +++ b/consumer/consumererror/scrapeerrors_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestScrapeErrorsAdd(t *testing.T) { +func TestScrapeErrorsAddPartial(t *testing.T) { err1 := errors.New("err 1") err2 := errors.New("err 2") expected := []error{ @@ -31,47 +31,19 @@ func TestScrapeErrorsAdd(t *testing.T) { } var errs ScrapeErrors - errs.Add(1, err1) - errs.Add(10, err2) - assert.Equal(t, expected, errs.errs) -} - -func TestScrapeErrorsAddf(t *testing.T) { - err1 := errors.New("err 10") - err2 := errors.New("err 20") - expected := []error{ - PartialScrapeError{error: fmt.Errorf("err: %s", err1), Failed: 20}, - PartialScrapeError{error: fmt.Errorf("err %s: %w", "2", err2), Failed: 2}, - } - - var errs ScrapeErrors - errs.Addf(20, "err: %s", err1) - errs.Addf(2, "err %s: %w", "2", err2) + errs.AddPartial(1, err1) + errs.AddPartial(10, err2) assert.Equal(t, expected, errs.errs) } -func TestScrapeErrorsAddRegular(t *testing.T) { +func TestScrapeErrorsAdd(t *testing.T) { err1 := errors.New("err a") err2 := errors.New("err b") expected := []error{err1, err2} var errs ScrapeErrors - errs.AddRegular(err1) - errs.AddRegular(err2) - assert.Equal(t, expected, errs.errs) -} - -func TestScrapeErrorsAddRegularf(t *testing.T) { - err1 := errors.New("err aa") - err2 := errors.New("err bb") - expected := []error{ - fmt.Errorf("err: %s", err1), - fmt.Errorf("err %s: %w", "bb", err2), - } - - var errs ScrapeErrors - errs.AddRegularf("err: %s", err1) - errs.AddRegularf("err %s: %w", "bb", err2) + errs.Add(err1) + errs.Add(err2) assert.Equal(t, expected, errs.errs) } @@ -93,8 +65,8 @@ func TestScrapeErrorsCombine(t *testing.T) { { errs: func() ScrapeErrors { var errs ScrapeErrors - errs.Add(10, errors.New("bad scrapes")) - errs.Addf(1, "err: %s", errors.New("bad scrape")) + errs.AddPartial(10, errors.New("bad scrapes")) + errs.AddPartial(1, fmt.Errorf("err: %s", errors.New("bad scrape"))) return errs }, expectedErr: "[bad scrapes; err: bad scrape]", @@ -104,8 +76,8 @@ func TestScrapeErrorsCombine(t *testing.T) { { errs: func() ScrapeErrors { var errs ScrapeErrors - errs.AddRegular(errors.New("bad regular")) - errs.AddRegularf("err: %s", errors.New("bad reg")) + errs.Add(errors.New("bad regular")) + errs.Add(fmt.Errorf("err: %s", errors.New("bad reg"))) return errs }, expectedErr: "[bad regular; err: bad reg]", @@ -113,10 +85,10 @@ func TestScrapeErrorsCombine(t *testing.T) { { errs: func() ScrapeErrors { var errs ScrapeErrors - errs.Add(2, errors.New("bad two scrapes")) - errs.Addf(10, "%d scrapes failed: %s", 10, errors.New("bad things happened")) - errs.AddRegular(errors.New("bad event")) - errs.AddRegularf("event: %s", errors.New("something happened")) + errs.AddPartial(2, errors.New("bad two scrapes")) + errs.AddPartial(10, fmt.Errorf("%d scrapes failed: %s", 10, errors.New("bad things happened"))) + errs.Add(errors.New("bad event")) + errs.Add(fmt.Errorf("event: %s", errors.New("something happened"))) return errs }, expectedErr: "[bad two scrapes; 10 scrapes failed: bad things happened; bad event; event: something happened]", diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go index 8c53213fbee..01279ea8d04 
100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -78,7 +78,7 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { } usage, usageErr := s.usage(partition.Mountpoint) if usageErr != nil { - errors.Add(0, usageErr) + errors.AddPartial(0, usageErr) continue } diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go index a25febf339e..5be6838b373 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go @@ -88,12 +88,12 @@ func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { err := s.scrapeAndAppendNetworkCounterMetrics(metrics, s.startTime) if err != nil { - errors.Add(networkMetricsLen, err) + errors.AddPartial(networkMetricsLen, err) } err = s.scrapeAndAppendNetworkConnectionsMetric(metrics) if err != nil { - errors.Add(connectionsMetricsLen, err) + errors.AddPartial(connectionsMetricsLen, err) } return metrics, errors.Combine() diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go index 82bad8ef6d1..f28b255b608 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go @@ -68,12 +68,12 @@ func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { err := s.scrapeAndAppendPagingUsageMetric(metrics) if err != nil { - errors.Add(pagingUsageMetricsLen, err) + errors.AddPartial(pagingUsageMetricsLen, err) } err = s.scrapeAndAppendPagingMetrics(metrics) if err != nil { - errors.Add(pagingMetricsLen, err) + errors.AddPartial(pagingMetricsLen, err) } return metrics, errors.Combine() diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go index ca9e1999ef7..cbc12a464ec 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go @@ -85,12 +85,12 @@ func (s *scraper) scrape(context.Context) (pdata.MetricSlice, error) { err := s.scrapeAndAppendPagingUsageMetric(metrics) if err != nil { - errors.Add(pagingUsageMetricsLen, err) + errors.AddPartial(pagingUsageMetricsLen, err) } err = s.scrapeAndAppendPagingOperationsMetric(metrics) if err != nil { - errors.Add(pagingMetricsLen, err) + errors.AddPartial(pagingMetricsLen, err) } return metrics, errors.Combine() diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go index 975a8a9272a..7fc65fd29be 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -96,7 +96,7 @@ func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) return rms, err } - errs.Add(partialErr.Failed, partialErr) + errs.AddPartial(partialErr.Failed, partialErr) } 
rms.Resize(len(metadata)) @@ -111,15 +111,15 @@ func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) now := internal.TimeToUnixNano(time.Now()) if err = scrapeAndAppendCPUTimeMetric(metrics, s.startTime, now, md.handle); err != nil { - errs.Addf(cpuMetricsLen, "error reading cpu times for process %q (pid %v): %w", md.executable.name, md.pid, err) + errs.AddPartial(cpuMetricsLen, fmt.Errorf("error reading cpu times for process %q (pid %v): %w", md.executable.name, md.pid, err)) } if err = scrapeAndAppendMemoryUsageMetrics(metrics, now, md.handle); err != nil { - errs.Addf(memoryMetricsLen, "error reading memory info for process %q (pid %v): %w", md.executable.name, md.pid, err) + errs.AddPartial(memoryMetricsLen, fmt.Errorf("error reading memory info for process %q (pid %v): %w", md.executable.name, md.pid, err)) } if err = scrapeAndAppendDiskIOMetric(metrics, s.startTime, now, md.handle); err != nil { - errs.Addf(diskMetricsLen, "error reading disk usage for process %q (pid %v): %w", md.executable.name, md.pid, err) + errs.AddPartial(diskMetricsLen, fmt.Errorf("error reading disk usage for process %q (pid %v): %w", md.executable.name, md.pid, err)) } } @@ -145,7 +145,7 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { executable, err := getProcessExecutable(handle) if err != nil { - errs.Addf(1, "error reading process name for pid %v: %w", pid, err) + errs.AddPartial(1, fmt.Errorf("error reading process name for pid %v: %w", pid, err)) continue } @@ -157,12 +157,12 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { command, err := getProcessCommand(handle) if err != nil { - errs.Addf(0, "error reading command for process %q (pid %v): %w", executable.name, pid, err) + errs.AddPartial(0, fmt.Errorf("error reading command for process %q (pid %v): %w", executable.name, pid, err)) } username, err := handle.Username() if err != nil { - errs.Addf(0, "error reading username for process %q (pid %v): %w", executable.name, pid, err) + errs.AddPartial(0, fmt.Errorf("error reading username for process %q (pid %v): %w", executable.name, pid, err)) } md := &processMetadata{ diff --git a/receiver/scraperhelper/scrapercontroller.go b/receiver/scraperhelper/scrapercontroller.go index 220eba49b3b..c0ea0409dc6 100644 --- a/receiver/scraperhelper/scrapercontroller.go +++ b/receiver/scraperhelper/scrapercontroller.go @@ -266,7 +266,7 @@ func (mms *multiMetricScraper) Scrape(ctx context.Context, receiverName string) if err != nil { partialErr, isPartial := err.(consumererror.PartialScrapeError) if isPartial { - errs.Add(partialErr.Failed, partialErr) + errs.AddPartial(partialErr.Failed, partialErr) } continue
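For reference, a minimal usage sketch of the ScrapeErrors API as it stands
after this final patch (illustrative only: the error messages and failed
counts are made up, and it assumes the consumererror package from this series
is on the import path):

    package main

    import (
        "errors"
        "fmt"

        "go.opentelemetry.io/collector/consumer/consumererror"
    )

    func main() {
        var errs consumererror.ScrapeErrors

        // A failure that prevented some metrics from being scraped is recorded
        // with its failed-metrics count and becomes a PartialScrapeError.
        errs.AddPartial(3, errors.New("reading cpu times failed"))

        // A failure with no associated metric count is added as a regular error.
        errs.Add(errors.New("unexpected scraper state"))

        // Combine returns a single error. Because at least one partial error was
        // recorded, it is a PartialScrapeError whose Failed field sums the counts.
        err := errs.Combine()
        if partialErr, isPartial := err.(consumererror.PartialScrapeError); isPartial {
            // Prints: 3 [reading cpu times failed; unexpected scraper state]
            fmt.Println(partialErr.Failed, partialErr.Error())
        }
    }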