diff --git a/component/componenterror/errors.go b/component/componenterror/errors.go index 0a89c29b3f23..3d8f010fc9b2 100644 --- a/component/componenterror/errors.go +++ b/component/componenterror/errors.go @@ -18,8 +18,6 @@ package componenterror import ( "errors" - "fmt" - "strings" "go.opentelemetry.io/collector/consumer/consumererror" ) @@ -37,27 +35,5 @@ var ( // CombineErrors converts a list of errors into one error. func CombineErrors(errs []error) error { - numErrors := len(errs) - if numErrors == 0 { - // No errors - return nil - } - - if numErrors == 1 { - return errs[0] - } - - errMsgs := make([]string, 0, numErrors) - permanent := false - for _, err := range errs { - if !permanent && consumererror.IsPermanent(err) { - permanent = true - } - errMsgs = append(errMsgs, err.Error()) - } - err := fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) - if permanent { - err = consumererror.Permanent(err) - } - return err + return consumererror.CombineErrors(errs) } diff --git a/consumer/consumererror/combineerrors.go b/consumer/consumererror/combineerrors.go new file mode 100644 index 000000000000..6ee640caeee7 --- /dev/null +++ b/consumer/consumererror/combineerrors.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumererror + +import ( + "fmt" + "strings" +) + +// CombineErrors converts a list of errors into one error. 
+func CombineErrors(errs []error) error { + numErrors := len(errs) + if numErrors == 0 { + // No errors + return nil + } + + if numErrors == 1 { + return errs[0] + } + + errMsgs := make([]string, 0, numErrors) + permanent := false + for _, err := range errs { + if !permanent && IsPermanent(err) { + permanent = true + } + errMsgs = append(errMsgs, err.Error()) + } + err := fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) + if permanent { + err = Permanent(err) + } + return err +} diff --git a/component/componenterror/errors_test.go b/consumer/consumererror/combineerrors_test.go similarity index 78% rename from component/componenterror/errors_test.go rename to consumer/consumererror/combineerrors_test.go index c0c7b8e7bd85..c465376477f4 100644 --- a/component/componenterror/errors_test.go +++ b/consumer/consumererror/combineerrors_test.go @@ -1,10 +1,10 @@ -// Copyright The OpenTelemetry Authors +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,14 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package componenterror_test +package consumererror import ( "fmt" "testing" - - "go.opentelemetry.io/collector/component/componenterror" - "go.opentelemetry.io/collector/consumer/consumererror" ) func TestCombineErrors(t *testing.T) { @@ -50,20 +47,20 @@ func TestCombineErrors(t *testing.T) { errors: []error{ fmt.Errorf("foo"), fmt.Errorf("bar"), - consumererror.Permanent(fmt.Errorf("permanent"))}, + Permanent(fmt.Errorf("permanent"))}, expected: "Permanent error: [foo; bar; Permanent error: permanent]", }, } for _, tc := range testCases { - got := componenterror.CombineErrors(tc.errors) + got := CombineErrors(tc.errors) if (got == nil) != tc.expectNil { t.Errorf("CombineErrors(%v) == nil? Got: %t. Want: %t", tc.errors, got == nil, tc.expectNil) } if got != nil && tc.expected != got.Error() { t.Errorf("CombineErrors(%v) = %q. Want: %q", tc.errors, got, tc.expected) } - if tc.expectedPermanent && !consumererror.IsPermanent(got) { + if tc.expectedPermanent && !IsPermanent(got) { t.Errorf("CombineErrors(%v) = %q. Want: consumererror.permanent", tc.errors, got) } } diff --git a/consumer/consumererror/scrapeerrors.go b/consumer/consumererror/scrapeerrors.go new file mode 100644 index 000000000000..d687a9600c8a --- /dev/null +++ b/consumer/consumererror/scrapeerrors.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package consumererror
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ScrapeErrors contains multiple PartialScrapeErrors and can also contain generic errors.
+type ScrapeErrors struct {
+	errs            []error
+	scrapeErrsCount int
+}
+
+// Add adds a PartialScrapeError with the provided failed count and error.
+func (s *ScrapeErrors) Add(failed int, err error) {
+	s.errs = append(s.errs, NewPartialScrapeError(err, failed))
+	s.scrapeErrsCount++
+}
+
+// Addf adds a PartialScrapeError with the provided failed count and arguments to format an error.
+func (s *ScrapeErrors) Addf(failed int, format string, a ...interface{}) {
+	s.errs = append(s.errs, NewPartialScrapeError(fmt.Errorf(format, a...), failed))
+	s.scrapeErrsCount++
+}
+
+// AddRegular adds a regular generic error.
+func (s *ScrapeErrors) AddRegular(err error) {
+	s.errs = append(s.errs, err)
+}
+
+// AddRegularf adds a regular generic error from the provided format specifier.
+func (s *ScrapeErrors) AddRegularf(format string, a ...interface{}) {
+	s.errs = append(s.errs, fmt.Errorf(format, a...))
+}
+
+// Combine converts a slice of errors into one error.
+// It will return a PartialScrapeError if at least one error in the slice is a PartialScrapeError.
+func (s *ScrapeErrors) Combine() error { + if s.scrapeErrsCount == 0 { + return CombineErrors(s.errs) + } + + errMsgs := make([]string, 0, len(s.errs)) + failedScrapeCount := 0 + for _, err := range s.errs { + if partialError, isPartial := err.(PartialScrapeError); isPartial { + failedScrapeCount += partialError.Failed + } + + errMsgs = append(errMsgs, err.Error()) + } + + var err error + if len(s.errs) == 1 { + err = s.errs[0] + } else { + err = fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) + } + + return NewPartialScrapeError(err, failedScrapeCount) +} diff --git a/consumer/consumererror/scrapeerrors_test.go b/consumer/consumererror/scrapeerrors_test.go new file mode 100644 index 000000000000..044377047fba --- /dev/null +++ b/consumer/consumererror/scrapeerrors_test.go @@ -0,0 +1,145 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package consumererror + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestScrapeErrorsAdd(t *testing.T) { + err1 := errors.New("err 1") + err2 := errors.New("err 2") + expected := []error{ + PartialScrapeError{error: err1, Failed: 1}, + PartialScrapeError{error: err2, Failed: 10}, + } + + var errs ScrapeErrors + errs.Add(1, err1) + errs.Add(10, err2) + assert.Equal(t, expected, errs.errs) +} + +func TestScrapeErrorsAddf(t *testing.T) { + err1 := errors.New("err 10") + err2 := errors.New("err 20") + expected := []error{ + PartialScrapeError{error: fmt.Errorf("err: %s", err1), Failed: 20}, + PartialScrapeError{error: fmt.Errorf("err %s: %w", "2", err2), Failed: 2}, + } + + var errs ScrapeErrors + errs.Addf(20, "err: %s", err1) + errs.Addf(2, "err %s: %w", "2", err2) + assert.Equal(t, expected, errs.errs) +} + +func TestScrapeErrorsAddRegular(t *testing.T) { + err1 := errors.New("err a") + err2 := errors.New("err b") + expected := []error{err1, err2} + + var errs ScrapeErrors + errs.AddRegular(err1) + errs.AddRegular(err2) + assert.Equal(t, expected, errs.errs) +} + +func TestScrapeErrorsAddRegularf(t *testing.T) { + err1 := errors.New("err aa") + err2 := errors.New("err bb") + expected := []error{ + fmt.Errorf("err: %s", err1), + fmt.Errorf("err %s: %w", "bb", err2), + } + + var errs ScrapeErrors + errs.AddRegularf("err: %s", err1) + errs.AddRegularf("err %s: %w", "bb", err2) + assert.Equal(t, expected, errs.errs) +} + +func TestScrapeErrorsCombine(t *testing.T) { + testCases := []struct { + errs func() ScrapeErrors + expectedErr string + expectedFailedCount int + expectNil bool + expectedScrape bool + }{ + { + errs: func() ScrapeErrors { + var errs ScrapeErrors + return errs + }, + expectNil: true, + }, + { + errs: func() ScrapeErrors { + var errs ScrapeErrors + errs.Add(10, errors.New("bad scrapes")) + errs.Addf(1, "err: %s", errors.New("bad scrape")) + return errs + }, + expectedErr: "[bad scrapes; err: bad 
scrape]", + expectedFailedCount: 11, + expectedScrape: true, + }, + { + errs: func() ScrapeErrors { + var errs ScrapeErrors + errs.AddRegular(errors.New("bad regular")) + errs.AddRegularf("err: %s", errors.New("bad reg")) + return errs + }, + expectedErr: "[bad regular; err: bad reg]", + }, + { + errs: func() ScrapeErrors { + var errs ScrapeErrors + errs.Add(2, errors.New("bad two scrapes")) + errs.Addf(10, "%d scrapes failed: %s", 10, errors.New("bad things happened")) + errs.AddRegular(errors.New("bad event")) + errs.AddRegularf("event: %s", errors.New("something happened")) + return errs + }, + expectedErr: "[bad two scrapes; 10 scrapes failed: bad things happened; bad event; event: something happened]", + expectedFailedCount: 12, + expectedScrape: true, + }, + } + + for _, tc := range testCases { + scrapeErrs := tc.errs() + if (scrapeErrs.Combine() == nil) != tc.expectNil { + t.Errorf("%+v.Combine() == nil? Got: %t. Want: %t", scrapeErrs, scrapeErrs.Combine() == nil, tc.expectNil) + } + if scrapeErrs.Combine() != nil && tc.expectedErr != scrapeErrs.Combine().Error() { + t.Errorf("%+v.Combine() = %q. Want: %q", scrapeErrs, scrapeErrs.Combine(), tc.expectedErr) + } + if tc.expectedScrape { + partialScrapeErr, ok := scrapeErrs.Combine().(PartialScrapeError) + if !ok { + t.Errorf("%+v.Combine() = %q. Want: PartialScrapeError", scrapeErrs, scrapeErrs.Combine()) + } else if tc.expectedFailedCount != partialScrapeErr.Failed { + t.Errorf("%+v.Combine().Failed. Got %d Failed count. 
Want: %d", scrapeErrs, partialScrapeErr.Failed, tc.expectedFailedCount) + } + } + } +} diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go index 0287ee074ba2..a3b14e9f883b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -25,7 +25,6 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -71,7 +70,7 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { return metrics, consumererror.NewPartialScrapeError(err, metricsLen) } - var errors []error + var errors consumererror.ScrapeErrors usages := make([]*deviceUsage, 0, len(partitions)) for _, partition := range partitions { if !s.fsFilter.includePartition(partition) { @@ -79,7 +78,7 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { } usage, usageErr := s.usage(partition.Mountpoint) if usageErr != nil { - errors = append(errors, consumererror.NewPartialScrapeError(usageErr, 0)) + errors.Add(0, usageErr) continue } @@ -92,7 +91,7 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { appendSystemSpecificMetrics(metrics, 1, now, usages) } - err = scraperhelper.CombineScrapeErrors(errors) + err = errors.Combine() if err != nil && len(usages) == 0 { partialErr := err.(consumererror.PartialScrapeError) partialErr.Failed = metricsLen diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go index a12a336b931f..d74610098347 100644 --- 
a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go @@ -27,7 +27,6 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -84,19 +83,19 @@ func (s *scraper) start(context.Context, component.Host) error { func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { metrics := pdata.NewMetricSlice() - var errors []error + var errors consumererror.ScrapeErrors err := s.scrapeAndAppendNetworkCounterMetrics(metrics, s.startTime) if err != nil { - errors = append(errors, err) + errors.Add(networkMetricsLen, err) } err = s.scrapeAndAppendNetworkConnectionsMetric(metrics) if err != nil { - errors = append(errors, err) + errors.Add(connectionsMetricsLen, err) } - return metrics, scraperhelper.CombineScrapeErrors(errors) + return metrics, errors.Combine() } func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics pdata.MetricSlice, startTime pdata.TimestampUnixNano) error { @@ -105,7 +104,7 @@ func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics pdata.MetricSlice // get total stats only ioCounters, err := s.ioCounters( /*perNetworkInterfaceController=*/ true) if err != nil { - return consumererror.NewPartialScrapeError(err, networkMetricsLen) + return err } // filter network interfaces by name @@ -181,7 +180,7 @@ func (s *scraper) scrapeAndAppendNetworkConnectionsMetric(metrics pdata.MetricSl connections, err := s.connections("tcp") if err != nil { - return consumererror.NewPartialScrapeError(err, connectionsMetricsLen) + return err } tcpConnectionStatusCounts := getTCPConnectionStatusCounts(connections) diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go 
b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go index d913379e9aeb..9e45a7bb847d 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go @@ -27,7 +27,6 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -64,26 +63,26 @@ func (s *scraper) start(context.Context, component.Host) error { func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { metrics := pdata.NewMetricSlice() - var errors []error + var errors consumererror.ScrapeErrors err := s.scrapeAndAppendPagingUsageMetric(metrics) if err != nil { - errors = append(errors, err) + errors.Add(pagingUsageMetricsLen, err) } err = s.scrapeAndAppendPagingMetrics(metrics) if err != nil { - errors = append(errors, err) + errors.Add(pagingMetricsLen, err) } - return metrics, scraperhelper.CombineScrapeErrors(errors) + return metrics, errors.Combine() } func (s *scraper) scrapeAndAppendPagingUsageMetric(metrics pdata.MetricSlice) error { now := internal.TimeToUnixNano(time.Now()) vmem, err := s.virtualMemory() if err != nil { - return consumererror.NewPartialScrapeError(err, pagingUsageMetricsLen) + return err } idx := metrics.Len() @@ -113,7 +112,7 @@ func (s *scraper) scrapeAndAppendPagingMetrics(metrics pdata.MetricSlice) error now := internal.TimeToUnixNano(time.Now()) swap, err := s.swapMemory() if err != nil { - return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + return err } idx := metrics.Len() diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go index e93cf8ffb7d0..5d695ea2d51a 100644 
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go @@ -28,7 +28,6 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -81,26 +80,26 @@ func (s *scraper) start(context.Context, component.Host) error { func (s *scraper) scrape(context.Context) (pdata.MetricSlice, error) { metrics := pdata.NewMetricSlice() - var errors []error + var errors consumererror.ScrapeErrors err := s.scrapeAndAppendPagingUsageMetric(metrics) if err != nil { - errors = append(errors, err) + errors.Add(pagingUsageMetricsLen, err) } err = s.scrapeAndAppendPagingOperationsMetric(metrics) if err != nil { - errors = append(errors, err) + errors.Add(pagingMetricsLen, err) } - return metrics, scraperhelper.CombineScrapeErrors(errors) + return metrics, errors.Combine() } func (s *scraper) scrapeAndAppendPagingUsageMetric(metrics pdata.MetricSlice) error { now := internal.TimeToUnixNano(time.Now()) pageFiles, err := s.pageFileStats() if err != nil { - return consumererror.NewPartialScrapeError(err, pagingUsageMetricsLen) + return err } idx := metrics.Len() @@ -136,17 +135,17 @@ func (s *scraper) scrapeAndAppendPagingOperationsMetric(metrics pdata.MetricSlic counters, err := s.perfCounterScraper.Scrape() if err != nil { - return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + return err } memoryObject, err := counters.GetObject(memory) if err != nil { - return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + return err } memoryCounterValues, err := memoryObject.GetValues(pageReadsPerSec, pageWritesPerSec) if err != nil { - return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + return err } if 
len(memoryCounterValues) > 0 { diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go index 8588b6c75c1a..9f4b3db5a136 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -28,7 +28,6 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" - "go.opentelemetry.io/collector/receiver/scraperhelper" ) const ( @@ -87,7 +86,7 @@ func (s *scraper) start(context.Context, component.Host) error { func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) { rms := pdata.NewResourceMetricsSlice() - var errs []error + var errs consumererror.ScrapeErrors metadata, err := s.getProcessMetadata() if err != nil { @@ -95,7 +94,7 @@ func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) return rms, err } - errs = append(errs, err) + errs.AddRegular(err) } rms.Resize(len(metadata)) @@ -110,19 +109,19 @@ func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) now := internal.TimeToUnixNano(time.Now()) if err = scrapeAndAppendCPUTimeMetric(metrics, s.startTime, now, md.handle); err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading cpu times for process %q (pid %v): %w", md.executable.name, md.pid, err), cpuMetricsLen)) + errs.Addf(cpuMetricsLen, "error reading cpu times for process %q (pid %v): %w", md.executable.name, md.pid, err) } if err = scrapeAndAppendMemoryUsageMetrics(metrics, now, md.handle); err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading memory info for process %q (pid %v): %w", md.executable.name, md.pid, err), memoryMetricsLen)) + 
errs.Addf(memoryMetricsLen, "error reading memory info for process %q (pid %v): %w", md.executable.name, md.pid, err) } if err = scrapeAndAppendDiskIOMetric(metrics, s.startTime, now, md.handle); err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading disk usage for process %q (pid %v): %w", md.executable.name, md.pid, err), diskMetricsLen)) + errs.Addf(diskMetricsLen, "error reading disk usage for process %q (pid %v): %w", md.executable.name, md.pid, err) } } - return rms, scraperhelper.CombineScrapeErrors(errs) + return rms, errs.Combine() } // getProcessMetadata returns a slice of processMetadata, including handles, @@ -135,7 +134,8 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { return nil, err } - var errs []error + var errs consumererror.ScrapeErrors + metadata := make([]*processMetadata, 0, handles.Len()) for i := 0; i < handles.Len(); i++ { pid := handles.Pid(i) @@ -143,7 +143,7 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { executable, err := getProcessExecutable(handle) if err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading process name for pid %v: %w", pid, err), 1)) + errs.Addf(1, "error reading process name for pid %v: %w", pid, err) continue } @@ -155,12 +155,12 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { command, err := getProcessCommand(handle) if err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading command for process %q (pid %v): %w", executable.name, pid, err), 0)) + errs.Addf(0, "error reading command for process %q (pid %v): %w", executable.name, pid, err) } username, err := handle.Username() if err != nil { - errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading username for process %q (pid %v): %w", executable.name, pid, err), 0)) + errs.Addf(0, "error reading username for process %q (pid %v): %w", 
executable.name, pid, err) } md := &processMetadata{ @@ -174,7 +174,7 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { metadata = append(metadata, md) } - return metadata, scraperhelper.CombineScrapeErrors(errs) + return metadata, errs.Combine() } func scrapeAndAppendCPUTimeMetric(metrics pdata.MetricSlice, startTime, now pdata.TimestampUnixNano, handle processHandle) error { diff --git a/receiver/scraperhelper/errors.go b/receiver/scraperhelper/errors.go deleted file mode 100644 index 6134aebd17f4..000000000000 --- a/receiver/scraperhelper/errors.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scraperhelper - -import ( - "errors" - "fmt" - "strings" - - "go.opentelemetry.io/collector/component/componenterror" - "go.opentelemetry.io/collector/consumer/consumererror" -) - -// CombineScrapeErrors converts a list of errors into one error. 
-func CombineScrapeErrors(errs []error) error { - partialScrapeErr := false - for _, err := range errs { - var partialError consumererror.PartialScrapeError - if errors.As(err, &partialError) { - partialScrapeErr = true - break - } - } - - if !partialScrapeErr { - return componenterror.CombineErrors(errs) - } - - errMsgs := make([]string, 0, len(errs)) - failedScrapeCount := 0 - for _, err := range errs { - if partialError, isPartial := err.(consumererror.PartialScrapeError); isPartial { - failedScrapeCount += partialError.Failed - } - - errMsgs = append(errMsgs, err.Error()) - } - - var err error - if len(errs) == 1 { - err = errs[0] - } else { - err = fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) - } - - return consumererror.NewPartialScrapeError(err, failedScrapeCount) -} diff --git a/receiver/scraperhelper/errors_test.go b/receiver/scraperhelper/errors_test.go deleted file mode 100644 index e5def5a7c443..000000000000 --- a/receiver/scraperhelper/errors_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package scraperhelper - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "go.opentelemetry.io/collector/consumer/consumererror" -) - -func TestCombineScrapeErrors(t *testing.T) { - testCases := []struct { - errors []error - expected string - expectNil bool - expectedPartialScrapeErr bool - expectedFailedScrapeCount int - }{ - { - errors: []error{}, - expectNil: true, - }, - { - errors: []error{ - fmt.Errorf("foo"), - }, - expected: "foo", - }, - { - errors: []error{ - fmt.Errorf("foo"), - fmt.Errorf("bar"), - }, - expected: "[foo; bar]", - }, - { - errors: []error{ - fmt.Errorf("foo"), - fmt.Errorf("bar"), - consumererror.NewPartialScrapeError(fmt.Errorf("partial"), 0)}, - expected: "[foo; bar; partial]", - expectedPartialScrapeErr: true, - expectedFailedScrapeCount: 0, - }, - { - errors: []error{ - fmt.Errorf("foo"), - fmt.Errorf("bar"), - consumererror.NewPartialScrapeError(fmt.Errorf("partial 1"), 2), - consumererror.NewPartialScrapeError(fmt.Errorf("partial 2"), 3)}, - expected: "[foo; bar; partial 1; partial 2]", - expectedPartialScrapeErr: true, - expectedFailedScrapeCount: 5, - }, - } - - for _, tc := range testCases { - got := CombineScrapeErrors(tc.errors) - - if tc.expectNil { - assert.NoError(t, got, tc.expected) - } else { - assert.EqualError(t, got, tc.expected) - } - - partialErr, isPartial := got.(consumererror.PartialScrapeError) - assert.Equal(t, tc.expectedPartialScrapeErr, isPartial) - - if tc.expectedPartialScrapeErr && isPartial { - assert.Equal(t, tc.expectedFailedScrapeCount, partialErr.Failed) - } - } -} diff --git a/receiver/scraperhelper/scrapercontroller.go b/receiver/scraperhelper/scrapercontroller.go index e829f6ca204e..57c5bc39cddb 100644 --- a/receiver/scraperhelper/scrapercontroller.go +++ b/receiver/scraperhelper/scrapercontroller.go @@ -260,11 +260,11 @@ func (mms *multiMetricScraper) Scrape(ctx context.Context, receiverName string) ilms.Resize(1) ilm := ilms.At(0) - var errs []error + var errs 
consumererror.ScrapeErrors for _, scraper := range mms.scrapers { metrics, err := scraper.Scrape(ctx, receiverName) if err != nil { - errs = append(errs, err) + errs.AddRegular(err) if !consumererror.IsPartialScrapeError(err) { continue } @@ -272,5 +272,5 @@ func (mms *multiMetricScraper) Scrape(ctx context.Context, receiverName string) metrics.MoveAndAppendTo(ilm.Metrics()) } - return rms, CombineScrapeErrors(errs) + return rms, errs.Combine() }