[elasticsearchexporter] Direct serialization without objmodel in OTel mode #37032

Open: wants to merge 36 commits into main from es-direct-serialization.

Changes shown from 7 commits (of 36).

Commits:
f23c80a  Serialize logs directly to JSON in OTel mode (felixbarny, Jan 6, 2025)
74e94ff  Serialize spans and span events without objmodel (felixbarny, Jan 6, 2025)
8956a9e  Serialize metrics without objmodel (felixbarny, Jan 6, 2025)
eb36c67  Add changelog (felixbarny, Jan 6, 2025)
3c50b69  Merge remote-tracking branch 'origin/main' into es-direct-serialization (felixbarny, Jan 6, 2025)
ea0ac70  goporto (felixbarny, Jan 6, 2025)
2fc6b0b  Fix linting issues (felixbarny, Jan 6, 2025)
40c20e0  Merge remote-tracking branch 'origin/main' into es-direct-serialization (felixbarny, Jan 8, 2025)
d938537  Merge remote-tracking branch 'origin/main' into es-direct-serialization (felixbarny, Jan 9, 2025)
d3e8c7a  Add event_name for logs (felixbarny, Jan 10, 2025)
8679172  Merge branch 'main' into es-direct-serialization (felixbarny, Jan 10, 2025)
e09e0e5  Remove all error handling from serialization code (felixbarny, Jan 10, 2025)
539fa9d  Avoid copying attributes (felixbarny, Jan 10, 2025)
7ba2575  Propagate isEvent flag to writeLogBody function (felixbarny, Jan 10, 2025)
80976d0  Merge remote-tracking branch 'origin/main' into es-direct-serialization (felixbarny, Jan 10, 2025)
b15169d  write geo attribute keys (felixbarny, Jan 10, 2025)
5e523c5  Pool buffers (felixbarny, Jan 10, 2025)
fb5f38d  Add subtext to changelog (felixbarny, Jan 10, 2025)
1fb2156  Fix checkapi error (felixbarny, Jan 10, 2025)
d837d4f  Merge branch 'main' into es-direct-serialization (felixbarny, Jan 10, 2025)
29e9daf  gotidy (felixbarny, Jan 10, 2025)
19d0c94  Apply suggestions from code review (felixbarny, Jan 11, 2025)
43b7869  fix stale comment (felixbarny, Jan 11, 2025)
cd16343  fix typo in file name (felixbarny, Jan 11, 2025)
d150493  Remove otel serialization code from objmodel (felixbarny, Jan 11, 2025)
90f46f7  Move bufferpool to dedicated package (felixbarny, Jan 11, 2025)
20e960c  Fix geo serialization (felixbarny, Jan 11, 2025)
60bc183  Move mergeGeoLocation to pdata_serializer.go (felixbarny, Jan 11, 2025)
16145d2  Fix imports (felixbarny, Jan 11, 2025)
69ae5ad  Remove appendValueOnConflict parameter as it's always true (felixbarny, Jan 11, 2025)
8fcd99b  Log validation error when metric with same name has already been seri… (felixbarny, Jan 11, 2025)
a99c3fc  make goporto (felixbarny, Jan 11, 2025)
4277f76  Optimize and fix geo attribute serialization (felixbarny, Jan 12, 2025)
2328a7a  Optimize timestamp serialization (felixbarny, Jan 12, 2025)
1dc4635  Add todo for more optimization for metrics (felixbarny, Jan 12, 2025)
64d258c  Merge branch 'main' into es-direct-serialization (felixbarny, Jan 12, 2025)
27 changes: 27 additions & 0 deletions .chloggen/elasticsearchexporter_optimized-json-encoding.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: elasticsearchexporter

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: More efficient JSON encoding for OTel mode

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [37032]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
83 changes: 38 additions & 45 deletions exporter/elasticsearchexporter/exporter.go
@@ -19,8 +19,6 @@ import (
 	"go.opentelemetry.io/collector/pdata/pmetric"
 	"go.opentelemetry.io/collector/pdata/ptrace"
 	"go.uber.org/zap"
-
-	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/objmodel"
 )
 
 type elasticsearchExporter struct {
@@ -193,21 +191,18 @@ func (e *elasticsearchExporter) pushMetricsData(
 	}
 	defer session.End()
 
-	var (
-		validationErrs []error // log instead of returning these so that upstream does not retry
-		errs           []error
-	)
+	var errs []error
 	resourceMetrics := metrics.ResourceMetrics()
 	for i := 0; i < resourceMetrics.Len(); i++ {
 		resourceMetric := resourceMetrics.At(i)
 		resource := resourceMetric.Resource()
 		scopeMetrics := resourceMetric.ScopeMetrics()
 
-		resourceDocs := make(map[string]map[uint32]objmodel.Document)
-
 		for j := 0; j < scopeMetrics.Len(); j++ {
+			var validationErrs []error // log instead of returning these so that upstream does not retry
 			scopeMetrics := scopeMetrics.At(j)
 			scope := scopeMetrics.Scope()
+			groupedDataPointsByIndex := make(map[string]map[uint32][]dataPoint)
Review thread on this line:

felixbarny (Contributor, Author): Note to reviewer: I made it so that documents from different scopes are never merged. This simplified the serialization logic and also fixes a subtle bug in the current implementation where we're only hashing the scope attributes but not the scope name. This leads to grouping potentially different scopes into the same document. I guess, as a consequence, we should also add the scope name as a dimension in the mappings.

Contributor: I think by moving this here, rather than outside of the scopeMetrics loop, we're assuming that there will never be two identical scopes within a resource. Is that a safe assumption?

Contributor: I suppose it's no worse than the existing assumption that resourceMetrics is free of duplicate resources.

felixbarny (Contributor, Author): What makes it safe is that the only consequence of the assumption being wrong is leaving some storage savings on the table. In other words, we should prioritize elastic/elasticsearch#99123, which turns out to be more of an issue than anticipated in various contexts.

axw (Contributor, Jan 10, 2025): Wouldn't duplicate resources/scopes lead to duplicate _tsid & doc rejections? Definitely agree on prioritising that issue though...

felixbarny (Contributor, Author): Yes, it would, until we fix the referenced issue.

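A hypothetical illustration of the hashing gap discussed in this thread, not the exporter's actual hashing code: if only the scope attributes feed the hash, two scopes that differ only by name collide into one group. Folding the name into the hash (or grouping strictly per scope, as this PR now does) avoids the merge. The FNV hash and the scopeHash helper below are assumptions made for the sketch.

package sketch

import (
	"hash/fnv"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// scopeHash hashes the scope name together with its attributes.
// Dropping the Name() write reproduces the bug described above:
// distinct scopes with identical attributes would hash alike.
func scopeHash(scope pcommon.InstrumentationScope) uint32 {
	h := fnv.New32a()
	h.Write([]byte(scope.Name()))
	scope.Attributes().Range(func(k string, v pcommon.Value) bool {
		h.Write([]byte(k))
		h.Write([]byte(v.AsString()))
		return true
	})
	return h.Sum32()
}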
 			for k := 0; k < scopeMetrics.Metrics().Len(); k++ {
 				metric := scopeMetrics.Metrics().At(k)
 
@@ -216,13 +211,17 @@ func (e *elasticsearchExporter) pushMetricsData(
 					if err != nil {
 						return err
 					}
-					if _, ok := resourceDocs[fIndex]; !ok {
-						resourceDocs[fIndex] = make(map[uint32]objmodel.Document)
+					groupedDataPoints, ok := groupedDataPointsByIndex[fIndex]
+					if !ok {
+						groupedDataPoints = make(map[uint32][]dataPoint)
+						groupedDataPointsByIndex[fIndex] = groupedDataPoints
 					}
-
-					if err = e.model.upsertMetricDataPointValue(resourceDocs[fIndex], resource,
-						resourceMetric.SchemaUrl(), scope, scopeMetrics.SchemaUrl(), metric, dp); err != nil {
-						return err
+					dpHash := e.model.hashDataPoint(dp)
+					dataPoints, ok := groupedDataPoints[dpHash]
+					if !ok {
+						groupedDataPoints[dpHash] = []dataPoint{dp}
+					} else {
+						groupedDataPoints[dpHash] = append(dataPoints, dp)
 					}
 					return nil
 				}
@@ -232,7 +231,7 @@ func (e *elasticsearchExporter) pushMetricsData(
 				dps := metric.Sum().DataPoints()
 				for l := 0; l < dps.Len(); l++ {
 					dp := dps.At(l)
-					if err := upsertDataPoint(newNumberDataPoint(dp)); err != nil {
+					if err := upsertDataPoint(newNumberDataPoint(metric, dp)); err != nil {
 						validationErrs = append(validationErrs, err)
 						continue
 					}
@@ -241,7 +240,7 @@ func (e *elasticsearchExporter) pushMetricsData(
 				dps := metric.Gauge().DataPoints()
 				for l := 0; l < dps.Len(); l++ {
 					dp := dps.At(l)
-					if err := upsertDataPoint(newNumberDataPoint(dp)); err != nil {
+					if err := upsertDataPoint(newNumberDataPoint(metric, dp)); err != nil {
 						validationErrs = append(validationErrs, err)
 						continue
 					}
@@ -254,7 +253,7 @@ func (e *elasticsearchExporter) pushMetricsData(
 				dps := metric.ExponentialHistogram().DataPoints()
 				for l := 0; l < dps.Len(); l++ {
 					dp := dps.At(l)
-					if err := upsertDataPoint(newExponentialHistogramDataPoint(dp)); err != nil {
+					if err := upsertDataPoint(newExponentialHistogramDataPoint(metric, dp)); err != nil {
 						validationErrs = append(validationErrs, err)
 						continue
 					}
@@ -267,7 +266,7 @@ func (e *elasticsearchExporter) pushMetricsData(
 				dps := metric.Histogram().DataPoints()
 				for l := 0; l < dps.Len(); l++ {
 					dp := dps.At(l)
-					if err := upsertDataPoint(newHistogramDataPoint(dp)); err != nil {
+					if err := upsertDataPoint(newHistogramDataPoint(metric, dp)); err != nil {
 						validationErrs = append(validationErrs, err)
 						continue
 					}
@@ -276,37 +275,32 @@ func (e *elasticsearchExporter) pushMetricsData(
 				dps := metric.Summary().DataPoints()
 				for l := 0; l < dps.Len(); l++ {
 					dp := dps.At(l)
-					if err := upsertDataPoint(newSummaryDataPoint(dp)); err != nil {
+					if err := upsertDataPoint(newSummaryDataPoint(metric, dp)); err != nil {
 						validationErrs = append(validationErrs, err)
 						continue
 					}
 				}
 			}
 		}
-		}
-
-		if len(validationErrs) > 0 {
-			e.Logger.Warn("validation errors", zap.Error(errors.Join(validationErrs...)))
-		}
-
-		for fIndex, docs := range resourceDocs {
-			for _, doc := range docs {
-				var (
-					docBytes []byte
-					err      error
-				)
-				docBytes, err = e.model.encodeDocument(doc)
-				if err != nil {
-					errs = append(errs, err)
-					continue
-				}
-				if err := session.Add(ctx, fIndex, bytes.NewReader(docBytes), doc.DynamicTemplates()); err != nil {
-					if cerr := ctx.Err(); cerr != nil {
-						return cerr
-					}
-					errs = append(errs, err)
-				}
-			}
-		}
+			for fIndex, groupedDataPoints := range groupedDataPointsByIndex {
+				for _, dataPoints := range groupedDataPoints {
+					docBytes, dynamicTemplates, err := e.model.encodeMetrics(resource, resourceMetric.SchemaUrl(), scope, scopeMetrics.SchemaUrl(), dataPoints, &validationErrs)
+					if err != nil {
+						errs = append(errs, err)
+						continue
+					}
+					if err := session.Add(ctx, fIndex, bytes.NewReader(docBytes), dynamicTemplates); err != nil {
+						if cerr := ctx.Err(); cerr != nil {
+							return cerr
+						}
+						errs = append(errs, err)
+					}
+				}
+			}
+			if len(validationErrs) > 0 {
+				e.Logger.Warn("validation errors", zap.Error(errors.Join(validationErrs...)))
+			}
+		}
 	}
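The loop above is the heart of the change: data points are bucketed by target index and by identity hash, and each bucket is encoded as a single document, so points that share a resource, scope, index, and dimensions land in one document instead of being upserted one at a time through objmodel. A minimal, self-contained sketch of that grouping shape, where dataPoint, indexOf, and hashOf are stand-ins rather than the exporter's real types:

package main

import "fmt"

// dataPoint stands in for the exporter's dataPoint interface.
type dataPoint struct {
	metricName string
	value      float64
}

// groupByIndexAndHash buckets points so that points sharing an index and an
// identity hash end up in the same slice, that is, the same document.
func groupByIndexAndHash(
	points []dataPoint,
	indexOf func(dataPoint) string,
	hashOf func(dataPoint) uint32,
) map[string]map[uint32][]dataPoint {
	grouped := make(map[string]map[uint32][]dataPoint)
	for _, dp := range points {
		idx := indexOf(dp)
		byHash, ok := grouped[idx]
		if !ok {
			byHash = make(map[uint32][]dataPoint)
			grouped[idx] = byHash
		}
		byHash[hashOf(dp)] = append(byHash[hashOf(dp)], dp)
	}
	return grouped
}

func main() {
	points := []dataPoint{{"system.cpu.time", 1}, {"system.memory.usage", 2}}
	grouped := groupByIndexAndHash(points,
		func(dataPoint) string { return "metrics-generic.otel-default" }, // assumed index name
		func(dataPoint) uint32 { return 0 },                              // same dimensions, same doc
	)
	fmt.Println(len(grouped["metrics-generic.otel-default"][0])) // 2: both points share one document
}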

Expand Down Expand Up @@ -440,14 +434,13 @@ func (e *elasticsearchExporter) pushSpanEvent(
}
fIndex = formattedIndex
}

document := e.model.encodeSpanEvent(resource, resourceSchemaURL, span, spanEvent, scope, scopeSchemaURL)
if document == nil {
return nil
}
docBytes, err := e.model.encodeDocument(*document)
docBytes, err := e.model.encodeSpanEvent(resource, resourceSchemaURL, span, spanEvent, scope, scopeSchemaURL)
if err != nil {
return err
}
if docBytes == nil {
return nil
}

return bulkIndexerSession.Add(ctx, fIndex, bytes.NewReader(docBytes), nil)
}
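After this change, encodeSpanEvent returns the serialized bytes directly; a nil slice with a nil error signals that no document should be indexed. A small sketch of that convention, using hypothetical encode and add callbacks rather than the exporter's real signatures:

package sketch

// maybeIndex shows the nil-means-skip pattern: encoding errors propagate,
// a nil document is silently skipped, and only real documents are added.
func maybeIndex(encode func() ([]byte, error), add func([]byte) error) error {
	docBytes, err := encode()
	if err != nil {
		return err
	}
	if docBytes == nil {
		return nil // no document produced for this span event; skip
	}
	return add(docBytes)
}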