feat(loadbalancingexporter): optimize metrics and traces export #30141

Merged · 11 commits · Jan 25, 2024
27 changes: 27 additions & 0 deletions .chloggen/mat-rumian-lbe-imp.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: loadbalancingexporter

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: "Optimize metrics and traces export"

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [30141]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
123 changes: 123 additions & 0 deletions exporter/loadbalancingexporter/helpers.go
@@ -0,0 +1,123 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package loadbalancingexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter"

import (
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)

// mergeTraces concatenates two ptrace.Traces into a single ptrace.Traces.
// The contents of both inputs are moved into the result, so the inputs
// should not be reused after the call.
func mergeTraces(t1 ptrace.Traces, t2 ptrace.Traces) ptrace.Traces {
mergedTraces := ptrace.NewTraces()

if t1.SpanCount() == 0 && t2.SpanCount() == 0 {
return mergedTraces
}

// Move the resource spans of the first input into the merged traces
for i := 0; i < t1.ResourceSpans().Len(); i++ {
rs := t1.ResourceSpans().At(i)
newRS := mergedTraces.ResourceSpans().AppendEmpty()

rs.Resource().MoveTo(newRS.Resource())
newRS.SetSchemaUrl(rs.SchemaUrl())

for j := 0; j < rs.ScopeSpans().Len(); j++ {
ils := rs.ScopeSpans().At(j)

newILS := newRS.ScopeSpans().AppendEmpty()
ils.Scope().MoveTo(newILS.Scope())
newILS.SetSchemaUrl(ils.SchemaUrl())

for k := 0; k < ils.Spans().Len(); k++ {
span := ils.Spans().At(k)
newSpan := newILS.Spans().AppendEmpty()
span.MoveTo(newSpan)
}
}
}

// Move the resource spans of the second input into the merged traces
for i := 0; i < t2.ResourceSpans().Len(); i++ {
rs := t2.ResourceSpans().At(i)
newRS := mergedTraces.ResourceSpans().AppendEmpty()

rs.Resource().MoveTo(newRS.Resource())
newRS.SetSchemaUrl(rs.SchemaUrl())

for j := 0; j < rs.ScopeSpans().Len(); j++ {
ils := rs.ScopeSpans().At(j)

newILS := newRS.ScopeSpans().AppendEmpty()
ils.Scope().MoveTo(newILS.Scope())
newILS.SetSchemaUrl(ils.SchemaUrl())

for k := 0; k < ils.Spans().Len(); k++ {
span := ils.Spans().At(k)
newSpan := newILS.Spans().AppendEmpty()
span.MoveTo(newSpan)
}
}
}

return mergedTraces
}

// mergeMetrics concatenates two pmetric.Metrics into a single pmetric.Metrics.
// The contents of both inputs are moved into the result, so the inputs
// should not be reused after the call.
func mergeMetrics(m1 pmetric.Metrics, m2 pmetric.Metrics) pmetric.Metrics {
mergedMetrics := pmetric.NewMetrics()

if m1.MetricCount() == 0 && m2.MetricCount() == 0 {
return mergedMetrics
}

// Move the resource metrics of the first input into the merged metrics
for i := 0; i < m1.ResourceMetrics().Len(); i++ {
rs := m1.ResourceMetrics().At(i)
newRS := mergedMetrics.ResourceMetrics().AppendEmpty()

rs.Resource().MoveTo(newRS.Resource())
newRS.SetSchemaUrl(rs.SchemaUrl())

for j := 0; j < rs.ScopeMetrics().Len(); j++ {
ils := rs.ScopeMetrics().At(j)

newILS := newRS.ScopeMetrics().AppendEmpty()
ils.Scope().MoveTo(newILS.Scope())
newILS.SetSchemaUrl(ils.SchemaUrl())

for k := 0; k < ils.Metrics().Len(); k++ {
metric := ils.Metrics().At(k)
newMetric := newILS.Metrics().AppendEmpty()
metric.MoveTo(newMetric)
}
}
}

// Move the resource metrics of the second input into the merged metrics
for i := 0; i < m2.ResourceMetrics().Len(); i++ {
rs := m2.ResourceMetrics().At(i)
newRS := mergedMetrics.ResourceMetrics().AppendEmpty()

rs.Resource().MoveTo(newRS.Resource())
newRS.SetSchemaUrl(rs.SchemaUrl())

for j := 0; j < rs.ScopeMetrics().Len(); j++ {
ils := rs.ScopeMetrics().At(j)

newILS := newRS.ScopeMetrics().AppendEmpty()
ils.Scope().MoveTo(newILS.Scope())
newILS.SetSchemaUrl(ils.SchemaUrl())

for k := 0; k < ils.Metrics().Len(); k++ {
metric := ils.Metrics().At(k)
newMetric := newILS.Metrics().AppendEmpty()
metric.MoveTo(newMetric)
}
}
}

return mergedMetrics
}
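
Both helpers follow the same move-based pattern, so the inputs should be treated as consumed after the call. Below is a minimal usage sketch, written as if it lived inside the loadbalancingexporter package; buildTrace and exampleMerge are hypothetical illustrations, not part of this PR. mergeMetrics is used the same way with pmetric.Metrics values.

package loadbalancingexporter

import (
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// buildTrace is a hypothetical helper that creates a single-span trace for
// the given service name; it is only here to make the sketch self-contained.
func buildTrace(service string) ptrace.Traces {
	td := ptrace.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	rs.Resource().Attributes().PutStr("service.name", service)
	rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetName("operation")
	return td
}

// exampleMerge shows the intended call pattern: mergeTraces moves the
// contents of both inputs into a fresh ptrace.Traces, so batch1 and batch2
// should not be reused afterwards.
func exampleMerge() ptrace.Traces {
	batch1 := buildTrace("service-a")
	batch2 := buildTrace("service-b")

	merged := mergeTraces(batch1, batch2) // merged.ResourceSpans().Len() == 2
	return merged
}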
121 changes: 121 additions & 0 deletions exporter/loadbalancingexporter/helpers_test.go
@@ -0,0 +1,121 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package loadbalancingexporter

import (
"testing"

"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
)

func TestMergeTracesTwoEmpty(t *testing.T) {
expectedEmpty := ptrace.NewTraces()
trace1 := ptrace.NewTraces()
trace2 := ptrace.NewTraces()

mergedTraces := mergeTraces(trace1, trace2)

require.Equal(t, expectedEmpty, mergedTraces)
}

func TestMergeTracesSingleEmpty(t *testing.T) {
expectedTraces := simpleTraces()

trace1 := ptrace.NewTraces()
trace2 := simpleTraces()

mergedTraces := mergeTraces(trace1, trace2)

require.Equal(t, expectedTraces, mergedTraces)
}

func TestMergeTraces(t *testing.T) {
expectedTraces := ptrace.NewTraces()
expectedTraces.ResourceSpans().EnsureCapacity(3)
aspans := expectedTraces.ResourceSpans().AppendEmpty()
aspans.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-1")
aspans.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID([16]byte{1, 2, 3, 4})
bspans := expectedTraces.ResourceSpans().AppendEmpty()
bspans.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-2")
bspans.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID([16]byte{1, 2, 3, 2})
cspans := expectedTraces.ResourceSpans().AppendEmpty()
cspans.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-3")
cspans.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID([16]byte{1, 2, 3, 3})

trace1 := ptrace.NewTraces()
trace1.ResourceSpans().EnsureCapacity(2)
t1aspans := trace1.ResourceSpans().AppendEmpty()
t1aspans.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-1")
t1aspans.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID([16]byte{1, 2, 3, 4})
t1bspans := trace1.ResourceSpans().AppendEmpty()
t1bspans.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-2")
t1bspans.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID([16]byte{1, 2, 3, 2})

trace2 := ptrace.NewTraces()
trace2.ResourceSpans().EnsureCapacity(1)
t2cspans := trace2.ResourceSpans().AppendEmpty()
t2cspans.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-3")
t2cspans.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID([16]byte{1, 2, 3, 3})

mergedTraces := mergeTraces(trace1, trace2)

require.Equal(t, expectedTraces, mergedTraces)
}

func TestMergeMetricsTwoEmpty(t *testing.T) {
expectedEmpty := pmetric.NewMetrics()
metric1 := pmetric.NewMetrics()
metric2 := pmetric.NewMetrics()

mergedMetrics := mergeMetrics(metric1, metric2)

require.Equal(t, expectedEmpty, mergedMetrics)
}

func TestMergeMetricsSingleEmpty(t *testing.T) {
expectedMetrics := simpleMetricsWithResource()

metric1 := pmetric.NewMetrics()
metric2 := simpleMetricsWithResource()

mergedMetrics := mergeMetrics(metric1, metric2)

require.Equal(t, expectedMetrics, mergedMetrics)
}

func TestMergeMetrics(t *testing.T) {
expectedMetrics := pmetric.NewMetrics()
expectedMetrics.ResourceMetrics().EnsureCapacity(3)
ametrics := expectedMetrics.ResourceMetrics().AppendEmpty()
ametrics.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-1")
ametrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("m1")
bmetrics := expectedMetrics.ResourceMetrics().AppendEmpty()
bmetrics.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-2")
bmetrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("m1")
cmetrics := expectedMetrics.ResourceMetrics().AppendEmpty()
cmetrics.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-3")
cmetrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("m2")

metric1 := pmetric.NewMetrics()
metric1.ResourceMetrics().EnsureCapacity(2)
m1ametrics := metric1.ResourceMetrics().AppendEmpty()
m1ametrics.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-1")
m1ametrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("m1")
m1bmetrics := metric1.ResourceMetrics().AppendEmpty()
m1bmetrics.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-2")
m1bmetrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("m1")

metric2 := pmetric.NewMetrics()
metric2.ResourceMetrics().EnsureCapacity(1)
m2cmetrics := metric2.ResourceMetrics().AppendEmpty()
m2cmetrics.Resource().Attributes().PutStr(conventions.AttributeServiceName, "service-name-3")
m2cmetrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("m2")

mergedMetrics := mergeMetrics(metric1, metric2)

require.Equal(t, expectedMetrics, mergedMetrics)
}
90 changes: 59 additions & 31 deletions exporter/loadbalancingexporter/metrics_exporter.go
@@ -28,6 +28,9 @@ import (

var _ exporter.Metrics = (*metricExporterImp)(nil)

type exporterMetrics map[component.Component]map[string]pmetric.Metrics
type endpointMetrics map[string]pmetric.Metrics

type metricExporterImp struct {
loadBalancer loadBalancer
routingKey routingKey
@@ -80,46 +83,71 @@ func (e *metricExporterImp) Shutdown(context.Context) error {

func (e *metricExporterImp) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
var errs error
var exp component.Component

batches := batchpersignal.SplitMetrics(md)
for _, batch := range batches {
errs = multierr.Append(errs, e.consumeMetric(ctx, batch))
}

return errs
}
exporterSegregatedMetrics := make(exporterMetrics)
endpointSegregatedMetrics := make(endpointMetrics)

func (e *metricExporterImp) consumeMetric(ctx context.Context, md pmetric.Metrics) error {
var exp component.Component
routingIds, err := routingIdentifiersFromMetrics(md, e.routingKey)
if err != nil {
return err
}
for rid := range routingIds {
endpoint := e.loadBalancer.Endpoint([]byte(rid))
exp, err = e.loadBalancer.Exporter(endpoint)
for _, batch := range batches {
routingIds, err := routingIdentifiersFromMetrics(batch, e.routingKey)
if err != nil {
return err
}

te, ok := exp.(exporter.Metrics)
if !ok {
return fmt.Errorf("unable to export metrics, unexpected exporter type: expected exporter.Metrics but got %T", exp)
for rid := range routingIds {
endpoint := e.loadBalancer.Endpoint([]byte(rid))
exp, err = e.loadBalancer.Exporter(endpoint)
if err != nil {
return err
}
_, ok := exp.(exporter.Metrics)
if !ok {
return fmt.Errorf("unable to export metrics, unexpected exporter type: expected exporter.Metrics but got %T", exp)
}

_, ok = endpointSegregatedMetrics[endpoint]
if !ok {
endpointSegregatedMetrics[endpoint] = pmetric.NewMetrics()
}
endpointSegregatedMetrics[endpoint] = mergeMetrics(endpointSegregatedMetrics[endpoint], batch)

_, ok = exporterSegregatedMetrics[exp]
if !ok {
exporterSegregatedMetrics[exp] = endpointMetrics{}
}
exporterSegregatedMetrics[exp][endpoint] = endpointSegregatedMetrics[endpoint]
}
}

errs = multierr.Append(errs, e.consumeMetric(ctx, exporterSegregatedMetrics))

return errs
}

start := time.Now()
err = te.ConsumeMetrics(ctx, md)
duration := time.Since(start)

if err == nil {
_ = stats.RecordWithTags(
ctx,
[]tag.Mutator{tag.Upsert(endpointTagKey, endpoint), successTrueMutator},
mBackendLatency.M(duration.Milliseconds()))
} else {
_ = stats.RecordWithTags(
ctx,
[]tag.Mutator{tag.Upsert(endpointTagKey, endpoint), successFalseMutator},
mBackendLatency.M(duration.Milliseconds()))
func (e *metricExporterImp) consumeMetric(ctx context.Context, exporterSegregatedMetrics exporterMetrics) error {
var err error

for exp, endpointMetrics := range exporterSegregatedMetrics {
for endpoint, md := range endpointMetrics {
te, _ := exp.(exporter.Metrics)

start := time.Now()
err = te.ConsumeMetrics(ctx, md)
duration := time.Since(start)

if err == nil {
_ = stats.RecordWithTags(
ctx,
[]tag.Mutator{tag.Upsert(endpointTagKey, endpoint), successTrueMutator},
mBackendLatency.M(duration.Milliseconds()))
} else {
_ = stats.RecordWithTags(
ctx,
[]tag.Mutator{tag.Upsert(endpointTagKey, endpoint), successFalseMutator},
mBackendLatency.M(duration.Milliseconds()))
}
}
}
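
In summary, the restructured ConsumeMetrics no longer exports each split batch individually: batches are routed, merged per endpoint with mergeMetrics, grouped per exporter, and only then handed to consumeMetric, which performs one ConsumeMetrics call per exporter/endpoint pair. The following stand-alone sketch shows the grouping step with the load balancer lookup replaced by a hypothetical routeFunc; it illustrates the pattern and is not the PR's exact code.

package loadbalancingexporter

import (
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// groupByEndpoint illustrates the batching pattern introduced in this PR:
// batches that resolve to the same endpoint are merged before export, so each
// backend receives a single payload instead of one call per split batch.
// routeFunc stands in for the load balancer's endpoint lookup.
func groupByEndpoint(batches []pmetric.Metrics, routeFunc func(pmetric.Metrics) string) map[string]pmetric.Metrics {
	grouped := make(map[string]pmetric.Metrics)

	for _, batch := range batches {
		endpoint := routeFunc(batch)

		if _, ok := grouped[endpoint]; !ok {
			grouped[endpoint] = pmetric.NewMetrics()
		}
		// mergeMetrics (added in helpers.go above) moves the batch's resource
		// metrics into the payload accumulated for this endpoint.
		grouped[endpoint] = mergeMetrics(grouped[endpoint], batch)
	}

	return grouped
}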
