Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[processor/spanmetrics] Prune histograms #27083

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions .chloggen/27083-spanmetrics-prune.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix
Copy link
Contributor Author

@nijave nijave Oct 4, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not really sure about this one. Technically this will produce fewer metric series than it previously did (the output of this component is changing), but my understanding was that it was always intended that metric series (histograms) get pruned at the same time as the dimensions cache map.

i.e., it was a bug that these histograms weren't being pruned


# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: processor/spanmetrics

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Prune histograms when dimension cache is pruned.

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [27080]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext: |
The dimension cache was always pruned, but histograms were not. This caused the metric series created
by processor/spanmetrics to grow without bound.

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
4 changes: 4 additions & 0 deletions processor/spanmetricsprocessor/internal/cache/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,10 @@ func (c *Cache[K, V]) Get(key K) (V, bool) {
return val, ok
}

// Contains reports whether key is present in the cache without updating
// its recency (unlike Get, it does not affect the LRU eviction order).
func (c *Cache[K, V]) Contains(key K) bool {
	return c.lru.Contains(key)
}

// Len returns the number of items in the cache.
func (c *Cache[K, V]) Len() int {
return c.lru.Len()
Expand Down
5 changes: 5 additions & 0 deletions processor/spanmetricsprocessor/processor.go
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,11 @@ func (p *processorImp) exportMetrics(ctx context.Context) {
p.metricKeyToDimensions.Purge()
} else {
p.metricKeyToDimensions.RemoveEvictedItems()
for key := range p.histograms {
if !p.metricKeyToDimensions.Contains(key) {
delete(p.histograms, key)
}
}
}

// This component no longer needs to read the metrics once built, so it is safe to unlock.
Expand Down
67 changes: 44 additions & 23 deletions processor/spanmetricsprocessor/processor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ const (
notInSpanAttrName0 = "shouldBeInMetric"
notInSpanAttrName1 = "shouldNotBeInMetric"
regionResourceAttrName = "region"
DimensionsCacheSize = 2
DimensionsCacheSize = 1000

sampleRegion = "us-east-1"
sampleLatency = float64(11)
Expand Down Expand Up @@ -144,7 +144,7 @@ func TestProcessorConcurrentShutdown(t *testing.T) {
ticker := mockClock.NewTicker(time.Nanosecond)

// Test
p := newProcessorImp(mexp, tcon, nil, cumulative, logger, ticker)
p := newProcessorImp(mexp, tcon, nil, cumulative, logger, ticker, DimensionsCacheSize)
err := p.Start(ctx, mhost)
require.NoError(t, err)

Expand Down Expand Up @@ -230,7 +230,7 @@ func TestProcessorConsumeTracesErrors(t *testing.T) {
tcon := &mocks.TracesConsumer{}
tcon.On("ConsumeTraces", mock.Anything, mock.Anything).Return(fakeErr)

p := newProcessorImp(mexp, tcon, nil, cumulative, logger, nil)
p := newProcessorImp(mexp, tcon, nil, cumulative, logger, nil, DimensionsCacheSize)

traces := buildSampleTrace()

Expand Down Expand Up @@ -262,7 +262,7 @@ func TestProcessorConsumeMetricsErrors(t *testing.T) {

mockClock := clock.NewMock(time.Now())
ticker := mockClock.NewTicker(time.Nanosecond)
p := newProcessorImp(mexp, tcon, nil, cumulative, logger, ticker)
p := newProcessorImp(mexp, tcon, nil, cumulative, logger, ticker, DimensionsCacheSize)

exporters := map[component.DataType]map[component.ID]component.Component{}
mhost := &mocks.Host{}
Expand Down Expand Up @@ -362,7 +362,7 @@ func TestProcessorConsumeTraces(t *testing.T) {
mockClock := clock.NewMock(time.Now())
ticker := mockClock.NewTicker(time.Nanosecond)

p := newProcessorImp(mexp, tcon, &defaultNullValue, tc.aggregationTemporality, zaptest.NewLogger(t), ticker)
p := newProcessorImp(mexp, tcon, &defaultNullValue, tc.aggregationTemporality, zaptest.NewLogger(t), ticker, DimensionsCacheSize)

exporters := map[component.DataType]map[component.ID]component.Component{}
mhost := &mocks.Host{}
Expand All @@ -387,39 +387,61 @@ func TestProcessorConsumeTraces(t *testing.T) {
}
}

func TestMetricKeyCache(t *testing.T) {
func TestMetricCache(t *testing.T) {
var wg sync.WaitGroup

mexp := &mocks.MetricsConsumer{}
tcon := &mocks.TracesConsumer{}
mexp.On("ConsumeMetrics", mock.Anything, mock.MatchedBy(func(input pmetric.Metrics) bool {
wg.Done()
return true
})).Return(nil)

mexp.On("ConsumeMetrics", mock.Anything, mock.Anything).Return(nil)
tcon := &mocks.TracesConsumer{}
tcon.On("ConsumeTraces", mock.Anything, mock.Anything).Return(nil)

defaultNullValue := pcommon.NewValueStr("defaultNullValue")
p := newProcessorImp(mexp, tcon, &defaultNullValue, cumulative, zaptest.NewLogger(t), nil)
traces := buildSampleTrace()
mockClock := clock.NewMock(time.Now())
ticker := mockClock.NewTicker(time.Nanosecond)
dimensionsCacheSize := 2

p := newProcessorImp(mexp, tcon, &defaultNullValue, cumulative, zaptest.NewLogger(t), ticker, dimensionsCacheSize)

exporters := map[component.DataType]map[component.ID]component.Component{}
mhost := &mocks.Host{}
mhost.On("GetExporters").Return(exporters)

// Test
ctx := metadata.NewIncomingContext(context.Background(), nil)
err := p.Start(ctx, mhost)
require.NoError(t, err)

// 0 key was cached at beginning
assert.Zero(t, p.metricKeyToDimensions.Len())
assert.Zero(t, len(p.histograms))

traces := buildSampleTrace()
require.Condition(t, func() bool {
return traces.SpanCount() >= dimensionsCacheSize
})

err = p.ConsumeTraces(ctx, traces)
wg.Add(1)
mockClock.Add(time.Nanosecond)
wg.Wait()

err := p.ConsumeTraces(ctx, traces)
// Validate
require.NoError(t, err)
// 2 key was cached, 1 key was evicted and cleaned after the processing
assert.Eventually(t, func() bool {
return p.metricKeyToDimensions.Len() == DimensionsCacheSize
}, 10*time.Second, time.Millisecond*100)
assert.Equal(t, len(p.histograms), dimensionsCacheSize)

// consume another batch of traces
err = p.ConsumeTraces(ctx, traces)
require.NoError(t, err)
wg.Add(1)
mockClock.Add(time.Nanosecond)
wg.Wait()

// 2 key was cached, other keys were evicted and cleaned after the processing
assert.Eventually(t, func() bool {
return p.metricKeyToDimensions.Len() == DimensionsCacheSize
}, 10*time.Second, time.Millisecond*100)
assert.Equal(t, len(p.histograms), dimensionsCacheSize)
}

func BenchmarkProcessorConsumeTraces(b *testing.B) {
Expand All @@ -431,7 +453,7 @@ func BenchmarkProcessorConsumeTraces(b *testing.B) {
tcon.On("ConsumeTraces", mock.Anything, mock.Anything).Return(nil)

defaultNullValue := pcommon.NewValueStr("defaultNullValue")
p := newProcessorImp(mexp, tcon, &defaultNullValue, cumulative, zaptest.NewLogger(b), nil)
p := newProcessorImp(mexp, tcon, &defaultNullValue, cumulative, zaptest.NewLogger(b), nil, DimensionsCacheSize)

traces := buildSampleTrace()

Expand All @@ -442,10 +464,10 @@ func BenchmarkProcessorConsumeTraces(b *testing.B) {
}
}

func newProcessorImp(mexp *mocks.MetricsConsumer, tcon *mocks.TracesConsumer, defaultNullValue *pcommon.Value, temporality string, logger *zap.Logger, ticker *clock.Ticker) *processorImp {
func newProcessorImp(mexp *mocks.MetricsConsumer, tcon *mocks.TracesConsumer, defaultNullValue *pcommon.Value, temporality string, logger *zap.Logger, ticker *clock.Ticker, cacheSize int) *processorImp {
defaultNotInSpanAttrVal := pcommon.NewValueStr("defaultNotInSpanAttrVal")
// use size 2 for LRU cache for testing purpose
metricKeyToDimensions, err := cache.NewCache[metricKey, pcommon.Map](DimensionsCacheSize)
metricKeyToDimensions, err := cache.NewCache[metricKey, pcommon.Map](cacheSize)
if err != nil {
panic(err)
}
Expand Down Expand Up @@ -979,8 +1001,7 @@ func TestConsumeTracesEvictedCacheKey(t *testing.T) {
mockClock := clock.NewMock(time.Now())
ticker := mockClock.NewTicker(time.Nanosecond)

// Note: default dimension key cache size is 2.
p := newProcessorImp(mexp, tcon, &defaultNullValue, cumulative, zaptest.NewLogger(t), ticker)
p := newProcessorImp(mexp, tcon, &defaultNullValue, cumulative, zaptest.NewLogger(t), ticker, DimensionsCacheSize)

exporters := map[component.DataType]map[component.ID]component.Component{}

Expand Down