[processor/spanmetrics] Fix flaky test #18024

Merged: 3 commits, Jan 26, 2023
16 changes: 16 additions & 0 deletions .chloggen/spanmetricsprocessor-fix-flaky-test.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: spanmetricsprocessor

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Fix a flaky test caused by a race condition between WaitGroup completion and observed logs being written and flushed.

# One or more tracking issues related to the change
issues: [18014]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
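
For context, here is a minimal, self-contained sketch of the pattern this fix applies (hypothetical package and test names; zap's log observer and testify are the libraries the real test uses). Instead of reading the observed logs once immediately after wg.Wait(), the assertion polls until the observer reports entries, tolerating any delay between goroutine completion and the logs becoming visible:

package example

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest/observer"
)

// TestObservedLogsEventually is a hypothetical reduction of the fix: logs
// emitted from goroutines may not yet be visible in the observer at the
// instant wg.Wait() returns, so the assertion polls instead of reading once.
func TestObservedLogsEventually(t *testing.T) {
	core, observedLogs := observer.New(zap.InfoLevel)
	logger := zap.New(core)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			logger.Info("shutting down")
		}()
	}
	wg.Wait()

	// The flaky version read observedLogs.All() exactly once here; polling
	// with assert.Eventually absorbs the race between WaitGroup completion
	// and the final log write being flushed to the observer.
	var allLogs []observer.LoggedEntry
	assert.Eventually(t, func() bool {
		allLogs = observedLogs.All()
		return len(allLogs) > 0
	}, time.Second, 10*time.Millisecond)
	assert.NotEmpty(t, allLogs)
}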
29 changes: 18 additions & 11 deletions processor/spanmetricsprocessor/processor_test.go
@@ -174,8 +174,15 @@ func TestProcessorConcurrentShutdown(t *testing.T) {
 		}()
 	}
 	wg.Wait()
-	allLogs := observedLogs.All()
-	require.NotEmpty(t, allLogs)
+
+	// Allow time for log observer to sync all logs emitted.
+	// Even though the WaitGroup has been given the "done" signal, there's still a potential race condition
+	// between the WaitGroup being unblocked and when the logs will be flushed.
+	var allLogs []observer.LoggedEntry
+	assert.Eventually(t, func() bool {
+		allLogs = observedLogs.All()
+		return len(allLogs) > 0
+	}, time.Second, time.Millisecond*10)
 
 	// Starting spanmetricsprocessor...
 	// Started spanmetricsprocessor...
@@ -288,13 +295,13 @@ func TestProcessorConsumeMetricsErrors(t *testing.T) {
 	wg.Wait()
 
 	// Allow time for log observer to sync all logs emitted.
-	// Unfortunately, we can't tell the log observer to wait until all logs have been synced/received.
-	// Core/Logger.Sync() does not appear to achieve the desired behavior of syncing observedLogs with the logger.
-	time.Sleep(time.Millisecond)
-
-	// Verify
-	allLogs := observedLogs.All()
-	require.NotEmpty(t, allLogs)
+	// Even though the WaitGroup has been given the "done" signal, there's still a potential race condition
+	// between the WaitGroup being unblocked and when the logs will be flushed.
+	var allLogs []observer.LoggedEntry
+	assert.Eventually(t, func() bool {
+		allLogs = observedLogs.All()
+		return len(allLogs) > 0
+	}, time.Second, time.Millisecond*10)
 
 	assert.Equal(t, "Failed ConsumeMetrics", allLogs[0].Message)
 }
@@ -412,7 +419,7 @@ func TestMetricKeyCache(t *testing.T) {
 	require.NoError(t, err)
 	// 2 key was cached, 1 key was evicted and cleaned after the processing
 	assert.Eventually(t, func() bool {
-		return assert.Equal(t, DimensionsCacheSize, p.metricKeyToDimensions.Len())
+		return p.metricKeyToDimensions.Len() == DimensionsCacheSize
 	}, 10*time.Second, time.Millisecond*100)
 
 	// consume another batch of traces
@@ -421,7 +428,7 @@ func TestMetricKeyCache(t *testing.T) {
 
 	// 2 key was cached, other keys were evicted and cleaned after the processing
 	assert.Eventually(t, func() bool {
-		return assert.Equal(t, DimensionsCacheSize, p.metricKeyToDimensions.Len())
+		return p.metricKeyToDimensions.Len() == DimensionsCacheSize
 	}, 10*time.Second, time.Millisecond*100)
 }
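
The two TestMetricKeyCache hunks above make a related but distinct change: returning assert.Equal(...) from the polled closure records a failure on t for every poll that has not yet converged, so the test is marked failed even though assert.Eventually keeps retrying. A side-effect-free boolean condition leaves failure handling to the timeout. A minimal sketch of the difference, with a hypothetical asynchronously-filled counter standing in for the processor's key cache:

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestEventuallyPlainCondition is a hypothetical reduction of the change.
func TestEventuallyPlainCondition(t *testing.T) {
	const wantLen = 2
	var cacheLen atomic.Int64

	// Simulate a cache that reaches its target size asynchronously.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cacheLen.Store(wantLen)
	}()

	assert.Eventually(t, func() bool {
		// Not: return assert.Equal(t, wantLen, cacheLen.Load()); that
		// would fail the test on each early poll where the sizes still
		// differ, defeating the retry loop.
		return cacheLen.Load() == wantLen
	}, 10*time.Second, 100*time.Millisecond)
}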
