[connector/servicegraph] Fix incorrectly reversed latency settings for the server and client (open-telemetry#34562)

**Description:** A mirror bug-fix PR of open-telemetry#32232, opened because the author of the original PR has not been active for a long time. The client-latency collector was reading the server-side duration accumulators and vice versa; this change swaps them back so each metric is built from its own state (a short sketch of the intended routing follows the connector.go diff below).

**Link to tracking Issue:** <Issue number if applicable>

**Testing:** <Describe what testing was performed and which tests were added.>

**Documentation:** <Describe the documentation added.>
Frapschen authored Aug 27, 2024
1 parent ee5d327 commit 1a432b9
Showing 5 changed files with 112 additions and 82 deletions.
27 changes: 27 additions & 0 deletions .chloggen/fix-wrong-latency.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: servicegraphconnector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Fix incorrectly reversed latency settings for the server and client

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [34562]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
12 changes: 6 additions & 6 deletions connector/servicegraphconnector/connector.go
@@ -545,9 +545,9 @@ func (p *serviceGraphConnector) collectClientLatencyMetrics(ilm pmetric.ScopeMet
dpDuration.SetStartTimestamp(pcommon.NewTimestampFromTime(p.startTime))
dpDuration.SetTimestamp(timestamp)
dpDuration.ExplicitBounds().FromRaw(p.reqDurationBounds)
dpDuration.BucketCounts().FromRaw(p.reqServerDurationSecondsBucketCounts[key])
dpDuration.SetCount(p.reqServerDurationSecondsCount[key])
dpDuration.SetSum(p.reqServerDurationSecondsSum[key])
dpDuration.BucketCounts().FromRaw(p.reqClientDurationSecondsBucketCounts[key])
dpDuration.SetCount(p.reqClientDurationSecondsCount[key])
dpDuration.SetSum(p.reqClientDurationSecondsSum[key])

// TODO: Support exemplars
dimensions, ok := p.dimensionsForSeries(key)
@@ -575,9 +575,9 @@ func (p *serviceGraphConnector) collectServerLatencyMetrics(ilm pmetric.ScopeMet
dpDuration.SetStartTimestamp(pcommon.NewTimestampFromTime(p.startTime))
dpDuration.SetTimestamp(timestamp)
dpDuration.ExplicitBounds().FromRaw(p.reqDurationBounds)
dpDuration.BucketCounts().FromRaw(p.reqClientDurationSecondsBucketCounts[key])
dpDuration.SetCount(p.reqClientDurationSecondsCount[key])
dpDuration.SetSum(p.reqClientDurationSecondsSum[key])
dpDuration.BucketCounts().FromRaw(p.reqServerDurationSecondsBucketCounts[key])
dpDuration.SetCount(p.reqServerDurationSecondsCount[key])
dpDuration.SetSum(p.reqServerDurationSecondsSum[key])

// TODO: Support exemplars
dimensions, ok := p.dimensionsForSeries(key)
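For context, here is a minimal standalone sketch of the behavior the fix restores: the client-latency metric must be collected from the client-side duration state and the server-latency metric from the server-side state. The `edgeDurations` type and the map and function names below are illustrative stand-ins, not the connector's actual fields.

```go
package main

import (
	"fmt"
	"time"
)

// edgeDurations is a simplified stand-in for the per-edge accumulators the
// connector keeps separately for client and server spans (count and sum of
// seconds, keyed by the metric series key).
type edgeDurations struct {
	count map[string]uint64
	sum   map[string]float64
}

func newEdgeDurations() *edgeDurations {
	return &edgeDurations{count: map[string]uint64{}, sum: map[string]float64{}}
}

func (e *edgeDurations) record(key string, d time.Duration) {
	e.count[key]++
	e.sum[key] += d.Seconds()
}

// collectLatency emits one (count, sum) pair for a series. The point of the
// fix is that the client metric must be collected from the client-side state
// and the server metric from the server-side state, not the other way around.
func collectLatency(state *edgeDurations, key string) (uint64, float64) {
	return state.count[key], state.sum[key]
}

func main() {
	clientState, serverState := newEdgeDurations(), newEdgeDurations()
	clientState.record("foo->bar", 1*time.Second) // client span lasted 1s
	serverState.record("foo->bar", 2*time.Second) // server span lasted 2s

	clientCount, clientSum := collectLatency(clientState, "foo->bar")
	serverCount, serverSum := collectLatency(serverState, "foo->bar")
	fmt.Println(clientCount, clientSum) // 1 1
	fmt.Println(serverCount, serverSum) // 1 2
}
```

With the previously reversed reads, the two printed sums would come out swapped, which is exactly what the updated test below now guards against.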
23 changes: 13 additions & 10 deletions connector/servicegraphconnector/connector_test.go
@@ -210,10 +210,10 @@ func getGoldenTraces(t *testing.T, file string) ptrace.Traces {
}

func verifyHappyCaseMetrics(t *testing.T, md pmetric.Metrics) {
verifyHappyCaseMetricsWithDuration(1)(t, md)
verifyHappyCaseMetricsWithDuration()(t, md)
}

func verifyHappyCaseMetricsWithDuration(durationSum float64) func(t *testing.T, md pmetric.Metrics) {
func verifyHappyCaseMetricsWithDuration() func(t *testing.T, md pmetric.Metrics) {
return func(t *testing.T, md pmetric.Metrics) {
assert.Equal(t, 3, md.MetricCount())

@@ -231,11 +231,11 @@ func verifyHappyCaseMetricsWithDuration(durationSum float64) func(t *testing.T,

mServerDuration := ms.At(1)
assert.Equal(t, "traces_service_graph_request_server_seconds", mServerDuration.Name())
verifyDuration(t, mServerDuration, durationSum)
verifyDuration(t, mServerDuration, 2, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0})

mClientDuration := ms.At(2)
assert.Equal(t, "traces_service_graph_request_client_seconds", mClientDuration.Name())
verifyDuration(t, mClientDuration, durationSum)
verifyDuration(t, mClientDuration, 1, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0})
}
}

@@ -259,16 +259,16 @@ func verifyCount(t *testing.T, m pmetric.Metric) {
verifyAttr(t, attributes, "client_some-attribute", "val")
}

func verifyDuration(t *testing.T, m pmetric.Metric, durationSum float64) {
func verifyDuration(t *testing.T, m pmetric.Metric, durationSum float64, bs []uint64) {
assert.Equal(t, pmetric.MetricTypeHistogram, m.Type())
dps := m.Histogram().DataPoints()
assert.Equal(t, 1, dps.Len())

dp := dps.At(0)
assert.Equal(t, durationSum, dp.Sum()) // Duration: 1sec
assert.Equal(t, durationSum, dp.Sum()) // Duration: client is 1sec, server is 2sec
assert.Equal(t, uint64(1), dp.Count())
buckets := pcommon.NewUInt64Slice()
buckets.FromRaw([]uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0})
buckets.FromRaw(bs)
assert.Equal(t, buckets, dp.BucketCounts())

attributes := dp.Attributes()
@@ -287,7 +287,7 @@ func verifyAttr(t *testing.T, attrs pcommon.Map, k, expected string) {

func buildSampleTrace(t *testing.T, attrValue string) ptrace.Traces {
tStart := time.Date(2022, 1, 2, 3, 4, 5, 6, time.UTC)
tEnd := time.Date(2022, 1, 2, 3, 4, 6, 6, time.UTC)
// client: 1s
cEnd := time.Date(2022, 1, 2, 3, 4, 6, 6, time.UTC)
// server: 2s
sEnd := time.Date(2022, 1, 2, 3, 4, 7, 6, time.UTC)

traces := ptrace.NewTraces()

@@ -312,7 +315,7 @@ func buildSampleTrace(t *testing.T, attrValue string) ptrace.Traces {
clientSpan.SetTraceID(traceID)
clientSpan.SetKind(ptrace.SpanKindClient)
clientSpan.SetStartTimestamp(pcommon.NewTimestampFromTime(tStart))
clientSpan.SetEndTimestamp(pcommon.NewTimestampFromTime(tEnd))
clientSpan.SetEndTimestamp(pcommon.NewTimestampFromTime(cEnd))
clientSpan.Attributes().PutStr("some-attribute", attrValue) // Attribute selected as dimension for metrics
serverSpan := scopeSpans.Spans().AppendEmpty()
serverSpan.SetName("server span")
@@ -321,7 +324,7 @@ func buildSampleTrace(t *testing.T, attrValue string) ptrace.Traces {
serverSpan.SetParentSpanID(clientSpanID)
serverSpan.SetKind(ptrace.SpanKindServer)
serverSpan.SetStartTimestamp(pcommon.NewTimestampFromTime(tStart))
serverSpan.SetEndTimestamp(pcommon.NewTimestampFromTime(tEnd))
serverSpan.SetEndTimestamp(pcommon.NewTimestampFromTime(sEnd))

return traces
}
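The test timestamps above are what make a reversal detectable: the client span now lasts 1 s and the server span 2 s, so swapped state shows up as swapped sums and bucket placements. A tiny worked check of those durations, reusing the same `time.Date` values as `buildSampleTrace`:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Same timestamps as buildSampleTrace in connector_test.go.
	tStart := time.Date(2022, 1, 2, 3, 4, 5, 6, time.UTC)
	cEnd := time.Date(2022, 1, 2, 3, 4, 6, 6, time.UTC) // client span ends 1s later
	sEnd := time.Date(2022, 1, 2, 3, 4, 7, 6, time.UTC) // server span ends 2s later

	fmt.Println(cEnd.Sub(tStart).Seconds()) // 1 -> expected client latency sum
	fmt.Println(sEnd.Sub(tStart).Seconds()) // 2 -> expected server latency sum
}
```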
@@ -32,74 +32,74 @@ resourceMetrics:
- histogram:
aggregationTemporality: 2
dataPoints:
- attributes:
- key: client
value:
stringValue: user
- key: connection_type
value:
stringValue: virtual_node
- key: failed
value:
boolValue: false
- key: server
value:
stringValue: bar-requester
- key: server_peer.service
value:
stringValue: external-platform
- key: virtual_node
value:
stringValue: client
bucketCounts:
- "1"
- "0"
- "0"
- "0"
count: "1"
explicitBounds:
- 0.1
- 1
- 10
startTimeUnixNano: "1000000"
sum: 0.000000
timeUnixNano: "2000000"
- attributes:
- key: client
value:
stringValue: user
- key: connection_type
value:
stringValue: virtual_node
- key: failed
value:
boolValue: false
- key: server
value:
stringValue: bar-requester
- key: server_peer.service
value:
stringValue: external-platform
- key: virtual_node
value:
stringValue: client
bucketCounts:
- "1"
- "0"
- "0"
- "0"
count: "1"
explicitBounds:
- 0.1
- 1
- 10
startTimeUnixNano: "1000000"
sum: 1e-06
timeUnixNano: "2000000"
name: traces_service_graph_request_server_seconds
- histogram:
aggregationTemporality: 2
dataPoints:
- attributes:
- key: client
value:
stringValue: user
- key: connection_type
value:
stringValue: virtual_node
- key: failed
value:
boolValue: false
- key: server
value:
stringValue: bar-requester
- key: server_peer.service
value:
stringValue: external-platform
- key: virtual_node
value:
stringValue: client
bucketCounts:
- "1"
- "0"
- "0"
- "0"
count: "1"
explicitBounds:
- 0.1
- 1
- 10
startTimeUnixNano: "1000000"
sum: 0.000001
timeUnixNano: "2000000"
- attributes:
- key: client
value:
stringValue: user
- key: connection_type
value:
stringValue: virtual_node
- key: failed
value:
boolValue: false
- key: server
value:
stringValue: bar-requester
- key: server_peer.service
value:
stringValue: external-platform
- key: virtual_node
value:
stringValue: client
bucketCounts:
- "1"
- "0"
- "0"
- "0"
count: "1"
explicitBounds:
- 0.1
- 1
- 10
startTimeUnixNano: "1000000"
sum: 0
timeUnixNano: "2000000"
name: traces_service_graph_request_client_seconds
scope:
name: traces_service_graph
@@ -56,7 +56,7 @@ resourceMetrics:
- 1
- 10
startTimeUnixNano: "1000000"
sum: 0.000001
sum: 0
timeUnixNano: "2000000"
name: traces_service_graph_request_server_seconds
- histogram:
@@ -89,7 +89,7 @@ resourceMetrics:
- 1
- 10
startTimeUnixNano: "1000000"
sum: 0.000000
sum: 1e-06
timeUnixNano: "2000000"
name: traces_service_graph_request_client_seconds
scope:

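The expected-metrics YAML files above are golden files. Below is a hedged sketch of how produced metrics could be compared against one of them, assuming the repository's `pkg/golden` and `pmetrictest` helpers with their usual signatures; the helper name, file argument, and chosen ignore options are illustrative.

```go
package servicegraphconnector

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
)

// compareWithGolden sketches how metrics produced by the connector could be
// checked against one of the expected-metrics YAML files touched by this
// change. Start and point timestamps are ignored so the comparison does not
// depend on wall-clock values.
func compareWithGolden(t *testing.T, actual pmetric.Metrics, file string) {
	expected, err := golden.ReadMetrics(filepath.Join("testdata", file))
	require.NoError(t, err)
	require.NoError(t, pmetrictest.CompareMetrics(expected, actual,
		pmetrictest.IgnoreStartTimestamp(),
		pmetrictest.IgnoreTimestamp(),
	))
}
```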