all: remove OtlpProtoSize in favor of Sizer interface (#3818)
* all: remove OtlpProtoSize in favor of Sizer interface

* add {Metrics|Traces|Logs}Sizer, fix test commenting

* move size tests to pb_test, fix metrics+logs parameter names

* uncommented batch_processor tests + adjusted them to use *Sizer

* I forgot BenchmarkTraceSizeBytes :(

* Add docs for NewProtobuf*Sizer + update docs for NewProtobuf*Marshaler

* cast *Marshaler to *Sizer for now

* add casts to batch_processor_test
kirbyquerby authored Aug 17, 2021
1 parent 9cafb5d commit aa450a0
Showing 9 changed files with 118 additions and 68 deletions.
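
For orientation, here is a minimal sketch (not part of the diff) of what the change means for callers: code that previously called td.OtlpProtoSize() now obtains a sizer by casting the protobuf marshaler, the interim approach noted by the TODO in pb_marshaler.go (#3842).

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/otlp"
	"go.opentelemetry.io/collector/model/pdata"
)

func main() {
	td := pdata.NewTraces()
	td.ResourceSpans().AppendEmpty().InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty().SetName("foo")

	// Before this commit: size := td.OtlpProtoSize()
	// After this commit: cast the marshaler to the sizer interface (interim API, see TODO #3842).
	sizer := otlp.NewProtobufTracesMarshaler().(pdata.TracesSizer)
	fmt.Println(sizer.TracesSize(td)) // size in bytes of the OTLP protobuf encoding
}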
23 changes: 20 additions & 3 deletions model/otlp/pb_marshaler.go
@@ -19,27 +19,32 @@ import (
"go.opentelemetry.io/collector/model/pdata"
)

// NewProtobufTracesMarshaler returns a model.TracesMarshaler. Marshals to OTLP binary protobuf bytes.
// NewProtobufTracesMarshaler returns a pdata.TracesMarshaler. Marshals to OTLP binary protobuf bytes.
func NewProtobufTracesMarshaler() pdata.TracesMarshaler {
return newPbMarshaler()
}

// NewProtobufMetricsMarshaler returns a model.MetricsMarshaler. Marshals to OTLP binary protobuf bytes.
// NewProtobufMetricsMarshaler returns a pdata.MetricsMarshaler. Marshals to OTLP binary protobuf bytes.
func NewProtobufMetricsMarshaler() pdata.MetricsMarshaler {
return newPbMarshaler()
}

// NewProtobufLogsMarshaler returns a model.LogsMarshaler. Marshals to OTLP binary protobuf bytes.
// NewProtobufLogsMarshaler returns a pdata.LogsMarshaler. Marshals to OTLP binary protobuf bytes.
func NewProtobufLogsMarshaler() pdata.LogsMarshaler {
return newPbMarshaler()
}

// TODO(#3842): Figure out how we want to represent/return *Sizers.
type pbMarshaler struct{}

func newPbMarshaler() *pbMarshaler {
return &pbMarshaler{}
}

var _ pdata.TracesSizer = (*pbMarshaler)(nil)
var _ pdata.MetricsSizer = (*pbMarshaler)(nil)
var _ pdata.LogsSizer = (*pbMarshaler)(nil)

func (e *pbMarshaler) MarshalLogs(ld pdata.Logs) ([]byte, error) {
return internal.LogsToOtlp(ld.InternalRep()).Marshal()
}
@@ -51,3 +56,15 @@ func (e *pbMarshaler) MarshalMetrics(md pdata.Metrics) ([]byte, error) {
func (e *pbMarshaler) MarshalTraces(td pdata.Traces) ([]byte, error) {
return internal.TracesToOtlp(td.InternalRep()).Marshal()
}

func (e *pbMarshaler) TracesSize(td pdata.Traces) int {
return internal.TracesToOtlp(td.InternalRep()).Size()
}

func (e *pbMarshaler) MetricsSize(md pdata.Metrics) int {
return internal.MetricsToOtlp(md.InternalRep()).Size()
}

func (e *pbMarshaler) LogsSize(ld pdata.Logs) int {
return internal.LogsToOtlp(ld.InternalRep()).Size()
}
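
Because the same pbMarshaler value backs both the marshaler and sizer interfaces (see the var _ assertions above), a caller holding only a marshaler can recover the sizer with a type assertion; a hedged sketch:

// Sketch: recovering the sizer behind a LogsMarshaler via a comma-ok type assertion.
m := otlp.NewProtobufLogsMarshaler()
ld := pdata.NewLogs()

if sizer, ok := m.(pdata.LogsSizer); ok {
	// Reports the OTLP protobuf encoding size without producing the bytes.
	_ = sizer.LogsSize(ld)
}

buf, err := m.MarshalLogs(ld)
if err == nil {
	_ = len(buf) // matches LogsSize for the same Logs, as the tests below verify
}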
59 changes: 59 additions & 0 deletions model/otlp/pb_test.go
@@ -42,6 +42,65 @@ func TestProtobufTracesUnmarshaler_error(t *testing.T) {
assert.Error(t, err)
}

func TestProtobufTracesSizer(t *testing.T) {
sizer := NewProtobufTracesMarshaler().(pdata.TracesSizer)
marshaler := NewProtobufTracesMarshaler()
td := pdata.NewTraces()
rms := td.ResourceSpans()
rms.AppendEmpty().InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty().SetName("foo")

size := sizer.TracesSize(td)

bytes, err := marshaler.MarshalTraces(td)
require.NoError(t, err)
assert.Equal(t, len(bytes), size)
}

func TestProtobufTracesSizer_withNil(t *testing.T) {
sizer := NewProtobufTracesMarshaler().(pdata.TracesSizer)

assert.Equal(t, 0, sizer.TracesSize(pdata.NewTraces()))
}

func TestProtobufMetricsSizer(t *testing.T) {
sizer := NewProtobufMetricsMarshaler().(pdata.MetricsSizer)
marshaler := NewProtobufMetricsMarshaler()
md := pdata.NewMetrics()
md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("foo")

size := sizer.MetricsSize(md)

bytes, err := marshaler.MarshalMetrics(md)
require.NoError(t, err)
assert.Equal(t, len(bytes), size)
}

func TestProtobufMetricsSizer_withNil(t *testing.T) {
sizer := NewProtobufMetricsMarshaler().(pdata.MetricsSizer)

assert.Equal(t, 0, sizer.MetricsSize(pdata.NewMetrics()))
}

func TestProtobufLogsSizer(t *testing.T) {
sizer := NewProtobufLogsMarshaler().(pdata.LogsSizer)
marshaler := NewProtobufLogsMarshaler()
ld := pdata.NewLogs()
ld.ResourceLogs().AppendEmpty().InstrumentationLibraryLogs().AppendEmpty().Logs().AppendEmpty().SetName("foo")

size := sizer.LogsSize(ld)

bytes, err := marshaler.MarshalLogs(ld)
require.NoError(t, err)
assert.Equal(t, len(bytes), size)
}

func TestProtobufLogsSizer_withNil(t *testing.T) {
sizer := NewProtobufLogsMarshaler().(pdata.LogsSizer)

assert.Equal(t, 0, sizer.LogsSize(pdata.NewLogs()))
}

func BenchmarkLogsToProtobuf(b *testing.B) {
marshaler := NewProtobufLogsMarshaler()
logs := generateBenchmarkLogs(128)
14 changes: 7 additions & 7 deletions model/pdata/logs.go
@@ -24,7 +24,7 @@ import (
type LogsMarshaler interface {
// MarshalLogs marshals the given pdata.Logs into bytes.
// If the error is not nil, the returned bytes slice cannot be used.
MarshalLogs(td Logs) ([]byte, error)
MarshalLogs(ld Logs) ([]byte, error)
}

// LogsUnmarshaler unmarshalls bytes into pdata.Logs.
@@ -34,6 +34,12 @@ type LogsUnmarshaler interface {
UnmarshalLogs(buf []byte) (Logs, error)
}

// LogsSizer returns the size of a Logs.
type LogsSizer interface {
// LogsSize returns the size in bytes of a Logs.
LogsSize(ld Logs) int
}

// Logs is the top-level struct that is propagated through the logs pipeline.
//
// This is a reference type (like builtin map).
@@ -85,12 +91,6 @@ func (ld Logs) LogRecordCount() int {
return logCount
}

// OtlpProtoSize returns the size in bytes of this Logs encoded as OTLP Collector
// ExportLogsServiceRequest ProtoBuf bytes.
func (ld Logs) OtlpProtoSize() int {
return ld.orig.Size()
}

// ResourceLogs returns the ResourceLogsSlice associated with this Logs.
func (ld Logs) ResourceLogs() ResourceLogsSlice {
return newResourceLogsSlice(&ld.orig.ResourceLogs)
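
The new pdata sizer interfaces are single-method, so alternative implementations are easy to supply; a hypothetical sketch (the type name is illustrative, not from this PR) of a LogsSizer that derives the size by marshaling:

// Hypothetical example: any type with a LogsSize method satisfies pdata.LogsSizer.
// This one trades speed for independence from the concrete protobuf types by
// marshaling and measuring the result.
type marshalingLogsSizer struct {
	m pdata.LogsMarshaler
}

func (s marshalingLogsSizer) LogsSize(ld pdata.Logs) int {
	buf, err := s.m.MarshalLogs(ld)
	if err != nil {
		return 0 // size is undefined if the Logs cannot be marshaled
	}
	return len(buf)
}

var _ pdata.LogsSizer = marshalingLogsSizer{}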
14 changes: 7 additions & 7 deletions model/pdata/metrics.go
@@ -24,7 +24,7 @@ import (
type MetricsMarshaler interface {
// MarshalMetrics marshals the given pdata.Metrics into bytes.
// If the error is not nil, the returned bytes slice cannot be used.
MarshalMetrics(td Metrics) ([]byte, error)
MarshalMetrics(md Metrics) ([]byte, error)
}

// MetricsUnmarshaler unmarshalls bytes into pdata.Metrics.
@@ -34,6 +34,12 @@ type MetricsUnmarshaler interface {
UnmarshalMetrics(buf []byte) (Metrics, error)
}

// MetricsSizer returns the size of a Metrics.
type MetricsSizer interface {
// MetricsSize returns the size in bytes of a Metrics.
MetricsSize(md Metrics) int
}

// Metrics is an opaque interface that allows transition to the new internal Metrics data, but also facilitates the
// transition to the new components, especially for traces.
//
@@ -87,12 +93,6 @@ func (md Metrics) MetricCount() int {
return metricCount
}

// OtlpProtoSize returns the size in bytes of this Metrics encoded as OTLP Collector
// ExportMetricsServiceRequest ProtoBuf bytes.
func (md Metrics) OtlpProtoSize() int {
return md.orig.Size()
}

// DataPointCount calculates the total number of data points.
func (md Metrics) DataPointCount() (dataPointCount int) {
rms := md.ResourceMetrics()
17 changes: 0 additions & 17 deletions model/pdata/metrics_test.go
@@ -19,7 +19,6 @@ import (

gogoproto "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
goproto "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/emptypb"

@@ -148,22 +147,6 @@ func TestMetricCount(t *testing.T) {
assert.EqualValues(t, 6, md.MetricCount())
}

func TestMetricsSize(t *testing.T) {
assert.Equal(t, 0, NewMetrics().OtlpProtoSize())

md := generateMetricsEmptyDataPoints()
orig := md.orig
size := orig.Size()
bytes, err := orig.Marshal()
require.NoError(t, err)
assert.Equal(t, size, md.OtlpProtoSize())
assert.Equal(t, len(bytes), md.OtlpProtoSize())
}

func TestMetricsSizeWithNil(t *testing.T) {
assert.Equal(t, 0, NewMetrics().OtlpProtoSize())
}

func TestMetricCountWithEmpty(t *testing.T) {
assert.EqualValues(t, 0, generateMetricsEmptyResource().MetricCount())
assert.EqualValues(t, 0, generateMetricsEmptyInstrumentation().MetricCount())
12 changes: 6 additions & 6 deletions model/pdata/traces.go
@@ -34,6 +34,12 @@ type TracesUnmarshaler interface {
UnmarshalTraces(buf []byte) (Traces, error)
}

// TracesSizer returns the size of a Traces.
type TracesSizer interface {
// TracesSize returns the size in bytes of a Traces.
TracesSize(td Traces) int
}

// Traces is the top-level struct that is propagated through the traces pipeline.
type Traces struct {
orig *otlpcollectortrace.ExportTraceServiceRequest
@@ -77,12 +83,6 @@ func (td Traces) SpanCount() int {
return spanCount
}

// OtlpProtoSize returns the size in bytes of this Traces encoded as OTLP Collector
// ExportTraceServiceRequest ProtoBuf bytes.
func (td Traces) OtlpProtoSize() int {
return td.orig.Size()
}

// ResourceSpans returns the ResourceSpansSlice associated with this Traces.
func (td Traces) ResourceSpans() ResourceSpansSlice {
return newResourceSpansSlice(&td.orig.ResourceSpans)
18 changes: 0 additions & 18 deletions model/pdata/traces_test.go
@@ -19,7 +19,6 @@ import (

gogoproto "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
goproto "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/emptypb"

@@ -52,23 +51,6 @@ func TestSpanCount(t *testing.T) {
assert.EqualValues(t, 6, md.SpanCount())
}

func TestTracesSize(t *testing.T) {
assert.Equal(t, 0, NewTraces().OtlpProtoSize())
td := NewTraces()
rms := td.ResourceSpans()
rms.AppendEmpty().InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty().SetName("foo")
orig := td.orig
size := orig.Size()
bytes, err := orig.Marshal()
require.NoError(t, err)
assert.Equal(t, size, td.OtlpProtoSize())
assert.Equal(t, len(bytes), td.OtlpProtoSize())
}

func TestTracesSizeWithNil(t *testing.T) {
assert.Equal(t, 0, NewTraces().OtlpProtoSize())
}

func TestSpanCountWithEmpty(t *testing.T) {
assert.EqualValues(t, 0, Traces{orig: &otlpcollectortrace.ExportTraceServiceRequest{
ResourceSpans: []*otlptrace.ResourceSpans{{}},
16 changes: 10 additions & 6 deletions processor/batchprocessor/batch_processor.go
@@ -27,6 +27,7 @@ import (
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configtelemetry"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/model/otlp"
"go.opentelemetry.io/collector/model/pdata"
)

@@ -223,10 +224,11 @@ type batchTraces struct {
nextConsumer consumer.Traces
traceData pdata.Traces
spanCount int
sizer pdata.TracesSizer
}

func newBatchTraces(nextConsumer consumer.Traces) *batchTraces {
return &batchTraces{nextConsumer: nextConsumer, traceData: pdata.NewTraces()}
return &batchTraces{nextConsumer: nextConsumer, traceData: pdata.NewTraces(), sizer: otlp.NewProtobufTracesMarshaler().(pdata.TracesSizer)}
}

// add updates current batchTraces by adding new TraceData object
@@ -259,17 +261,18 @@ func (bt *batchTraces) itemCount() int {
}

func (bt *batchTraces) size() int {
return bt.traceData.OtlpProtoSize()
return bt.sizer.TracesSize(bt.traceData)
}

type batchMetrics struct {
nextConsumer consumer.Metrics
metricData pdata.Metrics
dataPointCount int
sizer pdata.MetricsSizer
}

func newBatchMetrics(nextConsumer consumer.Metrics) *batchMetrics {
return &batchMetrics{nextConsumer: nextConsumer, metricData: pdata.NewMetrics()}
return &batchMetrics{nextConsumer: nextConsumer, metricData: pdata.NewMetrics(), sizer: otlp.NewProtobufMetricsMarshaler().(pdata.MetricsSizer)}
}

func (bm *batchMetrics) export(ctx context.Context, sendBatchMaxSize int) error {
@@ -290,7 +293,7 @@ func (bm *batchMetrics) itemCount() int {
}

func (bm *batchMetrics) size() int {
return bm.metricData.OtlpProtoSize()
return bm.sizer.MetricsSize(bm.metricData)
}

func (bm *batchMetrics) add(item interface{}) {
@@ -308,10 +311,11 @@ type batchLogs struct {
nextConsumer consumer.Logs
logData pdata.Logs
logCount int
sizer pdata.LogsSizer
}

func newBatchLogs(nextConsumer consumer.Logs) *batchLogs {
return &batchLogs{nextConsumer: nextConsumer, logData: pdata.NewLogs()}
return &batchLogs{nextConsumer: nextConsumer, logData: pdata.NewLogs(), sizer: otlp.NewProtobufLogsMarshaler().(pdata.LogsSizer)}
}

func (bl *batchLogs) export(ctx context.Context, sendBatchMaxSize int) error {
@@ -332,7 +336,7 @@ func (bl *batchLogs) itemCount() int {
}

func (bl *batchLogs) size() int {
return bl.logData.OtlpProtoSize()
return bl.sizer.LogsSize(bl.logData)
}

func (bl *batchLogs) add(item interface{}) {
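
The callers of size() are not shown in this diff, but with the sizer injected through the constructors, byte-based batching checks become straightforward; a hypothetical sketch (assumes it lives in the batchprocessor package, not part of this PR):

// Hypothetical helper: report whether a pending trace batch exceeds a byte limit,
// using the sizer wired in by newBatchTraces above.
func batchExceedsBytes(bt *batchTraces, maxBytes int) bool {
	return bt.sizer.TracesSize(bt.traceData) > maxBytes
}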