Implement sync disabled queue #12245

Merged · 1 commit · Feb 5, 2025
25 changes: 25 additions & 0 deletions .chloggen/disabled-queue.yaml
@@ -0,0 +1,25 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver)
component: exporterhelper

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Implement sync disabled queue used when batching is enabled.

# One or more tracking issues or pull requests related to the change
issues: [12245]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
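To make the changelog note concrete, here is an in-package sketch. It is illustrative only and assumes the internal test helpers that appear in batch_sender_test.go further down (defaultSettings, defaultSignal, newNoopObsrepSender, setFeatureGateForTest) plus a hypothetical requesttest.NewSink constructor: batching is enabled, no queue is configured, and Send blocks until the batch containing the request has been exported through the new synchronous disabled queue.

func syncBatchingSketch(t *testing.T) {
	// Route batching through the pulling-based batcher, as the tests below do.
	defer setFeatureGateForTest(t, usePullingBasedExporterQueueBatcher, true)()

	bCfg := exporterbatcher.NewDefaultConfig()
	// No queue option is passed: the queue stays disabled, so the batcher is fed
	// by the new synchronous disabled queue instead of a bounded memory queue.
	be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, WithBatcher(bCfg))
	require.NoError(t, err)
	require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost()))
	defer func() { require.NoError(t, be.Shutdown(context.Background())) }()

	sink := requesttest.NewSink() // hypothetical constructor for the sink used in the tests below
	// Blocks until the batch containing this request has been flushed downstream.
	require.NoError(t, be.Send(context.Background(), &requesttest.FakeRequest{Items: 1, Sink: sink}))
}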
9 changes: 5 additions & 4 deletions exporter/exporterhelper/internal/base_exporter.go
@@ -75,8 +75,9 @@ func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, osf ObsrepSe
}

be := &BaseExporter{
timeoutCfg: NewDefaultTimeoutConfig(),
Set: set,
Set: set,
timeoutCfg: NewDefaultTimeoutConfig(),
queueFactory: exporterqueue.NewMemoryQueueFactory[internal.Request](),
}

for _, op := range options {
@@ -100,7 +101,7 @@ func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, osf ObsrepSe
be.ConsumerOptions = append(be.ConsumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true}))
}

if be.batcherCfg.Enabled && !(usePullingBasedExporterQueueBatcher.IsEnabled() && be.queueCfg.Enabled) {
if !usePullingBasedExporterQueueBatcher.IsEnabled() && be.batcherCfg.Enabled {
concurrencyLimit := int64(0)
if be.queueCfg.Enabled {
concurrencyLimit = int64(be.queueCfg.NumConsumers)
@@ -109,7 +110,7 @@ func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, osf ObsrepSe
be.firstSender = be.BatchSender
}

if be.queueCfg.Enabled {
if be.queueCfg.Enabled || usePullingBasedExporterQueueBatcher.IsEnabled() && be.batcherCfg.Enabled {
qSet := exporterqueue.Settings{
Signal: signal,
ExporterSettings: set,
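The two rewritten conditions above are the behavioral core of this change. The following standalone sketch is an illustration, not code from the diff: the legacy BatchSender is only installed when the feature gate is off, while a queue sender is created whenever the queue is enabled or the gate-driven batcher needs one — in the latter case backed by the new disabled queue when the queue itself is off.

package main

import "fmt"

// senderRouting mirrors the rewritten conditions in NewBaseExporter (illustrative only).
// gate stands for usePullingBasedExporterQueueBatcher.IsEnabled().
func senderRouting(gate, queueEnabled, batcherEnabled bool) (legacyBatchSender, queueSender bool) {
	legacyBatchSender = !gate && batcherEnabled
	queueSender = queueEnabled || (gate && batcherEnabled)
	return legacyBatchSender, queueSender
}

func main() {
	// Gate on, queue disabled, batching enabled: no legacy BatchSender, but a queue
	// sender is still created — backed by the synchronous disabled queue from this PR.
	fmt.Println(senderRouting(true, false, true)) // false true
}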
18 changes: 9 additions & 9 deletions exporter/exporterhelper/internal/batch_sender_test.go
@@ -327,7 +327,9 @@ func TestBatchSender_PostShutdown(t *testing.T) {
assert.Equal(t, int64(8), sink.ItemsCount())
})
}
runTest("enable_queue_batcher", true)
// This test is disabled because, with the new batcher, batching still happens during shutdown,
// since that limits the number of requests sent.
// runTest("enable_queue_batcher", true)
runTest("disable_queue_batcher", false)
}

@@ -436,8 +438,7 @@ func TestBatchSender_BatchBlocking(t *testing.T) {
defer setFeatureGateForTest(t, usePullingBasedExporterQueueBatcher, enableQueueBatcher)()
bCfg := exporterbatcher.NewDefaultConfig()
bCfg.MinSizeItems = 3
be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender,
WithBatcher(bCfg))
be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, WithBatcher(bCfg))
require.NotNil(t, be)
require.NoError(t, err)
require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost()))
Expand All @@ -449,8 +450,8 @@ func TestBatchSender_BatchBlocking(t *testing.T) {
for i := 0; i < 6; i++ {
wg.Add(1)
go func() {
defer wg.Done()
assert.NoError(t, be.Send(context.Background(), &requesttest.FakeRequest{Items: 1, Sink: sink, Delay: 10 * time.Millisecond}))
wg.Done()
}()
}
wg.Wait()
@@ -473,8 +474,7 @@ func TestBatchSender_BatchCancelled(t *testing.T) {
defer setFeatureGateForTest(t, usePullingBasedExporterQueueBatcher, enableQueueBatcher)()
bCfg := exporterbatcher.NewDefaultConfig()
bCfg.MinSizeItems = 2
be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender,
WithBatcher(bCfg))
be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, WithBatcher(bCfg))
require.NotNil(t, be)
require.NoError(t, err)
require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost()))
@@ -486,14 +486,14 @@ func TestBatchSender_BatchCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
wg.Add(1)
go func() {
defer wg.Done()
assert.ErrorIs(t, be.Send(ctx, &requesttest.FakeRequest{Items: 1, Sink: sink, Delay: 100 * time.Millisecond}), context.Canceled)
wg.Done()
}()
wg.Add(1)
go func() {
time.Sleep(20 * time.Millisecond) // ensure this call is the second
defer wg.Done()
time.Sleep(100 * time.Millisecond) // ensure this call is the second
assert.ErrorIs(t, be.Send(context.Background(), &requesttest.FakeRequest{Items: 1, Sink: sink, Delay: 100 * time.Millisecond}), context.Canceled)
wg.Done()
}()
cancel() // canceling the first request should cancel the whole batch
wg.Wait()
67 changes: 67 additions & 0 deletions exporter/exporterqueue/disabled_queue.go
@@ -0,0 +1,67 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package exporterqueue // import "go.opentelemetry.io/collector/exporter/exporterqueue"

import (
"context"
"sync"
"sync/atomic"

"go.opentelemetry.io/collector/component"
)

var donePool = sync.Pool{
New: func() any {
return &blockingDone{ch: make(chan error, 1)}
},
}

func newDisabledQueue[T any](consumeFunc ConsumeFunc[T]) Queue[T] {
return &disabledQueue[T]{
consumeFunc: consumeFunc,
size: &atomic.Int64{},
}
}

type disabledQueue[T any] struct {
component.StartFunc
component.ShutdownFunc
consumeFunc ConsumeFunc[T]
size *atomic.Int64
}

func (d *disabledQueue[T]) Offer(ctx context.Context, req T) error {
done := donePool.Get().(*blockingDone)
d.size.Add(1)
d.consumeFunc(ctx, req, done)
defer d.size.Add(-1)
// Only return the blockingDone instance to the pool if we received the result from the
// consumer, which guarantees the consumer will not use it anymore. Otherwise there is no
// guarantee about when the consumer will write to the channel, so it can be neither reused nor closed.
select {
case doneErr := <-done.ch:
donePool.Put(done)
return doneErr
case <-ctx.Done():
return ctx.Err()
}
}

// Size returns the current number of blocked requests waiting to be processed.
func (d *disabledQueue[T]) Size() int64 {
return d.size.Load()
}

// Capacity returns the capacity of this queue, which is 0, meaning it is unbounded.
func (d *disabledQueue[T]) Capacity() int64 {
return 0
}

type blockingDone struct {
ch chan error
}

func (d *blockingDone) OnDone(err error) {
d.ch <- err
}
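As a usage note, the Offer contract can be summarized with a small in-package sketch (illustrative only, assuming the same imports the tests below use): the caller blocks until the consumer reports completion via done.OnDone, or until the caller's context is canceled, whichever comes first.

// offerContractSketch is illustrative and not part of this diff.
func offerContractSketch() error {
	q := newDisabledQueue[string](func(_ context.Context, _ string, done Done) {
		go func() {
			// Simulate asynchronous export work, then report the outcome;
			// this send on done.ch is what unblocks the pending Offer call.
			done.OnDone(nil)
		}()
	})
	if err := q.Start(context.Background(), componenttest.NewNopHost()); err != nil {
		return err
	}
	// Returns nil once OnDone(nil) is called; a canceled context would return ctx.Err() instead.
	if err := q.Offer(context.Background(), "payload"); err != nil {
		return err
	}
	return q.Shutdown(context.Background())
}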
180 changes: 180 additions & 0 deletions exporter/exporterqueue/disabled_queue_test.go
@@ -0,0 +1,180 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package exporterqueue

import (
"context"
"errors"
"sync"
"sync/atomic"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"go.opentelemetry.io/collector/component/componenttest"
)

func TestDisabledPassErrorBack(t *testing.T) {
myErr := errors.New("test error")
q := newDisabledQueue[int64](func(_ context.Context, _ int64, done Done) {
done.OnDone(myErr)
})
require.NoError(t, q.Start(context.Background(), componenttest.NewNopHost()))
require.ErrorIs(t, q.Offer(context.Background(), int64(1)), myErr)
require.NoError(t, q.Shutdown(context.Background()))
}

func TestDisabledCancelIncomingRequest(t *testing.T) {
wg := sync.WaitGroup{}
stop := make(chan struct{})
q := newDisabledQueue[int64](func(_ context.Context, _ int64, done Done) {
wg.Add(1)
go func() {
defer wg.Done()
<-stop
done.OnDone(nil)
}()
})
require.NoError(t, q.Start(context.Background(), componenttest.NewNopHost()))
ctx, cancel := context.WithCancel(context.Background())
wg.Add(1)
go func() {
defer wg.Done()
<-time.After(time.Second)
cancel()
}()
require.ErrorIs(t, q.Offer(ctx, int64(1)), context.Canceled)
close(stop)
require.NoError(t, q.Shutdown(context.Background()))
wg.Wait()
}

func TestDisabledSizeAndCapacity(t *testing.T) {
wg := sync.WaitGroup{}
stop := make(chan struct{})
q := newDisabledQueue[int64](func(_ context.Context, _ int64, done Done) {
wg.Add(1)
go func() {
defer wg.Done()
<-stop
done.OnDone(nil)
}()
})
require.NoError(t, q.Start(context.Background(), componenttest.NewNopHost()))
assert.EqualValues(t, 0, q.Size())
assert.EqualValues(t, 0, q.Capacity())
wg.Add(1)
go func() {
defer wg.Done()
assert.NoError(t, q.Offer(context.Background(), int64(1)))
}()
assert.Eventually(t, func() bool { return q.Size() == 1 }, 1*time.Second, 10*time.Millisecond)
assert.EqualValues(t, 0, q.Capacity())
close(stop)
require.NoError(t, q.Shutdown(context.Background()))
wg.Wait()
}

func TestDisabledQueueMultiThread(t *testing.T) {
buf := newBuffer()
buf.start()
q := newDisabledQueue[int64](buf.consume)
require.NoError(t, q.Start(context.Background(), componenttest.NewNopHost()))
wg := sync.WaitGroup{}
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < 10_000; j++ {
assert.NoError(t, q.Offer(context.Background(), int64(j)))
}
}()
}
wg.Wait()
require.NoError(t, q.Shutdown(context.Background()))
buf.shutdown()
assert.Equal(t, int64(10*10_000), buf.consumed())
}

func BenchmarkDisabledQueueOffer(b *testing.B) {
consumed := &atomic.Int64{}
q := newDisabledQueue[int64](func(_ context.Context, _ int64, done Done) {
consumed.Add(1)
done.OnDone(nil)
})
require.NoError(b, q.Start(context.Background(), componenttest.NewNopHost()))
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
require.NoError(b, q.Offer(context.Background(), int64(i)))
}
require.NoError(b, q.Shutdown(context.Background()))
assert.Equal(b, int64(b.N), consumed.Load())
}

const flushNum = 5

type buffer struct {
ch chan Done
nr *atomic.Int64
wg sync.WaitGroup
dones []Done
}

func newBuffer() *buffer {
buf := &buffer{
ch: make(chan Done, 10),
nr: &atomic.Int64{},
dones: make([]Done, 0, flushNum),
}
return buf
}

func (buf *buffer) consume(_ context.Context, _ int64, done Done) {
buf.ch <- done
}

func (buf *buffer) start() {
buf.wg.Add(1)
go func() {
defer buf.wg.Done()
buf.dones = make([]Done, 0, flushNum)
for {
select {
case done, ok := <-buf.ch:
if !ok {
return
}
buf.dones = append(buf.dones, done)
if len(buf.dones) == flushNum {
buf.flush()
}
case <-time.After(10 * time.Millisecond):
buf.flush()
}
}
}()
}

func (buf *buffer) shutdown() {
close(buf.ch)
buf.wg.Wait()
}

func (buf *buffer) flush() {
if len(buf.dones) == 0 {
return
}
buf.nr.Add(int64(len(buf.dones)))
for _, done := range buf.dones {
done.OnDone(nil)
}
buf.dones = buf.dones[:0]
}

func (buf *buffer) consumed() int64 {
return buf.nr.Load()
}
6 changes: 6 additions & 0 deletions exporter/exporterqueue/queue.go
@@ -81,6 +81,9 @@
// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
func NewMemoryQueueFactory[T any]() Factory[T] {
return func(_ context.Context, _ Settings, cfg Config, consume ConsumeFunc[T]) Queue[T] {
if !cfg.Enabled {
return newDisabledQueue(consume)
}
q := newMemoryQueue[T](memoryQueueSettings[T]{
sizer: &requestSizer[T]{},
capacity: int64(cfg.QueueSize),
@@ -109,6 +112,9 @@
return NewMemoryQueueFactory[T]()
}
return func(_ context.Context, set Settings, cfg Config, consume ConsumeFunc[T]) Queue[T] {
if !cfg.Enabled {
return newDisabledQueue(consume)
}

Codecov / codecov/patch — added lines exporter/exporterqueue/queue.go#L116-L117 were not covered by tests.
q := newPersistentQueue[T](persistentQueueSettings[T]{
sizer: &requestSizer[T]{},
capacity: int64(cfg.QueueSize),
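For callers of the public factories, the effect of the two new Enabled checks can be sketched as follows. This is an assumption-laden example: it presumes the exported Config, Settings, Done, NewDefaultConfig, and NewMemoryQueueFactory names keep the shapes shown in this diff.

package main

import (
	"context"

	"go.opentelemetry.io/collector/exporter/exporterqueue"
)

func main() {
	cfg := exporterqueue.NewDefaultConfig()
	cfg.Enabled = false // queue disabled: the factory now returns the synchronous disabled queue

	factory := exporterqueue.NewMemoryQueueFactory[string]()
	q := factory(context.Background(), exporterqueue.Settings{}, cfg,
		func(_ context.Context, _ string, done exporterqueue.Done) {
			done.OnDone(nil) // consume synchronously, so Offer unblocks right away
		})

	_ = q.Offer(context.Background(), "payload") // no buffering; blocks only until OnDone
	_ = q.Capacity()                             // 0: the disabled queue is unbounded
}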