@@ -87,6 +87,9 @@ type Config struct {
 	ClientCleanupPeriod time.Duration
 	IngestionRateLimit  float64
 	IngestionBurstSize  int
+
+	// for testing
+	ingesterClientFactory func(string) cortex.IngesterClient
 }

 // RegisterFlags adds the flags required to config this to the given FlagSet
@@ -218,22 +221,27 @@ func (d *Distributor) getClientFor(ingester *ring.IngesterDesc) (cortex.Ingester
 		return client, nil
 	}

-	conn, err := grpc.Dial(
-		ingester.Addr,
-		grpc.WithTimeout(d.cfg.RemoteTimeout),
-		grpc.WithInsecure(),
-		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
-			otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
-			middleware.ClientUserHeaderInterceptor,
-		)),
-	)
-	if err != nil {
-		return nil, err
-	}
-
-	client = ingesterClient{
-		IngesterClient: cortex.NewIngesterClient(conn),
-		conn:           conn,
+	if d.cfg.ingesterClientFactory != nil {
+		client = ingesterClient{
+			IngesterClient: d.cfg.ingesterClientFactory(ingester.Addr),
+		}
+	} else {
+		conn, err := grpc.Dial(
+			ingester.Addr,
+			grpc.WithTimeout(d.cfg.RemoteTimeout),
+			grpc.WithInsecure(),
+			grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
+				otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
+				middleware.ClientUserHeaderInterceptor,
+			)),
+		)
+		if err != nil {
+			return nil, err
+		}
+		client = ingesterClient{
+			IngesterClient: cortex.NewIngesterClient(conn),
+			conn:           conn,
+		}
 	}
 	d.clients[ingester.Addr] = client
 	return client, nil
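The new ingesterClientFactory hook lets a test inject a stub client instead of dialing a real gRPC connection. A minimal sketch of how a test might use it follows; the mockIngesterClient type, the package name, and the import path are assumptions for illustration rather than part of this change, and such a test would need to live in the distributor's own package to set the unexported field.

package distributor // assumed package name for the distributor code

import (
	"testing"

	"github.com/weaveworks/cortex" // assumed import path for the generated cortex package
)

// mockIngesterClient is a hypothetical stub. Embedding the interface means a
// test only has to implement the RPC methods it actually calls.
type mockIngesterClient struct {
	cortex.IngesterClient
}

func TestGetClientForUsesFactory(t *testing.T) {
	cfg := Config{
		ingesterClientFactory: func(addr string) cortex.IngesterClient {
			return mockIngesterClient{}
		},
	}
	// A Distributor built from cfg would now get the stub back from
	// getClientFor instead of opening a gRPC connection.
	_ = cfg
}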
@@ -301,8 +309,6 @@ func (d *Distributor) Push(ctx context.Context, req *remote.WriteRequest) (*cort
 			sample:      samples[i],
 			minSuccess:  minSuccess,
 			maxFailures: len(ingesters[i]) - minSuccess,
-			succeeded:   0,
-			failed:      0,
 		}

 		// Skip those that have not heartbeated in a while. NB these are still
@@ -330,7 +336,6 @@ func (d *Distributor) Push(ctx context.Context, req *remote.WriteRequest) (*cort

 	pushTracker := pushTracker{
 		samplesPending: int32(len(samples)),
-		samplesFailed:  0,
 		done:           make(chan struct{}),
 		err:            make(chan error),
 	}
@@ -363,8 +368,8 @@ func (d *Distributor) getOrCreateIngestLimiter(userID string) *rate.Limiter {
 func (d *Distributor) sendSamples(ctx context.Context, ingester *ring.IngesterDesc, sampleTrackers []*sampleTracker, pushTracker *pushTracker) {
 	err := d.sendSamplesErr(ctx, ingester, sampleTrackers)

-	// If we suceed, decrement each sample's pending count by one. If we reach
-	// the requred number of successful puts on this sample, then decrement the
+	// If we succeed, decrement each sample's pending count by one. If we reach
+	// the required number of successful puts on this sample, then decrement the
 	// number of pending samples by one. If we successfully push all samples to
 	// min success ingesters, wake up the waiting rpc so it can return early.
 	// Similarly, track the number of errors, and if it exceeds maxFailures
@@ -374,7 +379,7 @@ func (d *Distributor) sendSamples(ctx context.Context, ingester *ring.IngesterDe
 	// goroutine will write to either channel.
 	for i := range sampleTrackers {
 		if err != nil {
-			if atomic.AddInt32(&sampleTrackers[i].failed, 1) > int32(sampleTrackers[i].maxFailures) {
+			if atomic.AddInt32(&sampleTrackers[i].failed, 1) <= int32(sampleTrackers[i].maxFailures) {
 				continue
 			}
 			if atomic.AddInt32(&pushTracker.samplesFailed, 1) == 1 {
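For context on the comparison fix above: maxFailures is len(ingesters[i]) - minSuccess, so a sample should only count as failed once its own error count exceeds that tolerance, and the push-level failure should be recorded exactly once. Below is a self-contained sketch of that accounting, using an assumed replication factor of 3 and minSuccess of 2 purely for illustration.

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	const (
		replicas   = 3 // assumed replication factor, for illustration only
		minSuccess = 2 // assumed write quorum
	)
	maxFailures := int32(replicas - minSuccess) // 1, mirroring the diff

	var failed, samplesFailed int32
	for attempt := 1; attempt <= replicas; attempt++ {
		// Simulate every replica write failing for this sample.
		if atomic.AddInt32(&failed, 1) <= maxFailures {
			fmt.Printf("attempt %d failed: still within tolerance\n", attempt)
			continue
		}
		// Tolerance exceeded; record the sample failure at the push level once.
		if atomic.AddInt32(&samplesFailed, 1) == 1 {
			fmt.Printf("attempt %d failed: sample reported as failed\n", attempt)
		}
	}
}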