 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.metrics.CounterMetric;
-import org.elasticsearch.common.metrics.MeanMetric;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -80,8 +78,7 @@ public class IngestService implements ClusterStateApplier {
     // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around.
     private volatile Map<String, Pipeline> pipelines = new HashMap<>();
     private final ThreadPool threadPool;
-    private final StatsHolder totalStats = new StatsHolder();
-    private volatile Map<String, StatsHolder> statsHolderPerPipeline = Collections.emptyMap();
+    private final IngestMetric totalMetrics = new IngestMetric();

     public IngestService(ClusterService clusterService, ThreadPool threadPool,
                          Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry,
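Note: `IngestMetric`, which replaces the removed `StatsHolder`, is introduced in a separate file that is not part of this diff. A minimal sketch of its assumed shape, inferred only from the calls made elsewhere in this diff (`preIngest()`, `postIngest(long)`, `ingestFailed()`, `add(IngestMetric)`, `createStats()`); the real class may differ in detail:

// Hypothetical sketch of IngestMetric, inferred from its usage in this diff only.
package org.elasticsearch.ingest;

import org.elasticsearch.common.metrics.CounterMetric;

class IngestMetric {
    private final CounterMetric ingestCount = new CounterMetric();
    private final CounterMetric ingestTimeInMillis = new CounterMetric();
    private final CounterMetric ingestCurrent = new CounterMetric();
    private final CounterMetric ingestFailed = new CounterMetric();

    void preIngest() {
        ingestCurrent.inc();
    }

    void postIngest(long timeInMillis) {
        ingestCurrent.dec();
        ingestCount.inc();
        ingestTimeInMillis.inc(timeInMillis);
    }

    void ingestFailed() {
        ingestFailed.inc();
    }

    // merge another metric's counters into this one (used in applyClusterState below
    // to carry counts over when pipeline instances are rebuilt)
    void add(IngestMetric other) {
        ingestCount.inc(other.ingestCount.count());
        ingestTimeInMillis.inc(other.ingestTimeInMillis.count());
        ingestFailed.inc(other.ingestFailed.count());
    }

    IngestStats.Stats createStats() {
        return new IngestStats.Stats(ingestCount.count(), ingestTimeInMillis.count(), ingestCurrent.count(), ingestFailed.count());
    }
}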
@@ -258,10 +255,16 @@ Map<String, Pipeline> pipelines() {
     @Override
     public void applyClusterState(final ClusterChangedEvent event) {
         ClusterState state = event.state();
+        Map<String, Pipeline> originalPipelines = pipelines;
         innerUpdatePipelines(event.previousState(), state);
-        IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE);
-        if (ingestMetadata != null) {
-            updatePipelineStats(ingestMetadata);
+        // pipelines changed, so add the old metrics to the new metrics
+        if (originalPipelines != pipelines) {
+            pipelines.forEach((id, pipeline) -> {
+                Pipeline originalPipeline = originalPipelines.get(id);
+                if (originalPipeline != null) {
+                    pipeline.getMetrics().add(originalPipeline.getMetrics());
+                }
+            });
         }
     }
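Because `innerUpdatePipelines` rebuilds the `pipelines` map with fresh `Pipeline` objects whenever a relevant cluster state change arrives, each rebuilt pipeline starts with zeroed counters; the `forEach` above folds the previous instance's counts back in so stats are not reset by unrelated pipeline updates. This carry-over relies on `Pipeline` now owning an `IngestMetric` exposed via `getMetrics()`; that change to `Pipeline` is not shown in this file. A hedged sketch of the assumed accessor:

// Assumed (not shown in this diff): Pipeline now holds its own IngestMetric, so the
// per-pipeline counters live with the pipeline instance rather than in IngestService.
public final class Pipeline {
    private final IngestMetric metrics = new IngestMetric();

    public IngestMetric getMetrics() {
        return metrics;
    }
    // ... id, description, processors, execute(...), etc.
}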
@@ -326,6 +329,7 @@ void validatePipeline(Map<DiscoveryNode, IngestInfo> ingestInfos, PutPipelineReq
     public void executeBulkRequest(Iterable<DocWriteRequest<?>> actionRequests,
                                    BiConsumer<IndexRequest, Exception> itemFailureHandler, Consumer<Exception> completionHandler,
                                    Consumer<IndexRequest> itemDroppedHandler) {
+
         threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() {

             @Override
@@ -368,37 +372,11 @@ protected void doRun() {
     }

     public IngestStats stats() {
-        Map<String, StatsHolder> statsHolderPerPipeline = this.statsHolderPerPipeline;

-        Map<String, IngestStats.Stats> statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size());
-        for (Map.Entry<String, StatsHolder> entry : statsHolderPerPipeline.entrySet()) {
-            statsPerPipeline.put(entry.getKey(), entry.getValue().createStats());
-        }
+        Map<String, IngestStats.Stats> statsPerPipeline =
+            pipelines.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, v -> v.getValue().getMetrics().createStats()));

-        return new IngestStats(totalStats.createStats(), statsPerPipeline);
-    }
-
-    void updatePipelineStats(IngestMetadata ingestMetadata) {
-        boolean changed = false;
-        Map<String, StatsHolder> newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline);
-        Iterator<String> iterator = newStatsPerPipeline.keySet().iterator();
-        while (iterator.hasNext()) {
-            String pipeline = iterator.next();
-            if (ingestMetadata.getPipelines().containsKey(pipeline) == false) {
-                iterator.remove();
-                changed = true;
-            }
-        }
-        for (String pipeline : ingestMetadata.getPipelines().keySet()) {
-            if (newStatsPerPipeline.containsKey(pipeline) == false) {
-                newStatsPerPipeline.put(pipeline, new StatsHolder());
-                changed = true;
-            }
-        }
-
-        if (changed) {
-            statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline);
-        }
+        return new IngestStats(totalMetrics.createStats(), statsPerPipeline);
     }

     private void innerExecute(IndexRequest indexRequest, Pipeline pipeline, Consumer<IndexRequest> itemDroppedHandler) throws Exception {
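For readability, the `Collectors.toMap` expression in `stats()` above is equivalent to an explicit loop over the live `pipelines` map, roughly:

// Equivalent unrolled form of the stream/collect call in stats() above.
Map<String, IngestStats.Stats> statsPerPipeline = new HashMap<>(pipelines.size());
for (Map.Entry<String, Pipeline> entry : pipelines.entrySet()) {
    statsPerPipeline.put(entry.getKey(), entry.getValue().getMetrics().createStats());
}
return new IngestStats(totalMetrics.createStats(), statsPerPipeline);

The separate bookkeeping map (`statsHolderPerPipeline`) and its reconciliation in `updatePipelineStats` are no longer needed, since the per-pipeline metric now travels with the `Pipeline` object itself.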
@@ -409,10 +387,8 @@ private void innerExecute(IndexRequest indexRequest, Pipeline pipeline, Consumer
         long startTimeInNanos = System.nanoTime();
         // the pipeline specific stat holder may not exist and that is fine:
         // (e.g. the pipeline may have been removed while we're ingesting a document
-        Optional<StatsHolder> pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId()));
         try {
-            totalStats.preIngest();
-            pipelineStats.ifPresent(StatsHolder::preIngest);
+            totalMetrics.preIngest();
            String index = indexRequest.index();
            String type = indexRequest.type();
            String id = indexRequest.id();
@@ -438,13 +414,11 @@ private void innerExecute(IndexRequest indexRequest, Pipeline pipeline, Consumer
                 indexRequest.source(ingestDocument.getSourceAndMetadata());
             }
         } catch (Exception e) {
-            totalStats.ingestFailed();
-            pipelineStats.ifPresent(StatsHolder::ingestFailed);
+            totalMetrics.ingestFailed();
             throw e;
         } finally {
             long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos);
-            totalStats.postIngest(ingestTimeInMillis);
-            pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis));
+            totalMetrics.postIngest(ingestTimeInMillis);
         }
     }
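With the `Optional<StatsHolder>` lookups gone, `innerExecute` only updates the node-wide `totalMetrics`; the per-pipeline timing presumably moves into `Pipeline` itself, which can wrap its own execution with the metric it now owns. A hedged sketch of what that might look like (the actual change to `Pipeline` is not part of this file):

// Hypothetical sketch: Pipeline.execute(...) updating the pipeline-owned metric around
// the compound processor run; mirrors the totalMetrics bookkeeping in innerExecute above.
public IngestDocument execute(IngestDocument ingestDocument) throws Exception {
    long startTimeInNanos = System.nanoTime();
    metrics.preIngest();
    try {
        return compoundProcessor.execute(ingestDocument);
    } catch (Exception e) {
        metrics.ingestFailed();
        throw e;
    } finally {
        long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos);
        metrics.postIngest(ingestTimeInMillis);
    }
}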
@@ -481,27 +455,4 @@ private void innerUpdatePipelines(ClusterState previousState, ClusterState state
         ExceptionsHelper.rethrowAndSuppress(exceptions);
     }

-    private static class StatsHolder {
-
-        private final MeanMetric ingestMetric = new MeanMetric();
-        private final CounterMetric ingestCurrent = new CounterMetric();
-        private final CounterMetric ingestFailed = new CounterMetric();
-
-        void preIngest() {
-            ingestCurrent.inc();
-        }
-
-        void postIngest(long ingestTimeInMillis) {
-            ingestCurrent.dec();
-            ingestMetric.inc(ingestTimeInMillis);
-        }
-
-        void ingestFailed() {
-            ingestFailed.inc();
-        }
-
-        IngestStats.Stats createStats() {
-            return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count());
-        }
-    }
 }