diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 49e81a67e85f9..3f7ee8b60b53c 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -37,6 +37,7 @@ dependencies { // us to invoke the JMH uberjar as usual. exclude group: 'net.sf.jopt-simple', module: 'jopt-simple' } + api(project(':libs:elasticsearch-h3')) api(project(':modules:aggregations')) api(project(':x-pack:plugin:esql-core')) api(project(':x-pack:plugin:esql')) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java new file mode 100644 index 0000000000000..2441acab7d405 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.benchmark.h3; + +import org.elasticsearch.h3.H3; +import org.openjdk.jmh.Main; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +@OutputTimeUnit(TimeUnit.SECONDS) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 25, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(1) +public class H3Benchmark { + + @Benchmark + public void pointToH3(H3State state, Blackhole bh) { + for (int i = 0; i < state.points.length; i++) { + for (int res = 0; res <= 15; res++) { + bh.consume(H3.geoToH3(state.points[i][0], state.points[i][1], res)); + } + } + } + + @Benchmark + public void h3Boundary(H3State state, Blackhole bh) { + for (int i = 0; i < state.h3.length; i++) { + bh.consume(H3.h3ToGeoBoundary(state.h3[i])); + } + } + + public static void main(String[] args) throws Exception { + Main.main(args); + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java new file mode 100644 index 0000000000000..5707e692a0750 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.benchmark.h3; + +import org.elasticsearch.h3.H3; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; + +import java.io.IOException; +import java.util.Random; + +@State(Scope.Benchmark) +public class H3State { + + double[][] points = new double[1000][2]; + long[] h3 = new long[1000]; + + @Setup(Level.Trial) + public void setupTrial() throws IOException { + Random random = new Random(1234); + for (int i = 0; i < points.length; i++) { + points[i][0] = random.nextDouble() * 180 - 90; // lat + points[i][1] = random.nextDouble() * 360 - 180; // lon + int res = random.nextInt(16); // resolution + h3[i] = H3.geoToH3(points[i][0], points[i][1], res); + } + } +} diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index efe2ff3449216..9036682bf0f0c 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip +distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index f7b1c8ff61774..8d04a0f38fab0 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -8.9 \ No newline at end of file +8.10 \ No newline at end of file diff --git a/docs/changelog/109414.yaml b/docs/changelog/109414.yaml new file mode 100644 index 0000000000000..81b7541bde35b --- /dev/null +++ b/docs/changelog/109414.yaml @@ -0,0 +1,6 @@ +pr: 109414 +summary: Don't fail retention lease sync actions due to capacity constraints +area: CRUD +type: bug +issues: + - 105926 diff --git a/docs/changelog/110524.yaml b/docs/changelog/110524.yaml new file mode 100644 index 0000000000000..6274c99b09998 --- /dev/null +++ b/docs/changelog/110524.yaml @@ -0,0 +1,5 @@ +pr: 110524 +summary: Introduce mode `subobjects=auto` for objects +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/110847.yaml b/docs/changelog/110847.yaml new file mode 100644 index 0000000000000..214adc97ac7cb --- /dev/null +++ b/docs/changelog/110847.yaml @@ -0,0 +1,5 @@ +pr: 110847 +summary: SLM Interval based scheduling +area: ILM+SLM +type: feature +issues: [] diff --git a/docs/changelog/111193.yaml b/docs/changelog/111193.yaml new file mode 100644 index 0000000000000..9e56facb60d3a --- /dev/null +++ b/docs/changelog/111193.yaml @@ -0,0 +1,6 @@ +pr: 111193 +summary: Fix cases of collections with one point +area: Geo +type: bug +issues: + - 110982 diff --git a/docs/changelog/111412.yaml b/docs/changelog/111412.yaml new file mode 100644 index 0000000000000..297fa77cd2664 --- /dev/null +++ b/docs/changelog/111412.yaml @@ -0,0 +1,6 @@ +pr: 111412 +summary: Make enrich cache based on memory usage +area: Ingest Node +type: enhancement +issues: + - 106081 diff --git 
a/docs/changelog/111544.yaml b/docs/changelog/111544.yaml new file mode 100644 index 0000000000000..d4c46f485e664 --- /dev/null +++ b/docs/changelog/111544.yaml @@ -0,0 +1,5 @@ +pr: 111544 +summary: "ESQL: Strings support for MAX and MIN aggregations" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/111655.yaml b/docs/changelog/111655.yaml new file mode 100644 index 0000000000000..077714d15a712 --- /dev/null +++ b/docs/changelog/111655.yaml @@ -0,0 +1,5 @@ +pr: 111655 +summary: Migrate Inference to `ChunkedToXContent` +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/111690.yaml b/docs/changelog/111690.yaml new file mode 100644 index 0000000000000..36e715744ad88 --- /dev/null +++ b/docs/changelog/111690.yaml @@ -0,0 +1,5 @@ +pr: 111690 +summary: "ESQL: Support INLINESTATS grouped on expressions" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/111749.yaml b/docs/changelog/111749.yaml new file mode 100644 index 0000000000000..77e0c65005dd6 --- /dev/null +++ b/docs/changelog/111749.yaml @@ -0,0 +1,6 @@ +pr: 111749 +summary: "ESQL: Added `mv_percentile` function" +area: ES|QL +type: feature +issues: + - 111591 diff --git a/docs/changelog/111797.yaml b/docs/changelog/111797.yaml new file mode 100644 index 0000000000000..00b793a19d9c3 --- /dev/null +++ b/docs/changelog/111797.yaml @@ -0,0 +1,6 @@ +pr: 111797 +summary: "ESQL: fix for missing indices error message" +area: ES|QL +type: bug +issues: + - 111712 diff --git a/docs/changelog/111840.yaml b/docs/changelog/111840.yaml new file mode 100644 index 0000000000000..c40a9e2aef621 --- /dev/null +++ b/docs/changelog/111840.yaml @@ -0,0 +1,5 @@ +pr: 111840 +summary: "ESQL: Add async ID and `is_running` headers to ESQL async query" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/111855.yaml b/docs/changelog/111855.yaml new file mode 100644 index 0000000000000..3f15e9c20135a --- /dev/null +++ b/docs/changelog/111855.yaml @@ -0,0 +1,5 @@ +pr: 111855 +summary: "ESQL: Profile more timing information" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/111874.yaml b/docs/changelog/111874.yaml new file mode 100644 index 0000000000000..26ec90aa6cd4c --- /dev/null +++ b/docs/changelog/111874.yaml @@ -0,0 +1,8 @@ +pr: 111874 +summary: "ESQL: BUCKET: allow numerical spans as whole numbers" +area: ES|QL +type: enhancement +issues: + - 104646 + - 109340 + - 105375 diff --git a/docs/changelog/111937.yaml b/docs/changelog/111937.yaml new file mode 100644 index 0000000000000..7d856e29d54c5 --- /dev/null +++ b/docs/changelog/111937.yaml @@ -0,0 +1,6 @@ +pr: 111937 +summary: Handle `BigInteger` in xcontent copy +area: Infra/Core +type: bug +issues: + - 111812 diff --git a/docs/changelog/111943.yaml b/docs/changelog/111943.yaml new file mode 100644 index 0000000000000..6b9f03ccee31c --- /dev/null +++ b/docs/changelog/111943.yaml @@ -0,0 +1,6 @@ +pr: 111943 +summary: Fix synthetic source for empty nested objects +area: Mapping +type: bug +issues: + - 111811 diff --git a/docs/changelog/111948.yaml b/docs/changelog/111948.yaml new file mode 100644 index 0000000000000..a3a592abaf1ca --- /dev/null +++ b/docs/changelog/111948.yaml @@ -0,0 +1,5 @@ +pr: 111948 +summary: Upgrade xcontent to Jackson 2.17.0 +area: Infra/Core +type: upgrade +issues: [] diff --git a/docs/changelog/111955.yaml b/docs/changelog/111955.yaml new file mode 100644 index 0000000000000..ebc518203b7cc --- /dev/null +++ b/docs/changelog/111955.yaml @@ -0,0 +1,7 @@ +pr: 111955 +summary: Clean 
up dangling S3 multipart uploads +area: Snapshot/Restore +type: enhancement +issues: + - 101169 + - 44971 diff --git a/docs/changelog/111966.yaml b/docs/changelog/111966.yaml new file mode 100644 index 0000000000000..facf0a61c4d8a --- /dev/null +++ b/docs/changelog/111966.yaml @@ -0,0 +1,5 @@ +pr: 111966 +summary: No error when `store_array_source` is used without synthetic source +area: Mapping +type: bug +issues: [] diff --git a/docs/changelog/111968.yaml b/docs/changelog/111968.yaml new file mode 100644 index 0000000000000..9d758c76369e9 --- /dev/null +++ b/docs/changelog/111968.yaml @@ -0,0 +1,6 @@ +pr: 111968 +summary: "ESQL: don't lose the original casting error message" +area: ES|QL +type: bug +issues: + - 111967 diff --git a/docs/changelog/111969.yaml b/docs/changelog/111969.yaml new file mode 100644 index 0000000000000..2d276850c4988 --- /dev/null +++ b/docs/changelog/111969.yaml @@ -0,0 +1,5 @@ +pr: 111969 +summary: "[Profiling] add `container.id` field to event index template" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml new file mode 100644 index 0000000000000..58477c68f0e7c --- /dev/null +++ b/docs/changelog/111972.yaml @@ -0,0 +1,15 @@ +pr: 111972 +summary: Introduce global retention in data stream lifecycle. +area: Data streams +type: feature +issues: [] +highlight: + title: Add global retention in data stream lifecycle + body: "Data stream lifecycle now supports configuring retention on a cluster level,\ + \ namely global retention. Global retention \nallows us to configure two different\ + \ retentions:\n\n- `data_streams.lifecycle.retention.default` is applied to all\ + \ data streams managed by the data stream lifecycle that do not have retention\n\ + defined on the data stream level.\n- `data_streams.lifecycle.retention.max` is\ + \ applied to all data streams managed by the data stream lifecycle and it allows\ + \ any data stream \ndata to be deleted after the `max_retention` has passed." 
+ notable: true diff --git a/docs/changelog/111983.yaml b/docs/changelog/111983.yaml new file mode 100644 index 0000000000000..d5043d0b44155 --- /dev/null +++ b/docs/changelog/111983.yaml @@ -0,0 +1,6 @@ +pr: 111983 +summary: Avoid losing error message in failure collector +area: ES|QL +type: bug +issues: + - 111894 diff --git a/docs/changelog/111994.yaml b/docs/changelog/111994.yaml new file mode 100644 index 0000000000000..ee62651c43987 --- /dev/null +++ b/docs/changelog/111994.yaml @@ -0,0 +1,6 @@ +pr: 111994 +summary: Merge multiple ignored source entries for the same field +area: Logs +type: bug +issues: + - 111694 diff --git a/docs/changelog/112005.yaml b/docs/changelog/112005.yaml new file mode 100644 index 0000000000000..2d84381e632b3 --- /dev/null +++ b/docs/changelog/112005.yaml @@ -0,0 +1,6 @@ +pr: 112005 +summary: Check for valid `parentDoc` before retrieving its previous +area: Mapping +type: bug +issues: + - 111990 diff --git a/docs/changelog/112019.yaml b/docs/changelog/112019.yaml new file mode 100644 index 0000000000000..7afb207864ed7 --- /dev/null +++ b/docs/changelog/112019.yaml @@ -0,0 +1,5 @@ +pr: 112019 +summary: Display effective retention in the relevant data stream APIs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/112038.yaml b/docs/changelog/112038.yaml new file mode 100644 index 0000000000000..6cbfb373b7420 --- /dev/null +++ b/docs/changelog/112038.yaml @@ -0,0 +1,6 @@ +pr: 112038 +summary: Semantic reranking should fail whenever inference ID does not exist +area: Relevance +type: bug +issues: + - 111934 diff --git a/docs/changelog/112046.yaml b/docs/changelog/112046.yaml new file mode 100644 index 0000000000000..f3cda1ed7a7d2 --- /dev/null +++ b/docs/changelog/112046.yaml @@ -0,0 +1,5 @@ +pr: 112046 +summary: Fix calculation of parent offset for ignored source in some cases +area: Mapping +type: bug +issues: [] diff --git a/docs/changelog/112058.yaml b/docs/changelog/112058.yaml new file mode 100644 index 0000000000000..e974b3413582e --- /dev/null +++ b/docs/changelog/112058.yaml @@ -0,0 +1,5 @@ +pr: 112058 +summary: Fix RRF validation for `rank_constant` < 1 +area: Ranking +type: bug +issues: [] diff --git a/docs/changelog/112090.yaml b/docs/changelog/112090.yaml new file mode 100644 index 0000000000000..6d6e4d0851523 --- /dev/null +++ b/docs/changelog/112090.yaml @@ -0,0 +1,6 @@ +pr: 112090 +summary: Always check `crsType` when folding spatial functions +area: Geo +type: bug +issues: + - 112089 diff --git a/docs/changelog/112123.yaml b/docs/changelog/112123.yaml new file mode 100644 index 0000000000000..0c0d7ac44cd17 --- /dev/null +++ b/docs/changelog/112123.yaml @@ -0,0 +1,5 @@ +pr: 112123 +summary: SLM interval schedule followup - add back `getFieldName` style getters +area: ILM+SLM +type: enhancement +issues: [] diff --git a/docs/changelog/112126.yaml b/docs/changelog/112126.yaml new file mode 100644 index 0000000000000..f6a7aeb893a5e --- /dev/null +++ b/docs/changelog/112126.yaml @@ -0,0 +1,5 @@ +pr: 112126 +summary: Add support for spatial relationships in point field mapper +area: Geo +type: enhancement +issues: [] diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 25881b707d724..f8d925945401e 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -334,6 +334,7 @@ All REST API parameters (both request parameters and JSON body) support providing boolean "false" as the value `false` and boolean "true" as the value `true`.
All other values will raise an error. +[[api-conventions-number-values]] [discrete] === Number Values diff --git a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc index 090eda5ef5436..e4da2c45ee978 100644 --- a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc +++ b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc @@ -4,7 +4,7 @@ NOTE: {cloud-only} -You can use the following APIs to perform autoscaling operations. +You can use the following APIs to perform {cloud}/ec-autoscaling.html[autoscaling operations]. [discrete] [[autoscaling-api-top-level]] diff --git a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc index 608b7bd7cb903..190428485a003 100644 --- a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Delete autoscaling policy. +Delete {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-delete-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc index 05724b9c48b6e..d635d8c8f7bd0 100644 --- a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc +++ b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Get autoscaling capacity. +Get {cloud}/ec-autoscaling.html[autoscaling] capacity. [[autoscaling-get-autoscaling-capacity-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc index ad00d69d1aeb2..973eedcb361c9 100644 --- a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Get autoscaling policy. +Get {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-get-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc index ff79def51ebb9..e564f83411eb4 100644 --- a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Creates or updates an autoscaling policy. +Creates or updates an {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-put-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/deciders/fixed-decider.asciidoc b/docs/reference/autoscaling/deciders/fixed-decider.asciidoc index c46d1dffe2cc8..5a8b009d9f063 100644 --- a/docs/reference/autoscaling/deciders/fixed-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/fixed-decider.asciidoc @@ -6,7 +6,7 @@ experimental[] [WARNING] The fixed decider is intended for testing only. Do not use this decider in production. -The `fixed` decider responds with a fixed required capacity. It is not enabled +The {cloud}/ec-autoscaling.html[autoscaling] `fixed` decider responds with a fixed required capacity. It is not enabled by default but can be enabled for any policy by explicitly configuring it. 
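For orientation, a minimal policy that explicitly enables the `fixed` decider could look like the following sketch (the policy name, role, and capacity values are illustrative assumptions, not part of this change):

[source,console]
----
PUT /_autoscaling/policy/frozen_tier_fixed_policy
{
  "roles": [ "data_frozen" ],
  "deciders": {
    "fixed": {
      "storage": "1tb",
      "memory": "32gb",
      "nodes": 1
    }
  }
}
----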
==== Configuration settings diff --git a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc index 832cf330053aa..0fc9ad444a213 100644 --- a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-existence-decider]] === Frozen existence decider -The frozen existence decider (`frozen_existence`) ensures that once the first +The {cloud}/ec-autoscaling.html[autoscaling] frozen existence decider (`frozen_existence`) ensures that once the first index enters the frozen ILM phase, the frozen tier is scaled into existence. The frozen existence decider is enabled for all policies governing frozen data diff --git a/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc index ab11da04c8642..1977f95797ef0 100644 --- a/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-shards-decider]] === Frozen shards decider -The frozen shards decider (`frozen_shards`) calculates the memory required to search +The {cloud}/ec-autoscaling.html[autoscaling] frozen shards decider (`frozen_shards`) calculates the memory required to search the current set of partially mounted indices in the frozen tier. Based on a required memory amount per shard, it calculates the necessary memory in the frozen tier. diff --git a/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc index 5a10f31f1365b..3a8e7cdb518b3 100644 --- a/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-storage-decider]] === Frozen storage decider -The frozen storage decider (`frozen_storage`) calculates the local storage +The {cloud}/ec-autoscaling.html[autoscaling] frozen storage decider (`frozen_storage`) calculates the local storage required to search the current set of partially mounted indices based on a percentage of the total data set size of such indices. It signals that additional storage capacity is necessary when existing capacity is less than the diff --git a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc index 26ced6ad7bb26..5432d96a47edb 100644 --- a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-machine-learning-decider]] === Machine learning decider -The {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml} +The {cloud}/ec-autoscaling.html[autoscaling] {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml} jobs and trained models. The {ml} decider is enabled for policies governing `ml` nodes. 
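As another hedged illustration (the policy name and the delay value are assumptions), a policy governing `ml` nodes could tune the {ml} decider like this:

[source,console]
----
PUT /_autoscaling/policy/ml_autoscaling_policy
{
  "roles": [ "ml" ],
  "deciders": {
    "ml": {
      "down_scale_delay": "30m"
    }
  }
}
----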
diff --git a/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc index 763f1de96f6b9..33c989f3b12eb 100644 --- a/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-proactive-storage-decider]] === Proactive storage decider -The proactive storage decider (`proactive_storage`) calculates the storage required to contain +The {cloud}/ec-autoscaling.html[autoscaling] proactive storage decider (`proactive_storage`) calculates the storage required to contain the current data set plus an estimated amount of expected additional data. The proactive storage decider is enabled for all policies governing nodes with the `data_hot` role. diff --git a/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc index 50897178a88de..7c38df75169fd 100644 --- a/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-reactive-storage-decider]] === Reactive storage decider -The reactive storage decider (`reactive_storage`) calculates the storage required to contain +The {cloud}/ec-autoscaling.html[autoscaling] reactive storage decider (`reactive_storage`) calculates the storage required to contain the current data set. It signals that additional storage capacity is necessary when existing capacity has been exceeded (reactively). diff --git a/docs/reference/autoscaling/index.asciidoc b/docs/reference/autoscaling/index.asciidoc index fbf1a9536973e..e70c464889419 100644 --- a/docs/reference/autoscaling/index.asciidoc +++ b/docs/reference/autoscaling/index.asciidoc @@ -4,7 +4,7 @@ NOTE: {cloud-only} -The autoscaling feature enables an operator to configure tiers of nodes that +The {cloud}/ec-autoscaling.html[autoscaling] feature enables an operator to configure tiers of nodes that self-monitor whether or not they need to scale based on an operator-defined policy. Then, via the autoscaling API, an Elasticsearch cluster can report whether or not it needs additional resources to meet the policy. For example, an diff --git a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc index 9b15bcca3fc85..a6894a933b460 100644 --- a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc @@ -17,7 +17,7 @@ PUT _application/analytics/my_analytics_collection //// -Removes an Analytics Collection and its associated data stream. +Removes a <> Collection and its associated data stream. 
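For example, deleting the collection created in the setup snippet above looks like this (the collection name is taken from that snippet):

[source,console]
----
DELETE _application/analytics/my_analytics_collection
----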
[[delete-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/index.asciidoc b/docs/reference/behavioral-analytics/apis/index.asciidoc index 042b50259b1bb..692d3374f89f5 100644 --- a/docs/reference/behavioral-analytics/apis/index.asciidoc +++ b/docs/reference/behavioral-analytics/apis/index.asciidoc @@ -9,7 +9,7 @@ beta::[] --- -Use the following APIs to manage tasks and resources related to Behavioral Analytics: +Use the following APIs to manage tasks and resources related to <>: * <> * <> diff --git a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc index 8d2491ff8a6ee..14511a1258278 100644 --- a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc @@ -24,7 +24,7 @@ DELETE _application/analytics/my_analytics_collection2 // TEARDOWN //// -Returns information about Analytics Collections. +Returns information about <> Collections. [[list-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc index 84d9cb5351799..f82717e22ed34 100644 --- a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc +++ b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc @@ -22,7 +22,7 @@ DELETE _application/analytics/my_analytics_collection // TEARDOWN //// -Post an event to an Analytics Collection. +Post an event to a <> Collection. [[post-analytics-collection-event-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc index 48273fb3906c4..cbbab2ae3e26c 100644 --- a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc @@ -16,7 +16,7 @@ DELETE _application/analytics/my_analytics_collection // TEARDOWN //// -Creates an Analytics Collection. +Creates a <> Collection. [[put-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index 1c72fb8742b93..b510163bab50b 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Delete auto-follow pattern ++++ -Delete auto-follow patterns. +Delete {ccr} <>. [[ccr-delete-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index 46ef288b05088..a2969e993ddfb 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Get auto-follow pattern ++++ -Get auto-follow patterns. +Get {ccr} <>. 
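A typical request simply names the pattern to retrieve (the pattern name below is a placeholder):

[source,console]
----
GET /_ccr/auto_follow/my_auto_follow_pattern
----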
[[ccr-get-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc index 1e64ab813e2ad..c5ae5a7b4af9d 100644 --- a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Pause auto-follow pattern ++++ -Pauses an auto-follow pattern. +Pauses a {ccr} <>. [[ccr-pause-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index d08997068f705..6769f21ca5cef 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Create auto-follow pattern ++++ -Creates an auto-follow pattern. +Creates a {ccr} <>. [[ccr-put-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc index 04da9b4a35ba0..a580bb3838f9b 100644 --- a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Resume auto-follow pattern ++++ -Resumes an auto-follow pattern. +Resumes a {ccr} <>. [[ccr-resume-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc index 0c9f033639eda..ae94e1931af85 100644 --- a/docs/reference/ccr/apis/ccr-apis.asciidoc +++ b/docs/reference/ccr/apis/ccr-apis.asciidoc @@ -2,7 +2,7 @@ [[ccr-apis]] == {ccr-cap} APIs -You can use the following APIs to perform {ccr} operations. +You can use the following APIs to perform <> operations. [discrete] [[ccr-api-top-level]] diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index 68fd6e210f884..6c049d9c92b59 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -5,7 +5,7 @@ Get follower info ++++ -Retrieves information about all follower indices. +Retrieves information about all <> follower indices. [[ccr-get-follow-info-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index 72224cc7f51f4..4892f86b3523d 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -5,7 +5,7 @@ Get follower stats ++++ -Get follower stats. +Get <> follower stats. [[ccr-get-follow-stats-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc index ea7e8640056bf..1917c08d6640d 100644 --- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -5,7 +5,7 @@ Forget follower ++++ -Removes the follower retention leases from the leader. +Removes the <> follower retention leases from the leader. 
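As a hedged sketch of the request shape (all names and the UUID below are placeholders), the API is invoked on the leader index and identifies the follower to forget:

[source,console]
----
POST /leader_index/_ccr/forget_follower
{
  "follower_cluster" : "follower_cluster_name",
  "follower_index" : "follower_index_name",
  "follower_index_uuid" : "vYpnaWPRQB6mNspmoCeYyA",
  "leader_remote_cluster" : "leader_cluster_alias"
}
----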
[[ccr-post-forget-follower-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index a4ab69aba8d84..6d4730d10efe6 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -5,7 +5,7 @@ Pause follower ++++ -Pauses a follower index. +Pauses a <> follower index. [[ccr-post-pause-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index 47ba51a3fb8a0..b023a8cb5cb70 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -5,7 +5,7 @@ Resume follower ++++ -Resumes a follower index. +Resumes a <> follower index. [[ccr-post-resume-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index b96777b455d3b..dab11ef9e7a54 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -5,7 +5,7 @@ Unfollow ++++ -Converts a follower index to a regular index. +Converts a <> follower index to a regular index. [[ccr-post-unfollow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index eb83e2a13dcf1..b7ae9ac987474 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -5,7 +5,7 @@ Create follower ++++ -Creates a follower index. +Creates a <> follower index. [[ccr-put-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 128df5e47c777..92e6bae0bdce8 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -6,7 +6,7 @@ Get {ccr-init} stats ++++ -Get {ccr} stats. +Get <> stats. [[ccr-get-stats-request]] ==== {api-request-title} diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 0b0fde6546c29..7547dd74c5ecd 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -4,7 +4,7 @@ Cluster allocation explain ++++ -Provides an explanation for a shard's current allocation. +Provides an explanation for a shard's current <>. [source,console] ---- @@ -81,6 +81,7 @@ you might expect otherwise. ===== Unassigned primary shard +====== Conflicting settings The following request gets an allocation explanation for an unassigned primary shard. @@ -158,6 +159,56 @@ node. <5> The decider which led to the `no` decision for the node. <6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision. In this example, a newly created index has <> that requires that it only be allocated to a node named `nonexistent_node`, which does not exist, so the index is unable to allocate. +====== Maximum number of retries exceeded + +The following response contains an allocation explanation for an unassigned +primary shard that has reached the maximum number of allocation retry attempts. 
+ +[source,js] +---- +{ + "index" : "my-index-000001", + "shard" : 0, + "primary" : true, + "current_state" : "unassigned", + "unassigned_info" : { + "at" : "2017-01-04T18:03:28.464Z", + "details" : "failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException", + "reason": "ALLOCATION_FAILED", + "failed_allocation_attempts": 5, + "last_allocation_status": "no" + }, + "can_allocate": "no", + "allocate_explanation": "cannot allocate because allocation is not permitted to any of the nodes", + "node_allocation_decisions" : [ + { + "node_id" : "3sULLVJrRneSg0EfBB-2Ew", + "node_name" : "node_t0", + "transport_address" : "127.0.0.1:9400", + "roles" : ["data_content", "data_hot"], + "node_decision" : "no", + "store" : { + "matching_size" : "4.2kb", + "matching_size_in_bytes" : 4325 + }, + "deciders" : [ + { + "decider": "max_retry", + "decision" : "NO", + "explanation": "shard has exceeded the maximum number of retries [5] on failed allocation attempts - manually call [/_cluster/reroute?retry_failed=true] to retry, [unassigned_info[[reason=ALLOCATION_FAILED], at[2024-07-30T21:04:12.166Z], failed_attempts[5], failed_nodes[[mEKjwwzLT1yJVb8UxT6anw]], delayed=false, details[failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException], allocation_status[deciders_no]]]" + } + ] + } + ] +} +---- +// NOTCONSOLE + +If the decider message indicates a transient allocation issue, use +<> to retry allocation. + +====== No valid shard copy + The following response contains an allocation explanation for an unassigned primary shard that was previously allocated. @@ -184,6 +235,8 @@ TIP: If a shard is unassigned with an allocation status of `no_valid_shard_copy` ===== Unassigned replica shard +====== Allocation delayed + The following response contains an allocation explanation for a replica that's unassigned due to <>. @@ -241,8 +294,52 @@ unassigned due to <>. <2> The remaining delay before allocating the replica shard. <3> Information about the shard data found on a node. +====== Allocation throttled + +The following response contains an allocation explanation for a replica that's +queued to allocate but currently waiting on other queued shards. + +[source,js] +---- +{ + "index" : "my-index-000001", + "shard" : 0, + "primary" : false, + "current_state" : "unassigned", + "unassigned_info" : { + "reason" : "NODE_LEFT", + "at" : "2017-01-04T18:53:59.498Z", + "details" : "node_left[G92ZwuuaRY-9n8_tc-IzEg]", + "last_allocation_status" : "no_attempt" + }, + "can_allocate": "throttled", + "allocate_explanation": "Elasticsearch is currently busy with other activities. It expects to be able to allocate this shard when those activities finish. Please wait.", + "node_allocation_decisions" : [ + { + "node_id" : "3sULLVJrRneSg0EfBB-2Ew", + "node_name" : "node_t0", + "transport_address" : "127.0.0.1:9400", + "roles" : ["data_content", "data_hot"], + "node_decision" : "no", + "deciders" : [ + { + "decider": "throttling", + "decision": "THROTTLE", + "explanation": "reached the limit of incoming shard recoveries [2], cluster setting [cluster.routing.allocation.node_concurrent_incoming_recoveries=2] (can also be set via [cluster.routing.allocation.node_concurrent_recoveries])" + } + ] + } + ] +} +---- +// NOTCONSOLE + +This is a transient message that might appear when a large number of shards are allocating. + ===== Assigned shard +====== Cannot remain on current node + The following response contains an allocation explanation for an assigned shard.
The response indicates the shard is not allowed to remain on its current node and must be reallocated. @@ -295,6 +392,8 @@ and must be reallocated. <2> The deciders that factored into the decision of why the shard is not allowed to remain on its current node. <3> Whether the shard is allowed to be allocated to another node. +====== Must remain on current node + The following response contains an allocation explanation for a shard that must remain on its current node. Moving the shard to another node would not improve cluster balance. @@ -338,7 +437,7 @@ cluster balance. ===== No arguments If you call the API with no arguments, {es} retrieves an allocation explanation -for an arbitrary unassigned primary or replica shard. +for an arbitrary unassigned primary or replica shard, returning any unassigned primary shards first. [source,console] ---- diff --git a/docs/reference/cluster/delete-desired-balance.asciidoc b/docs/reference/cluster/delete-desired-balance.asciidoc index f81dcab011da4..c67834269e505 100644 --- a/docs/reference/cluster/delete-desired-balance.asciidoc +++ b/docs/reference/cluster/delete-desired-balance.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -Discards the current desired balance and computes a new desired balance starting from the current allocation of shards. +Discards the current <> and computes a new desired balance starting from the current allocation of shards. This can sometimes help {es} find a desired balance which needs fewer shard movements to achieve, especially if the cluster has experienced changes so substantial that the current desired balance is no longer optimal without {es} having detected that the current desired balance will take more shard movements to achieve than needed. However, this API diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc index 3fd87dcfedc4f..74afdaa52daf1 100644 --- a/docs/reference/cluster/get-desired-balance.asciidoc +++ b/docs/reference/cluster/get-desired-balance.asciidoc @@ -8,7 +8,7 @@ NOTE: {cloud-only} Exposes: -* the desired balance computation and reconciliation stats +* the <> computation and reconciliation stats * balancing stats such as distribution of shards, disk and ingest forecasts across nodes and data tiers (based on the current cluster state) * routing table with each shard current and desired location diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 076b315558b60..1290f289e5bbd 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -5,7 +5,7 @@ [[data-streams-change-mappings-and-settings]] === Change mappings and settings for a data stream -Each data stream has a <> has a <>. Mappings and index settings from this template are applied to new backing indices created for the stream. This includes the stream's first backing index, which is auto-generated when the stream is created. 
diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index 771a08d97d949..44ae77d072034 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -14,7 +14,7 @@ DELETE _ingest/pipeline/my-timestamp-pipeline // TEARDOWN //// -The recommended way to downsample a time series data stream (TSDS) is +The recommended way to <> a <> is <>. However, if you're not using ILM, you can downsample a TSDS manually. This guide shows you how, using typical Kubernetes cluster monitoring data. @@ -32,7 +32,7 @@ To test out manual downsampling, follow these steps: ==== Prerequisites * Refer to the <>. -* It is not possible to downsample a data stream directly, nor +* It is not possible to downsample a <> directly, nor multiple indices at once. It's only possible to downsample one time series index (TSDS backing index). * In order to downsample an index, it needs to be read-only. For a TSDS write diff --git a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc index f20c949c2fbc8..315f7fa85e45f 100644 --- a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc @@ -4,7 +4,7 @@ Delete Data Stream Lifecycle ++++ -Deletes the lifecycle from a set of data streams. +Deletes the <> from a set of data streams. [[delete-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc index 7968bb78939e8..2b15886ebe192 100644 --- a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc @@ -4,7 +4,7 @@ Explain Data Stream Lifecycle ++++ -Retrieves the current data stream lifecycle status for one or more data stream backing indices. +Retrieves the current <> status for one or more data stream backing indices. [[explain-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc index a99fa19d9db8d..f48fa1eb52daa 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc @@ -4,7 +4,7 @@ Get Data Stream Lifecycle ++++ -Gets stats about the execution of data stream lifecycle. +Gets stats about the execution of <>. [[get-lifecycle-stats-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index 331285af395b6..6bac1c7f7cc75 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -4,7 +4,7 @@ Get Data Stream Lifecycle ++++ -Gets the lifecycle of a set of data streams. +Gets the <> of a set of <>. 
[[get-lifecycle-api-prereqs]] ==== {api-prereq-title} @@ -128,14 +128,18 @@ The response will look like the following: "name": "my-data-stream-1", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" } }, { "name": "my-data-stream-2", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" } } ] diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index 7d33a5b5f880c..c60c105e818ab 100644 --- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -4,7 +4,7 @@ Put Data Stream Lifecycle ++++ -Configures the data stream lifecycle for the targeted data streams. +Configures the data stream <> for the targeted <>. [[put-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index 16ccf2ef82391..e4d5acfb704d3 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -14,10 +14,11 @@ To achieve that, it supports: * Automatic <>, which chunks your incoming data in smaller pieces to facilitate better performance and backwards incompatible mapping changes. * Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. -{es} is allowed at a later time to delete data older than this time period. +{es} is allowed at a later time to delete data older than this time period. Retention can be configured on the data stream level +or on a global level. Read more about the different options in this <>. A data stream lifecycle also supports downsampling the data stream backing indices. -See <> for +See <> for more details. [discrete] @@ -33,16 +34,17 @@ each data stream and performs the following steps: 3. After an index is not the write index anymore (i.e. the data stream has been rolled over), automatically tail merges the index. Data stream lifecycle executes a merge operation that only targets the long tail of small segments instead of the whole shard. As the segments are organised -into tiers of exponential sizes, merging the long tail of small segments is only a +into tiers of exponential sizes, merging the long tail of small segments is only a fraction of the cost of force merging to a single segment. The small segments would usually hold the most recent data so tail merging will focus the merging resources on the higher-value data that is most likely to keep being queried. -4. If <> is configured it will execute +4. If <> is configured it will execute all the configured downsampling rounds. 5. Applies retention to the remaining backing indices. This means deleting the backing indices whose -`generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing -indices and it is either the time since the backing index got rolled over, or the time optionally configured in the -<> setting. +`generation_time` is longer than the effective retention period (read more about the +<>). 
The `generation_time` is only applicable to rolled +over backing indices and it is either the time since the backing index got rolled over, or the time optionally configured +in the <> setting. IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing index have passed the retention period. As a result, the retention period is not the exact time data gets deleted, but @@ -75,4 +77,6 @@ include::tutorial-manage-new-data-stream.asciidoc[] include::tutorial-manage-existing-data-stream.asciidoc[] +include::tutorial-manage-data-stream-retention.asciidoc[] + include::tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc[] diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc new file mode 100644 index 0000000000000..83a587c250e73 --- /dev/null +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc @@ -0,0 +1,215 @@ +[role="xpack"] +[[tutorial-manage-data-stream-retention]] +=== Tutorial: Data stream retention + +In this tutorial, we are going to go over the data stream lifecycle retention; we will define it, go over how it can be configured +and how it gets applied. Keep in mind that the following options apply only to data streams that are managed by the data stream lifecycle. + +. <> +. <> +. <> +. <> + +You can verify whether a data stream is managed by the data stream lifecycle via the <>: + +//// +[source,console] +---- +PUT /_index_template/template +{ + "index_patterns": ["my-data-stream*"], + "template": { + "lifecycle": {} + }, + "data_stream": { } +} + +PUT /_data_stream/my-data-stream +---- +// TESTSETUP +//// + +//// +[source,console] +---- +DELETE /_data_stream/my-data-stream* +DELETE /_index_template/template +PUT /_cluster/settings +{ + "persistent" : { + "data_streams.lifecycle.retention.*" : null + } +} +---- +// TEARDOWN +//// + +[source,console] +-------------------------------------------------- +GET _data_stream/my-data-stream/_lifecycle +-------------------------------------------------- + +The result should look like this: + +[source,console-result] +-------------------------------------------------- +{ + "data_streams": [ + { + "name": "my-data-stream", <1> + "lifecycle": { + "enabled": true <2> + } + } + ] +} +-------------------------------------------------- +// TESTRESPONSE[skip:the result is for illustrating purposes only] +<1> The name of your data stream. +<2> Ensure that the lifecycle is enabled, meaning this should be `true`. + +[discrete] +[[what-is-retention]] +==== What is data stream retention? + +We define retention as the least amount of time the data of a data stream are going to be kept in {es}. After this time period +has passed, {es} is allowed to remove these data to free up space and/or manage costs. + +NOTE: Retention does not define the period after which the data will be removed, but the minimum time period they will be kept. + +We define 4 different types of retention: + +* The data stream retention, or `data_retention`, which is the retention configured on the data stream level. It can be +set via an <> for future data streams or via the <> for an existing data stream. When the data stream retention is not set, it implies that the data +need to be kept forever.
+* The global default retention, let's call it `default_retention`, which is a retention configured via the cluster setting +<> and will be +applied to all data streams managed by data stream lifecycle that do not have `data_retention` configured. Effectively, +it ensures that there will be no data streams keeping their data forever. This can be set via the +<>. +* The global max retention, let's call it `max_retention`, which is a retention configured via the cluster setting +<> and will be applied to +all data streams managed by data stream lifecycle. Effectively, it ensures that there will be no data streams whose retention +will exceed this time period. This can be set via the <>. +* The effective retention, or `effective_retention`, which is the retention applied to a data stream at a given moment. +Effective retention cannot be set directly; it is derived by taking into account all the configured retentions listed above and is +calculated as described <>. + +[discrete] +[[retention-configuration]] +==== How to configure retention? + +- By setting the `data_retention` on the data stream level. This retention can be configured in two ways: ++ +-- For new data streams, it can be defined in the index template that would be applied during the data stream's creation. +You can use the <>, for example: ++ +[source,console] +-------------------------------------------------- +PUT _index_template/template +{ + "index_patterns": ["my-data-stream*"], + "data_stream": { }, + "priority": 500, + "template": { + "lifecycle": { + "data_retention": "7d" + } + }, + "_meta": { + "description": "Template with data stream lifecycle" + } +} +-------------------------------------------------- +-- For an existing data stream, it can be set via the <>. ++ +[source,console] +---- +PUT _data_stream/my-data-stream/_lifecycle +{ + "data_retention": "30d" <1> +} +---- +// TEST[continued] +<1> The retention period of this data stream is set to 30 days. + +- By setting the global retention via the `data_streams.lifecycle.retention.default` and/or `data_streams.lifecycle.retention.max` +that are set on a cluster level. These can be set via the <>. For example: ++ +[source,console] +-------------------------------------------------- +PUT /_cluster/settings +{ + "persistent" : { + "data_streams.lifecycle.retention.default" : "7d", + "data_streams.lifecycle.retention.max" : "90d" + } +} +-------------------------------------------------- +// TEST[continued] + +[discrete] +[[effective-retention-calculation]] +==== How is the effective retention calculated? +The effective retention is calculated in the following way: + +- The `effective_retention` is the `default_retention`, when `default_retention` is defined and the data stream does not +have `data_retention`. +- The `effective_retention` is the `data_retention`, when `data_retention` is defined and, if `max_retention` is defined, +it is less than the `max_retention`. +- The `effective_retention` is the `max_retention`, when `max_retention` is defined, and the data stream has either no +`data_retention` or its `data_retention` is greater than the `max_retention`.
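If you want to double-check which cluster-level inputs (`data_streams.lifecycle.retention.default` and `data_streams.lifecycle.retention.max`) feed into this calculation, one way to do so, shown here only as an aside, is to list the persistent cluster settings with flattened keys:

[source,console]
----
GET _cluster/settings?flat_settings=true
----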
+ +The above is demonstrated in the examples below: + +|=== +|`default_retention` |`max_retention` |`data_retention` |`effective_retention` |Retention determined by + +|Not set |Not set |Not set |Infinite |N/A +|Not relevant |12 months |**30 days** |30 days |`data_retention` +|Not relevant |Not set |**30 days** |30 days |`data_retention` +|**30 days** |12 months |Not set |30 days |`default_retention` +|**30 days** |30 days |Not set |30 days |`default_retention` +|Not relevant |**30 days** |12 months |30 days |`max_retention` +|Not set |**30 days** |Not set |30 days |`max_retention` +|=== + +Considering our example, if we retrieve the lifecycle of `my-data-stream`: +[source,console] +---- +GET _data_stream/my-data-stream/_lifecycle +---- +// TEST[continued] + +We see that it matches what the user configured: +[source,console-result] +---- +{ + "data_streams": [ + { + "name": "my-data-stream", + "lifecycle": { + "enabled": true, + "data_retention": "30d", + "effective_retention": "30d", + "retention_determined_by": "data_stream_configuration" + } + } + ] +} +---- + +[discrete] +[[effective-retention-application]] +==== How is the effective retention applied? + +Retention is applied to the remaining backing indices of a data stream as the last step of +<>. Data stream lifecycle will retrieve the backing indices +whose `generation_time` is longer than the effective retention period and delete them. The `generation_time` is only +applicable to rolled over backing indices and it is either the time since the backing index got rolled over, or the time +optionally configured in the <> setting. + +IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing +index have passed the retention period. As a result, the retention period is not the exact time data get deleted, but +the minimum time data will be stored. diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc index c34340a096046..01d51cdde3167 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc @@ -91,10 +91,12 @@ The result will look like this: { "data_streams": [ { - "name": "my-data-stream",<1> + "name": "my-data-stream", <1> "lifecycle": { - "enabled": true, <2> - "data_retention": "7d" <3> + "enabled": true, <2> + "data_retention": "7d", <3> + "effective_retention": "7d", <4> + "retention_determined_by": "data_stream_configuration" } } ] @@ -102,8 +104,9 @@ The result will look like this: -------------------------------------------------- <1> The name of your data stream. <2> Shows if the data stream lifecycle is enabled for this data stream. -<3> The retention period of the data indexed in this data stream, this means that the data in this data stream will -be kept at least for 7 days. After that {es} can delete it at its own discretion. +<3> The retention period of the data indexed in this data stream, as configured by the user. +<4> The retention period that will be applied by the data stream lifecycle. This means that the data in this data stream will + be kept at least for 7 days. After that {es} can delete it at its own discretion.
If you want to see more information about how the data stream lifecycle is applied on individual backing indices use the <>: diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index 5b2e2a1ec70a2..a2c12466b7f2b 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -1,14 +1,14 @@ [role="xpack"] [[tutorial-migrate-data-stream-from-ilm-to-dsl]] -=== Tutorial: Migrate ILM managed data stream to data stream lifecycle +=== Tutorial: Migrate ILM managed data stream to data stream lifecycle -In this tutorial we'll look at migrating an existing data stream from Index Lifecycle Management ({ilm-init}) to -data stream lifecycle. The existing {ilm-init} managed backing indices will continue +In this tutorial we'll look at migrating an existing data stream from <> to +<>. The existing {ilm-init} managed backing indices will continue to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however, -the new backing indices will be managed by data stream lifecycle. -This way, a data stream is gradually migrated away from being managed by {ilm-init} to +the new backing indices will be managed by data stream lifecycle. +This way, a data stream is gradually migrated away from being managed by {ilm-init} to being managed by data stream lifecycle. As we'll see, {ilm-init} and data stream lifecycle -can co-manage a data stream; however, an index can only be managed by one system at +can co-manage a data stream; however, an index can only be managed by one system at a time. [discrete] @@ -17,7 +17,7 @@ a time. To migrate a data stream from {ilm-init} to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the data stream to set <> +1. Update the index template that's backing the data stream to set <> to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ data stream using the <>. @@ -174,8 +174,8 @@ in the index template). To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the data stream to set <> -to `false`, and to configure data stream lifecycle. +1. Update the index template that's backing the data stream to set <> +to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using the <>. @@ -209,9 +209,9 @@ PUT _index_template/dsl-data-stream-template // TEST[continued] <1> The `prefer_ilm` setting will now be configured on the **new** backing indices -(created by rolling over the data stream) such that {ilm-init} does _not_ take +(created by rolling over the data stream) such that {ilm-init} does _not_ take precedence over data stream lifecycle. -<2> We're configuring the data stream lifecycle so _new_ data streams will be +<2> We're configuring the data stream lifecycle so _new_ data streams will be managed by data stream lifecycle. We've now made sure that new data streams will be managed by data stream lifecycle. 
@@ -227,7 +227,7 @@ PUT _data_stream/dsl-data-stream/_lifecycle ---- // TEST[continued] -We can inspect the data stream to check that the next generation will indeed be +We can inspect the data stream to check that the next generation will indeed be managed by data stream lifecycle: [source,console] @@ -266,7 +266,9 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", <3> @@ -292,7 +294,7 @@ GET _data_stream/dsl-data-stream <4> The `prefer_ilm` setting value we configured in the index template is reflected and will be configured accordingly for new backing indices. -We'll now rollover the data stream to see the new generation index being managed by +We'll now rollover the data stream to see the new generation index being managed by data stream lifecycle: [source,console] @@ -344,7 +346,9 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", @@ -375,9 +379,9 @@ in the index template [discrete] [[migrate-from-dsl-to-ilm]] ==== Migrate data stream back to ILM -We can easily change this data stream to be managed by {ilm-init} because we didn't remove -the {ilm-init} policy when we <>. +We can easily change this data stream to be managed by {ilm-init} because we didn't remove +the {ilm-init} policy when we <>. We can achieve this in two ways: diff --git a/docs/reference/data-streams/modify-data-streams-api.asciidoc b/docs/reference/data-streams/modify-data-streams-api.asciidoc index f05e76e67c32f..2da869083df22 100644 --- a/docs/reference/data-streams/modify-data-streams-api.asciidoc +++ b/docs/reference/data-streams/modify-data-streams-api.asciidoc @@ -4,7 +4,7 @@ Modify data streams ++++ -Performs one or more data stream modification actions in a single atomic +Performs one or more <> modification actions in a single atomic operation. [source,console] diff --git a/docs/reference/data-streams/promote-data-stream-api.asciidoc b/docs/reference/data-streams/promote-data-stream-api.asciidoc index 281e9b549abcb..111c7a2256f8a 100644 --- a/docs/reference/data-streams/promote-data-stream-api.asciidoc +++ b/docs/reference/data-streams/promote-data-stream-api.asciidoc @@ -5,7 +5,7 @@ Promote data stream ++++ -The purpose of the promote data stream api is to turn +The purpose of the promote <> API is to turn a data stream that is replicated by CCR into a regular data stream. diff --git a/docs/reference/data-streams/tsds-reindex.asciidoc b/docs/reference/data-streams/tsds-reindex.asciidoc index ea4ba16df5c4a..9d6594db4e779 100644 --- a/docs/reference/data-streams/tsds-reindex.asciidoc +++ b/docs/reference/data-streams/tsds-reindex.asciidoc @@ -9,7 +9,7 @@ [[tsds-reindex-intro]] ==== Introduction -With reindexing, you can copy documents from an old time-series data stream (TSDS) to a new one. Data streams support +With reindexing, you can copy documents from an old <> to a new one. Data streams support reindexing in general, with a few <>. 
Still, time-series data streams introduce additional challenges due to tight control on the accepted timestamp range for each backing index they contain. Direct use of the reindex API would likely error out due to attempting to insert documents with timestamps that are diff --git a/docs/reference/eql/eql-apis.asciidoc b/docs/reference/eql/eql-apis.asciidoc index d3f591ccfe6c1..e8cc2b21492ae 100644 --- a/docs/reference/eql/eql-apis.asciidoc +++ b/docs/reference/eql/eql-apis.asciidoc @@ -1,7 +1,7 @@ [[eql-apis]] == EQL APIs -Event Query Language (EQL) is a query language for event-based time series data, +<> is a query language for event-based time series data, such as logs, metrics, and traces. For an overview of EQL and related tutorials, see <>. diff --git a/docs/reference/esql/esql-apis.asciidoc b/docs/reference/esql/esql-apis.asciidoc index 686a71506bc14..8586cd1ae6bce 100644 --- a/docs/reference/esql/esql-apis.asciidoc +++ b/docs/reference/esql/esql-apis.asciidoc @@ -1,7 +1,7 @@ [[esql-apis]] == {esql} APIs -The {es} Query Language ({esql}) provides a powerful way to filter, transform, +The <> provides a powerful way to filter, transform, and analyze data stored in {es}, and in the future in other runtimes. For an overview of {esql} and related tutorials, see <>. diff --git a/docs/reference/esql/esql-async-query-delete-api.asciidoc b/docs/reference/esql/esql-async-query-delete-api.asciidoc index 90f8c06b9124a..5cad566f7f9c0 100644 --- a/docs/reference/esql/esql-async-query-delete-api.asciidoc +++ b/docs/reference/esql/esql-async-query-delete-api.asciidoc @@ -4,7 +4,7 @@ {esql} async query delete API ++++ -The {esql} async query delete API is used to manually delete an async query +The <> async query delete API is used to manually delete an async query by ID. If the query is still running, the query will be cancelled. Otherwise, the stored results are deleted. diff --git a/docs/reference/esql/functions/description/mv_percentile.asciidoc b/docs/reference/esql/functions/description/mv_percentile.asciidoc new file mode 100644 index 0000000000000..3e731f6525cec --- /dev/null +++ b/docs/reference/esql/functions/description/mv_percentile.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. diff --git a/docs/reference/esql/functions/description/to_datetime.asciidoc b/docs/reference/esql/functions/description/to_datetime.asciidoc index b37bd6b22ac2f..91cbfa0b5fe1e 100644 --- a/docs/reference/esql/functions/description/to_datetime.asciidoc +++ b/docs/reference/esql/functions/description/to_datetime.asciidoc @@ -3,3 +3,5 @@ *Description* Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use <>. + +NOTE: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. 
diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc index e1bba0529d7db..4afea30660339 100644 --- a/docs/reference/esql/functions/examples/bucket.asciidoc +++ b/docs/reference/esql/functions/examples/bucket.asciidoc @@ -86,10 +86,6 @@ include::{esql-specs}/bucket.csv-spec[tag=docsBucketNumericWithSpan] |=== include::{esql-specs}/bucket.csv-spec[tag=docsBucketNumericWithSpan-result] |=== - -NOTE: When providing the bucket size as the second parameter, it must be -of a floating point type. - Create hourly buckets for the last 24 hours, and calculate the number of events per hour: [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/examples/mv_percentile.asciidoc b/docs/reference/esql/functions/examples/mv_percentile.asciidoc new file mode 100644 index 0000000000000..9b20a5bef5e0d --- /dev/null +++ b/docs/reference/esql/functions/examples/mv_percentile.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/mv_percentile.csv-spec[tag=example] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/mv_percentile.csv-spec[tag=example-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/add.json b/docs/reference/esql/functions/kibana/definition/add.json index e20299821facb..0932a76966560 100644 --- a/docs/reference/esql/functions/kibana/definition/add.json +++ b/docs/reference/esql/functions/kibana/definition/add.json @@ -8,7 +8,7 @@ "params" : [ { "name" : "lhs", - "type" : "date_period", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." }, @@ -20,61 +20,61 @@ } ], "variadic" : false, - "returnType" : "date_period" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "date_period", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "datetime", + "type" : "time_duration", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "date_period", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "time_duration", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date_period" }, { "params" : [ @@ -248,13 +248,13 @@ }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." 
} ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 7141ca4c27443..94214a3a4f047 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -8,7 +8,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -16,17 +16,17 @@ "name" : "buckets", "type" : "date_period", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -34,29 +34,269 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", - "type" : "datetime", + "type" : "date", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", - "type" : "datetime", + "type" : "date", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "date", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "date", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." 
+ } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "date", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "date", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." 
+ }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -64,11 +304,11 @@ "name" : "buckets", "type" : "time_duration", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -82,7 +322,25 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -100,19 +358,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -130,19 +388,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. 
Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -160,19 +418,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -190,19 +448,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -220,19 +478,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -250,19 +508,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -280,19 +538,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. 
Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -310,19 +568,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -340,19 +598,37 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -370,7 +646,25 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -388,19 +682,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." 
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -418,19 +712,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -448,19 +742,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -478,19 +772,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -508,19 +802,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -538,19 +832,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. 
Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -568,19 +862,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -598,19 +892,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -628,19 +922,37 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -658,7 +970,25 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." 
+ }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -676,19 +1006,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -706,19 +1036,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -736,19 +1066,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -766,19 +1096,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -796,19 +1126,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." 
}, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -826,19 +1156,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -856,19 +1186,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -886,19 +1216,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -916,19 +1246,37 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." 
+ } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index 5959eed62d37b..27705cd3897f9 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -50,13 +50,13 @@ }, { "name" : "trueValue", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." } ], "variadic" : true, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json index f00f471e63ecc..2459a4d51bb2d 100644 --- a/docs/reference/esql/functions/kibana/definition/coalesce.json +++ b/docs/reference/esql/functions/kibana/definition/coalesce.json @@ -74,19 +74,19 @@ "params" : [ { "name" : "first", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Expression to evaluate." }, { "name" : "rest", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "Other expression to evaluate." } ], "variadic" : true, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/count.json b/docs/reference/esql/functions/kibana/definition/count.json index e05ebc6789816..2a15fb3bdd335 100644 --- a/docs/reference/esql/functions/kibana/definition/count.json +++ b/docs/reference/esql/functions/kibana/definition/count.json @@ -32,7 +32,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." } diff --git a/docs/reference/esql/functions/kibana/definition/count_distinct.json b/docs/reference/esql/functions/kibana/definition/count_distinct.json index 801bd26f7d022..f6a148783ba42 100644 --- a/docs/reference/esql/functions/kibana/definition/count_distinct.json +++ b/docs/reference/esql/functions/kibana/definition/count_distinct.json @@ -74,7 +74,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Column or literal for which to count the number of distinct values." } @@ -86,7 +86,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Column or literal for which to count the number of distinct values." }, @@ -104,7 +104,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Column or literal for which to count the number of distinct values." }, @@ -122,7 +122,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Column or literal for which to count the number of distinct values." 
}, diff --git a/docs/reference/esql/functions/kibana/definition/date_diff.json b/docs/reference/esql/functions/kibana/definition/date_diff.json index 7995d3c6d32b6..d6589f041075d 100644 --- a/docs/reference/esql/functions/kibana/definition/date_diff.json +++ b/docs/reference/esql/functions/kibana/definition/date_diff.json @@ -14,13 +14,13 @@ }, { "name" : "startTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing a start timestamp" }, { "name" : "endTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing an end timestamp" } @@ -38,13 +38,13 @@ }, { "name" : "startTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing a start timestamp" }, { "name" : "endTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing an end timestamp" } diff --git a/docs/reference/esql/functions/kibana/definition/date_extract.json b/docs/reference/esql/functions/kibana/definition/date_extract.json index 75cedcc191b50..557f0e0a47e54 100644 --- a/docs/reference/esql/functions/kibana/definition/date_extract.json +++ b/docs/reference/esql/functions/kibana/definition/date_extract.json @@ -14,7 +14,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } @@ -32,7 +32,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } diff --git a/docs/reference/esql/functions/kibana/definition/date_format.json b/docs/reference/esql/functions/kibana/definition/date_format.json index 5e8587c046d70..7bd01d7f4ef31 100644 --- a/docs/reference/esql/functions/kibana/definition/date_format.json +++ b/docs/reference/esql/functions/kibana/definition/date_format.json @@ -14,7 +14,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } @@ -32,7 +32,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." 
} diff --git a/docs/reference/esql/functions/kibana/definition/date_parse.json b/docs/reference/esql/functions/kibana/definition/date_parse.json index 890179143bef8..9400340750c2a 100644 --- a/docs/reference/esql/functions/kibana/definition/date_parse.json +++ b/docs/reference/esql/functions/kibana/definition/date_parse.json @@ -20,7 +20,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -38,7 +38,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -56,7 +56,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -74,7 +74,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/date_trunc.json b/docs/reference/esql/functions/kibana/definition/date_trunc.json index 3d8658c496529..bd3f362d1670b 100644 --- a/docs/reference/esql/functions/kibana/definition/date_trunc.json +++ b/docs/reference/esql/functions/kibana/definition/date_trunc.json @@ -14,13 +14,13 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -32,13 +32,13 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/equals.json b/docs/reference/esql/functions/kibana/definition/equals.json index 8d0525ac3e91e..eca80ccdbf657 100644 --- a/docs/reference/esql/functions/kibana/definition/equals.json +++ b/docs/reference/esql/functions/kibana/definition/equals.json @@ -63,13 +63,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/greater_than.json b/docs/reference/esql/functions/kibana/definition/greater_than.json index 9083e114bfe9d..7831b0f41cd9d 100644 --- a/docs/reference/esql/functions/kibana/definition/greater_than.json +++ b/docs/reference/esql/functions/kibana/definition/greater_than.json @@ -9,13 +9,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json index 75888ab25399f..b6a40a838c393 100644 --- a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json +++ b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json @@ -9,13 +9,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." 
} diff --git a/docs/reference/esql/functions/kibana/definition/less_than.json b/docs/reference/esql/functions/kibana/definition/less_than.json index 30c6c9eab0442..bf6b9c5c08774 100644 --- a/docs/reference/esql/functions/kibana/definition/less_than.json +++ b/docs/reference/esql/functions/kibana/definition/less_than.json @@ -9,13 +9,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json index 64f9c463748d1..4e57161887141 100644 --- a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json +++ b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json @@ -9,13 +9,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index 853cb9f9a97c3..b13d367d37345 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -64,6 +64,18 @@ "variadic" : false, "returnType" : "ip" }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { @@ -75,6 +87,30 @@ ], "variadic" : false, "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "version" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index 1c0c02eb9860f..338ed10d67b2e 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -64,6 +64,18 @@ "variadic" : false, "returnType" : "ip" }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { @@ -75,6 +87,30 @@ ], "variadic" : false, "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + 
"returnType" : "version" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_append.json b/docs/reference/esql/functions/kibana/definition/mv_append.json index 8ee4e7297cc3a..3365226141f8f 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_append.json +++ b/docs/reference/esql/functions/kibana/definition/mv_append.json @@ -62,19 +62,19 @@ "params" : [ { "name" : "field1", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" }, { "name" : "field2", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_count.json b/docs/reference/esql/functions/kibana/definition/mv_count.json index d414e5b957495..f125327314f4e 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_count.json +++ b/docs/reference/esql/functions/kibana/definition/mv_count.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } diff --git a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json index 7ab287bc94d34..7d66e3dcc0b9b 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json +++ b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json @@ -45,13 +45,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_first.json b/docs/reference/esql/functions/kibana/definition/mv_first.json index e3141e800e4ad..de6e642068517 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_first.json +++ b/docs/reference/esql/functions/kibana/definition/mv_first.json @@ -44,13 +44,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_last.json b/docs/reference/esql/functions/kibana/definition/mv_last.json index e55d66dbf8b93..ea1293e7acfec 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_last.json +++ b/docs/reference/esql/functions/kibana/definition/mv_last.json @@ -44,13 +44,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_max.json b/docs/reference/esql/functions/kibana/definition/mv_max.json index 0783f6d6d5cbc..eb25369f78f77 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_max.json +++ b/docs/reference/esql/functions/kibana/definition/mv_max.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." 
} ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_min.json b/docs/reference/esql/functions/kibana/definition/mv_min.json index cc23df386356e..87ad94338492e 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_min.json +++ b/docs/reference/esql/functions/kibana/definition/mv_min.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_percentile.json b/docs/reference/esql/functions/kibana/definition/mv_percentile.json new file mode 100644 index 0000000000000..dad611122f0db --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/mv_percentile.json @@ -0,0 +1,173 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "mv_percentile", + "description" : "Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." 
+ }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "ROW values = [5, 5, 10, 12, 5000]\n| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values)" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/mv_slice.json b/docs/reference/esql/functions/kibana/definition/mv_slice.json index 30d0e1179dc89..ff52467b7d84a 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_slice.json +++ b/docs/reference/esql/functions/kibana/definition/mv_slice.json @@ -80,7 +80,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression. If `null`, the function returns `null`." }, @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_sort.json b/docs/reference/esql/functions/kibana/definition/mv_sort.json index 28b4c9e8d6fea..d2bbd2c0fdbf4 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_sort.json +++ b/docs/reference/esql/functions/kibana/definition/mv_sort.json @@ -26,7 +26,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression. If `null`, the function returns `null`." }, @@ -38,7 +38,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/not_equals.json b/docs/reference/esql/functions/kibana/definition/not_equals.json index 41863f7496a25..4b4d22a5abef4 100644 --- a/docs/reference/esql/functions/kibana/definition/not_equals.json +++ b/docs/reference/esql/functions/kibana/definition/not_equals.json @@ -63,13 +63,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." 
} diff --git a/docs/reference/esql/functions/kibana/definition/now.json b/docs/reference/esql/functions/kibana/definition/now.json index 9cdb4945afa2e..1a2fc3a1dc42a 100644 --- a/docs/reference/esql/functions/kibana/definition/now.json +++ b/docs/reference/esql/functions/kibana/definition/now.json @@ -6,7 +6,7 @@ "signatures" : [ { "params" : [ ], - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/sub.json b/docs/reference/esql/functions/kibana/definition/sub.json index 413b0e73f89d0..37e3852865e7f 100644 --- a/docs/reference/esql/functions/kibana/definition/sub.json +++ b/docs/reference/esql/functions/kibana/definition/sub.json @@ -8,7 +8,7 @@ "params" : [ { "name" : "lhs", - "type" : "date_period", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." }, @@ -20,43 +20,43 @@ } ], "variadic" : false, - "returnType" : "date_period" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "date_period", + "type" : "time_duration", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "time_duration", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date_period" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_datetime.json b/docs/reference/esql/functions/kibana/definition/to_datetime.json index 10fcf8b22e8b0..032e8e1cbda34 100644 --- a/docs/reference/esql/functions/kibana/definition/to_datetime.json +++ b/docs/reference/esql/functions/kibana/definition/to_datetime.json @@ -3,18 +3,19 @@ "type" : "eval", "name" : "to_datetime", "description" : "Converts an input value to a date value.\nA string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`.\nTo convert dates in other formats, use <>.", + "note" : "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded.", "signatures" : [ { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." 
} ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -26,7 +27,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -38,7 +39,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -50,7 +51,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -62,7 +63,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -74,7 +75,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -86,7 +87,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_double.json b/docs/reference/esql/functions/kibana/definition/to_double.json index f4e414068db61..ae7e4832bfb3c 100644 --- a/docs/reference/esql/functions/kibana/definition/to_double.json +++ b/docs/reference/esql/functions/kibana/definition/to_double.json @@ -56,7 +56,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_integer.json b/docs/reference/esql/functions/kibana/definition/to_integer.json index 2776d8b29c412..5150d12936711 100644 --- a/docs/reference/esql/functions/kibana/definition/to_integer.json +++ b/docs/reference/esql/functions/kibana/definition/to_integer.json @@ -32,7 +32,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_long.json b/docs/reference/esql/functions/kibana/definition/to_long.json index e3218eba9642a..5fd4bce34e7e0 100644 --- a/docs/reference/esql/functions/kibana/definition/to_long.json +++ b/docs/reference/esql/functions/kibana/definition/to_long.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_string.json b/docs/reference/esql/functions/kibana/definition/to_string.json index ef03cc06ea636..ea94171834908 100644 --- a/docs/reference/esql/functions/kibana/definition/to_string.json +++ b/docs/reference/esql/functions/kibana/definition/to_string.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json b/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json index d9cba641573fb..5521241224d61 100644 --- a/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json +++ b/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json @@ -20,7 +20,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." 
} diff --git a/docs/reference/esql/functions/kibana/definition/top.json b/docs/reference/esql/functions/kibana/definition/top.json index 4db3aed40a88d..c688bf5ea77c8 100644 --- a/docs/reference/esql/functions/kibana/definition/top.json +++ b/docs/reference/esql/functions/kibana/definition/top.json @@ -32,7 +32,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "The field to collect the top values for." }, @@ -50,7 +50,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/values.json b/docs/reference/esql/functions/kibana/definition/values.json index 3e0036c4d25b6..d9f37cd1ac83d 100644 --- a/docs/reference/esql/functions/kibana/definition/values.json +++ b/docs/reference/esql/functions/kibana/definition/values.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/docs/mv_percentile.md b/docs/reference/esql/functions/kibana/docs/mv_percentile.md new file mode 100644 index 0000000000000..560a0aefa1dc3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/mv_percentile.md @@ -0,0 +1,11 @@ + + +### MV_PERCENTILE +Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. + +``` +ROW values = [5, 5, 10, 12, 5000] +| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values) +``` diff --git a/docs/reference/esql/functions/kibana/docs/to_datetime.md b/docs/reference/esql/functions/kibana/docs/to_datetime.md index 5e8f9c72adc2c..c194dfd17871a 100644 --- a/docs/reference/esql/functions/kibana/docs/to_datetime.md +++ b/docs/reference/esql/functions/kibana/docs/to_datetime.md @@ -11,3 +11,4 @@ To convert dates in other formats, use <>. ROW string = ["1953-09-02T00:00:00.000Z", "1964-06-02T00:00:00.000Z", "1964-06-02 00:00:00"] | EVAL datetime = TO_DATETIME(string) ``` +Note: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. diff --git a/docs/reference/esql/functions/layout/mv_percentile.asciidoc b/docs/reference/esql/functions/layout/mv_percentile.asciidoc new file mode 100644 index 0000000000000..a86c4a136b5cd --- /dev/null +++ b/docs/reference/esql/functions/layout/mv_percentile.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-mv_percentile]] +=== `MV_PERCENTILE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_percentile.svg[Embedded,opts=inline] + +include::../parameters/mv_percentile.asciidoc[] +include::../description/mv_percentile.asciidoc[] +include::../types/mv_percentile.asciidoc[] +include::../examples/mv_percentile.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/bucket.asciidoc b/docs/reference/esql/functions/parameters/bucket.asciidoc index 39aac14aaa36d..09c720d6095f3 100644 --- a/docs/reference/esql/functions/parameters/bucket.asciidoc +++ b/docs/reference/esql/functions/parameters/bucket.asciidoc @@ -6,10 +6,10 @@ Numeric or date expression from which to derive buckets. `buckets`:: -Target number of buckets. 
+Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted. `from`:: -Start of the range. Can be a number or a date expressed as a string. +Start of the range. Can be a number, a date or a date expressed as a string. `to`:: -End of the range. Can be a number or a date expressed as a string. +End of the range. Can be a number, a date or a date expressed as a string. diff --git a/docs/reference/esql/functions/parameters/mv_percentile.asciidoc b/docs/reference/esql/functions/parameters/mv_percentile.asciidoc new file mode 100644 index 0000000000000..57804185e191a --- /dev/null +++ b/docs/reference/esql/functions/parameters/mv_percentile.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: +Multivalue expression. + +`percentile`:: +The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead. diff --git a/docs/reference/esql/functions/signature/mv_percentile.svg b/docs/reference/esql/functions/signature/mv_percentile.svg new file mode 100644 index 0000000000000..b4d623636572f --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_percentile.svg @@ -0,0 +1 @@ +MV_PERCENTILE(number,percentile) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/add.asciidoc b/docs/reference/esql/functions/types/add.asciidoc index a0215a803d4e3..54d1aec463c1a 100644 --- a/docs/reference/esql/functions/types/add.asciidoc +++ b/docs/reference/esql/functions/types/add.asciidoc @@ -5,10 +5,10 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result +date | date_period | date +date | time_duration | date +date_period | date | date date_period | date_period | date_period -date_period | datetime | datetime -datetime | date_period | datetime -datetime | time_duration | datetime double | double | double double | integer | double double | long | double @@ -18,7 +18,7 @@ integer | long | long long | double | double long | integer | long long | long | long -time_duration | datetime | datetime +time_duration | date | date time_duration | time_duration | time_duration unsigned_long | unsigned_long | unsigned_long |=== diff --git a/docs/reference/esql/functions/types/bucket.asciidoc b/docs/reference/esql/functions/types/bucket.asciidoc index d1ce8e499eb07..172e84b6f7860 100644 --- a/docs/reference/esql/functions/types/bucket.asciidoc +++ b/docs/reference/esql/functions/types/bucket.asciidoc @@ -5,9 +5,17 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | buckets | from | to | result -datetime | date_period | | | datetime -datetime | integer | datetime | datetime | datetime -datetime | time_duration | | | datetime +date | date_period | | | date +date | integer | date | date | date +date | integer | date | keyword | date +date | integer | date | text | date +date | integer | keyword | date | date +date | integer | keyword | keyword | date +date | integer | keyword | text | date +date | integer | text | date | date +date | integer | text | keyword | date +date | integer | text | text | date +date | time_duration | | | date double | double | | | double double | integer | double | double | double double | integer | double | integer | double @@ -18,6 +26,8 @@ double | integer | integer | long | double double | integer | long | double | double double | integer | long | integer | double double | integer | long | long | double +double | integer | | | double +double | 
long | | | double integer | double | | | double integer | integer | double | double | double integer | integer | double | integer | double @@ -28,6 +38,8 @@ integer | integer | integer | long | double integer | integer | long | double | double integer | integer | long | integer | double integer | integer | long | long | double +integer | integer | | | double +integer | long | | | double long | double | | | double long | integer | double | double | double long | integer | double | integer | double @@ -38,4 +50,6 @@ long | integer | integer | long | double long | integer | long | double | double long | integer | long | integer | double long | integer | long | long | double +long | integer | | | double +long | long | | | double |=== diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index 85e4193b5bf2f..f6c8cfe9361d1 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -7,7 +7,7 @@ condition | trueValue | result boolean | boolean | boolean boolean | cartesian_point | cartesian_point -boolean | datetime | datetime +boolean | date | date boolean | double | double boolean | geo_point | geo_point boolean | integer | integer diff --git a/docs/reference/esql/functions/types/coalesce.asciidoc b/docs/reference/esql/functions/types/coalesce.asciidoc index 841d836f6837e..368a12db0dca4 100644 --- a/docs/reference/esql/functions/types/coalesce.asciidoc +++ b/docs/reference/esql/functions/types/coalesce.asciidoc @@ -9,7 +9,7 @@ boolean | boolean | boolean boolean | | boolean cartesian_point | cartesian_point | cartesian_point cartesian_shape | cartesian_shape | cartesian_shape -datetime | datetime | datetime +date | date | date geo_point | geo_point | geo_point geo_shape | geo_shape | geo_shape integer | integer | integer diff --git a/docs/reference/esql/functions/types/count.asciidoc b/docs/reference/esql/functions/types/count.asciidoc index 70e79d4899605..959c94c1ec358 100644 --- a/docs/reference/esql/functions/types/count.asciidoc +++ b/docs/reference/esql/functions/types/count.asciidoc @@ -7,7 +7,7 @@ field | result boolean | long cartesian_point | long -datetime | long +date | long double | long geo_point | long integer | long diff --git a/docs/reference/esql/functions/types/count_distinct.asciidoc b/docs/reference/esql/functions/types/count_distinct.asciidoc index 4b201d45732f1..c365c8814573c 100644 --- a/docs/reference/esql/functions/types/count_distinct.asciidoc +++ b/docs/reference/esql/functions/types/count_distinct.asciidoc @@ -9,10 +9,10 @@ boolean | integer | long boolean | long | long boolean | unsigned_long | long boolean | | long -datetime | integer | long -datetime | long | long -datetime | unsigned_long | long -datetime | | long +date | integer | long +date | long | long +date | unsigned_long | long +date | | long double | integer | long double | long | long double | unsigned_long | long diff --git a/docs/reference/esql/functions/types/date_diff.asciidoc b/docs/reference/esql/functions/types/date_diff.asciidoc index 98adcef51e75c..b0a4818f412ac 100644 --- a/docs/reference/esql/functions/types/date_diff.asciidoc +++ b/docs/reference/esql/functions/types/date_diff.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== unit | startTimestamp | endTimestamp | result -keyword | datetime | datetime | integer -text | datetime | datetime | integer +keyword | date | date | integer +text | date | date | integer |=== diff --git 
a/docs/reference/esql/functions/types/date_extract.asciidoc b/docs/reference/esql/functions/types/date_extract.asciidoc index 43702ef0671a7..ec9bf70c221cc 100644 --- a/docs/reference/esql/functions/types/date_extract.asciidoc +++ b/docs/reference/esql/functions/types/date_extract.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== datePart | date | result -keyword | datetime | long -text | datetime | long +keyword | date | long +text | date | long |=== diff --git a/docs/reference/esql/functions/types/date_format.asciidoc b/docs/reference/esql/functions/types/date_format.asciidoc index a76f38653b9b8..b2e97dfa8835a 100644 --- a/docs/reference/esql/functions/types/date_format.asciidoc +++ b/docs/reference/esql/functions/types/date_format.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== dateFormat | date | result -keyword | datetime | keyword -text | datetime | keyword +keyword | date | keyword +text | date | keyword |=== diff --git a/docs/reference/esql/functions/types/date_parse.asciidoc b/docs/reference/esql/functions/types/date_parse.asciidoc index 314d02eb06271..f3eab18309dd8 100644 --- a/docs/reference/esql/functions/types/date_parse.asciidoc +++ b/docs/reference/esql/functions/types/date_parse.asciidoc @@ -5,8 +5,8 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== datePattern | dateString | result -keyword | keyword | datetime -keyword | text | datetime -text | keyword | datetime -text | text | datetime +keyword | keyword | date +keyword | text | date +text | keyword | date +text | text | date |=== diff --git a/docs/reference/esql/functions/types/date_trunc.asciidoc b/docs/reference/esql/functions/types/date_trunc.asciidoc index 8df45cfef54a8..aa7dee99c6c44 100644 --- a/docs/reference/esql/functions/types/date_trunc.asciidoc +++ b/docs/reference/esql/functions/types/date_trunc.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== interval | date | result -date_period | datetime | datetime -time_duration | datetime | datetime +date_period | date | date +time_duration | date | date |=== diff --git a/docs/reference/esql/functions/types/equals.asciidoc b/docs/reference/esql/functions/types/equals.asciidoc index 497c9319fedb3..ad0e46ef4b8da 100644 --- a/docs/reference/esql/functions/types/equals.asciidoc +++ b/docs/reference/esql/functions/types/equals.asciidoc @@ -8,7 +8,7 @@ lhs | rhs | result boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greater_than.asciidoc b/docs/reference/esql/functions/types/greater_than.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/greater_than.asciidoc +++ b/docs/reference/esql/functions/types/greater_than.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc @@ -5,7 +5,7 @@ 
[%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/less_than.asciidoc b/docs/reference/esql/functions/types/less_than.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/less_than.asciidoc +++ b/docs/reference/esql/functions/types/less_than.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index 5b7293d4a4293..35ce5811e0cd0 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -6,9 +6,12 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip +keyword | keyword long | long +text | text +version | version |=== diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc index 5b7293d4a4293..35ce5811e0cd0 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -6,9 +6,12 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip +keyword | keyword long | long +text | text +version | version |=== diff --git a/docs/reference/esql/functions/types/mv_append.asciidoc b/docs/reference/esql/functions/types/mv_append.asciidoc index 49dcef6dc8860..a1894e429ae82 100644 --- a/docs/reference/esql/functions/types/mv_append.asciidoc +++ b/docs/reference/esql/functions/types/mv_append.asciidoc @@ -8,7 +8,7 @@ field1 | field2 | result boolean | boolean | boolean cartesian_point | cartesian_point | cartesian_point cartesian_shape | cartesian_shape | cartesian_shape -datetime | datetime | datetime +date | date | date double | double | double geo_point | geo_point | geo_point geo_shape | geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_count.asciidoc b/docs/reference/esql/functions/types/mv_count.asciidoc index 8af6b76591acb..260c531731f04 100644 --- a/docs/reference/esql/functions/types/mv_count.asciidoc +++ b/docs/reference/esql/functions/types/mv_count.asciidoc @@ -8,7 +8,7 @@ field | result boolean | integer cartesian_point | integer cartesian_shape | integer -datetime | integer +date | integer double | integer geo_point | integer geo_shape | integer diff --git a/docs/reference/esql/functions/types/mv_dedupe.asciidoc b/docs/reference/esql/functions/types/mv_dedupe.asciidoc index a6b78f781f17a..68e546451c8cb 100644 --- a/docs/reference/esql/functions/types/mv_dedupe.asciidoc +++ 
b/docs/reference/esql/functions/types/mv_dedupe.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index e077c57971a4a..35633544d99a0 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index e077c57971a4a..35633544d99a0 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_max.asciidoc b/docs/reference/esql/functions/types/mv_max.asciidoc index 4e5f0a5e0ae89..8ea36aebbad37 100644 --- a/docs/reference/esql/functions/types/mv_max.asciidoc +++ b/docs/reference/esql/functions/types/mv_max.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_min.asciidoc b/docs/reference/esql/functions/types/mv_min.asciidoc index 4e5f0a5e0ae89..8ea36aebbad37 100644 --- a/docs/reference/esql/functions/types/mv_min.asciidoc +++ b/docs/reference/esql/functions/types/mv_min.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_percentile.asciidoc b/docs/reference/esql/functions/types/mv_percentile.asciidoc new file mode 100644 index 0000000000000..99a58b9c3d2e2 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_percentile.asciidoc @@ -0,0 +1,17 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | percentile | result +double | double | double +double | integer | double +double | long | double +integer | double | integer +integer | integer | integer +integer | long | integer +long | double | long +long | integer | long +long | long | long +|=== diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc index 568de10f53d32..0a9dc073370c7 100644 --- a/docs/reference/esql/functions/types/mv_slice.asciidoc +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -8,7 +8,7 @@ field | start | end | result boolean | integer | integer | boolean cartesian_point | integer | integer | cartesian_point cartesian_shape | integer | integer | cartesian_shape -datetime | integer | integer | datetime +date | integer | integer | date double | integer | integer | double geo_point | integer | integer | geo_point geo_shape | integer | integer | geo_shape diff --git a/docs/reference/esql/functions/types/mv_sort.asciidoc b/docs/reference/esql/functions/types/mv_sort.asciidoc index 24925ca8a6587..93965187482ac 100644 --- a/docs/reference/esql/functions/types/mv_sort.asciidoc +++ b/docs/reference/esql/functions/types/mv_sort.asciidoc @@ -6,7 +6,7 @@ |=== field | order | result boolean | keyword | boolean -datetime | keyword | datetime +date | keyword | date double | keyword | double integer | keyword | integer ip | keyword | ip diff --git a/docs/reference/esql/functions/types/not_equals.asciidoc b/docs/reference/esql/functions/types/not_equals.asciidoc index 497c9319fedb3..ad0e46ef4b8da 100644 --- a/docs/reference/esql/functions/types/not_equals.asciidoc +++ b/docs/reference/esql/functions/types/not_equals.asciidoc @@ -8,7 +8,7 @@ lhs | rhs | result boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/now.asciidoc b/docs/reference/esql/functions/types/now.asciidoc index 5737d98f2f7db..b474ab1042050 100644 --- a/docs/reference/esql/functions/types/now.asciidoc +++ b/docs/reference/esql/functions/types/now.asciidoc @@ -5,5 +5,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== result -datetime +date |=== diff --git a/docs/reference/esql/functions/types/sub.asciidoc b/docs/reference/esql/functions/types/sub.asciidoc index d309f651705f0..c3ded301ebe68 100644 --- a/docs/reference/esql/functions/types/sub.asciidoc +++ b/docs/reference/esql/functions/types/sub.asciidoc @@ -5,9 +5,9 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result +date | date_period | date +date | time_duration | date date_period | date_period | date_period -datetime | date_period | datetime -datetime | time_duration | datetime double | double | double double | integer | double double | long | double diff --git a/docs/reference/esql/functions/types/to_datetime.asciidoc b/docs/reference/esql/functions/types/to_datetime.asciidoc index 52c4cebb661cf..80c986efca794 100644 --- a/docs/reference/esql/functions/types/to_datetime.asciidoc +++ b/docs/reference/esql/functions/types/to_datetime.asciidoc @@ -5,11 +5,11 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | result -datetime | datetime -double | datetime -integer | datetime -keyword | datetime -long | datetime -text | datetime -unsigned_long | datetime 
+date | date +double | date +integer | date +keyword | date +long | date +text | date +unsigned_long | date |=== diff --git a/docs/reference/esql/functions/types/to_double.asciidoc b/docs/reference/esql/functions/types/to_double.asciidoc index cff686c7bc4ca..d5f5833cd7249 100644 --- a/docs/reference/esql/functions/types/to_double.asciidoc +++ b/docs/reference/esql/functions/types/to_double.asciidoc @@ -9,7 +9,7 @@ boolean | double counter_double | double counter_integer | double counter_long | double -datetime | double +date | double double | double integer | double keyword | double diff --git a/docs/reference/esql/functions/types/to_integer.asciidoc b/docs/reference/esql/functions/types/to_integer.asciidoc index 974f3c9c82d88..d67f8f07affd9 100644 --- a/docs/reference/esql/functions/types/to_integer.asciidoc +++ b/docs/reference/esql/functions/types/to_integer.asciidoc @@ -7,7 +7,7 @@ field | result boolean | integer counter_integer | integer -datetime | integer +date | integer double | integer integer | integer keyword | integer diff --git a/docs/reference/esql/functions/types/to_long.asciidoc b/docs/reference/esql/functions/types/to_long.asciidoc index b3959c5444e34..a07990cb1cfbf 100644 --- a/docs/reference/esql/functions/types/to_long.asciidoc +++ b/docs/reference/esql/functions/types/to_long.asciidoc @@ -8,7 +8,7 @@ field | result boolean | long counter_integer | long counter_long | long -datetime | long +date | long double | long integer | long keyword | long diff --git a/docs/reference/esql/functions/types/to_string.asciidoc b/docs/reference/esql/functions/types/to_string.asciidoc index f14cfbb39929f..26a5b31a2a589 100644 --- a/docs/reference/esql/functions/types/to_string.asciidoc +++ b/docs/reference/esql/functions/types/to_string.asciidoc @@ -8,7 +8,7 @@ field | result boolean | keyword cartesian_point | keyword cartesian_shape | keyword -datetime | keyword +date | keyword double | keyword geo_point | keyword geo_shape | keyword diff --git a/docs/reference/esql/functions/types/to_unsigned_long.asciidoc b/docs/reference/esql/functions/types/to_unsigned_long.asciidoc index a271e1a19321d..87b21f3948dad 100644 --- a/docs/reference/esql/functions/types/to_unsigned_long.asciidoc +++ b/docs/reference/esql/functions/types/to_unsigned_long.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | unsigned_long -datetime | unsigned_long +date | unsigned_long double | unsigned_long integer | unsigned_long keyword | unsigned_long diff --git a/docs/reference/esql/functions/types/top.asciidoc b/docs/reference/esql/functions/types/top.asciidoc index ff71b2d153e3a..0eb329c10b9ed 100644 --- a/docs/reference/esql/functions/types/top.asciidoc +++ b/docs/reference/esql/functions/types/top.asciidoc @@ -6,7 +6,7 @@ |=== field | limit | order | result boolean | integer | keyword | boolean -datetime | integer | keyword | datetime +date | integer | keyword | date double | integer | keyword | double integer | integer | keyword | integer ip | integer | keyword | ip diff --git a/docs/reference/esql/functions/types/values.asciidoc b/docs/reference/esql/functions/types/values.asciidoc index 705745d76dbab..35ce5811e0cd0 100644 --- a/docs/reference/esql/functions/types/values.asciidoc +++ b/docs/reference/esql/functions/types/values.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 36aba99adb8c8..5f67014d5bb4a 
100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -162,7 +162,8 @@ and smaller shards may be appropriate for {enterprise-search-ref}/index.html[Enterprise Search] and similar use cases. If you use {ilm-init}, set the <>'s -`max_primary_shard_size` threshold to `50gb` to avoid shards larger than 50GB. +`max_primary_shard_size` threshold to `50gb` to avoid shards larger than 50GB +and `min_primary_shard_size` threshold to `10gb` to avoid shards smaller than 10GB. To see the current size of your shards, use the <>. diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index 632cb982b3968..fc9a35e4ef570 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -5,7 +5,7 @@ Delete policy ++++ -Deletes an index lifecycle policy. +Deletes an index <> policy. [[ilm-delete-lifecycle-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 348a9e7f99e78..a1ddde8c9f2d9 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -5,7 +5,7 @@ Explain lifecycle ++++ -Retrieves the current lifecycle status for one or more indices. For data +Retrieves the current <> status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 7443610065487..b4e07389a9fb7 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -5,7 +5,7 @@ Get policy ++++ -Retrieves a lifecycle policy. +Retrieves a <> policy. [[ilm-get-lifecycle-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index 7e9e963f6f369..f2ab8d65ec9a1 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -7,7 +7,7 @@ Get {ilm} status ++++ -Retrieves the current {ilm} ({ilm-init}) status. +Retrieves the current <> ({ilm-init}) status. You can start or stop {ilm-init} with the <> and <> APIs. diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index 19cc9f7088867..f3441fa997cff 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -5,7 +5,7 @@ Move to step ++++ -Triggers execution of a specific step in the lifecycle policy. +Triggers execution of a specific step in the <> policy. [[ilm-move-to-step-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index ffd59a14d8c25..390f6b1bb4d15 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -5,7 +5,7 @@ Create or update lifecycle policy ++++ -Creates or updates lifecycle policy. See <> for +Creates or updates <> policy. See <> for definitions of policy components. 
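To make the shard-size guidance above concrete, the lifecycle-policy body below is a minimal sketch of what the create-or-update lifecycle policy request described in this hunk could carry, pairing the `max_primary_shard_size: 50gb` and `min_primary_shard_size: 10gb` rollover thresholds recommended in the size-your-shards change. The single hot phase and the placement of both thresholds on the rollover action are illustrative assumptions, not content of this change set.

```json
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_primary_shard_size": "50gb",
            "min_primary_shard_size": "10gb"
          }
        }
      }
    }
  }
}
```

Such a body would typically be sent with a request like `PUT _ilm/policy/<policy-name>`; suitable threshold values depend on how quickly the underlying indices grow.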
[[ilm-put-lifecycle-request]] diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc index 711eccc298df1..107cab4d5aa19 100644 --- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc +++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc @@ -5,7 +5,7 @@ Remove policy ++++ -Removes assigned lifecycle policies from an index or a data stream's backing +Removes assigned <> policies from an index or a data stream's backing indices. [[ilm-remove-policy-request]] diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc index cb2587fbb151b..8f01f15e0c3ad 100644 --- a/docs/reference/ilm/apis/retry-policy.asciidoc +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -5,7 +5,7 @@ Retry policy ++++ -Retry executing the policy for an index that is in the ERROR step. +Retry executing the <> policy for an index that is in the ERROR step. [[ilm-retry-policy-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index 32db585c6b14c..c38b3d9ca8831 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -7,7 +7,7 @@ Start {ilm} ++++ -Start the {ilm} ({ilm-init}) plugin. +Start the <> ({ilm-init}) plugin. [[ilm-start-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index 1e9cfb94d0b1f..a6100d794c2d3 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -7,7 +7,7 @@ Stop {ilm} ++++ -Stop the {ilm} ({ilm-init}) plugin. +Stop the <> ({ilm-init}) plugin. [[ilm-stop-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index d922fa6687823..f810afc6c2b5f 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -2,7 +2,7 @@ [[index-lifecycle-error-handling]] == Troubleshooting {ilm} errors -When {ilm-init} executes a lifecycle policy, it's possible for errors to occur +When <> executes a lifecycle policy, it's possible for errors to occur while performing the necessary index operations for a step. When this happens, {ilm-init} moves the index to an `ERROR` step. If {ilm-init} cannot resolve the error automatically, execution is halted diff --git a/docs/reference/ilm/ilm-index-lifecycle.asciidoc b/docs/reference/ilm/ilm-index-lifecycle.asciidoc index acf59645dae13..040e02742f5e7 100644 --- a/docs/reference/ilm/ilm-index-lifecycle.asciidoc +++ b/docs/reference/ilm/ilm-index-lifecycle.asciidoc @@ -5,7 +5,7 @@ Index lifecycle ++++ -{ilm-init} defines five index lifecycle _phases_: +<> defines five index lifecycle _phases_: * **Hot**: The index is actively being updated and queried. * **Warm**: The index is no longer being updated but is still being queried. diff --git a/docs/reference/ingest/enrich.asciidoc b/docs/reference/ingest/enrich.asciidoc index 6642cdc2a74ce..4bd50641149c0 100644 --- a/docs/reference/ingest/enrich.asciidoc +++ b/docs/reference/ingest/enrich.asciidoc @@ -230,12 +230,12 @@ Instead, you can: [[ingest-enrich-components]] ==== Enrich components -The enrich coordinator is a component that manages and performs the searches +The enrich coordinator is a component that manages and performs the searches required to enrich documents on each ingest node. 
It combines searches from all enrich processors in all pipelines into bulk <>. -The enrich policy executor is a component that manages the executions of all -enrich policies. When an enrich policy is executed, this component creates +The enrich policy executor is a component that manages the executions of all +enrich policies. When an enrich policy is executed, this component creates a new enrich index and removes the previous enrich index. The enrich policy executions are managed from the elected master node. The execution of these policies occurs on a different node. @@ -249,9 +249,15 @@ enrich policy executor. The enrich coordinator supports the following node settings: `enrich.cache_size`:: -Maximum number of searches to cache for enriching documents. Defaults to `1000`. -There is a single cache for all enrich processors in the cluster. This setting -determines the size of that cache. +Maximum size of the cache that caches searches for enriching documents. +The size can be specified in three units: the raw number of +cached searches (e.g. `1000`), an absolute size in bytes (e.g. `100Mb`), +or a percentage of the max heap space of the node (e.g. `1%`). +Both for the absolute byte size and the percentage of heap space, +{es} does not guarantee that the enrich cache size will adhere exactly to that maximum, +as {es} uses the byte size of the serialized search response +which is a good representation of the used space on the heap, but not an exact match. +Defaults to `1%`. There is a single cache for all enrich processors in the cluster. `enrich.coordinator_proxy.max_concurrent_requests`:: Maximum number of concurrent <> to @@ -280,4 +286,4 @@ Maximum number of enrich policies to execute concurrently. Defaults to `50`. include::geo-match-enrich-policy-type-ex.asciidoc[] include::match-enrich-policy-type-ex.asciidoc[] -include::range-enrich-policy-type-ex.asciidoc[] \ No newline at end of file +include::range-enrich-policy-type-ex.asciidoc[] diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index 2308ec259da48..537783ef6ff01 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -6,7 +6,10 @@ mode are described separately. `cluster.remote..mode`:: The mode used for a remote cluster connection. The only supported modes are - `sniff` and `proxy`. + `sniff` and `proxy`. The default is `sniff`. See <> for + further information about these modes, and <> + and <> for further information about their + settings. `cluster.remote.initial_connect_timeout`:: @@ -97,6 +100,11 @@ you configure the remotes. [[remote-cluster-sniff-settings]] ==== Sniff mode remote cluster settings +To use <> to connect to a remote cluster, set +`cluster.remote..mode: sniff` and then configure the following +settings. You may also leave `cluster.remote..mode` unset since +`sniff` is the default mode. + `cluster.remote..seeds`:: The list of seed nodes used to sniff the remote cluster state. @@ -117,6 +125,10 @@ you configure the remotes. [[remote-cluster-proxy-settings]] ==== Proxy mode remote cluster settings +To use <> to connect to a remote cluster, set +`cluster.remote..mode: proxy` and then configure the following +settings. + `cluster.remote..proxy_address`:: The address used for all remote connections.
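As a rough illustration of the sniff and proxy settings documented in the hunk above, a dynamic cluster-settings body registering one remote cluster per mode might look like the sketch below. The aliases `cluster_one` and `cluster_two` and all addresses are placeholders, not values taken from this change; the same keys can also be set statically in `elasticsearch.yml`.

```json
{
  "persistent": {
    "cluster": {
      "remote": {
        "cluster_one": {
          "mode": "sniff",
          "seeds": ["10.0.0.1:9300", "10.0.0.2:9300"]
        },
        "cluster_two": {
          "mode": "proxy",
          "proxy_address": "10.0.0.3:9400"
        }
      }
    }
  }
}
```

Leaving `mode` unset for `cluster_one` would behave identically, since `sniff` is the default mode noted above.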
diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 593aa79ded4d9..8fdc9f2e4f9cb 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -5,7 +5,9 @@ Each {es} node has two different network interfaces. Clients send requests to {es}'s REST APIs using its <>, but nodes communicate with other nodes using the <>. The transport interface is also used for communication with -<>. +<>. The transport interface uses a custom +binary protocol sent over <> TCP channels. +Both interfaces can be configured to use <>. You can configure both of these interfaces at the same time using the `network.*` settings. If you have a more complicated network, you might need to diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 25217302b7631..ca1c507aa4ed9 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -1,7 +1,7 @@ [[remote-clusters]] == Remote clusters You can connect a local cluster to other {es} clusters, known as _remote -clusters_. Remote clusters can be located in different datacenters or +clusters_. Remote clusters can be located in different datacenters or geographic regions, and contain indices or data streams that can be replicated with {ccr} or searched by a local cluster using {ccs}. @@ -30,9 +30,9 @@ capabilities, the local and remote cluster must be on the same [discrete] === Add remote clusters -NOTE: The instructions that follow describe how to create a remote connection from a -self-managed cluster. You can also set up {ccs} and {ccr} from an -link:https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html[{ess} deployment] +NOTE: The instructions that follow describe how to create a remote connection from a +self-managed cluster. You can also set up {ccs} and {ccr} from an +link:https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html[{ess} deployment] or from an link:https://www.elastic.co/guide/en/cloud-enterprise/current/ece-enable-ccs.html[{ece} deployment]. To add remote clusters, you can choose between @@ -52,7 +52,7 @@ controls. <>. Certificate based security model:: Uses mutual TLS authentication for cross-cluster operations. User authentication -is performed on the local cluster and a user's role names are passed to the +is performed on the local cluster and a user's role names are passed to the remote cluster. In this model, a superuser on the local cluster gains total read access to the remote cluster, so it is only suitable for clusters that are in the same security domain. <>. @@ -63,13 +63,17 @@ the same security domain. <>. [[sniff-mode]] Sniff mode:: -In sniff mode, a cluster is created using a name and a list of seed nodes. When -a remote cluster is registered, its cluster state is retrieved from one of the -seed nodes and up to three _gateway nodes_ are selected as part of remote -cluster requests. This mode requires that the gateway node's publish addresses -are accessible by the local cluster. +In sniff mode, a cluster alias is registered with a name of your choosing and a +list of addresses of _seed_ nodes specified with the +`cluster.remote..seeds` setting. When you register a remote +cluster using sniff mode, {es} retrieves from one of the seed nodes the +addresses of up to three _gateway nodes_. 
Each `remote_cluster_client` node in +the local {es} cluster then opens several TCP connections to the publish +addresses of the gateway nodes. This mode therefore requires that the gateway +nodes' publish addresses are accessible to nodes in the local cluster. + -Sniff mode is the default connection mode. +Sniff mode is the default connection mode. See <> +for more information about configuring sniff mode. + [[gateway-nodes-selection]] The _gateway nodes_ selection depends on the following criteria: @@ -84,13 +88,21 @@ However, such nodes still have to satisfy the two above requirements. [[proxy-mode]] Proxy mode:: -In proxy mode, a cluster is created using a name and a single proxy address. -When you register a remote cluster, a configurable number of socket connections -are opened to the proxy address. The proxy is required to route those -connections to the remote cluster. Proxy mode does not require remote cluster -nodes to have accessible publish addresses. +In proxy mode, a cluster alias is registered with a name of your choosing and +the address of a TCP (layer 4) reverse proxy specified with the +`cluster.remote..proxy_address` setting. You must configure this +proxy to route connections to one or more nodes of the remote cluster. When you +register a remote cluster using proxy mode, {es} opens several TCP connections +to the proxy address and uses these connections to communicate with the remote +cluster. In proxy mode {es} disregards the publish addresses of the remote +cluster nodes which means that the publish addresses of the remote cluster +nodes need not be accessible to the local cluster. ++ +Proxy mode is not the default connection mode, so you must set +`cluster.remote..mode: proxy` to use it. See +<> for more information about configuring proxy +mode. + -The proxy mode is not the default connection mode and must be configured. Proxy mode has the same <> as sniff mode. diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index e2314381a4b06..2069c1bd96ff0 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -22,6 +22,10 @@ Either downgrade to an earlier version, upgrade to 8.15.1, or else follow the recommendation in the manual to entirely disable swap instead of using the memory lock feature (issue: {es-issue}111847[#111847]) +* The `took` field of the response to the <> API is incorrect and may be rather large. Clients which +<> assume that this value will be within a particular range (e.g. that it fits into a 32-bit +signed integer) may encounter errors (issue: {es-issue}111854[#111854]) + [[breaking-8.15.0]] [float] === Breaking changes diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index bf97da15a1ccf..b52b296220029 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -250,7 +250,7 @@ GET /restaurants/_search } } ], - "rank_constant": 0.3, <5> + "rank_constant": 1, <5> "rank_window_size": 50 <6> } } diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc index 5d838eb86dcf3..b47bc2370ab10 100644 --- a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc +++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc @@ -24,7 +24,10 @@ For more information, see <>. ==== {api-description-title} This API mounts a snapshot as a searchable snapshot index. 
-Note that manually mounting {ilm-init}-managed snapshots can <> with <>. + +Don't use this API for snapshots managed by {ilm-init}. Manually mounting +{ilm-init}-managed snapshots can <> with +<>. [[searchable-snapshots-api-mount-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc index 8e4a1b93b9c05..a38971a0bae6a 100644 --- a/docs/reference/searchable-snapshots/index.asciidoc +++ b/docs/reference/searchable-snapshots/index.asciidoc @@ -176,9 +176,12 @@ nodes that have a shared cache. ==== Manually mounting snapshots captured by an Index Lifecycle Management ({ilm-init}) policy can interfere with {ilm-init}'s automatic management. This may lead to issues such as data loss -or complications with snapshot handling. For optimal results, allow {ilm-init} to manage -snapshots automatically. If manual mounting is necessary, be aware of its potential -impact on {ilm-init} processes. For more information, learn about <>. +or complications with snapshot handling. + +For optimal results, allow {ilm-init} to manage +snapshots automatically. + +<>. ==== [[searchable-snapshots-shared-cache]] @@ -336,6 +339,11 @@ cluster has write access then you must make sure that the other cluster does not delete these snapshots. The snapshot contains the sole full copy of your data. If you delete it then the data cannot be recovered from elsewhere. +* The data in a searchable snapshot index are cached in local storage, so if you +delete the underlying searchable snapshot {es} will continue to operate normally +until the first cache miss. This may be much later, for instance when a shard +relocates to a different node, or when the node holding the shard restarts. + * If the repository fails or corrupts the contents of the snapshot and you cannot restore it to its previous healthy state then the data is permanently lost. diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index f15654bef2d1f..747b1eef40441 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -101,6 +101,9 @@ deprecated[7.5] Use `manage_transform` instead. + This privilege is not available in {serverless-full}. +`manage_data_stream_global_retention`:: +This privilege has no effect.deprecated[8.16] + `manage_enrich`:: All operations related to managing and executing enrich policies. @@ -223,6 +226,9 @@ security roles of the user who created or updated them. All cluster read-only operations, like cluster health and state, hot threads, node info, node and cluster stats, and pending cluster tasks. +`monitor_data_stream_global_retention`:: +This privilege has no effect.deprecated[8.16] + `monitor_enrich`:: All read-only operations related to managing and executing enrich policies. diff --git a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc index 0f00e956472d0..4b055525d4e6c 100644 --- a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc +++ b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc @@ -10,6 +10,18 @@ These are the settings available for configuring <>, <>) +The maximum retention period that will apply to all user data streams managed by the data stream lifecycle. 
The max retention will also +override the retention of a data stream whose configured retention exceeds the max retention. It should be greater than `10s`. + +[[data-streams-lifecycle-retention-default]] +`data_streams.lifecycle.retention.default`:: +(<>, <>) +The retention period that will apply to all user data streams managed by the data stream lifecycle that do not have retention configured. +It should be greater than `10s` and less than or equal to <>. + [[data-streams-lifecycle-poll-interval]] `data_streams.lifecycle.poll_interval`:: (<>, <>) diff --git a/docs/reference/slm/apis/slm-put.asciidoc b/docs/reference/slm/apis/slm-put.asciidoc index be265554deef5..51ad571ee12e7 100644 --- a/docs/reference/slm/apis/slm-put.asciidoc +++ b/docs/reference/slm/apis/slm-put.asciidoc @@ -100,13 +100,19 @@ Minimum number of snapshots to retain, even if the snapshots have expired. ==== `schedule`:: -(Required, <>) +(Required, <> or <>) Periodic or absolute schedule at which the policy creates snapshots. {slm-init} applies `schedule` changes immediately. +Schedule may be either a Cron schedule or a time unit describing the interval between snapshots. +When using a time unit interval, the first snapshot is scheduled one interval after the policy modification time, and then again every interval after. + [[slm-api-put-example]] ==== {api-examples-title} + +[[slm-api-put-daily-policy]] +===== Create a policy Create a `daily-snapshots` lifecycle policy: [source,console] -------------------------------------------------- PUT /_slm/policy/daily-snapshots <6> Optional retention configuration <7> Keep snapshots for 30 days <8> Always keep at least 5 successful snapshots, even if they're more than 30 days old -<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old \ No newline at end of file +<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old + + +[[slm-api-put-hourly-policy]] +===== Use Interval Scheduling +Create an `hourly-snapshots` lifecycle policy using interval scheduling: + +[source,console] +-------------------------------------------------- +PUT /_slm/policy/hourly-snapshots +{ + "schedule": "1h", + "name": "", + "repository": "my_repository", + "config": { + "indices": ["data-*", "important"] + } +} +-------------------------------------------------- +// TEST[setup:setup-repository] +Creates a snapshot once every hour. The first snapshot will be created one hour after the policy is modified, +with subsequent snapshots being created every hour afterward. diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index d757a74110ca9..3a9c12caebad9 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -317,6 +317,15 @@ include::repository-shared-settings.asciidoc[] https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html[AWS DeleteObjects API]. +`max_multipart_upload_cleanup_size`:: + + (<>) Sets the maximum number of possibly-dangling multipart + uploads to clean up in each batch of snapshot deletions. Defaults to `1000` + which is the maximum number supported by the + https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html[AWS + ListMultipartUploads API]. If set to `0`, {es} will not attempt to clean up + dangling multipart uploads. + NOTE: The option of defining client settings in the repository settings as documented below is considered deprecated, and will be removed in a future version.
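The interval-based `schedule` described in the slm-put change above is easiest to see as arithmetic: the first snapshot fires one interval after the policy's modification time, then once per interval after that. The following is a small illustrative sketch of that calculation, not the actual SLM scheduler; the class and method names are invented.

[source,java]
----
import java.time.Duration;
import java.time.Instant;

public class SlmIntervalSketch {

    /**
     * Next trigger time for an interval-based schedule: the first snapshot fires one
     * interval after the policy was last modified, then once per interval after that.
     */
    static Instant nextTrigger(Instant policyModified, Duration interval, Instant now) {
        Instant first = policyModified.plus(interval);
        if (now.isBefore(first)) {
            return first;
        }
        long elapsedMillis = Duration.between(policyModified, now).toMillis();
        long completedIntervals = elapsedMillis / interval.toMillis();
        return policyModified.plusMillis((completedIntervals + 1) * interval.toMillis());
    }

    public static void main(String[] args) {
        Instant modified = Instant.parse("2024-08-01T00:00:00Z");
        Instant now = Instant.parse("2024-08-01T02:30:00Z");
        // Prints 2024-08-01T03:00:00Z for a policy modified at midnight with a 1h interval.
        System.out.println(nextTrigger(modified, Duration.ofHours(1), now));
    }
}
----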
@@ -492,33 +501,6 @@ by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. -==== Cleaning up multi-part uploads - -{es} uses S3's multi-part upload process to upload larger blobs to the -repository. The multi-part upload process works by dividing each blob into -smaller parts, uploading each part independently, and then completing the -upload in a separate step. This reduces the amount of data that {es} must -re-send if an upload fails: {es} only needs to re-send the part that failed -rather than starting from the beginning of the whole blob. The storage for each -part is charged independently starting from the time at which the part was -uploaded. - -If a multi-part upload cannot be completed then it must be aborted in order to -delete any parts that were successfully uploaded, preventing further storage -charges from accumulating. {es} will automatically abort a multi-part upload on -failure, but sometimes the abort request itself fails. For example, if the -repository becomes inaccessible or the instance on which {es} is running is -terminated abruptly then {es} cannot complete or abort any ongoing uploads. - -You must make sure that failed uploads are eventually aborted to avoid -unnecessary storage costs. You can use the -https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html[List -multipart uploads API] to list the ongoing uploads and look for any which are -unusually long-running, or you can -https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpu-abort-incomplete-mpu-lifecycle-config.html[configure -a bucket lifecycle policy] to automatically abort incomplete uploads once they -reach a certain age. 
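The manual-cleanup guidance removed above is what the new `max_multipart_upload_cleanup_size` setting automates during snapshot deletions. For anyone who still wants to audit or abort dangling uploads out of band, here is a rough sketch assuming the AWS SDK for Java v1; the bucket name and the seven-day cutoff are invented examples, and result pagination is skipped for brevity.

[source,java]
----
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
import com.amazonaws.services.s3.model.MultipartUpload;

import java.time.Duration;
import java.time.Instant;

public class AbortDanglingUploadsSketch {
    public static void main(String[] args) {
        String bucket = "my-snapshot-bucket"; // invented example bucket
        Instant cutoff = Instant.now().minus(Duration.ofDays(7));

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // List in-progress multipart uploads and abort those older than the cutoff,
        // which frees the storage held by their already-uploaded parts.
        for (MultipartUpload upload : s3.listMultipartUploads(new ListMultipartUploadsRequest(bucket)).getMultipartUploads()) {
            if (upload.getInitiated().toInstant().isBefore(cutoff)) {
                s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, upload.getKey(), upload.getUploadId()));
            }
        }
    }
}
----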
- [[repository-s3-aws-vpc]] ==== AWS VPC bandwidth settings diff --git a/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc b/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc index 728d805db7a30..7eb27d5428956 100644 --- a/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc +++ b/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc @@ -44,13 +44,11 @@ GET _cluster/allocation/explain { "index": "my-index", "shard": 0, - "primary": false, - "current_node": "my-node" + "primary": false } ---- // TEST[s/^/PUT my-index\n/] // TEST[s/"primary": false,/"primary": false/] -// TEST[s/"current_node": "my-node"//] [[fix-watermark-errors-temporary]] ==== Temporary Relief diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 00f1caec24cf7..1001ab2b709dd 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -306,6 +306,11 @@ + + + + + @@ -336,6 +341,11 @@ + + + + + @@ -346,6 +356,11 @@ + + + + + @@ -361,6 +376,11 @@ + + + + + @@ -953,11 +973,6 @@ - - - - - @@ -1746,16 +1761,16 @@ - - - - - + + + + + diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 2c3521197d7c4..a4b76b9530d66 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index efe2ff3449216..9036682bf0f0c 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip +distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 454581ae70b51..79caf04c97246 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -15,7 +15,6 @@ import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; @@ -38,8 +37,6 @@ public JnaNativeLibraryProvider() { JnaMacCLibrary::new, Kernel32Library.class, JnaKernel32Library::new, - SystemdLibrary.class, - JnaSystemdLibrary::new, ZstdLibrary.class, JnaZstdLibrary::new, VectorLibrary.class, diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java index d984d239e0b39..82a69e4864d94 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java +++ 
b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java @@ -16,6 +16,7 @@ import com.sun.jna.Pointer; import com.sun.jna.Structure; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.util.Arrays; @@ -109,6 +110,16 @@ public long bytesalloc() { } } + public static class JnaSockAddr implements SockAddr { + final Memory memory; + + JnaSockAddr(String path) { + this.memory = new Memory(110); + memory.setShort(0, AF_UNIX); + memory.setString(2, path, "UTF-8"); + } + } + private interface NativeFunctions extends Library { int geteuid(); @@ -126,6 +137,12 @@ private interface NativeFunctions extends Library { int close(int fd); + int socket(int domain, int type, int protocol); + + int connect(int sockfd, Pointer addr, int addrlen); + + long send(int sockfd, Pointer buf, long buflen, int flags); + String strerror(int errno); } @@ -235,6 +252,30 @@ public int fstat64(int fd, Stat64 stats) { return fstat64.fstat64(fd, jnaStats.memory); } + @Override + public int socket(int domain, int type, int protocol) { + return functions.socket(domain, type, protocol); + } + + @Override + public SockAddr newUnixSockAddr(String path) { + return new JnaSockAddr(path); + } + + @Override + public int connect(int sockfd, SockAddr addr) { + assert addr instanceof JnaSockAddr; + var jnaAddr = (JnaSockAddr) addr; + return functions.connect(sockfd, jnaAddr.memory, (int) jnaAddr.memory.size()); + } + + @Override + public long send(int sockfd, CloseableByteBuffer buffer, int flags) { + assert buffer instanceof JnaCloseableByteBuffer; + var nativeBuffer = (JnaCloseableByteBuffer) buffer; + return functions.send(sockfd, nativeBuffer.memory, nativeBuffer.buffer().remaining(), flags); + } + @Override public String strerror(int errno) { return functions.strerror(errno); diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java deleted file mode 100644 index f06361e8807c5..0000000000000 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.jna; - -import com.sun.jna.Library; -import com.sun.jna.Native; - -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; - -class JnaSystemdLibrary implements SystemdLibrary { - private interface NativeFunctions extends Library { - int sd_notify(int unset_environment, String state); - } - - private final NativeFunctions functions; - - JnaSystemdLibrary() { - this.functions = Native.load("libsystemd.so.0", NativeFunctions.class); - } - - @Override - public int sd_notify(int unset_environment, String state) { - return functions.sd_notify(unset_environment, state); - } -} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java index f6e6035a8aba6..e1ea28e8786f5 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -12,7 +12,7 @@ import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFProg; import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFilter; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.util.Map; @@ -92,7 +92,14 @@ record Arch( LinuxNativeAccess(NativeLibraryProvider libraryProvider) { super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1, 8, 64, 144, 48, 64)); this.linuxLibc = libraryProvider.getLibrary(LinuxCLibrary.class); - this.systemd = new Systemd(libraryProvider.getLibrary(SystemdLibrary.class)); + String socketPath = System.getenv("NOTIFY_SOCKET"); + if (socketPath == null) { + this.systemd = null; // not running under systemd + } else { + logger.debug("Systemd socket path: {}", socketPath); + var buffer = newBuffer(64); + this.systemd = new Systemd(libraryProvider.getLibrary(PosixCLibrary.class), socketPath, buffer); + } } @Override diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java index 4deade118b788..058cfe77b1ff3 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java @@ -10,17 +10,28 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import java.util.Locale; +import java.nio.charset.StandardCharsets; +/** + * Wraps access to notifications to systemd. + *

+ * Systemd notifications are done through a Unix socket. Although Java does support + * opening unix sockets, it unfortunately does not support datagram sockets. This class + * instead opens and communicates with the socket using native methods. + */ public class Systemd { private static final Logger logger = LogManager.getLogger(Systemd.class); - private final SystemdLibrary lib; + private final PosixCLibrary libc; + private final String socketPath; + private final CloseableByteBuffer buffer; - Systemd(SystemdLibrary lib) { - this.lib = lib; + Systemd(PosixCLibrary libc, String socketPath, CloseableByteBuffer buffer) { + this.libc = libc; + this.socketPath = socketPath; + this.buffer = buffer; } /** @@ -41,15 +52,61 @@ public void notify_stopping() { } private void notify(String state, boolean warnOnError) { - int rc = lib.sd_notify(0, state); - logger.trace("sd_notify({}, {}) returned [{}]", 0, state, rc); - if (rc < 0) { - String message = String.format(Locale.ROOT, "sd_notify(%d, %s) returned error [%d]", 0, state, rc); - if (warnOnError) { - logger.warn(message); + int sockfd = libc.socket(PosixCLibrary.AF_UNIX, PosixCLibrary.SOCK_DGRAM, 0); + if (sockfd < 0) { + throwOrLog("Could not open systemd socket: " + libc.strerror(libc.errno()), warnOnError); + return; + } + RuntimeException error = null; + try { + var sockAddr = libc.newUnixSockAddr(socketPath); + if (libc.connect(sockfd, sockAddr) != 0) { + throwOrLog("Could not connect to systemd socket: " + libc.strerror(libc.errno()), warnOnError); + return; + } + + byte[] bytes = state.getBytes(StandardCharsets.US_ASCII); + final long bytesSent; + synchronized (buffer) { + buffer.buffer().clear(); + buffer.buffer().put(0, bytes); + buffer.buffer().limit(bytes.length); + bytesSent = libc.send(sockfd, buffer, 0); + } + + if (bytesSent == -1) { + throwOrLog("Failed to send message (" + state + ") to systemd socket: " + libc.strerror(libc.errno()), warnOnError); + } else if (bytesSent != bytes.length) { + throwOrLog("Not all bytes of message (" + state + ") sent to systemd socket (sent " + bytesSent + ")", warnOnError); } else { - throw new RuntimeException(message); + logger.trace("Message (" + state + ") sent to systemd"); + } + } catch (RuntimeException e) { + error = e; + } finally { + if (libc.close(sockfd) != 0) { + try { + throwOrLog("Could not close systemd socket: " + libc.strerror(libc.errno()), warnOnError); + } catch (RuntimeException e) { + if (error != null) { + error.addSuppressed(e); + throw error; + } else { + throw e; + } + } + } else if (error != null) { + throw error; } } } + + private void throwOrLog(String message, boolean warnOnError) { + if (warnOnError) { + logger.warn(message); + } else { + logger.error(message); + throw new RuntimeException(message); + } + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java index faa0e861dc63f..cdd0a56c52a90 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -9,5 +9,5 @@ package org.elasticsearch.nativeaccess.lib; /** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ -public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, SystemdLibrary, - VectorLibrary, ZstdLibrary {} +public sealed 
interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, VectorLibrary, + ZstdLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java index 0e7d07d0ad623..ac34fcb23b3eb 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java @@ -8,11 +8,19 @@ package org.elasticsearch.nativeaccess.lib; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; + /** * Provides access to methods in libc.so available on POSIX systems. */ public non-sealed interface PosixCLibrary extends NativeLibrary { + /** socket domain indicating unix file socket */ + short AF_UNIX = 1; + + /** socket type indicating a datagram-oriented socket */ + int SOCK_DGRAM = 2; + /** * Gets the effective userid of the current process. * @@ -68,8 +76,6 @@ interface Stat64 { int open(String pathname, int flags); - int close(int fd); - int fstat64(int fd, Stat64 stats); int ftruncate(int fd, long length); @@ -90,6 +96,55 @@ interface FStore { int fcntl(int fd, int cmd, FStore fst); + /** + * Open a file descriptor to connect to a socket. + * + * @param domain The socket protocol family, eg AF_UNIX + * @param type The socket type, eg SOCK_DGRAM + * @param protocol The protocol for the given protocol family, normally 0 + * @return an open file descriptor, or -1 on failure with errno set + * @see socket manpage + */ + int socket(int domain, int type, int protocol); + + /** + * Marker interface for sockaddr struct implementations. + */ + interface SockAddr {} + + /** + * Create a sockaddr for the AF_UNIX family. + */ + SockAddr newUnixSockAddr(String path); + + /** + * Connect a socket to an address. + * + * @param sockfd An open socket file descriptor + * @param addr The address to connect to + * @return 0 on success, -1 on failure with errno set + */ + int connect(int sockfd, SockAddr addr); + + /** + * Send a message to a socket. + * + * @param sockfd The open socket file descriptor + * @param buffer The message bytes to send + * @param flags Flags that may adjust how the message is sent + * @return The number of bytes sent, or -1 on failure with errno set + * @see send manpage + */ + long send(int sockfd, CloseableByteBuffer buffer, int flags); + + /** + * Close a file descriptor + * @param fd The file descriptor to close + * @return 0 on success, -1 on failure with errno set + * @see close manpage + */ + int close(int fd); + /** * Return a string description for an error. * diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java deleted file mode 100644 index 3c4ffefb6e41f..0000000000000 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.lib; - -public non-sealed interface SystemdLibrary extends NativeLibrary { - int sd_notify(int unset_environment, String state); -} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java index cbd43a394379b..1ac7d6c6f897d 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -14,7 +14,6 @@ import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; @@ -36,8 +35,6 @@ public JdkNativeLibraryProvider() { JdkMacCLibrary::new, Kernel32Library.class, JdkKernel32Library::new, - SystemdLibrary.class, - JdkSystemdLibrary::new, ZstdLibrary.class, JdkZstdLibrary::new, VectorLibrary.class, diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java index 7affd0614461d..f5e3132b76b56 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java @@ -10,6 +10,7 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.lang.foreign.Arena; @@ -24,8 +25,10 @@ import static java.lang.foreign.MemoryLayout.PathElement.groupElement; import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_BYTE; import static java.lang.foreign.ValueLayout.JAVA_INT; import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static java.lang.foreign.ValueLayout.JAVA_SHORT; import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; import static org.elasticsearch.nativeaccess.jdk.MemorySegmentUtil.varHandleWithoutOffset; @@ -89,6 +92,18 @@ class JdkPosixCLibrary implements PosixCLibrary { } fstat$mh = fstat; } + private static final MethodHandle socket$mh = downcallHandleWithErrno( + "socket", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, JAVA_INT) + ); + private static final MethodHandle connect$mh = downcallHandleWithErrno( + "connect", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS, JAVA_INT) + ); + private static final MethodHandle send$mh = downcallHandleWithErrno( + "send", + FunctionDescriptor.of(JAVA_LONG, JAVA_INT, ADDRESS, JAVA_LONG, JAVA_INT) + ); static final MemorySegment errnoState = Arena.ofAuto().allocate(CAPTURE_ERRNO_LAYOUT); @@ -226,6 +241,44 @@ public int fstat64(int fd, Stat64 stat64) { } } + @Override + public int socket(int domain, int type, int protocol) { + try { + return (int) socket$mh.invokeExact(errnoState, domain, type, protocol); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public SockAddr newUnixSockAddr(String path) { + return new JdkSockAddr(path); + } + + @Override + public int connect(int sockfd, SockAddr addr) { + assert addr instanceof JdkSockAddr; + 
var jdkAddr = (JdkSockAddr) addr; + try { + return (int) connect$mh.invokeExact(errnoState, sockfd, jdkAddr.segment, (int) jdkAddr.segment.byteSize()); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public long send(int sockfd, CloseableByteBuffer buffer, int flags) { + assert buffer instanceof JdkCloseableByteBuffer; + var nativeBuffer = (JdkCloseableByteBuffer) buffer; + var segment = nativeBuffer.segment; + try { + logger.info("Sending {} bytes to socket", buffer.buffer().remaining()); + return (long) send$mh.invokeExact(errnoState, sockfd, segment, (long) buffer.buffer().remaining(), flags); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + static class JdkRLimit implements RLimit { private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_LONG, JAVA_LONG); private static final VarHandle rlim_cur$vh = varHandleWithoutOffset(layout, groupElement(0)); @@ -326,4 +379,15 @@ public long bytesalloc() { return (long) st_bytesalloc$vh.get(segment); } } + + private static class JdkSockAddr implements SockAddr { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_SHORT, MemoryLayout.sequenceLayout(108, JAVA_BYTE)); + final MemorySegment segment; + + JdkSockAddr(String path) { + segment = Arena.ofAuto().allocate(layout); + segment.set(JAVA_SHORT, 0, AF_UNIX); + MemorySegmentUtil.setString(segment, 2, path); + } + } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java deleted file mode 100644 index c34c8c070edc5..0000000000000 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.nativeaccess.jdk; - -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.lang.foreign.Arena; -import java.lang.foreign.FunctionDescriptor; -import java.lang.foreign.MemorySegment; -import java.lang.invoke.MethodHandle; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static java.lang.foreign.ValueLayout.ADDRESS; -import static java.lang.foreign.ValueLayout.JAVA_INT; -import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; - -class JdkSystemdLibrary implements SystemdLibrary { - - static { - // Find and load libsystemd. We attempt all instances of - // libsystemd in case of multiarch systems, and stop when - // one is successfully loaded. If none can be loaded, - // UnsatisfiedLinkError will be thrown. 
- List paths = findLibSystemd(); - if (paths.isEmpty()) { - String libpath = System.getProperty("java.library.path"); - throw new UnsatisfiedLinkError("Could not find libsystemd in java.library.path: " + libpath); - } - UnsatisfiedLinkError last = null; - for (String path : paths) { - try { - System.load(path); - last = null; - break; - } catch (UnsatisfiedLinkError e) { - last = e; - } - } - if (last != null) { - throw last; - } - } - - // findLibSystemd returns a list of paths to instances of libsystemd - // found within java.library.path. - static List findLibSystemd() { - // Note: on some systems libsystemd does not have a non-versioned symlink. - // System.loadLibrary only knows how to find non-versioned library files, - // so we must manually check the library path to find what we need. - final Path libsystemd = Paths.get("libsystemd.so.0"); - final String libpath = System.getProperty("java.library.path"); - final List foundPaths = new ArrayList<>(); - Arrays.stream(libpath.split(":")).map(Paths::get).filter(Files::exists).forEach(rootPath -> { - try { - Files.walkFileTree(rootPath, new SimpleFileVisitor<>() { - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) { - if (Files.isReadable(dir)) { - return FileVisitResult.CONTINUE; - } - return FileVisitResult.SKIP_SUBTREE; - } - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { - if (file.getFileName().equals(libsystemd)) { - foundPaths.add(file.toAbsolutePath().toString()); - } - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult visitFileFailed(Path file, IOException exc) { - return FileVisitResult.CONTINUE; - } - }); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - return foundPaths; - } - - private static final MethodHandle sd_notify$mh = downcallHandle("sd_notify", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS)); - - @Override - public int sd_notify(int unset_environment, String state) { - try (Arena arena = Arena.ofConfined()) { - MemorySegment nativeState = MemorySegmentUtil.allocateString(arena, state); - return (int) sd_notify$mh.invokeExact(unset_environment, nativeState); - } catch (Throwable t) { - throw new AssertionError(t); - } - } -} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index c65711af0f63f..6c4c9bd0111c0 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -22,6 +22,10 @@ static String getString(MemorySegment segment, long offset) { return segment.getUtf8String(offset); } + static void setString(MemorySegment segment, long offset, String value) { + segment.setUtf8String(offset, value); + } + static MemorySegment allocateString(Arena arena, String s) { return arena.allocateUtf8String(s); } diff --git a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index 25c449337e294..23d9919603ab4 100644 --- a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -20,6 +20,10 @@ static String getString(MemorySegment segment, long offset) { return 
segment.getString(offset); } + static void setString(MemorySegment segment, long offset, String value) { + segment.setString(offset, value); + } + static MemorySegment allocateString(Arena arena, String s) { return arena.allocateFrom(s); } diff --git a/libs/x-content/impl/build.gradle b/libs/x-content/impl/build.gradle index 41b65044735ca..829b75524baeb 100644 --- a/libs/x-content/impl/build.gradle +++ b/libs/x-content/impl/build.gradle @@ -12,7 +12,7 @@ base { archivesName = "x-content-impl" } -String jacksonVersion = "2.15.0" +String jacksonVersion = "2.17.0" dependencies { compileOnly project(':libs:elasticsearch-core') diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java index ae494796c88cb..4e04230a7486e 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java @@ -54,6 +54,8 @@ public static final XContent jsonXContent() { jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); jsonFactory.configure(JsonParser.Feature.USE_FAST_DOUBLE_PARSER, true); + // keeping existing behavior of including source, for now + jsonFactory.configure(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION, true); jsonXContent = new JsonXContentImpl(); } diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java index 5037ed0b40664..add5a913faf8a 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java @@ -148,6 +148,12 @@ default void copyCurrentEvent(XContentParser parser) throws IOException { case LONG -> writeNumber(parser.longValue()); case FLOAT -> writeNumber(parser.floatValue()); case DOUBLE -> writeNumber(parser.doubleValue()); + case BIG_INTEGER -> writeNumber((BigInteger) parser.numberValue()); + // note: BIG_DECIMAL is not supported, ES only supports up to double. + // BIG_INTEGER above is only for representing unsigned long + default -> { + assert false : "missing xcontent number handling for type [" + parser.numberType() + "]"; + } } break; case VALUE_BOOLEAN: @@ -158,6 +164,9 @@ default void copyCurrentEvent(XContentParser parser) throws IOException { break; case VALUE_EMBEDDED_OBJECT: writeBinary(parser.binaryValue()); + break; + default: + assert false : "missing xcontent token handling for token [" + parser.text() + "]"; } } diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java new file mode 100644 index 0000000000000..ab141f9af484c --- /dev/null +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.xcontent; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.ByteArrayOutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; + +public class XContentGeneratorTests extends ESTestCase { + + public void testCopyCurrentEventRoundtrip() throws Exception { + assertTypeCopy("null", "null"); + assertTypeCopy("string", "\"hi\""); + assertTypeCopy("integer", "1"); + assertTypeCopy("float", "1.0"); + assertTypeCopy("long", "5000000000"); + assertTypeCopy("double", "1.123456789"); + assertTypeCopy("biginteger", "18446744073709551615"); + } + + private void assertTypeCopy(String typename, String value) throws Exception { + var input = String.format(Locale.ROOT, "{\"%s\":%s,\"%s_in_array\":[%s]}", typename, value, typename, value); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + try ( + var generator = JsonXContent.jsonXContent.createGenerator(outputStream); + var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, input) + ) { + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + generator.copyCurrentEvent(parser); + } + generator.copyCurrentEvent(parser); // copy end object too + } + assertThat(outputStream.toString(StandardCharsets.UTF_8), equalTo(input)); + } +} diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 77fd095806d10..1fc42a1b294fe 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -36,3 +36,6 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search.query/50_queries_with_synonyms/Test common terms query with stacked tokens", "#42654 - `common` query throws an exception") } +artifacts { + restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/15_analyze.yml similarity index 100% rename from modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/15_analyze.yml diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java new file mode 100644 index 0000000000000..a52016e8c7f0b --- /dev/null +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -0,0 +1,428 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; +import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.FailureStoreMetrics; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestTestPlugin; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.TestProcessor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +/** + * An integration test that verifies how different paths/scenarios affect the APM metrics for failure stores. 
+ */ +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.SUITE) +public class IngestFailureStoreMetricsIT extends ESIntegTestCase { + + private static final List METRICS = List.of( + FailureStoreMetrics.METRIC_TOTAL, + FailureStoreMetrics.METRIC_FAILURE_STORE, + FailureStoreMetrics.METRIC_REJECTED + ); + + private String template; + private String dataStream; + private String pipeline; + + @Before + public void initializeRandomNames() { + template = "template-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + dataStream = "data-stream-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + pipeline = "pipeline-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + logger.info( + "--> running [{}] with generated names data stream [{}], template [{}] and pipeline [{}]", + getTestName(), + dataStream, + template, + pipeline + ); + } + + @Override + protected Collection> nodePlugins() { + return List.of(DataStreamsPlugin.class, CustomIngestTestPlugin.class, TestTelemetryPlugin.class, MapperExtrasPlugin.class); + } + + public void testNoPipelineNoFailures() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testFailingPipelineNoFailureStore() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + createBasicPipeline("fail"); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.PIPELINE, + false + ); + } + + public void testFailingPipelineWithFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + createBasicPipeline("fail"); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.PIPELINE + ); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testShardFailureNoFailureStore() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = 
collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD, + false + ); + } + + public void testShardFailureWithFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD + ); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + /** + * Make sure the rejected counter gets incremented when there were shard-level failures while trying to redirect a document to the + * failure store. + */ + public void testRejectionFromFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + // Initialize failure store. + var rolloverRequest = new RolloverRequest(dataStream, null); + rolloverRequest.setIndicesOptions( + IndicesOptions.builder(rolloverRequest.indicesOptions()) + .failureStoreOptions(opts -> opts.includeFailureIndices(true).includeRegularIndices(false)) + .build() + ); + var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); + var failureStoreIndex = rolloverResponse.getNewIndex(); + // Add a write block to the failure store index, which causes shard-level "failures". + var addIndexBlockRequest = new AddIndexBlockRequest(IndexMetadata.APIBlock.WRITE, failureStoreIndex); + client().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest).actionGet(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD + ); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD, + true + ); + } + + /** + * Make sure metrics get the correct data_stream attribute after a reroute. 
+ */ + public void testRerouteSuccessfulCorrectName() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + + String destination = dataStream + "-destination"; + final var createDataStreamRequest = new CreateDataStreamAction.Request(destination); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + createReroutePipeline(destination); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, destination); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testDropping() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + createBasicPipeline("drop"); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testDataStreamAlias() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + var indicesAliasesRequest = new IndicesAliasesRequest(); + indicesAliasesRequest.addAliasAction( + IndicesAliasesRequest.AliasActions.add().alias("some-alias").index(dataStream).writeIndex(true) + ); + client().execute(TransportIndicesAliasesAction.TYPE, indicesAliasesRequest).actionGet(); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs("some-alias", nrOfDocs, null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + private void putComposableIndexTemplate(boolean failureStore) throws IOException { + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(template); + request.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, failureStore)) + .template(new Template(null, new CompressedXContent(""" + { + "dynamic": false, + "properties": { + "@timestamp": { + "type": "date" + }, + "count": { + "type": "long" + } + } + }"""), null)) + .build() + ); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + } + + private void createDataStream() { + final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + } + + private void createBasicPipeline(String processorType) { + createPipeline(Strings.format("\"%s\": {}", processorType)); + } + + private void createReroutePipeline(String destination) { + createPipeline(Strings.format("\"reroute\": {\"destination\": \"%s\"}", destination)); + } + + private void createPipeline(String processor) { + String pipelineDefinition = Strings.format("{\"processors\": [{%s}]}", processor); + BytesReference 
bytes = new BytesArray(pipelineDefinition); + clusterAdmin().putPipeline(new PutPipelineRequest(pipeline, bytes, XContentType.JSON)).actionGet(); + } + + private void indexDocs(String dataStream, int numDocs, String pipeline) { + indexDocs(dataStream, numDocs, "1", pipeline); + } + + private void indexDocs(String dataStream, int numDocs, String value, String pipeline) { + BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < numDocs; i++) { + String time = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) + .source(Strings.format("{\"%s\":\"%s\", \"count\": %s}", DEFAULT_TIMESTAMP_FIELD, time, value), XContentType.JSON) + .setPipeline(pipeline) + ); + } + client().bulk(bulkRequest).actionGet(); + } + + private static Map> collectTelemetry() { + Map> measurements = new HashMap<>(); + for (PluginsService pluginsService : internalCluster().getInstances(PluginsService.class)) { + final TestTelemetryPlugin telemetryPlugin = pluginsService.filterPlugins(TestTelemetryPlugin.class).findFirst().orElseThrow(); + + telemetryPlugin.collect(); + + for (String metricName : METRICS) { + measurements.put(metricName, telemetryPlugin.getLongCounterMeasurement(metricName)); + } + } + return measurements; + } + + private void assertMeasurements(List measurements, int expectedSize, String expectedDataStream) { + assertMeasurements(measurements, expectedSize, expectedDataStream, (Consumer) null); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + FailureStoreMetrics.ErrorLocation location + ) { + assertMeasurements( + measurements, + expectedSize, + expectedDataStream, + measurement -> assertEquals(location.name(), measurement.attributes().get("error_location")) + ); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + FailureStoreMetrics.ErrorLocation location, + boolean failureStore + ) { + assertMeasurements(measurements, expectedSize, expectedDataStream, measurement -> { + assertEquals(location.name(), measurement.attributes().get("error_location")); + assertEquals(failureStore, measurement.attributes().get("failure_store")); + }); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + Consumer customAssertion + ) { + assertEquals(expectedSize, measurements.size()); + for (Measurement measurement : measurements) { + assertEquals(expectedDataStream, measurement.attributes().get("data_stream")); + if (customAssertion != null) { + customAssertion.accept(measurement); + } + } + } + + public static class CustomIngestTestPlugin extends IngestTestPlugin { + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map processors = new HashMap<>(); + processors.put( + "drop", + (factories, tag, description, config) -> new TestProcessor(tag, "drop", description, ingestDocument -> null) + ); + processors.put("reroute", (factories, tag, description, config) -> { + String destination = (String) config.remove("destination"); + return new TestProcessor( + tag, + "reroute", + description, + (Consumer) ingestDocument -> ingestDocument.reroute(destination) + ); + }); + processors.put( + "fail", + (processorFactories, tag, description, config) -> new TestProcessor(tag, "fail", description, new RuntimeException()) + ); + return processors; + } + 
} +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java new file mode 100644 index 0000000000000..514eb6d8742ea --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.datastreams.lifecycle; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.WarningFailureException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class DataStreamGlobalRetentionIT extends DisabledSecurityDataStreamTestCase { + + @Before + public void setup() throws IOException { + updateClusterSettings( + Settings.builder() + .put("data_streams.lifecycle.poll_interval", "1s") + .put("cluster.lifecycle.default.rollover", "min_docs=1,max_docs=1") + .build() + ); + // Create a template with the default lifecycle + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["my-data-stream*"], + "data_stream": {}, + "template": { + "lifecycle": {} + } + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + // Create a data stream with one doc + Request createDocRequest = new Request("POST", "/my-data-stream/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); + assertOK(client().performRequest(createDocRequest)); + } + + @After + public void cleanUp() throws IOException { + adminClient().performRequest(new Request("DELETE", "_data_stream/*")); + updateClusterSettings( + Settings.builder().putNull("data_streams.lifecycle.retention.default").putNull("data_streams.lifecycle.retention.max").build() + ); + } + + @SuppressWarnings("unchecked") + public void testDataStreamRetention() throws Exception { + // Set global retention and add retention to the data stream + { + updateClusterSettings( + Settings.builder() + .put("data_streams.lifecycle.retention.default", "7d") + .put("data_streams.lifecycle.retention.max", "90d") + .build() + ); + Request request = new Request("PUT", "_data_stream/my-data-stream/_lifecycle"); + request.setJsonEntity(""" + { + "data_retention": "10s" + }"""); + assertAcknowledged(client().performRequest(request)); + } + + // Verify that the effective retention matches the retention configured on the data stream + { + Request request = new Request("GET", "/_data_stream/my-data-stream"); + Response response = client().performRequest(request); + List<Object> dataStreams = (List<Object>) entityAsMap(response).get("data_streams"); +
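+ // A data stream's own retention takes precedence over the default global retention and is only capped by the max global retention, so the 10s configured above remains the effective retention.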
assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + Map lifecycle = (Map) dataStream.get("lifecycle"); + assertThat(lifecycle.get("effective_retention"), is("10s")); + assertThat(lifecycle.get("retention_determined_by"), is("data_stream_configuration")); + assertThat(lifecycle.get("data_retention"), is("10s")); + } + + // Verify that the first generation index was removed + assertBusy(() -> { + Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream")); + Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + List backingIndices = (List) dataStream.get("indices"); + assertThat(backingIndices.size(), is(1)); + // 2 backing indices created + 1 for the deleted index + assertThat(dataStream.get("generation"), is(3)); + }, 20, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + public void testDefaultRetention() throws Exception { + // Set default global retention + updateClusterSettings(Settings.builder().put("data_streams.lifecycle.retention.default", "10s").build()); + + // Verify that the effective retention matches the default retention + { + Request request = new Request("GET", "/_data_stream/my-data-stream"); + Response response = client().performRequest(request); + List dataStreams = (List) entityAsMap(response).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + Map lifecycle = (Map) dataStream.get("lifecycle"); + assertThat(lifecycle.get("effective_retention"), is("10s")); + assertThat(lifecycle.get("retention_determined_by"), is("default_global_retention")); + assertThat(lifecycle.get("data_retention"), nullValue()); + } + + // Verify that the first generation index was removed + assertBusy(() -> { + Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream")); + Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + List backingIndices = (List) dataStream.get("indices"); + assertThat(backingIndices.size(), is(1)); + // 2 backing indices created + 1 for the deleted index + assertThat(dataStream.get("generation"), is(3)); + }, 20, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + public void testMaxRetention() throws Exception { + // Set default global retention + updateClusterSettings(Settings.builder().put("data_streams.lifecycle.retention.max", "10s").build()); + boolean withDataStreamLevelRetention = randomBoolean(); + if (withDataStreamLevelRetention) { + try { + Request request = new Request("PUT", "_data_stream/my-data-stream/_lifecycle"); + request.setJsonEntity(""" + { + "data_retention": "30d" + }"""); + assertAcknowledged(client().performRequest(request)); + fail("Should have returned a warning about data retention exceeding the max retention"); + } catch (WarningFailureException warningFailureException) { + assertThat( + warningFailureException.getMessage(), + containsString("The retention provided [30d] is exceeding the max allowed data retention of this project [10s]") + ); + } + } + + // Verify that the effective retention matches the max retention + { + Request request = new Request("GET", "/_data_stream/my-data-stream"); + Response response = client().performRequest(request); + List dataStreams = (List) 
entityAsMap(response).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + Map lifecycle = (Map) dataStream.get("lifecycle"); + assertThat(lifecycle.get("effective_retention"), is("10s")); + assertThat(lifecycle.get("retention_determined_by"), is("max_global_retention")); + if (withDataStreamLevelRetention) { + assertThat(lifecycle.get("data_retention"), is("30d")); + } else { + assertThat(lifecycle.get("data_retention"), nullValue()); + } + } + + // Verify that the first generation index was removed + assertBusy(() -> { + Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream")); + Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + List backingIndices = (List) dataStream.get("indices"); + assertThat(backingIndices.size(), is(1)); + // 2 backing indices created + 1 for the deleted index + assertThat(dataStream.get("generation"), is(3)); + }, 20, TimeUnit.SECONDS); + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java index 0b41d62f6fe2c..f53fdcb6e8600 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.logsdb.datageneration.DataGenerator; import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification; import org.elasticsearch.logsdb.datageneration.FieldType; @@ -32,20 +33,19 @@ */ public class StandardVersusLogsIndexModeRandomDataChallengeRestIT extends StandardVersusLogsIndexModeChallengeRestIT { private final boolean fullyDynamicMapping; - private final boolean subobjectsDisabled; + private final ObjectMapper.Subobjects subobjects; private final DataGenerator dataGenerator; public StandardVersusLogsIndexModeRandomDataChallengeRestIT() { super(); this.fullyDynamicMapping = randomBoolean(); - this.subobjectsDisabled = randomBoolean(); + this.subobjects = randomFrom(ObjectMapper.Subobjects.values()); var specificationBuilder = DataGeneratorSpecification.builder(); - // TODO enable nested fields when subobjects are enabled - // It currently hits a bug with empty nested objects - // Nested fields don't work with subobjects: false. 
- specificationBuilder = specificationBuilder.withNestedFieldsLimit(0); + if (subobjects != ObjectMapper.Subobjects.ENABLED) { + specificationBuilder = specificationBuilder.withNestedFieldsLimit(0); + } this.dataGenerator = new DataGenerator(specificationBuilder.withDataSourceHandlers(List.of(new DataSourceHandler() { @Override public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) { @@ -61,7 +61,7 @@ public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeG } public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequest.ObjectMappingParametersGenerator request) { - if (subobjectsDisabled == false) { + if (subobjects == ObjectMapper.Subobjects.ENABLED) { // Use default behavior return null; } @@ -69,16 +69,16 @@ public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequ assert request.isNested() == false; // "enabled: false" is not compatible with subobjects: false - // "runtime: false/strict/runtime" is not compatible with subobjects: false + // "dynamic: false/strict/runtime" is not compatible with subobjects: false return new DataSourceResponse.ObjectMappingParametersGenerator(() -> { var parameters = new HashMap(); + parameters.put("subobjects", subobjects.toString()); if (ESTestCase.randomBoolean()) { parameters.put("dynamic", "true"); } if (ESTestCase.randomBoolean()) { parameters.put("enabled", "true"); } - return parameters; }); } @@ -107,15 +107,15 @@ public void baselineMappings(XContentBuilder builder) throws IOException { @Override public void contenderMappings(XContentBuilder builder) throws IOException { if (fullyDynamicMapping == false) { - if (subobjectsDisabled) { - dataGenerator.writeMapping(builder, b -> builder.field("subobjects", false)); + if (subobjects != ObjectMapper.Subobjects.ENABLED) { + dataGenerator.writeMapping(builder, b -> builder.field("subobjects", subobjects.toString())); } else { dataGenerator.writeMapping(builder); } } else { builder.startObject(); - if (subobjectsDisabled) { - builder.field("subobjects", false); + if (subobjects != ObjectMapper.Subobjects.ENABLED) { + builder.field("subobjects", subobjects.toString()); } builder.endObject(); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index cd233e29dee0e..615c0006a4ce6 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -201,7 +201,7 @@ public Collection createComponents(PluginServices services) { errorStoreInitialisationService.get(), services.allocationService(), dataStreamLifecycleErrorsPublisher.get(), - services.dataStreamGlobalRetentionProvider() + services.dataStreamGlobalRetentionSettings() ) ); dataLifecycleInitialisationService.get().init(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java index b32ba361963e5..dcca32355082b 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java @@ -21,7 +21,7 @@ import 
org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -57,7 +57,7 @@ public class GetDataStreamsTransportAction extends TransportMasterNodeReadAction private static final Logger LOGGER = LogManager.getLogger(GetDataStreamsTransportAction.class); private final SystemIndices systemIndices; private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public GetDataStreamsTransportAction( @@ -67,7 +67,7 @@ public GetDataStreamsTransportAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( GetDataStreamAction.NAME, @@ -81,7 +81,7 @@ public GetDataStreamsTransportAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.systemIndices = systemIndices; - this.dataStreamGlobalRetentionProvider = dataStreamGlobalRetentionProvider; + this.globalRetentionSettings = globalRetentionSettings; clusterSettings = clusterService.getClusterSettings(); } @@ -93,7 +93,7 @@ protected void masterOperation( ActionListener listener ) throws Exception { listener.onResponse( - innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, dataStreamGlobalRetentionProvider) + innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, globalRetentionSettings) ); } @@ -103,7 +103,7 @@ static GetDataStreamAction.Response innerOperation( IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, ClusterSettings clusterSettings, - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings globalRetentionSettings ) { List dataStreams = getDataStreams(state, indexNameExpressionResolver, request); List dataStreamInfos = new ArrayList<>(dataStreams.size()); @@ -223,7 +223,7 @@ public int compareTo(IndexInfo o) { return new GetDataStreamAction.Response( dataStreamInfos, request.includeDefaults() ? 
clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - dataStreamGlobalRetentionProvider.provide() + globalRetentionSettings.get() ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 9e1b01ef47a88..0b24a3c9c9101 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -44,7 +44,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -162,7 +162,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab final ResultDeduplicator transportActionsDeduplicator; final ResultDeduplicator clusterStateChangesDeduplicator; private final DataStreamLifecycleHealthInfoPublisher dslHealthInfoPublisher; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; private LongSupplier nowSupplier; private final Clock clock; private final DataStreamLifecycleErrorStore errorStore; @@ -211,7 +211,7 @@ public DataStreamLifecycleService( DataStreamLifecycleErrorStore errorStore, AllocationService allocationService, DataStreamLifecycleHealthInfoPublisher dataStreamLifecycleHealthInfoPublisher, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.settings = settings; this.client = client; @@ -222,7 +222,7 @@ public DataStreamLifecycleService( this.clusterStateChangesDeduplicator = new ResultDeduplicator<>(threadPool.getThreadContext()); this.nowSupplier = nowSupplier; this.errorStore = errorStore; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; this.scheduledJob = null; this.pollInterval = DATA_STREAM_LIFECYCLE_POLL_INTERVAL_SETTING.get(settings); this.targetMergePolicyFloorSegment = DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING.get(settings); @@ -296,13 +296,13 @@ public void close() { @Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(LIFECYCLE_JOB_NAME)) { + if (event.jobName().equals(LIFECYCLE_JOB_NAME)) { if (this.isMaster) { logger.trace( "Data stream lifecycle job triggered: {}, {}, {}", - event.getJobName(), - event.getScheduledTime(), - event.getTriggeredTime() + event.jobName(), + event.scheduledTime(), + event.triggeredTime() ); run(clusterService.state()); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { @@ -819,7 +819,7 @@ private Index maybeExecuteRollover(ClusterState state, DataStream dataStream, bo RolloverRequest rolloverRequest = getDefaultRolloverRequest( rolloverConfiguration, dataStream.getName(), - dataStream.getLifecycle().getEffectiveDataRetention(dataStream.isSystem() ? 
null : globalRetentionResolver.provide()), + dataStream.getLifecycle().getEffectiveDataRetention(dataStream.isSystem() ? null : globalRetentionSettings.get()), rolloverFailureStore ); transportActionsDeduplicator.executeOnce( @@ -871,7 +871,7 @@ private Index maybeExecuteRollover(ClusterState state, DataStream dataStream, bo */ Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set indicesToExcludeForRemainingRun) { Metadata metadata = state.metadata(); - DataStreamGlobalRetention globalRetention = dataStream.isSystem() ? null : globalRetentionResolver.provide(); + DataStreamGlobalRetention globalRetention = dataStream.isSystem() ? null : globalRetentionSettings.get(); List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier, globalRetention); if (backingIndicesOlderThanRetention.isEmpty()) { return Set.of(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java index 408bc3b239f23..855b1713e5ec2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -44,7 +44,7 @@ public class TransportExplainDataStreamLifecycleAction extends TransportMasterNo ExplainDataStreamLifecycleAction.Response> { private final DataStreamLifecycleErrorStore errorStore; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public TransportExplainDataStreamLifecycleAction( @@ -54,7 +54,7 @@ public TransportExplainDataStreamLifecycleAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DataStreamLifecycleErrorStore dataLifecycleServiceErrorStore, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( ExplainDataStreamLifecycleAction.INSTANCE.name(), @@ -68,7 +68,7 @@ public TransportExplainDataStreamLifecycleAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.errorStore = dataLifecycleServiceErrorStore; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -118,7 +118,7 @@ protected void masterOperation( new ExplainDataStreamLifecycleAction.Response( explainIndices, request.includeDefaults() ? 
clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - globalRetentionResolver.provide() + globalRetentionSettings.get() ) ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java index 3def1351dd5e8..452295aab0ce9 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; @@ -40,7 +40,7 @@ public class TransportGetDataStreamLifecycleAction extends TransportMasterNodeRe GetDataStreamLifecycleAction.Request, GetDataStreamLifecycleAction.Response> { private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public TransportGetDataStreamLifecycleAction( @@ -49,7 +49,7 @@ public TransportGetDataStreamLifecycleAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( GetDataStreamLifecycleAction.INSTANCE.name(), @@ -63,7 +63,7 @@ public TransportGetDataStreamLifecycleAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -96,7 +96,7 @@ protected void masterOperation( .sorted(Comparator.comparing(GetDataStreamLifecycleAction.Response.DataStreamLifecycle::dataStreamName)) .toList(), request.includeDefaults() ? 
clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - globalRetentionResolver.provide() + globalRetentionSettings.get() ) ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java index f44e59d0278c3..82350130e57af 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +20,7 @@ import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @@ -56,4 +58,9 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java index 94724f6778013..00f9d4da88301 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +20,7 @@ import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -54,4 +56,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index 5acb59841d6a6..c3178208d51c2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +20,7 @@ import org.elasticsearch.rest.action.RestToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -50,4 +52,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index b61b70f55c734..d5356e371f497 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; @@ -216,7 +216,10 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { xContentRegistry(), EmptySystemIndices.INSTANCE, indexSettingProviders, - new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java index cd3f862a51ddf..80d867ec7745e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -45,7 +45,8 @@ public 
class GetDataStreamsTransportActionTests extends ESTestCase { private final IndexNameExpressionResolver resolver = TestIndexNameExpressionResolver.newInstance(); private final SystemIndices systemIndices = new SystemIndices(List.of()); - private final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( + private final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), DataStreamFactoryRetention.emptyFactoryRetention() ); @@ -165,7 +166,7 @@ public void testGetTimeSeriesDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -195,7 +196,7 @@ public void testGetTimeSeriesDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -245,7 +246,7 @@ public void testGetTimeSeriesDataStreamWithOutOfOrderIndices() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -288,7 +289,7 @@ public void testGetTimeSeriesMixedDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); var name1 = DataStream.getDefaultBackingIndexName("ds-1", 1, instant.toEpochMilli()); @@ -333,30 +334,24 @@ public void testPassingGlobalRetention() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat(response.getGlobalRetention(), nullValue()); DataStreamGlobalRetention globalRetention = new DataStreamGlobalRetention( TimeValue.timeValueDays(randomIntBetween(1, 5)), TimeValue.timeValueDays(randomIntBetween(5, 10)) ); - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProviderWithSettings = new DataStreamGlobalRetentionProvider( - new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return globalRetention.maxRetention(); - } - - @Override - public TimeValue getDefaultRetention() { - return globalRetention.defaultRetention(); - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - } + DataStreamGlobalRetentionSettings withGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + globalRetention.defaultRetention() + ) + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), globalRetention.maxRetention()) + .build() + ), + DataStreamFactoryRetention.emptyFactoryRetention() ); response = GetDataStreamsTransportAction.innerOperation( state, @@ -364,7 +359,7 @@ public void init(ClusterSettings clusterSettings) { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProviderWithSettings + withGlobalRetentionSettings ); assertThat(response.getGlobalRetention(), equalTo(globalRetention)); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 77b4d5f21529b..8cb27fd9fd282 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling.Round; @@ -138,7 +138,8 @@ public class DataStreamLifecycleServiceTests extends ESTestCase { private List clientSeenRequests; private DoExecuteDelegate clientDelegate; private ClusterService clusterService; - private final DataStreamGlobalRetentionProvider globalRetentionResolver = new DataStreamGlobalRetentionProvider( + private final DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), DataStreamFactoryRetention.emptyFactoryRetention() ); @@ -187,7 +188,7 @@ public void setupServices() { errorStore, new FeatureService(List.of(new DataStreamFeatures())) ), - globalRetentionResolver + globalRetentionSettings ); clientDelegate = null; dataStreamLifecycleService.init(); @@ -1426,7 +1427,7 @@ public void testTrackingTimeStats() { errorStore, new FeatureService(List.of(new DataStreamFeatures())) ), - globalRetentionResolver + globalRetentionSettings ); assertThat(service.getLastRunDuration(), is(nullValue())); assertThat(service.getTimeBetweenStarts(), is(nullValue())); diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml new file mode 100644 index 0000000000000..ef36f283fe237 --- /dev/null +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml @@ -0,0 +1,104 @@ +setup: + - requires: + cluster_features: [ "gte_v8.11.0" ] + reason: "Data stream lifecycle was released as tech preview in 8.11" + test_runner_features: allowed_warnings + - do: + allowed_warnings: + - "index template [template-with-lifecycle] has index patterns [managed-data-stream] matching patterns from existing older templates [global] with patterns (global => [*]); this template [template-with-lifecycle] will take precedence during new index creation" + indices.put_index_template: + name: template-with-lifecycle + body: + index_patterns: [ managed-data-stream ] + template: + settings: + index.number_of_replicas: 0 + lifecycle: + data_retention: "30d" + data_stream: { } + - do: + indices.create_data_stream: + name: managed-data-stream +--- +teardown: + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.max: null + data_streams.lifecycle.retention.default: null + +--- +"Retrieve effective retention via the data stream API": + - requires: + reason: "Effective retention was exposed in 8.16+" + 
test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_data_stream/{index} + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + indices.get_data_stream: + name: "managed-data-stream" + - match: { data_streams.0.name: managed-data-stream } + - match: { data_streams.0.lifecycle.data_retention: '30d' } + - match: { data_streams.0.lifecycle.effective_retention: '30d'} + - match: { data_streams.0.lifecycle.retention_determined_by: 'data_stream_configuration'} + +--- +"Retrieve effective retention with explain": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /{index}/_lifecycle/explain + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.max: "7d" + - is_true: acknowledged + - do: + indices.get_data_stream: + name: "managed-data-stream" + - match: { data_streams.0.name: managed-data-stream } + - set: + data_streams.0.indices.0.index_name: backing_index + + - do: + indices.explain_data_lifecycle: + index: managed-data-stream + include_defaults: true + - match: { indices.$backing_index.managed_by_lifecycle: true } + - match: { indices.$backing_index.lifecycle.data_retention: '30d' } + - match: { indices.$backing_index.lifecycle.effective_retention: '7d' } + - match: { indices.$backing_index.lifecycle.retention_determined_by: 'max_global_retention' } + +--- +"Retrieve effective retention with data stream lifecycle": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_data_stream/{index}/_lifecycle + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + indices.put_data_lifecycle: + name: "managed-data-stream" + body: {} + - is_true: acknowledged + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.default: "7d" + - do: + indices.get_data_lifecycle: + name: "managed-data-stream" + - length: { data_streams: 1} + - match: { data_streams.0.name: managed-data-stream } + - match: { data_streams.0.lifecycle.effective_retention: '7d' } + - match: { data_streams.0.lifecycle.retention_determined_by: 'default_global_retention' } diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 1132111826563..1ab370ad203fc 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -10,13 +10,20 @@ import fixture.s3.S3HttpHandler; import com.amazonaws.http.AmazonHttpClient; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.MultipartUpload; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.LogEvent; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import 
org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -54,6 +61,7 @@ import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -70,6 +78,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -81,6 +90,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasEntry; @@ -451,6 +461,106 @@ private Map getServerMetrics() { return Collections.emptyMap(); } + public void testMultipartUploadCleanup() { + final String repoName = randomRepositoryName(); + createRepository(repoName, repositorySettings(repoName), true); + + createIndex("test-idx-1"); + for (int i = 0; i < 100; i++) { + prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + client().admin().indices().prepareRefresh().get(); + + final String snapshotName = randomIdentifier(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + final var repository = asInstanceOf( + S3Repository.class, + internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName) + ); + final var blobStore = asInstanceOf(S3BlobStore.class, asInstanceOf(BlobStoreWrapper.class, repository.blobStore()).delegate()); + + try (var clientRef = blobStore.clientReference()) { + final var danglingBlobName = randomIdentifier(); + final var initiateMultipartUploadRequest = new InitiateMultipartUploadRequest( + blobStore.bucket(), + blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName + ); + initiateMultipartUploadRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + final var multipartUploadResult = clientRef.client().initiateMultipartUpload(initiateMultipartUploadRequest); + + final var listMultipartUploadsRequest = new ListMultipartUploadsRequest(blobStore.bucket()).withPrefix( + repository.basePath().buildAsString() + ); + listMultipartUploadsRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + assertEquals( + List.of(multipartUploadResult.getUploadId()), + clientRef.client() + 
.listMultipartUploads(listMultipartUploadsRequest) + .getMultipartUploads() + .stream() + .map(MultipartUpload::getUploadId) + .toList() + ); + + final var seenCleanupLogLatch = new CountDownLatch(1); + MockLog.assertThatLogger(() -> { + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)); + safeAwait(seenCleanupLogLatch); + }, + S3BlobContainer.class, + new MockLog.SeenEventExpectation( + "found-dangling", + S3BlobContainer.class.getCanonicalName(), + Level.INFO, + "found [1] possibly-dangling multipart uploads; will clean them up after finalizing the current snapshot deletions" + ), + new MockLog.SeenEventExpectation( + "cleaned-dangling", + S3BlobContainer.class.getCanonicalName(), + Level.INFO, + Strings.format( + "cleaned up dangling multipart upload [%s] of blob [%s]*test-multipart-upload/%s]", + multipartUploadResult.getUploadId(), + repoName, + danglingBlobName + ) + ) { + @Override + public void match(LogEvent event) { + super.match(event); + if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) { + seenCleanupLogLatch.countDown(); + } + } + } + ); + + assertThat( + clientRef.client() + .listMultipartUploads(listMultipartUploadsRequest) + .getMultipartUploads() + .stream() + .map(MultipartUpload::getUploadId) + .toList(), + empty() + ); + } + } + /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ @@ -592,6 +702,9 @@ public void maybeTrack(final String rawRequest, Headers requestHeaders) { trackRequest("ListObjects"); metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.LIST_OBJECTS, purpose), k -> new AtomicLong()) .incrementAndGet(); + } else if (Regex.simpleMatch("GET /*/?uploads&*", request)) { + // TODO track ListMultipartUploads requests + logger.info("--> ListMultipartUploads not tracked [{}] with parsed purpose [{}]", request, purpose.getKey()); } else if (Regex.simpleMatch("GET /*/*", request)) { trackRequest("GetObject"); metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.GET_OBJECT, purpose), k -> new AtomicLong()) diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 3e2249bf82bb6..cf3e73df2aee2 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -28,13 +28,17 @@ import com.amazonaws.services.s3.model.UploadPartResult; import com.amazonaws.util.ValidationUtils; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.ThreadedActionListener; +import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; @@ -54,6 +58,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.core.Tuple; +import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.ChunkedBlobOutputStream; import org.elasticsearch.repositories.s3.S3BlobStore.Operation; import org.elasticsearch.threadpool.ThreadPool; @@ -912,4 +917,94 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCountingRunnable refs) { + try (var clientReference = blobStore.clientReference()) { + final var bucket = blobStore.bucket(); + final var request = new ListMultipartUploadsRequest(bucket).withPrefix(keyPath).withMaxUploads(maxUploads); + request.putCustomQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, OperationPurpose.SNAPSHOT_DATA.getKey()); + final var multipartUploadListing = SocketAccess.doPrivileged(() -> clientReference.client().listMultipartUploads(request)); + final var multipartUploads = multipartUploadListing.getMultipartUploads(); + if (multipartUploads.isEmpty()) { + logger.debug("found no multipart uploads to clean up"); + return ActionListener.noop(); + } else { + // the uploads are only _possibly_ dangling because it's also possible we're no longer then master and the new master has + // started some more shard snapshots + if (multipartUploadListing.isTruncated()) { + logger.info(""" + found at least [{}] possibly-dangling multipart uploads; will clean up the first [{}] after finalizing \ + the current snapshot deletions, and will check for further possibly-dangling multipart uploads in future \ + snapshot deletions""", multipartUploads.size(), multipartUploads.size()); + } else { + logger.info(""" + found [{}] possibly-dangling multipart uploads; \ + will clean them up after finalizing the current snapshot deletions""", multipartUploads.size()); + } + return newMultipartUploadCleanupListener( + refs, + multipartUploads.stream().map(u -> new AbortMultipartUploadRequest(bucket, u.getKey(), u.getUploadId())).toList() + ); + } + } catch (Exception e) { + // Cleanup is a best-effort thing, we can't do anything better than log and carry on here. + logger.warn("failure while checking for possibly-dangling multipart uploads", e); + return ActionListener.noop(); + } + } + + private ActionListener newMultipartUploadCleanupListener( + RefCountingRunnable refs, + List abortMultipartUploadRequests + ) { + return new ThreadedActionListener<>(blobStore.getSnapshotExecutor(), ActionListener.releaseAfter(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + try (var clientReference = blobStore.clientReference()) { + for (final var abortMultipartUploadRequest : abortMultipartUploadRequests) { + abortMultipartUploadRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + try { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortMultipartUploadRequest)); + logger.info( + "cleaned up dangling multipart upload [{}] of blob [{}][{}][{}]", + abortMultipartUploadRequest.getUploadId(), + blobStore.getRepositoryMetadata().name(), + abortMultipartUploadRequest.getBucketName(), + abortMultipartUploadRequest.getKey() + ); + } catch (Exception e) { + // Cleanup is a best-effort thing, we can't do anything better than log and carry on here. Note that any failure + // is surprising, even a 404 means that something else aborted/completed the upload at a point where there + // should be no other processes interacting with the repository. 
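+ // Log at WARN and carry on with any remaining abort requests.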
+ logger.warn( + Strings.format( + "failed to clean up multipart upload [%s] of blob [%s][%s][%s]", + abortMultipartUploadRequest.getUploadId(), + blobStore.getRepositoryMetadata().name(), + abortMultipartUploadRequest.getBucketName(), + abortMultipartUploadRequest.getKey() + ), + e + ); + } + } + } + } + + @Override + public void onFailure(Exception e) { + logger.log( + MasterService.isPublishFailureException(e) + || (e instanceof RepositoryException repositoryException + && repositoryException.getCause() instanceof Exception cause + && MasterService.isPublishFailureException(cause)) ? Level.DEBUG : Level.WARN, + "failed to start cleanup of dangling multipart uploads", + e + ); + } + }, refs.acquire())); + } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 72b48c5903629..d75a3e8ad433e 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ReferenceDocs; @@ -28,6 +29,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -35,15 +37,17 @@ import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.snapshots.SnapshotDeleteListener; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import java.util.Collection; import java.util.Map; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -183,6 +187,16 @@ class S3Repository extends MeteredBlobStoreRepository { S3BlobStore.MAX_BULK_DELETES ); + /** + * Maximum number of uploads to request for cleanup when doing a snapshot delete.
+ */ + static final Setting MAX_MULTIPART_UPLOAD_CLEANUP_SIZE = Setting.intSetting( + "max_multipart_upload_cleanup_size", + 1000, + 0, + Setting.Property.Dynamic + ); + private final S3Service service; private final String bucket; @@ -305,7 +319,7 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte finalizeSnapshotContext.clusterMetadata(), finalizeSnapshotContext.snapshotInfo(), finalizeSnapshotContext.repositoryMetaVersion(), - delayedListener(ActionListener.runAfter(finalizeSnapshotContext, () -> metadataDone.onResponse(null))), + wrapWithWeakConsistencyProtection(ActionListener.runAfter(finalizeSnapshotContext, () -> metadataDone.onResponse(null))), info -> metadataDone.addListener(new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -324,50 +338,19 @@ public void onFailure(Exception e) { super.finalizeSnapshot(wrappedFinalizeContext); } - @Override - protected SnapshotDeleteListener wrapWithWeakConsistencyProtection(SnapshotDeleteListener listener) { - return new SnapshotDeleteListener() { - @Override - public void onDone() { - listener.onDone(); - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet(threadPool.schedule(() -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - listener.onRepositoryDataWritten(repositoryData); - }, coolDown, snapshotExecutor)); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - - @Override - public void onFailure(Exception e) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet(threadPool.schedule(() -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - listener.onFailure(e); - }, coolDown, snapshotExecutor)); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - }; - } - /** * Wraps given listener such that it is executed with a delay of {@link #coolDown} on the snapshot thread-pool after being invoked. * See {@link #COOLDOWN_PERIOD} for details. 
*/ - private ActionListener delayedListener(ActionListener listener) { - final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { + @Override + protected ActionListener wrapWithWeakConsistencyProtection(ActionListener listener) { + final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); assert cancellable != null; }); return new ActionListener<>() { @Override - public void onResponse(T response) { + public void onResponse(RepositoryData response) { logCooldownInfo(); final Scheduler.Cancellable existing = finalizationFuture.getAndSet( threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), coolDown, snapshotExecutor) @@ -459,4 +442,75 @@ public String getAnalysisFailureExtraDetail() { ReferenceDocs.S3_COMPATIBLE_REPOSITORIES ); } + + // only one multipart cleanup process running at once + private final AtomicBoolean multipartCleanupInProgress = new AtomicBoolean(); + + @Override + public void deleteSnapshots( + Collection snapshotIds, + long repositoryDataGeneration, + IndexVersion minimumNodeVersion, + ActionListener repositoryDataUpdateListener, + Runnable onCompletion + ) { + getMultipartUploadCleanupListener( + isReadOnly() ? 0 : MAX_MULTIPART_UPLOAD_CLEANUP_SIZE.get(getMetadata().settings()), + new ActionListener<>() { + @Override + public void onResponse(ActionListener multipartUploadCleanupListener) { + S3Repository.super.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, new ActionListener<>() { + @Override + public void onResponse(RepositoryData repositoryData) { + multipartUploadCleanupListener.onResponse(null); + repositoryDataUpdateListener.onResponse(repositoryData); + } + + @Override + public void onFailure(Exception e) { + multipartUploadCleanupListener.onFailure(e); + repositoryDataUpdateListener.onFailure(e); + } + }, onCompletion); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to get multipart uploads for cleanup during snapshot delete", e); + assert false : e; // getMultipartUploadCleanupListener doesn't throw and snapshotExecutor doesn't reject anything + repositoryDataUpdateListener.onFailure(e); + } + } + ); + } + + /** + * Capture the current list of multipart uploads, and (asynchronously) return a listener which, if completed successfully, aborts those + * uploads. Called at the start of a snapshot delete operation, at which point there should be no ongoing uploads (except in the case of + * a master failover). We protect against the master failover case by waiting until the delete operation successfully updates the root + * index-N blob before aborting any uploads. + */ + void getMultipartUploadCleanupListener(int maxUploads, ActionListener> listener) { + if (maxUploads == 0) { + listener.onResponse(ActionListener.noop()); + return; + } + + if (multipartCleanupInProgress.compareAndSet(false, true) == false) { + logger.info("multipart upload cleanup already in progress"); + listener.onResponse(ActionListener.noop()); + return; + } + + try (var refs = new RefCountingRunnable(() -> multipartCleanupInProgress.set(false))) { + snapshotExecutor.execute( + ActionRunnable.supply( + ActionListener.releaseAfter(listener, refs.acquire()), + () -> blobContainer() instanceof S3BlobContainer s3BlobContainer + ? 
s3BlobContainer.getMultipartUploadCleanupListener(maxUploads, refs) + : ActionListener.noop() + ) + ); + } + } } diff --git a/muted-tests.yml b/muted-tests.yml index 22adc4a8c44b5..463075f1f93ae 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -38,9 +38,6 @@ tests: - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT" issue: "https://github.com/elastic/elasticsearch/issues/108628" method: "testDeprecatedSettingsReturnWarnings" -- class: "org.elasticsearch.xpack.inference.InferenceCrudIT" - issue: "https://github.com/elastic/elasticsearch/issues/109391" - method: "testDeleteEndpointWhileReferencedByPipeline" - class: "org.elasticsearch.xpack.test.rest.XPackRestIT" issue: "https://github.com/elastic/elasticsearch/issues/109687" method: "test {p0=sql/translate/Translate SQL}" @@ -65,9 +62,6 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: org.elasticsearch.xpack.security.LicenseDLSFLSRoleIT - method: testQueryDLSFLSRolesShowAsDisabled - issue: https://github.com/elastic/elasticsearch/issues/110729 - class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests method: testPopulationOfCacheWhenLoadingPrivilegesForAllApplications issue: https://github.com/elastic/elasticsearch/issues/110789 @@ -80,9 +74,6 @@ tests: - class: org.elasticsearch.xpack.esql.spatial.SpatialPushDownGeoPointIT method: testPushedDownQueriesSingleValue issue: https://github.com/elastic/elasticsearch/issues/111084 -- class: org.elasticsearch.xpack.esql.spatial.SpatialPushDownCartesianPointIT - method: testPushedDownQueriesSingleValue - issue: https://github.com/elastic/elasticsearch/issues/110982 - class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT issue: https://github.com/elastic/elasticsearch/issues/111124 - class: org.elasticsearch.cluster.PrevalidateShardPathIT @@ -94,9 +85,6 @@ tests: - class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT method: testAuthenticateWithImplicitFlow issue: https://github.com/elastic/elasticsearch/issues/111191 -- class: org.elasticsearch.action.admin.indices.create.SplitIndexIT - method: testSplitIndexPrimaryTerm - issue: https://github.com/elastic/elasticsearch/issues/111282 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT issue: https://github.com/elastic/elasticsearch/issues/111319 - class: org.elasticsearch.xpack.ml.integration.InferenceIngestInputConfigIT @@ -119,9 +107,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=rollup/security_tests/Index-based access} issue: https://github.com/elastic/elasticsearch/issues/111631 -- class: org.elasticsearch.xpack.inference.integration.ModelRegistryIT - method: testGetModel - issue: https://github.com/elastic/elasticsearch/issues/111570 - class: org.elasticsearch.tdigest.ComparisonTests method: testSparseGaussianDistribution issue: https://github.com/elastic/elasticsearch/issues/111721 @@ -137,12 +122,6 @@ tests: - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.indices.breaker.HierarchyCircuitBreakerTelemetryTests - method: testCircuitBreakerTripCountMetric - issue: https://github.com/elastic/elasticsearch/issues/111778 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - 
method: test {comparison.RangeVersion SYNC} - issue: https://github.com/elastic/elasticsearch/issues/111814 - class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT method: "test {p0=esql/26_aggs_bucket/friendlier BUCKET interval hourly: #110916}" issue: https://github.com/elastic/elasticsearch/issues/111901 @@ -185,8 +164,31 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/111923 - class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT issue: https://github.com/elastic/elasticsearch/issues/111923 +- class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT + method: testScaledFloat + issue: https://github.com/elastic/elasticsearch/issues/112003 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} + issue: https://github.com/elastic/elasticsearch/issues/111999 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testDeleteJobAfterMissingIndex + issue: https://github.com/elastic/elasticsearch/issues/112088 +- class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT + method: testLimitedPrivilege + issue: https://github.com/elastic/elasticsearch/issues/112110 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByTwoCalculatedSecondOverwrites SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112117 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112118 +- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/112143 - class: org.elasticsearch.xpack.test.rest.XPackRestIT - issue: https://github.com/elastic/elasticsearch/issues/111944 + method: test {p0=transform/preview_transforms/Test preview transform latest} + issue: https://github.com/elastic/elasticsearch/issues/112144 +- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/112147 # Examples: # diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties index efe2ff3449216..9036682bf0f0c 100644 --- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties +++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip +distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml index 94c19a4d69e17..ca6d65349c923 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml @@ -6,20 +6,21 @@ reason: added in 8.3.0 - do: - indices.put_template: + indices.put_index_template: name: test body: 
index_patterns: test-* - mappings: - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -70,15 +71,16 @@ reason: added in 8.3.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -129,22 +131,23 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -196,17 +199,18 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml new file mode 100644 index 0000000000000..e4fee3569fef2 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml @@ -0,0 +1,254 @@ +--- +"Metrics object indexing": + - requires: + test_runner_features: allowed_warnings_regex + cluster_features: ["mapper.subobjects_auto"] + reason: requires supporting subobjects auto setting + + - do: + indices.put_index_template: + name: test + body: + index_patterns: test-* + template: + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { metrics.host.name: localhost, metrics.host.id: 1, metrics.time: 10, metrics.time.max: 100, metrics.time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: metrics* + - match: {fields.metrics\.host\.id.long.searchable: true} + - match: {fields.metrics\.host\.id.long.aggregatable: true} + - match: {fields.metrics\.host\.name.keyword.searchable: true} + - match: {fields.metrics\.host\.name.keyword.aggregatable: true} + - match: {fields.metrics\.time.long.searchable: true} + - match: {fields.metrics\.time.long.aggregatable: true} + - match: {fields.metrics\.time\.max.long.searchable: true} + - match: {fields.metrics\.time\.max.long.aggregatable: true} + - match: {fields.metrics\.time\.min.long.searchable: true} + - match: {fields.metrics\.time\.min.long.aggregatable: true} + + - do: + 
get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + metrics.host.name: localhost + metrics.host.id: 1 + metrics.time: 10 + metrics.time.max: 100 + metrics.time.min: 1 + +--- +"Root with metrics": + - requires: + test_runner_features: allowed_warnings_regex + cluster_features: ["mapper.subobjects_auto"] + reason: requires supporting subobjects auto setting + + - do: + indices.put_index_template: + name: test + body: + index_patterns: test-* + template: + mappings: + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { host.name: localhost, host.id: 1, time: 10, time.max: 100, time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: [host*, time*] + - match: {fields.host\.name.keyword.searchable: true} + - match: {fields.host\.name.keyword.aggregatable: true} + - match: {fields.host\.id.long.searchable: true} + - match: {fields.host\.id.long.aggregatable: true} + - match: {fields.time.long.searchable: true} + - match: {fields.time.long.aggregatable: true} + - match: {fields.time\.max.long.searchable: true} + - match: {fields.time\.max.long.aggregatable: true} + - match: {fields.time\.min.long.searchable: true} + - match: {fields.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 + +--- +"Metrics object indexing with synthetic source": + - requires: + test_runner_features: allowed_warnings_regex + cluster_features: ["mapper.subobjects_auto"] + reason: added in 8.4.0 + + - do: + indices.put_index_template: + name: test + body: + index_patterns: test-* + template: + mappings: + _source: + mode: synthetic + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { metrics.host.name: localhost, metrics.host.id: 1, metrics.time: 10, metrics.time.max: 100, metrics.time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: metrics* + - match: {fields.metrics\.host\.id.long.searchable: true} + - match: {fields.metrics\.host\.id.long.aggregatable: true} + - match: {fields.metrics\.host\.name.keyword.searchable: true} + - match: {fields.metrics\.host\.name.keyword.aggregatable: true} + - match: {fields.metrics\.time.long.searchable: true} + - match: {fields.metrics\.time.long.aggregatable: true} + - match: {fields.metrics\.time\.max.long.searchable: true} + - match: {fields.metrics\.time\.max.long.aggregatable: true} + - match: {fields.metrics\.time\.min.long.searchable: true} + - match: {fields.metrics\.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + metrics: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 + +--- +"Root without subobjects with 
synthetic source": + - requires: + test_runner_features: allowed_warnings_regex + cluster_features: ["mapper.subobjects_auto"] + reason: added in 8.4.0 + + - do: + indices.put_index_template: + name: test + body: + index_patterns: test-* + template: + mappings: + _source: + mode: synthetic + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { host.name: localhost, host.id: 1, time: 10, time.max: 100, time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: [host*, time*] + - match: {fields.host\.name.keyword.searchable: true} + - match: {fields.host\.name.keyword.aggregatable: true} + - match: {fields.host\.id.long.searchable: true} + - match: {fields.host\.id.long.aggregatable: true} + - match: {fields.time.long.searchable: true} + - match: {fields.time.long.aggregatable: true} + - match: {fields.time\.max.long.searchable: true} + - match: {fields.time\.max.long.aggregatable: true} + - match: {fields.time\.min.long.searchable: true} + - match: {fields.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 22deb7012c4ed..1393d5454a9da 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -1204,3 +1204,138 @@ nested object with stored array: - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } - match: { hits.hits.1._source.nested_array_stored.1.b.0.c: 20 } - match: { hits.hits.1._source.nested_array_stored.1.b.1.c: 200 } + +--- +empty nested object sorted as a first document: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + sort.field: "name" + sort.order: "asc" + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + nested: + type: nested + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "B", "nested": { "a": "b" } }' + - '{ "create": { } }' + - '{ "name": "A" }' + + - match: { errors: false } + + - do: + search: + index: test + sort: name + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.nested.a: "b" } + + + +--- +subobjects auto: + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: requires tracking ignored source and supporting subobjects auto setting + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + subobjects: auto + properties: + id: + type: integer + regular: + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + stored: + store_array_source: true + properties: + span: + properties: + id: + type: keyword + trace: + 
properties: + id: + type: keyword + nested: + type: nested + auto_obj: + type: object + subobjects: auto + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "id": 1, "foo": 10, "foo.bar": 100, "regular": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 2, "foo": 20, "foo.bar": 200, "stored": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 3, "foo": 30, "foo.bar": 300, "nested": [ { "a": 10, "b": 20 }, { "a": 100, "b": 200 } ] }' + - '{ "create": { } }' + - '{ "id": 4, "auto_obj": { "foo": 40, "foo.bar": 400 } }' + + - match: { errors: false } + + - do: + search: + index: test + sort: id + + - match: { hits.hits.0._source.id: 1 } + - match: { hits.hits.0._source.foo: 10 } + - match: { hits.hits.0._source.foo\.bar: 100 } + - match: { hits.hits.0._source.regular.span.id: "1" } + - match: { hits.hits.0._source.regular.trace.id: [ "a", "b" ] } + - match: { hits.hits.1._source.id: 2 } + - match: { hits.hits.1._source.foo: 20 } + - match: { hits.hits.1._source.foo\.bar: 200 } + - match: { hits.hits.1._source.stored.0.trace.id: a } + - match: { hits.hits.1._source.stored.0.span.id: "1" } + - match: { hits.hits.1._source.stored.1.trace.id: b } + - match: { hits.hits.1._source.stored.1.span.id: "1" } + - match: { hits.hits.2._source.id: 3 } + - match: { hits.hits.2._source.foo: 30 } + - match: { hits.hits.2._source.foo\.bar: 300 } + - match: { hits.hits.2._source.nested.0.a: 10 } + - match: { hits.hits.2._source.nested.0.b: 20 } + - match: { hits.hits.2._source.nested.1.a: 100 } + - match: { hits.hits.2._source.nested.1.b: 200 } + - match: { hits.hits.3._source.id: 4 } + - match: { hits.hits.3._source.auto_obj.foo: 40 } + - match: { hits.hits.3._source.auto_obj.foo\.bar: 400 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml index 45bcf64f98945..3d82539944a97 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml @@ -449,6 +449,115 @@ index: test-generic - match: { test-generic.mappings.properties.parent.properties.child\.grandchild.type: "keyword" } + +--- +"Composable index templates that include subobjects: auto at root": + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" + test_runner_features: "allowed_warnings" + + - do: + cluster.put_component_template: + name: test-subobjects + body: + template: + mappings: + subobjects: auto + properties: + message: + enabled: false + + - do: + cluster.put_component_template: + name: test-field + body: + template: + mappings: + properties: + parent.subfield: + type: keyword + + - do: + allowed_warnings: + - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation" + indices.put_index_template: + name: test-composable-template + body: + index_patterns: + - test-* + composed_of: + - test-subobjects + - 
test-field + - is_true: acknowledged + + - do: + indices.create: + index: test-generic + + - do: + indices.get_mapping: + index: test-generic + - match: { test-generic.mappings.properties.parent\.subfield.type: "keyword" } + - match: { test-generic.mappings.properties.message.type: "object" } + +--- +"Composable index templates that include subobjects: auto on arbitrary field": + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" + test_runner_features: "allowed_warnings" + + - do: + cluster.put_component_template: + name: test-subobjects + body: + template: + mappings: + properties: + parent: + type: object + subobjects: auto + properties: + message: + enabled: false + + - do: + cluster.put_component_template: + name: test-subfield + body: + template: + mappings: + properties: + parent: + properties: + child.grandchild: + type: keyword + + - do: + allowed_warnings: + - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation" + indices.put_index_template: + name: test-composable-template + body: + index_patterns: + - test-* + composed_of: + - test-subobjects + - test-subfield + - is_true: acknowledged + + - do: + indices.create: + index: test-generic + + - do: + indices.get_mapping: + index: test-generic + - match: { test-generic.mappings.properties.parent.properties.child\.grandchild.type: "keyword" } + - match: { test-generic.mappings.properties.parent.properties.message.type: "object" } + + --- "Composition of component templates with different legal field mappings": - skip: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml index 80a8ccf0d1063..11ffbe1d8464d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml @@ -16,22 +16,22 @@ more than 1: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -66,8 +66,8 @@ more than 1: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -76,8 +76,8 @@ more than 1: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -86,8 +86,8 @@ more than 1: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -117,22 +117,22 @@ exactly 1: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -167,8 +167,8 @@ exactly 1: - do: get: index: target - routing: 1 - id: 1 + routing: "1" 
+ id: "1" - match: { _index: target } - match: { _id: "1" } @@ -177,8 +177,8 @@ exactly 1: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -187,8 +187,8 @@ exactly 1: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -221,22 +221,22 @@ nested: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -271,8 +271,8 @@ nested: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -281,8 +281,8 @@ nested: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -291,8 +291,8 @@ nested: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml index 38bf9d72ef8ff..4c8d7736631c9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml @@ -15,22 +15,22 @@ routing required: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -65,8 +65,8 @@ routing required: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -75,8 +75,8 @@ routing required: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -85,8 +85,8 @@ routing required: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -122,22 +122,22 @@ nested: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -172,8 +172,8 @@ nested: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -182,8 +182,8 @@ nested: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -192,8 +192,8 @@ nested: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml index 703f2a0352fbd..c120bed2d369d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml @@ -1125,3 +1125,55 @@ fetch geo_point: - match: { hits.hits.0.fields.root\.keyword.0: 'parent' } - match: { hits.hits.0.fields.root\.subfield.0: 'child' } - match: { hits.hits.0.fields.root\.subfield\.keyword.0: 'child' } + +--- +"Test with subobjects: auto": + - requires: + cluster_features: "mapper.subobjects_auto" + reason: requires support for subobjects auto setting + + - do: + indices.create: + index: test + body: + mappings: + subobjects: auto + properties: + message: + type: object + subobjects: auto + enabled: false + + - do: + index: + index: test + refresh: true + body: > + { + "root": "parent", + "root.subfield": "child", + "message": { + "foo": 10, + "foo.bar": 20 + } + } + - match: {result: "created"} + + - do: + search: + index: test + body: + query: + term: + root.subfield: + value: 'child' + fields: + - field: 'root*' + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.root.0: 'parent' } + - match: { hits.hits.0.fields.root\.keyword.0: 'parent' } + - match: { hits.hits.0.fields.root\.subfield.0: 'child' } + - match: { hits.hits.0.fields.root\.subfield\.keyword.0: 'child' } + - is_false: hits.hits.0.fields.message + - match: { hits.hits.0._source.message.foo: 10 } + - match: { hits.hits.0._source.message.foo\.bar: 20 } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 27fd54c39cc95..22549a1562dcd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -276,6 +276,7 @@ public void testSplitIndexPrimaryTerm() throws Exception { .put(indexSettings()) .put("number_of_shards", numberOfShards) .put("index.number_of_routing_shards", numberOfTargetShards) + .put("index.routing.rebalance.enable", EnableAllocationDecider.Rebalance.NONE) ).get(); ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java new file mode 100644 index 0000000000000..0bab5be245ecf --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.stream.Stream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class BackgroundRetentionLeaseSyncActionIT extends ESIntegTestCase { + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() throws Exception { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (var ignored = fullyAllocateCircuitBreakerOnNode(replica, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + final ClusterState state = internalCluster().clusterService().state(); + final Index testIndex = resolveIndex("test"); + final ShardId testIndexShardZero = new ShardId(testIndex, 0); + final String testLeaseId = "test-lease/123"; + RetentionLeases newLeases = addTestLeaseToRetentionLeases(primary, testIndex, testLeaseId); + internalCluster().getInstance(RetentionLeaseSyncer.class, primary) + .backgroundSync( + testIndexShardZero, + state.routingTable().shardRoutingTable(testIndexShardZero).primaryShard().allocationId().getId(), + state.term(), + newLeases + ); + + // Wait for test lease to appear on replica + IndicesService replicaIndicesService = internalCluster().getInstance(IndicesService.class, replica); + assertBusy(() -> { + RetentionLeases retentionLeases = replicaIndicesService.indexService(testIndex).getShard(0).getRetentionLeases(); + assertTrue(retentionLeases.contains(testLeaseId)); + }); + } + } + + private static RetentionLeases addTestLeaseToRetentionLeases(String primaryNodeName, Index index, String leaseId) { + IndicesService primaryIndicesService = internalCluster().getInstance(IndicesService.class, primaryNodeName); + RetentionLeases currentLeases = primaryIndicesService.indexService(index).getShard(0).getRetentionLeases(); + RetentionLease newLease = new RetentionLease(leaseId, 0, System.currentTimeMillis(), "test source"); + return new RetentionLeases( + currentLeases.primaryTerm(), + currentLeases.version() + 1, + Stream.concat(currentLeases.leases().stream(), Stream.of(newLease)).toList() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java new file mode 100644 index 0000000000000..2d8f455792172 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RetentionLeaseSyncActionIT extends ESIntegTestCase { + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (var ignored = fullyAllocateCircuitBreakerOnNode(replica, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + assertThatRetentionLeaseSyncCompletesSuccessfully(primary); + } + } + + public void testActionCompletesWhenPrimaryIndexingPressureIsAtCapacity() { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (Releasable ignored = fullyAllocatePrimaryIndexingCapacityOnNode(primary)) { + assertThatRetentionLeaseSyncCompletesSuccessfully(primary); + } + } + + private static void assertThatRetentionLeaseSyncCompletesSuccessfully(String primaryNodeName) { + RetentionLeaseSyncer instance = internalCluster().getInstance(RetentionLeaseSyncer.class, primaryNodeName); + PlainActionFuture retentionLeaseSyncResult = new PlainActionFuture<>(); + ClusterState state = internalCluster().clusterService().state(); + ShardId testIndexShardZero = new ShardId(resolveIndex("test"), 0); + ShardRouting primaryShard = state.routingTable().shardRoutingTable(testIndexShardZero).primaryShard(); + instance.sync( + testIndexShardZero, + primaryShard.allocationId().getId(), + state.term(), + RetentionLeases.EMPTY, + retentionLeaseSyncResult + ); + safeGet(retentionLeaseSyncResult); + } + + /** + * Fully allocate primary indexing capacity on a node + * + * @param targetNode The name of the node on which to allocate + * @return A {@link Releasable} which will release the capacity when closed + */ + private static Releasable fullyAllocatePrimaryIndexingCapacityOnNode(String targetNode) { + return internalCluster().getInstance(IndexingPressure.class, targetNode) + .markPrimaryOperationStarted( + 1, + 
IndexingPressure.MAX_INDEXING_BYTES.get(internalCluster().getInstance(Settings.class, targetNode)).getBytes() + 1, + true + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java similarity index 58% rename from server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java rename to server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java index 2cbe1202520df..ff2117ea93bb9 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java @@ -6,25 +6,23 @@ * Side Public License, v 1. */ -package org.elasticsearch.indices.breaker; +package org.elasticsearch.indices.memory.breaker; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.Measurement; -import org.elasticsearch.telemetry.RecordingInstruments; -import org.elasticsearch.telemetry.RecordingMeterRegistry; import org.elasticsearch.telemetry.TestTelemetryPlugin; -import org.elasticsearch.telemetry.metric.LongCounter; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; +import org.junit.After; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; @@ -41,54 +39,11 @@ import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, supportsDedicatedMasters = true) -public class HierarchyCircuitBreakerTelemetryTests extends ESIntegTestCase { +public class HierarchyCircuitBreakerTelemetryIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return List.of(TestCircuitBreakerTelemetryPlugin.class); - } - - public static class TestCircuitBreakerTelemetryPlugin extends TestTelemetryPlugin { - protected final MeterRegistry meter = new RecordingMeterRegistry() { - private final LongCounter tripCount = new RecordingInstruments.RecordingLongCounter( - CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL, - recorder - ) { - @Override - public void incrementBy(long inc) { - throw new UnsupportedOperationException(); - } - - @Override - public void incrementBy(long inc, Map attributes) { - throw new UnsupportedOperationException(); - } - }; - - @Override - protected LongCounter buildLongCounter(String name, String description, String unit) { - if (name.equals(tripCount.getName())) { - return tripCount; - } - throw new IllegalArgumentException("Unknown counter metric name [" + name + "]"); - } - - @Override - public LongCounter registerLongCounter(String name, String description, String unit) { - assertCircuitBreakerName(name); - return 
super.registerLongCounter(name, description, unit); - } - - @Override - public LongCounter getLongCounter(String name) { - assertCircuitBreakerName(name); - return super.getLongCounter(name); - } - - private void assertCircuitBreakerName(final String name) { - assertThat(name, Matchers.oneOf(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL)); - } - }; + return List.of(TestTelemetryPlugin.class); } public void testCircuitBreakerTripCountMetric() { @@ -142,37 +97,29 @@ public void testCircuitBreakerTripCountMetric() { fail("Expected exception not thrown"); } - private List getMeasurements(String dataNodeName) { - final TestTelemetryPlugin dataNodeTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName) - .filterPlugins(TestCircuitBreakerTelemetryPlugin.class) + @After + public void resetClusterSetting() { + final var circuitBreakerSettings = Settings.builder() + .putNull(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey()); + updateClusterSettings(circuitBreakerSettings); + } + + private List getMeasurements(String nodeName) { + final TestTelemetryPlugin telemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) .toList() .get(0); return Measurement.combine( - Stream.of(dataNodeTelemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL).stream()) + Stream.of(telemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL).stream()) .flatMap(Function.identity()) .toList() ); } - - // Make sure circuit breaker telemetry on trip count reports the same values as circuit breaker stats - private void assertCircuitBreakerTripCount( - final HierarchyCircuitBreakerService circuitBreakerService, - final String circuitBreakerName, - int firstBytesEstimate, - int secondBytesEstimate, - long expectedTripCountValue - ) { - try { - circuitBreakerService.getBreaker(circuitBreakerName).addEstimateBytesAndMaybeBreak(firstBytesEstimate, randomAlphaOfLength(5)); - circuitBreakerService.getBreaker(circuitBreakerName).addEstimateBytesAndMaybeBreak(secondBytesEstimate, randomAlphaOfLength(5)); - } catch (final CircuitBreakingException cbex) { - final CircuitBreakerStats circuitBreakerStats = Arrays.stream(circuitBreakerService.stats().getAllStats()) - .filter(stats -> circuitBreakerName.equals(stats.getName())) - .findAny() - .get(); - assertThat(circuitBreakerService.getBreaker(circuitBreakerName).getTrippedCount(), Matchers.equalTo(expectedTripCountValue)); - assertThat(circuitBreakerStats.getTrippedCount(), Matchers.equalTo(expectedTripCountValue)); - } - } - } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 019dfe638130f..04543a1e5dea4 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -189,9 +189,13 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_ORIGINAL_INDICES = 
def(8_719_00_0); public static final TransportVersion ML_INFERENCE_EIS_INTEGRATION_ADDED = def(8_720_00_0); public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0); - public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_722_00_0); + public static final TransportVersion ZDT_NANOS_SUPPORT_BROKEN = def(8_722_00_0); public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0); - public static final TransportVersion ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED = def(8_724_00_0); + public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0); + public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0); + public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0); + public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); + public static final TransportVersion ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED = def(8_728_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 258e5b4c9a58d..813203afe42c5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -10,7 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -91,6 +93,7 @@ final class BulkOperation extends ActionRunnable { private final OriginSettingClient rolloverClient; private final Set failureStoresToBeRolledOver = ConcurrentCollections.newConcurrentSet(); private final Set failedRolloverRequests = ConcurrentCollections.newConcurrentSet(); + private final FailureStoreMetrics failureStoreMetrics; BulkOperation( Task task, @@ -104,7 +107,8 @@ final class BulkOperation extends ActionRunnable { IndexNameExpressionResolver indexNameExpressionResolver, LongSupplier relativeTimeProvider, long startTimeNanos, - ActionListener listener + ActionListener listener, + FailureStoreMetrics failureStoreMetrics ) { this( task, @@ -120,7 +124,8 @@ final class BulkOperation extends ActionRunnable { startTimeNanos, listener, new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()), - new FailureStoreDocumentConverter() + new FailureStoreDocumentConverter(), + failureStoreMetrics ); } @@ -138,7 +143,8 @@ final class BulkOperation extends ActionRunnable { long startTimeNanos, ActionListener listener, ClusterStateObserver observer, - FailureStoreDocumentConverter failureStoreDocumentConverter + FailureStoreDocumentConverter failureStoreDocumentConverter, + FailureStoreMetrics failureStoreMetrics ) { super(listener); this.task = task; @@ -156,6 +162,7 @@ final class BulkOperation extends ActionRunnable { this.observer = observer; this.failureStoreDocumentConverter = failureStoreDocumentConverter; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); + this.failureStoreMetrics = failureStoreMetrics; } @Override @@ -437,17 +444,11 @@ public void onResponse(BulkShardResponse 
bulkShardResponse) { for (int idx = 0; idx < bulkShardResponse.getResponses().length; idx++) { // We zip the requests and responses together so that we can identify failed documents and potentially store them BulkItemResponse bulkItemResponse = bulkShardResponse.getResponses()[idx]; + BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; if (bulkItemResponse.isFailed()) { - BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; assert bulkItemRequest.id() == bulkItemResponse.getItemId() : "Bulk items were returned out of order"; - - DataStream failureStoreReference = getRedirectTarget(bulkItemRequest.request(), getClusterState().metadata()); - if (failureStoreReference != null) { - maybeMarkFailureStoreForRollover(failureStoreReference); - var cause = bulkItemResponse.getFailure().getCause(); - addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreReference.getName()); - } + processFailure(bulkItemRequest, bulkItemResponse.getFailure().getCause()); addFailure(bulkItemResponse); } else { bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); @@ -464,11 +465,7 @@ public void onFailure(Exception e) { final String indexName = request.index(); DocWriteRequest docWriteRequest = request.request(); - DataStream failureStoreReference = getRedirectTarget(docWriteRequest, getClusterState().metadata()); - if (failureStoreReference != null) { - maybeMarkFailureStoreForRollover(failureStoreReference); - addDocumentToRedirectRequests(request, e, failureStoreReference.getName()); - } + processFailure(request, e); addFailure(docWriteRequest, request.id(), indexName, e); } completeShardOperation(); @@ -479,45 +476,56 @@ private void completeShardOperation() { clusterState = null; releaseOnFinish.close(); } + + private void processFailure(BulkItemRequest bulkItemRequest, Exception cause) { + var errorType = ElasticsearchException.getExceptionName(ExceptionsHelper.unwrapCause(cause)); + DocWriteRequest docWriteRequest = bulkItemRequest.request(); + DataStream failureStoreCandidate = getRedirectTargetCandidate(docWriteRequest, getClusterState().metadata()); + // If the candidate is not null, the BulkItemRequest targets a data stream, but we'll still have to check if + // it has the failure store enabled. + if (failureStoreCandidate != null) { + // Do not redirect documents to a failure store that were already headed to one. + var isFailureStoreDoc = docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore(); + if (isFailureStoreDoc == false && failureStoreCandidate.isFailureStoreEnabled()) { + // Redirect to failure store. + maybeMarkFailureStoreForRollover(failureStoreCandidate); + addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreCandidate.getName()); + failureStoreMetrics.incrementFailureStore( + bulkItemRequest.index(), + errorType, + FailureStoreMetrics.ErrorLocation.SHARD + ); + } else { + // If we can't redirect to a failure store (because either the data stream doesn't have the failure store enabled + // or this request was already targeting a failure store), we increment the rejected counter. + failureStoreMetrics.incrementRejected( + bulkItemRequest.index(), + errorType, + FailureStoreMetrics.ErrorLocation.SHARD, + isFailureStoreDoc + ); + } + } + } }); } /** - * Determines if the write request can be redirected if it fails. Write requests can be redirected IFF they are targeting a data stream - * with a failure store and are not already redirected themselves. 
If the document can be redirected, the data stream name to use for - * the redirection is returned. + * Tries to find a candidate redirect target for this write request. A candidate redirect target is a data stream that may or + * may not have the failure store enabled. * * @param docWriteRequest the write request to check * @param metadata cluster state metadata for resolving index abstractions - * @return a data stream if the write request points to a data stream that has the failure store enabled, or {@code null} if it does not + * @return a data stream if the write request points to a data stream, or {@code null} if it does not */ - private static DataStream getRedirectTarget(DocWriteRequest docWriteRequest, Metadata metadata) { + private static DataStream getRedirectTargetCandidate(DocWriteRequest docWriteRequest, Metadata metadata) { // Feature flag guard if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { return null; } - // Do not resolve a failure store for documents that were already headed to one - if (docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore()) { - return null; - } // If there is no index abstraction, then the request is using a pattern of some sort, which data streams do not support IndexAbstraction ia = metadata.getIndicesLookup().get(docWriteRequest.index()); - if (ia == null) { - return null; - } - if (ia.isDataStreamRelated()) { - // The index abstraction could be an alias. Alias abstractions (even for data streams) only keep track of which _index_ they - // will write to, not which _data stream_. - // We work backward to find the data stream from the concrete write index to cover this case. - Index concreteIndex = ia.getWriteIndex(); - IndexAbstraction writeIndexAbstraction = metadata.getIndicesLookup().get(concreteIndex.getName()); - DataStream parentDataStream = writeIndexAbstraction.getParentDataStream(); - if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { - // Keep the data stream name around to resolve the redirect to failure store if the shard level request fails. - return parentDataStream; - } - } - return null; + return DataStream.resolveDataStream(ia, metadata); } /** diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java new file mode 100644 index 0000000000000..5a36f10785790 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.Map; + +/** + * A class containing APM metrics for failure stores. See the JavaDoc on the individual methods for an explanation on what they're tracking. + * General notes: + *
+ *   - When a document is rerouted in a pipeline, the destination data stream is used for the metric attribute(s).
+ */ +public class FailureStoreMetrics { + + public static final FailureStoreMetrics NOOP = new FailureStoreMetrics(MeterRegistry.NOOP); + + public static final String METRIC_TOTAL = "es.data_stream.ingest.documents.total"; + public static final String METRIC_FAILURE_STORE = "es.data_stream.ingest.documents.failure_store.total"; + public static final String METRIC_REJECTED = "es.data_stream.ingest.documents.rejected.total"; + + private final LongCounter totalCounter; + private final LongCounter failureStoreCounter; + private final LongCounter rejectedCounter; + + public FailureStoreMetrics(MeterRegistry meterRegistry) { + totalCounter = meterRegistry.registerLongCounter(METRIC_TOTAL, "total number of documents that were sent to a data stream", "unit"); + failureStoreCounter = meterRegistry.registerLongCounter( + METRIC_FAILURE_STORE, + "number of documents that got redirected to the failure store", + "unit" + ); + rejectedCounter = meterRegistry.registerLongCounter(METRIC_REJECTED, "number of documents that were rejected", "unit"); + } + + /** + * This counter tracks the number of documents that we tried to index into a data stream. This includes documents + * that were dropped by a pipeline. This counter will only be incremented once for every incoming document (even when it gets + * redirected to the failure store and/or gets rejected). + * @param dataStream the name of the data stream + */ + public void incrementTotal(String dataStream) { + totalCounter.incrementBy(1, Map.of("data_stream", dataStream)); + } + + /** + * This counter tracks the number of documents that we tried to store into a failure store. This includes both pipeline and + * shard-level failures. + * @param dataStream the name of the data stream + * @param errorType the error type (i.e. the name of the exception that was thrown) + * @param errorLocation where this failure occurred + */ + public void incrementFailureStore(String dataStream, String errorType, ErrorLocation errorLocation) { + failureStoreCounter.incrementBy( + 1, + Map.of("data_stream", dataStream, "error_type", errorType, "error_location", errorLocation.name()) + ); + } + + /** + * This counter tracks the number of documents that failed to get stored in Elasticsearch. Meaning, any document that did not get + * stored in the data stream or in its failure store. + * @param dataStream the name of the data stream + * @param errorType the error type (i.e. 
the name of the exception that was thrown) + * @param errorLocation where this failure occurred + * @param failureStore whether this failure occurred while trying to ingest into a failure store (true) or in the data + * stream itself (false) + */ + public void incrementRejected(String dataStream, String errorType, ErrorLocation errorLocation, boolean failureStore) { + rejectedCounter.incrementBy( + 1, + Map.of( + "data_stream", + dataStream, + "error_type", + errorType, + "error_location", + errorLocation.name(), + "failure_store", + failureStore + ) + ); + } + + public enum ErrorLocation { + PIPELINE, + SHARD; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java index c44ad505aea84..74864abe3ec50 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -222,7 +222,7 @@ private void processBulkIndexIngestRequest( original.numberOfActions(), () -> bulkRequestModifier, bulkRequestModifier::markItemAsDropped, - (indexName) -> shouldStoreFailure(indexName, metadata, threadPool.absoluteTimeInMillis()), + (indexName) -> resolveFailureStore(indexName, metadata, threadPool.absoluteTimeInMillis()), bulkRequestModifier::markItemForFailureStore, bulkRequestModifier::markItemAsFailed, (originalThread, exception) -> { @@ -274,13 +274,15 @@ public boolean isForceExecution() { /** * Determines if an index name is associated with either an existing data stream or a template * for one that has the failure store enabled. + * * @param indexName The index name to check. * @param metadata Cluster state metadata. * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store - * or if it matches a template that has a data stream failure store enabled. + * or if it matches a template that has a data stream failure store enabled. Returns false if the index name corresponds to a + * data stream, but it doesn't have the failure store enabled. Returns null when it doesn't correspond to a data stream. */ - protected abstract boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis); + protected abstract Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis); /** * Retrieves the {@link IndexRequest} from the provided {@link DocWriteRequest} for index or upsert actions. 
Upserts are diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index a695e0f5e8ab6..bdda4ff487f6b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -42,7 +42,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; @@ -57,7 +56,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.SortedMap; import java.util.concurrent.Executor; @@ -82,6 +80,7 @@ public class TransportBulkAction extends TransportAbstractBulkAction { private final NodeClient client; private final IndexNameExpressionResolver indexNameExpressionResolver; private final OriginSettingClient rolloverClient; + private final FailureStoreMetrics failureStoreMetrics; @Inject public TransportBulkAction( @@ -94,7 +93,8 @@ public TransportBulkAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, - SystemIndices systemIndices + SystemIndices systemIndices, + FailureStoreMetrics failureStoreMetrics ) { this( threadPool, @@ -107,7 +107,8 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - threadPool::relativeTimeInNanos + threadPool::relativeTimeInNanos, + failureStoreMetrics ); } @@ -122,7 +123,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + FailureStoreMetrics failureStoreMetrics ) { this( TYPE, @@ -137,7 +139,8 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - relativeTimeProvider + relativeTimeProvider, + failureStoreMetrics ); } @@ -154,7 +157,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + FailureStoreMetrics failureStoreMetrics ) { super( bulkAction, @@ -173,6 +177,7 @@ public TransportBulkAction( this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); + this.failureStoreMetrics = failureStoreMetrics; } public static ActionListener unwrappingSingleItemBulkResponse( @@ -199,6 +204,8 @@ protected void doInternalExecute( ActionListener listener, long relativeStartTimeNanos ) { + trackIndexRequests(bulkRequest); + Map indicesToAutoCreate = new HashMap<>(); Set dataStreamsToBeRolledOver = new HashSet<>(); Set failureStoresToBeRolledOver = new HashSet<>(); @@ -216,6 +223,27 @@ protected void doInternalExecute( ); } + /** + * Track the number of index requests in our APM metrics. We'll track almost all docs here (pipeline or no pipeline, + * failure store or original), but some docs don't reach this place (dropped and rejected docs), so we increment for those docs in + * different places. 
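A rough usage sketch (not part of this change) of the three counters defined in FailureStoreMetrics above, for a single document that fails at the shard level and is then rejected by the failure store; the data stream name and exception name are illustrative, and meterRegistry is assumed to be available:

    FailureStoreMetrics metrics = new FailureStoreMetrics(meterRegistry);
    // every incoming document that targets a data stream is counted exactly once
    metrics.incrementTotal("logs-app-default");
    // the shard-level failure gets redirected to the failure store
    metrics.incrementFailureStore("logs-app-default", "mapper_parsing_exception", FailureStoreMetrics.ErrorLocation.SHARD);
    // the redirected document fails again while being indexed into the failure store, so it is rejected
    metrics.incrementRejected("logs-app-default", "mapper_parsing_exception", FailureStoreMetrics.ErrorLocation.SHARD, true);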
+ */ + private void trackIndexRequests(BulkRequest bulkRequest) { + final Metadata metadata = clusterService.state().metadata(); + for (DocWriteRequest request : bulkRequest.requests) { + if (request instanceof IndexRequest == false) { + continue; + } + String resolvedIndexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); + IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(resolvedIndexName); + DataStream dataStream = DataStream.resolveDataStream(indexAbstraction, metadata); + // We only track index requests into data streams. + if (dataStream != null) { + failureStoreMetrics.incrementTotal(dataStream.getName()); + } + } + } + /** * Determine all the targets (i.e. indices, data streams, failure stores) that require an action before we can proceed with the bulk * request. Indices might need to be created, and data streams and failure stores might need to be rolled over when they're marked @@ -535,29 +563,29 @@ void executeBulk( indexNameExpressionResolver, relativeTimeNanosProvider, startTimeNanos, - listener + listener, + failureStoreMetrics ).run(); } /** - * Determines if an index name is associated with either an existing data stream or a template - * for one that has the failure store enabled. - * @param indexName The index name to check. - * @param metadata Cluster state metadata. - * @param epochMillis A timestamp to use when resolving date math in the index name. - * @return true if the given index name corresponds to a data stream with a failure store, - * or if it matches a template that has a data stream failure store enabled. + * See {@link #resolveFailureStore(String, Metadata, long)} */ - static boolean shouldStoreFailureInternal(String indexName, Metadata metadata, long epochMillis) { - return DataStream.isFailureStoreFeatureFlagEnabled() - && resolveFailureStoreFromMetadata(indexName, metadata, epochMillis).or( - () -> resolveFailureStoreFromTemplate(indexName, metadata) - ).orElse(false); + // Visibility for testing + static Boolean resolveFailureInternal(String indexName, Metadata metadata, long epochMillis) { + if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { + return null; + } + var resolution = resolveFailureStoreFromMetadata(indexName, metadata, epochMillis); + if (resolution != null) { + return resolution; + } + return resolveFailureStoreFromTemplate(indexName, metadata); } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long time) { - return shouldStoreFailureInternal(indexName, metadata, time); + protected Boolean resolveFailureStore(String indexName, Metadata metadata, long time) { + return resolveFailureInternal(indexName, metadata, time); } /** @@ -567,30 +595,24 @@ protected boolean shouldStoreFailure(String indexName, Metadata metadata, long t * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if the given index name corresponds to an existing data stream with a failure store enabled. 
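A short illustration (caller code sketched here, not taken from the PR) of how the tri-state Boolean returned by resolveFailureStore is meant to be read, matching the Javadoc above:

    Boolean failureStore = resolveFailureStore(indexName, metadata, epochMillis);
    if (failureStore == null) {
        // the name does not resolve to a data stream (or the failure store feature flag is disabled)
    } else if (failureStore) {
        // the data stream, or the template it would match, has the failure store enabled
    } else {
        // the target is a data stream whose failure store is disabled
    }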
*/ - private static Optional resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { + private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { if (indexName == null) { - return Optional.empty(); + return null; } // Get index abstraction, resolving date math if it exists IndexAbstraction indexAbstraction = metadata.getIndicesLookup() .get(IndexNameExpressionResolver.resolveDateMathExpression(indexName, epochMillis)); - - // We only store failures if the failure is being written to a data stream, - // not when directly writing to backing indices/failure stores if (indexAbstraction == null || indexAbstraction.isDataStreamRelated() == false) { - return Optional.empty(); + return null; } - // Locate the write index for the abstraction, and check if it has a data stream associated with it. - // This handles alias resolution as well as data stream resolution. - Index writeIndex = indexAbstraction.getWriteIndex(); - assert writeIndex != null : "Could not resolve write index for resource [" + indexName + "]"; - IndexAbstraction writeAbstraction = metadata.getIndicesLookup().get(writeIndex.getName()); - DataStream targetDataStream = writeAbstraction.getParentDataStream(); + // We only store failures if the failure is being written to a data stream, + // not when directly writing to backing indices/failure stores + DataStream targetDataStream = DataStream.resolveDataStream(indexAbstraction, metadata); // We will store the failure if the write target belongs to a data stream with a failure store. - return Optional.of(targetDataStream != null && targetDataStream.isFailureStoreEnabled()); + return targetDataStream != null && targetDataStream.isFailureStoreEnabled(); } /** @@ -599,9 +621,9 @@ private static Optional resolveFailureStoreFromMetadata(String indexNam * @param metadata Cluster state metadata. * @return true if the given index name corresponds to an index template with a data stream failure store enabled. 
*/ - private static Optional resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { + private static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { if (indexName == null) { - return Optional.empty(); + return null; } // Check to see if the index name matches any templates such that an index would have been attributed @@ -612,11 +634,11 @@ private static Optional resolveFailureStoreFromTemplate(String indexNam ComposableIndexTemplate composableIndexTemplate = metadata.templatesV2().get(template); if (composableIndexTemplate.getDataStreamTemplate() != null) { // Check if the data stream has the failure store enabled - return Optional.of(composableIndexTemplate.getDataStreamTemplate().hasFailureStore()); + return composableIndexTemplate.getDataStreamTemplate().hasFailureStore(); } } // Could not locate a failure store via template - return Optional.empty(); + return null; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index a4648a7accb5a..2312a75b91084 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -166,8 +166,8 @@ protected IngestService getIngestService(BulkRequest request) { } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis) { + protected Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis) { // A simulate bulk request should not change any persistent state in the system, so we never write to the failure store - return false; + return null; } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 89282b8db3646..2fcc5ce3702c1 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -556,7 +556,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (DataStreamInfo dataStream : dataStreams) { dataStream.toXContent( builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java index 4dc9ada5dc01f..d51f00681bb5e 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java @@ -217,7 +217,7 @@ public Iterator toXContentChunked(ToXContent.Params outerP builder.field(explainIndexDataLifecycle.getIndex()); explainIndexDataLifecycle.toXContent( builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(outerParams), + DataStreamLifecycle.addEffectiveRetentionParams(outerParams), rolloverConfiguration, globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java 
b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index e038763169ef8..39427efbac4fd 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -187,7 +187,7 @@ public XContentBuilder toXContent( builder.field(LIFECYCLE_FIELD.getPreferredName()); lifecycle.toXContent( builder, - org.elasticsearch.cluster.metadata.DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + org.elasticsearch.cluster.metadata.DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, isSystemDataStream ? null : globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 6b20399a1bc59..c9743c157a622 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -1376,6 +1376,25 @@ private static Instant getTimestampFromParser(BytesReference source, XContentTyp } } + /** + * Resolve the index abstraction to a data stream. This handles alias resolution as well as data stream resolution. This does NOT + * resolve a data stream by providing a concrete backing index. + */ + public static DataStream resolveDataStream(IndexAbstraction indexAbstraction, Metadata metadata) { + // We do not consider concrete indices - only data streams and data stream aliases. + if (indexAbstraction == null || indexAbstraction.isDataStreamRelated() == false) { + return null; + } + + // Locate the write index for the abstraction, and check if it has a data stream associated with it. + Index writeIndex = indexAbstraction.getWriteIndex(); + if (writeIndex == null) { + return null; + } + IndexAbstraction writeAbstraction = metadata.getIndicesLookup().get(writeIndex.getName()); + return writeAbstraction.getParentDataStream(); + } + /** * Modifies the passed Instant object to be used as a bound for a timestamp field in TimeSeries. It needs to be called in both backing * index construction (rollover) and index selection for doc insertion. Failure to do so may lead to errors due to document timestamps diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java index 5b96f92193e98..be42916b07956 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java @@ -17,7 +17,9 @@ * Holds the factory retention configuration. Factory retention is the global retention configuration meant to be * used if a user hasn't provided other retention configuration via {@link DataStreamGlobalRetention} metadata in the * cluster state. + * @deprecated This interface is deprecated, please use {@link DataStreamGlobalRetentionSettings}. 
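The new DataStream.resolveDataStream helper introduced earlier in this change centralises alias handling; a minimal sketch of a caller, with a hypothetical alias name:

    IndexAbstraction abstraction = metadata.getIndicesLookup().get("logs-alias"); // alias or data stream name
    DataStream dataStream = DataStream.resolveDataStream(abstraction, metadata);  // null for plain indices and backing indices
    if (dataStream != null) {
        // the abstraction's write index belongs to this data stream
        String name = dataStream.getName();
    }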
*/ +@Deprecated public interface DataStreamFactoryRetention { @Nullable diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java index c74daa22cc137..185f625f6f91f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java @@ -18,14 +18,10 @@ import java.io.IOException; /** - * A cluster state entry that contains global retention settings that are configurable by the user. These settings include: - * - default retention, applied on any data stream managed by DSL that does not have an explicit retention defined - * - max retention, applied on every data stream managed by DSL + * Wrapper class for the {@link DataStreamGlobalRetentionSettings}. */ public record DataStreamGlobalRetention(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) implements Writeable { - public static final String TYPE = "data-stream-global-retention"; - public static final NodeFeature GLOBAL_RETENTION = new NodeFeature("data_stream.lifecycle.global_retention"); public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java deleted file mode 100644 index f1e3e18ea4d51..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.core.Nullable; - -/** - * Provides the global retention configuration for data stream lifecycle as defined in the settings. - */ -public class DataStreamGlobalRetentionProvider { - - private final DataStreamFactoryRetention factoryRetention; - - public DataStreamGlobalRetentionProvider(DataStreamFactoryRetention factoryRetention) { - this.factoryRetention = factoryRetention; - } - - /** - * Return the global retention configuration as defined in the settings. If both settings are null, it returns null. - */ - @Nullable - public DataStreamGlobalRetention provide() { - if (factoryRetention.isDefined() == false) { - return null; - } - return new DataStreamGlobalRetention(factoryRetention.getDefaultRetention(), factoryRetention.getMaxRetention()); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java new file mode 100644 index 0000000000000..a1fcf56a92726 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * This class holds the data stream global retention settings. It defines, validates and monitors the settings. + *

+ * The global retention settings apply to non-system data streams that are managed by the data stream lifecycle. They consist of: + * - The default retention which applies to data streams that do not have a retention defined. + * - The max retention which applies to all data streams that do not have retention or their retention has exceeded this value. + *

+ * Temporarily, we fall back to {@link DataStreamFactoryRetention} to facilitate a smooth transition to these settings. + */ +public class DataStreamGlobalRetentionSettings { + + private static final Logger logger = LogManager.getLogger(DataStreamGlobalRetentionSettings.class); + public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10); + + public static final Setting DATA_STREAMS_DEFAULT_RETENTION_SETTING = Setting.timeSetting( + "data_streams.lifecycle.retention.default", + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue value) {} + + @Override + public void validate(final TimeValue settingValue, final Map, Object> settings) { + TimeValue defaultRetention = getSettingValueOrNull(settingValue); + TimeValue maxRetention = getSettingValueOrNull((TimeValue) settings.get(DATA_STREAMS_MAX_RETENTION_SETTING)); + validateIsolatedRetentionValue(defaultRetention, DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey()); + validateGlobalRetentionConfiguration(defaultRetention, maxRetention); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(DATA_STREAMS_MAX_RETENTION_SETTING); + return settings.iterator(); + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting DATA_STREAMS_MAX_RETENTION_SETTING = Setting.timeSetting( + "data_streams.lifecycle.retention.max", + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue value) {} + + @Override + public void validate(final TimeValue settingValue, final Map, Object> settings) { + TimeValue defaultRetention = getSettingValueOrNull((TimeValue) settings.get(DATA_STREAMS_DEFAULT_RETENTION_SETTING)); + TimeValue maxRetention = getSettingValueOrNull(settingValue); + validateIsolatedRetentionValue(maxRetention, DATA_STREAMS_MAX_RETENTION_SETTING.getKey()); + validateGlobalRetentionConfiguration(defaultRetention, maxRetention); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(DATA_STREAMS_DEFAULT_RETENTION_SETTING); + return settings.iterator(); + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private final DataStreamFactoryRetention factoryRetention; + + @Nullable + private volatile TimeValue defaultRetention; + @Nullable + private volatile TimeValue maxRetention; + + private DataStreamGlobalRetentionSettings(DataStreamFactoryRetention factoryRetention) { + this.factoryRetention = factoryRetention; + } + + @Nullable + public TimeValue getMaxRetention() { + return shouldFallbackToFactorySettings() ? factoryRetention.getMaxRetention() : maxRetention; + } + + @Nullable + public TimeValue getDefaultRetention() { + return shouldFallbackToFactorySettings() ? 
factoryRetention.getDefaultRetention() : defaultRetention; + } + + public boolean areDefined() { + return getDefaultRetention() != null || getMaxRetention() != null; + } + + private boolean shouldFallbackToFactorySettings() { + return defaultRetention == null && maxRetention == null; + } + + /** + * Creates an instance and initialises the cluster settings listeners + * @param clusterSettings it will register the cluster settings listeners to monitor for changes + * @param factoryRetention for migration purposes, it will be removed shortly + */ + public static DataStreamGlobalRetentionSettings create(ClusterSettings clusterSettings, DataStreamFactoryRetention factoryRetention) { + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = new DataStreamGlobalRetentionSettings(factoryRetention); + clusterSettings.initializeAndWatch(DATA_STREAMS_DEFAULT_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setDefaultRetention); + clusterSettings.initializeAndWatch(DATA_STREAMS_MAX_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setMaxRetention); + return dataStreamGlobalRetentionSettings; + } + + private void setMaxRetention(TimeValue maxRetention) { + this.maxRetention = getSettingValueOrNull(maxRetention); + logger.info("Updated max factory retention to [{}]", this.maxRetention == null ? null : maxRetention.getStringRep()); + } + + private void setDefaultRetention(TimeValue defaultRetention) { + this.defaultRetention = getSettingValueOrNull(defaultRetention); + logger.info("Updated default factory retention to [{}]", this.defaultRetention == null ? null : defaultRetention.getStringRep()); + } + + private static void validateIsolatedRetentionValue(@Nullable TimeValue retention, String settingName) { + if (retention != null && retention.getMillis() < MIN_RETENTION_VALUE.getMillis()) { + throw new IllegalArgumentException( + "Setting '" + settingName + "' should be greater than " + MIN_RETENTION_VALUE.getStringRep() + ); + } + } + + private static void validateGlobalRetentionConfiguration(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) { + if (defaultRetention != null && maxRetention != null && defaultRetention.getMillis() > maxRetention.getMillis()) { + throw new IllegalArgumentException( + "Setting [" + + DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey() + + "=" + + defaultRetention.getStringRep() + + "] cannot be greater than [" + + DATA_STREAMS_MAX_RETENTION_SETTING.getKey() + + "=" + + maxRetention.getStringRep() + + "]." + ); + } + } + + @Nullable + public DataStreamGlobalRetention get() { + if (areDefined() == false) { + return null; + } + return new DataStreamGlobalRetention(getDefaultRetention(), getMaxRetention()); + } + + /** + * Time value settings do not accept null as a value. To represent an undefined retention as a setting we use the value + * of -1 and this method converts this to null. + * + * @param value the retention as parsed from the setting + * @return the value when it is not -1 and null otherwise + */ + @Nullable + private static TimeValue getSettingValueOrNull(TimeValue value) { + return value == null || value.equals(TimeValue.MINUS_ONE) ? 
null : value; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index de9d615022975..cb09fb6108049 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -24,7 +24,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -55,6 +54,7 @@ public class DataStreamLifecycle implements SimpleDiffable, // Versions over the wire public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_10_X; + public static final String EFFECTIVE_RETENTION_REST_API_CAPABILITY = "data_stream_lifecycle_effective_retention"; public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; // The following XContent params are used to enrich the DataStreamLifecycle json with effective retention information @@ -367,14 +367,12 @@ public static DataStreamLifecycle fromXContent(XContentParser parser) throws IOE } /** - * Adds a retention param to signal that this serialisation should include the effective retention metadata + * Adds a retention param to signal that this serialisation should include the effective retention metadata. + * @param params the XContent params to be extended with the new flag + * @return XContent params with `include_effective_retention` set to true. If the flag exists it will override it. 
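Previously the effective-retention fields were only emitted when the request was restricted to serverless; with addEffectiveRetentionParams the flag is set unconditionally. A sketch of the updated call sites, following the pattern used elsewhere in this change:

    // wrap the incoming params before serialising a lifecycle so effective retention is included
    lifecycle.toXContent(builder, DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, globalRetention);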
*/ - public static ToXContent.Params maybeAddEffectiveRetentionParams(ToXContent.Params params) { - boolean shouldAddEffectiveRetention = Objects.equals(params.param(RestRequest.PATH_RESTRICTED), "serverless"); - return new DelegatingMapParams( - Map.of(INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, Boolean.toString(shouldAddEffectiveRetention)), - params - ); + public static ToXContent.Params addEffectiveRetentionParams(ToXContent.Params params) { + return new DelegatingMapParams(INCLUDE_EFFECTIVE_RETENTION_PARAMS, params); } public static Builder newBuilder(DataStreamLifecycle lifecycle) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index bfe7468b97a64..9cac6fa3e8796 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -41,18 +41,18 @@ public class MetadataDataStreamsService { private final ClusterService clusterService; private final IndicesService indicesService; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; private final MasterServiceTaskQueue updateLifecycleTaskQueue; private final MasterServiceTaskQueue setRolloverOnWriteTaskQueue; public MetadataDataStreamsService( ClusterService clusterService, IndicesService indicesService, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.clusterService = clusterService; this.indicesService = indicesService; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; ClusterStateTaskExecutor updateLifecycleExecutor = new SimpleBatchedAckListenerTaskExecutor<>() { @Override @@ -223,7 +223,7 @@ ClusterState updateDataLifecycle(ClusterState currentState, List dataStr if (lifecycle != null) { if (atLeastOneDataStreamIsNotSystem) { // We don't issue any warnings if all data streams are system data streams - lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetentionResolver.provide()); + lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetentionSettings.get()); } } return ClusterState.builder(currentState).metadata(builder.build()).build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index c6eb56926eca0..ac56f3f670f43 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -137,7 +137,7 @@ public class MetadataIndexTemplateService { private final NamedXContentRegistry xContentRegistry; private final SystemIndices systemIndices; private final Set indexSettingProviders; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; /** * This is the cluster state task executor for all template-based actions. 
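A minimal sketch, assuming a ClusterSettings instance and a DataStreamFactoryRetention fallback are already available, of how the new retention settings introduced in this change are created and read:

    DataStreamGlobalRetentionSettings retentionSettings =
        DataStreamGlobalRetentionSettings.create(clusterSettings, factoryRetention); // registers listeners for both dynamic settings
    TimeValue defaultRetention = retentionSettings.getDefaultRetention(); // data_streams.lifecycle.retention.default, null if unset
    TimeValue maxRetention = retentionSettings.getMaxRetention();         // data_streams.lifecycle.retention.max, null if unset
    DataStreamGlobalRetention globalRetention = retentionSettings.get();  // null when neither value is defined
    // The validators reject values below 10 seconds and a default that exceeds the max.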
@@ -183,7 +183,7 @@ public MetadataIndexTemplateService( NamedXContentRegistry xContentRegistry, SystemIndices systemIndices, IndexSettingProviders indexSettingProviders, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.clusterService = clusterService; this.taskQueue = clusterService.createTaskQueue("index-templates", Priority.URGENT, TEMPLATE_TASK_EXECUTOR); @@ -193,7 +193,7 @@ public MetadataIndexTemplateService( this.xContentRegistry = xContentRegistry; this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } public void removeTemplates( @@ -345,7 +345,7 @@ public ClusterState addComponentTemplate( tempStateWithComponentTemplateAdded.metadata(), composableTemplateName, composableTemplate, - globalRetentionResolver.provide() + globalRetentionSettings.get() ); validateIndexTemplateV2(composableTemplateName, composableTemplate, tempStateWithComponentTemplateAdded); } catch (Exception e) { @@ -369,7 +369,7 @@ public ClusterState addComponentTemplate( } if (finalComponentTemplate.template().lifecycle() != null) { - finalComponentTemplate.template().lifecycle().addWarningHeaderIfDataRetentionNotEffective(globalRetentionResolver.provide()); + finalComponentTemplate.template().lifecycle().addWarningHeaderIfDataRetentionNotEffective(globalRetentionSettings.get()); } logger.info("{} component template [{}]", existing == null ? "adding" : "updating", name); @@ -730,7 +730,7 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT validate(name, templateToValidate); validateDataStreamsStillReferenced(currentState, name, templateToValidate); - validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionResolver.provide()); + validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionSettings.get()); if (templateToValidate.isDeprecated() == false) { validateUseOfDeprecatedComponentTemplates(name, templateToValidate, currentState.metadata().componentTemplates()); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java index 52eee5af3f6f5..838f2998d339f 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java @@ -117,4 +117,11 @@ public void readBytes(byte[] b, int offset, int len) { System.arraycopy(bytes, pos, b, offset, len); pos += len; } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int toRead = Math.min(len, available()); + readBytes(b, off, toRead); + return toRead; + } } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java index c0ef0e0abf39b..b84c67bd8c8a2 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java @@ -97,6 +97,11 @@ public int read() throws IOException { return delegate.read(); } + @Override + public int read(byte[] b, int off, int len) throws IOException { + return delegate.read(b, off, len); + } + @Override public void close() throws 
IOException { delegate.close(); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 8de49ded03a4e..ec0edb2d07e5a 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -104,6 +104,10 @@ public void setTransportVersion(TransportVersion version) { */ public abstract void readBytes(byte[] b, int offset, int len) throws IOException; + // force implementing bulk reads to avoid accidentally slow implementations + @Override + public abstract int read(byte[] b, int off, int len) throws IOException; + /** * Reads a bytes reference from this stream, copying any bytes read to a new {@code byte[]}. Use {@link #readReleasableBytesReference()} * when reading large bytes references where possible top avoid needless allocations and copying. @@ -903,8 +907,11 @@ public final Instant readOptionalInstant() throws IOException { private ZonedDateTime readZonedDateTime() throws IOException { final String timeZoneId = readString(); final Instant instant; - if (getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) { - instant = Instant.ofEpochSecond(readVLong(), readInt()); + if (getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT_BROKEN)) { + // epoch seconds can be negative, but it was incorrectly first written as vlong + boolean zlong = getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT); + long seconds = zlong ? readZLong() : readVLong(); + instant = Instant.ofEpochSecond(seconds, readInt()); } else { instant = Instant.ofEpochMilli(readLong()); } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 9d5b9a107ee6a..c65ae2e3463d4 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -767,8 +767,13 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep final ZonedDateTime zonedDateTime = (ZonedDateTime) v; o.writeString(zonedDateTime.getZone().getId()); Instant instant = zonedDateTime.toInstant(); - if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) { - o.writeVLong(instant.getEpochSecond()); + if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT_BROKEN)) { + // epoch seconds can be negative, but it was incorrectly first written as vlong + if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) { + o.writeZLong(instant.getEpochSecond()); + } else { + o.writeVLong(instant.getEpochSecond()); + } o.writeInt(instant.getNano()); } else { o.writeLong(instant.toEpochMilli()); diff --git a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java index be4d7c741bc92..66b4f3c82e3cf 100644 --- a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java +++ b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.core.Nullable; import java.time.Clock; import 
java.util.Collection; @@ -39,15 +40,30 @@ */ public class SchedulerEngine { - public static class Job { - private final String id; - private final Schedule schedule; - + /** + * In most cases a Job only requires a `schedule` and an `id`, but an optional `fixedStartTime` + * can also be used. This is used as a fixed `startTime` argument for all calls to + * `schedule.nextScheduledTimeAfter(startTime, now)`. Interval-based schedules use `startTime` + * as a basis time from which all run times are calculated. If a Job does not contain a + * `fixedStartTime`, this basis time will be the time at which the Job is added to the SchedulerEngine. + * This could change if a master change or restart causes a new SchedulerEngine to be constructed. + * But using a `fixedStartTime` populated from a time stored in cluster state allows the basis time + * to remain unchanged across master changes and restarts. + * + * @param id the id of the job + * @param schedule the schedule which is used to calculate when the job runs + * @param fixedStartTime a fixed time in the past which the schedule uses to calculate run times, + */ + public record Job(String id, Schedule schedule, @Nullable Long fixedStartTime) { public Job(String id, Schedule schedule) { - this.id = id; - this.schedule = schedule; + this(id, schedule, null); } + /** + * The following getters are redundant with the getters built in by the record. + * Unfortunately, getFieldName form getters are expected by serverless. + * These getters are being added back until serverless can be updated for the new getters. + */ public String getId() { return id; } @@ -55,19 +71,23 @@ public String getId() { public Schedule getSchedule() { return schedule; } - } - public static class Event { - private final String jobName; - private final long triggeredTime; - private final long scheduledTime; + public Long getFixedStartTime() { + return fixedStartTime; + } + } - public Event(String jobName, long triggeredTime, long scheduledTime) { - this.jobName = jobName; - this.triggeredTime = triggeredTime; - this.scheduledTime = scheduledTime; + public record Event(String jobName, long triggeredTime, long scheduledTime) { + @Override + public String toString() { + return "Event[jobName=" + jobName + "," + "triggeredTime=" + triggeredTime + "," + "scheduledTime=" + scheduledTime + "]"; } + /** + * The following getters are redundant with the getters built in by the record. + * Unfortunately, getFieldName form getters are expected by serverless. + * These getters are being added back until serverless can be updated for the new getters. + */ public String getJobName() { return jobName; } @@ -79,11 +99,6 @@ public long getTriggeredTime() { public long getScheduledTime() { return scheduledTime; } - - @Override - public String toString() { - return "Event[jobName=" + jobName + "," + "triggeredTime=" + triggeredTime + "," + "scheduledTime=" + scheduledTime + "]"; - } } public interface Listener { @@ -159,12 +174,13 @@ public Set scheduledJobIds() { } public void add(Job job) { - ActiveSchedule schedule = new ActiveSchedule(job.getId(), job.getSchedule(), clock.millis()); + final long startTime = job.fixedStartTime() == null ? 
clock.millis() : job.fixedStartTime(); + ActiveSchedule schedule = new ActiveSchedule(job.id(), job.schedule(), startTime); schedules.compute(schedule.name, (name, previousSchedule) -> { if (previousSchedule != null) { previousSchedule.cancel(); } - logger.debug(() -> "added job [" + job.getId() + "]"); + logger.debug(() -> "added job [" + job.id() + "]"); return schedule; }); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index d5f770ebb95fc..c023b00ec820f 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.coordination.MasterHistory; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.coordination.Reconfigurator; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.Metadata; @@ -598,6 +599,8 @@ public void apply(Settings value, Settings current, Settings previous) { TDigestExecutionHint.SETTING, MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING, MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING, - TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE + TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE, + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING, + DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING ).filter(Objects::nonNull).collect(Collectors.toSet()); } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 0ff0bb2657a5c..7bee1e895bceb 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -579,7 +579,7 @@ public Map getTransientHeaders() { } /** - * Add the {@code value} for the specified {@code key} Any duplicate {@code value} is ignored. + * Add the {@code value} for the specified {@code key}. Any duplicate {@code value} is ignored. 
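To make the SchedulerEngine change above concrete, a small hedged sketch of scheduling a job whose interval timing stays stable across master changes by pinning the basis time; the job id, interval schedule, engine instance and stored timestamp are illustrative:

    // basisTimeMillis would typically come from cluster state, e.g. the time the policy was created
    SchedulerEngine.Job job = new SchedulerEngine.Job("slm-job", intervalSchedule, basisTimeMillis);
    schedulerEngine.add(job); // nextScheduledTimeAfter(basisTimeMillis, now) no longer shifts on restart

    // the two-argument constructor keeps the old behaviour: the basis time is the moment the job is added
    SchedulerEngine.Job legacy = new SchedulerEngine.Job("slm-job", intervalSchedule);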
* * @param key the header name * @param value the header value diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index 8208e4bd70c34..97c0679bed34f 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -299,7 +299,7 @@ protected void doClose() throws IOException { @Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(HEALTH_PERIODIC_LOGGER_JOB_NAME) && this.enabled) { + if (event.jobName().equals(HEALTH_PERIODIC_LOGGER_JOB_NAME) && this.enabled) { this.tryToLogHealth(); } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 49eb6d84f0b1e..b137cfe27a514 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.DocumentDimensions; @@ -297,6 +298,11 @@ public void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) { public boolean isSyntheticSourceEnabled() { return true; } + + @Override + public String getDefaultCodec() { + return CodecService.BEST_COMPRESSION_CODEC; + } }; private static void validateTimeSeriesSettings(Map, Object> settings) { @@ -466,6 +472,10 @@ public String getName() { */ public abstract boolean isSyntheticSourceEnabled(); + public String getDefaultCodec() { + return CodecService.DEFAULT_CODEC; + } + /** * Parse a string into an {@link IndexMode}. */ diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java index 1228c908f7c18..685e9774b04a7 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java @@ -24,8 +24,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; -import java.util.Objects; - /** * Class that encapsulates the logic of figuring out the most appropriate file format for a given field, across postings, doc values and * vectors. 
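The IndexMode change above adds a per-mode codec default; a two-line sketch of the intent (which concrete mode overrides it is not visible in this hunk):

    String codecName = indexMode.getDefaultCodec();
    // CodecService.DEFAULT_CODEC for most modes; CodecService.BEST_COMPRESSION_CODEC for the overriding mode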
@@ -33,7 +31,6 @@ public class PerFieldFormatSupplier { private final MapperService mapperService; - private final BigArrays bigArrays; private final DocValuesFormat docValuesFormat = new Lucene90DocValuesFormat(); private final KnnVectorsFormat knnVectorsFormat = new Lucene99HnswVectorsFormat(); private final ES87BloomFilterPostingsFormat bloomFilterPostingsFormat; @@ -43,7 +40,6 @@ public class PerFieldFormatSupplier { public PerFieldFormatSupplier(MapperService mapperService, BigArrays bigArrays) { this.mapperService = mapperService; - this.bigArrays = Objects.requireNonNull(bigArrays); this.bloomFilterPostingsFormat = new ES87BloomFilterPostingsFormat(bigArrays, this::internalGetPostingsFormatForField); this.tsdbDocValuesFormat = new ES87TSDBDocValuesFormat(); this.es812PostingsFormat = new ES812PostingsFormat(); diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java index 1aada2a153c3c..3aaf2ee5a8c4b 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java @@ -874,10 +874,6 @@ public int advance(int target) throws IOException { private void skipPositions() throws IOException { // Skip positions now: int toSkip = posPendingCount - freq; - // if (DEBUG) { - // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); - // } - final int leftInBlock = BLOCK_SIZE - posBufferUpto; if (toSkip < leftInBlock) { int end = posBufferUpto + toSkip; @@ -1010,7 +1006,7 @@ final class BlockImpactsDocsEnum extends ImpactsEnum { final boolean indexHasFreqs; - private int docFreq; // number of docs in this posting list + private final int docFreq; // number of docs in this posting list private int blockUpto; // number of documents in or before the current block private int doc; // doc we last read private long accum; // accumulator for doc deltas @@ -1211,8 +1207,8 @@ final class BlockImpactsPostingsEnum extends ImpactsEnum { final boolean indexHasOffsets; final boolean indexHasPayloads; - private int docFreq; // number of docs in this posting list - private long totalTermFreq; // number of positions in this posting list + private final int docFreq; // number of docs in this posting list + private final long totalTermFreq; // number of positions in this posting list private int docUpto; // how many docs we've read private int doc; // doc we last read private long accum; // accumulator for doc deltas @@ -1228,19 +1224,19 @@ final class BlockImpactsPostingsEnum extends ImpactsEnum { private long posPendingFP; // Where this term's postings start in the .doc file: - private long docTermStartFP; + private final long docTermStartFP; // Where this term's postings start in the .pos file: - private long posTermStartFP; + private final long posTermStartFP; // Where this term's payloads/offsets start in the .pay // file: - private long payTermStartFP; + private final long payTermStartFP; // File pointer where the last (vInt encoded) pos delta // block is. 
We need this to know whether to bulk // decode vs vInt decode the block: - private long lastPosBlockFP; + private final long lastPosBlockFP; private int nextSkipDoc = -1; @@ -1507,8 +1503,8 @@ final class BlockImpactsEverythingEnum extends ImpactsEnum { final boolean indexHasOffsets; final boolean indexHasPayloads; - private int docFreq; // number of docs in this posting list - private long totalTermFreq; // number of positions in this posting list + private final int docFreq; // number of docs in this posting list + private final long totalTermFreq; // number of positions in this posting list private int docUpto; // how many docs we've read private int posDocUpTo; // for how many docs we've read positions, offsets, and payloads private int doc; // doc we last read @@ -1528,19 +1524,19 @@ final class BlockImpactsEverythingEnum extends ImpactsEnum { private long payPendingFP; // Where this term's postings start in the .doc file: - private long docTermStartFP; + private final long docTermStartFP; // Where this term's postings start in the .pos file: - private long posTermStartFP; + private final long posTermStartFP; // Where this term's payloads/offsets start in the .pay // file: - private long payTermStartFP; + private final long payTermStartFP; // File pointer where the last (vInt encoded) pos delta // block is. We need this to know whether to bulk // decode vs vInt decode the block: - private long lastPosBlockFP; + private final long lastPosBlockFP; private int nextSkipDoc = -1; @@ -1835,10 +1831,6 @@ public int advance(int target) throws IOException { private void skipPositions() throws IOException { // Skip positions now: int toSkip = posPendingCount - (int) freqBuffer[docBufferUpto - 1]; - // if (DEBUG) { - // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); - // } - final int leftInBlock = BLOCK_SIZE - posBufferUpto; if (toSkip < leftInBlock) { int end = posBufferUpto + toSkip; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java index f9b36114361ca..8dd99392625fd 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java @@ -54,8 +54,8 @@ *

Therefore, we'll trim df before passing it to the interface. see trim(int) */ class ES812SkipReader extends MultiLevelSkipListReader { - private long[] docPointer; - private long[] posPointer; + private final long[] docPointer; + private final long[] posPointer; private long[] payPointer; private int[] posBufferUpto; private int[] payloadByteUpto; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java index dbfb7c86a1475..98c516fc890e8 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java @@ -51,8 +51,8 @@ * uptos(position, payload). 4. start offset. */ final class ES812SkipWriter extends MultiLevelSkipListWriter { - private int[] lastSkipDoc; - private long[] lastSkipDocPointer; + private final int[] lastSkipDoc; + private final long[] lastSkipDocPointer; private long[] lastSkipPosPointer; private long[] lastSkipPayPointer; @@ -66,7 +66,7 @@ final class ES812SkipWriter extends MultiLevelSkipListWriter { private long curPayPointer; private int curPosBufferUpto; private int curPayloadByteUpto; - private CompetitiveImpactAccumulator[] curCompetitiveFreqNorms; + private final CompetitiveImpactAccumulator[] curCompetitiveFreqNorms; private boolean fieldHasPositions; private boolean fieldHasOffsets; private boolean fieldHasPayloads; @@ -197,7 +197,7 @@ protected void writeSkipData(int level, DataOutput skipBuffer) throws IOExceptio } CompetitiveImpactAccumulator competitiveFreqNorms = curCompetitiveFreqNorms[level]; - assert competitiveFreqNorms.getCompetitiveFreqNormPairs().size() > 0; + assert competitiveFreqNorms.getCompetitiveFreqNormPairs().isEmpty() == false; if (level + 1 < numberOfSkipLevels) { curCompetitiveFreqNorms[level + 1].addAll(competitiveFreqNorms); } diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java index fb90327770674..b6e1bb503045c 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java @@ -342,14 +342,10 @@ public BytesRef lookupOrd(int ord) throws IOException { @Override public int lookupTerm(BytesRef key) throws IOException { TermsEnum.SeekStatus status = termsEnum.seekCeil(key); - switch (status) { - case FOUND: - return Math.toIntExact(termsEnum.ord()); - case NOT_FOUND: - case END: - default: - return Math.toIntExact(-1L - termsEnum.ord()); - } + return switch (status) { + case FOUND -> Math.toIntExact(termsEnum.ord()); + default -> Math.toIntExact(-1L - termsEnum.ord()); + }; } @Override @@ -384,14 +380,10 @@ public BytesRef lookupOrd(long ord) throws IOException { @Override public long lookupTerm(BytesRef key) throws IOException { TermsEnum.SeekStatus status = termsEnum.seekCeil(key); - switch (status) { - case FOUND: - return termsEnum.ord(); - case NOT_FOUND: - case END: - default: - return -1L - termsEnum.ord(); - } + return switch (status) { + case FOUND -> termsEnum.ord(); + default -> -1L - termsEnum.ord(); + }; } @Override @@ -400,7 +392,7 @@ public TermsEnum termsEnum() throws IOException { } } - private class TermsDict extends BaseTermsEnum { + private static class TermsDict extends BaseTermsEnum { static final int LZ4_DECOMPRESSOR_PADDING = 
7; final TermsDictEntry entry; diff --git a/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java index b827bb6436f07..840b37611374a 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java +++ b/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java @@ -78,6 +78,10 @@ public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOCo return super.fieldsWriter(directory, si, context); } + public Mode getMode() { + return mode; + } + private static class ZstdCompressionMode extends CompressionMode { private final int level; diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 079d6479a63e4..317adcc67cf59 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecProvider; import org.elasticsearch.index.codec.CodecService; @@ -96,7 +97,10 @@ public Supplier retentionLeasesSupplier() { * This setting is also settable on the node and the index level, it's commonly used in hot/cold node archs where index is likely * allocated on both `kind` of nodes. */ - public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", s -> { + public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", settings -> { + IndexMode indexMode = IndexSettings.MODE.get(settings); + return indexMode.getDefaultCodec(); + }, s -> { switch (s) { case CodecService.DEFAULT_CODEC: case CodecService.LEGACY_DEFAULT_CODEC: @@ -181,7 +185,7 @@ public EngineConfig( this.similarity = similarity; this.codecProvider = codecProvider; this.eventListener = eventListener; - codecName = indexSettings.getValue(INDEX_CODEC_SETTING); + this.codecName = indexSettings.getValue(INDEX_CODEC_SETTING); this.mapperService = mapperService; // We need to make the indexing buffer for this shard at least as large // as the amount of memory that is available for all engines on the diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index aad8d5f6dfa2a..35f0130c58706 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -41,6 +41,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.Consumer; import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MAX_DIMS_COUNT; @@ -274,7 +275,7 @@ static void parseObjectOrNested(DocumentParserContext context) throws IOExceptio context.addIgnoredField( new IgnoredSourceFieldMapper.NameValue( context.parent().fullPath(), - context.parent().fullPath().indexOf(currentFieldName), + context.parent().fullPath().lastIndexOf(currentFieldName), XContentDataHelper.encodeToken(parser), context.doc() ) @@ -301,7 +302,7 @@ static void parseObjectOrNested(DocumentParserContext context) 
throws IOExceptio context.addIgnoredField( new IgnoredSourceFieldMapper.NameValue( context.parent().fullPath(), - context.parent().fullPath().indexOf(context.parent().leafName()), + context.parent().fullPath().lastIndexOf(context.parent().leafName()), XContentDataHelper.encodeXContentBuilder(tuple.v2()), context.doc() ) @@ -476,7 +477,7 @@ static void parseObjectOrField(DocumentParserContext context, Mapper mapper) thr private static boolean shouldFlattenObject(DocumentParserContext context, FieldMapper fieldMapper) { return context.parser().currentToken() == XContentParser.Token.START_OBJECT - && context.parent().subobjects() == false + && context.parent().subobjects() != ObjectMapper.Subobjects.ENABLED && fieldMapper.supportsParsingObject() == false; } @@ -517,7 +518,7 @@ private static void parseObject(final DocumentParserContext context, String curr private static void doParseObject(DocumentParserContext context, String currentFieldName, Mapper objectMapper) throws IOException { context.path().add(currentFieldName); boolean withinLeafObject = context.path().isWithinLeafObject(); - if (objectMapper instanceof ObjectMapper objMapper && objMapper.subobjects() == false) { + if (objectMapper instanceof ObjectMapper objMapper && objMapper.subobjects() != ObjectMapper.Subobjects.ENABLED) { context.path().setWithinLeafObject(true); } parseObjectOrField(context, objectMapper); @@ -563,7 +564,7 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur } else { dynamicObjectMapper = DynamicFieldsBuilder.createDynamicObjectMapper(context, currentFieldName); } - if (context.parent().subobjects() == false) { + if (context.parent().subobjects() == ObjectMapper.Subobjects.DISABLED) { if (dynamicObjectMapper instanceof NestedObjectMapper) { throw new DocumentParsingException( context.parser().getTokenLocation(), @@ -1012,7 +1013,7 @@ private static class NoOpObjectMapper extends ObjectMapper { name, fullPath, Explicit.IMPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, Dynamic.RUNTIME, Collections.emptyMap() @@ -1051,7 +1052,7 @@ private static class RootDocumentParserContext extends DocumentParserContext { mappingLookup.getMapping().getRoot(), ObjectMapper.Dynamic.getRootDynamic(mappingLookup) ); - if (mappingLookup.getMapping().getRoot().subobjects()) { + if (mappingLookup.getMapping().getRoot().subobjects() == ObjectMapper.Subobjects.ENABLED) { this.parser = DotExpandingXContentParser.expandDots(parser, this.path); } else { this.parser = parser; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java index d479cb97e3fd3..6eb1920df02c8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java @@ -161,7 +161,7 @@ static Mapper createDynamicObjectMapper(DocumentParserContext context, String na Mapper mapper = createObjectMapperFromTemplate(context, name); return mapper != null ? 
mapper - : new ObjectMapper.Builder(name, ObjectMapper.Defaults.SUBOBJECTS).enabled(ObjectMapper.Defaults.ENABLED) + : new ObjectMapper.Builder(name, context.parent().subobjects).enabled(ObjectMapper.Defaults.ENABLED) .build(context.createDynamicMapperBuilderContext()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index efbc75490550d..f94a05b2a8658 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -83,7 +83,7 @@ void write(XContentBuilder builder) throws IOException { XContentDataHelper.decodeAndWrite(builder, value()); } - private String getFieldName() { + String getFieldName() { return parentOffset() == 0 ? name() : name().substring(parentOffset()); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 15d77ba6d2229..7810fcdc64773 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -31,7 +31,8 @@ public Set getFeatures() { KeywordFieldMapper.KEYWORD_DIMENSION_IGNORE_ABOVE, IndexModeFieldMapper.QUERYING_INDEX_MODE, NodeMappingStats.SEGMENT_LEVEL_FIELDS_STATS, - BooleanFieldMapper.BOOLEAN_DIMENSION + BooleanFieldMapper.BOOLEAN_DIMENSION, + ObjectMapper.SUBOBJECTS_AUTO ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 76212f9899f5c..f61f91250516a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Stream; @@ -49,7 +50,7 @@ public static class Builder extends ObjectMapper.Builder { private final Function bitSetProducer; public Builder(String name, IndexVersion indexCreatedVersion, Function bitSetProducer) { - super(name, Explicit.IMPLICIT_TRUE); + super(name, Optional.empty()); this.indexCreatedVersion = indexCreatedVersion; this.bitSetProducer = bitSetProducer; } @@ -121,7 +122,7 @@ public static class TypeParser extends ObjectMapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, MappingParserContext parserContext) throws MapperParsingException { - if (parseSubobjects(node).explicit()) { + if (parseSubobjects(node).isPresent()) { throw new MapperParsingException("Nested type [" + name + "] does not support [subobjects] parameter"); } NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder( @@ -209,7 +210,7 @@ public MapperBuilderContext createChildContext(String name, Dynamic dynamic) { Query nestedTypeFilter, Function bitsetProducer ) { - super(name, fullPath, enabled, Explicit.IMPLICIT_TRUE, storeArraySource, dynamic, mappers); + super(name, fullPath, enabled, Optional.empty(), storeArraySource, dynamic, mappers); this.parentTypeFilter = parentTypeFilter; this.nestedTypePath = nestedTypePath; this.nestedTypeFilter = nestedTypeFilter; @@ -440,8 +441,8 @@ public DocValuesLoader docValuesLoader(LeafReader 
leafReader, int[] docIdsInLeaf } private List collectChildren(int parentDoc, BitSet parentDocs, DocIdSetIterator childIt) throws IOException { - assert parentDocs.get(parentDoc) : "wrong context, doc " + parentDoc + " is not a parent of " + nestedTypePath; - final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); + assert parentDoc < 0 || parentDocs.get(parentDoc) : "wrong context, doc " + parentDoc + " is not a parent of " + nestedTypePath; + final int prevParentDoc = parentDoc > 0 ? parentDocs.prevSetBit(parentDoc - 1) : -1; int childDocId = childIt.docID(); if (childDocId <= prevParentDoc) { childDocId = childIt.advance(prevParentDoc + 1); @@ -463,17 +464,14 @@ public boolean hasValue() { public void write(XContentBuilder b) throws IOException { assert (children != null && children.size() > 0); if (children.size() == 1) { - b.startObject(leafName()); + b.field(leafName()); leafStoredFieldLoader.advanceTo(children.get(0)); leafSourceLoader.write(leafStoredFieldLoader, children.get(0), b); - b.endObject(); } else { b.startArray(leafName()); for (int childId : children) { - b.startObject(); leafStoredFieldLoader.advanceTo(childId); leafSourceLoader.write(leafStoredFieldLoader, childId, b); - b.endObject(); } b.endArray(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index a3d5999a3dcd2..29ec0357d7c1e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -9,13 +9,14 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.LeafReader; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -32,6 +33,8 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.TreeMap; import java.util.stream.Stream; @@ -40,10 +43,50 @@ public class ObjectMapper extends Mapper { public static final String CONTENT_TYPE = "object"; static final String STORE_ARRAY_SOURCE_PARAM = "store_array_source"; + static final NodeFeature SUBOBJECTS_AUTO = new NodeFeature("mapper.subobjects_auto"); + + /** + * Enhances the previously boolean option for subobjects support with an intermediate mode `auto` that uses + * any objects that are present in the mappings and flattens any fields defined outside the predefined objects. + */ + public enum Subobjects { + ENABLED(Boolean.TRUE), + DISABLED(Boolean.FALSE), + AUTO("auto"); + + private final Object printedValue; + + Subobjects(Object printedValue) { + this.printedValue = printedValue; + } + + static Subobjects from(Object node) { + if (node instanceof Boolean value) { + return value ? 
Subobjects.ENABLED : Subobjects.DISABLED; + } + if (node instanceof String value) { + if (value.equalsIgnoreCase("true")) { + return ENABLED; + } + if (value.equalsIgnoreCase("false")) { + return DISABLED; + } + if (value.equalsIgnoreCase("auto")) { + return AUTO; + } + } + throw new ElasticsearchParseException("unknown subobjects value: " + node); + } + + @Override + public String toString() { + return printedValue.toString(); + } + } public static class Defaults { public static final boolean ENABLED = true; - public static final Explicit SUBOBJECTS = Explicit.IMPLICIT_TRUE; + public static final Optional SUBOBJECTS = Optional.empty(); public static final Explicit STORE_ARRAY_SOURCE = Explicit.IMPLICIT_FALSE; public static final Dynamic DYNAMIC = Dynamic.TRUE; } @@ -74,19 +117,19 @@ DynamicFieldsBuilder getDynamicFieldsBuilder() { * If no dynamic settings are explicitly configured, we default to {@link #TRUE} */ static Dynamic getRootDynamic(MappingLookup mappingLookup) { - ObjectMapper.Dynamic rootDynamic = mappingLookup.getMapping().getRoot().dynamic; + Dynamic rootDynamic = mappingLookup.getMapping().getRoot().dynamic; return rootDynamic == null ? Defaults.DYNAMIC : rootDynamic; } } public static class Builder extends Mapper.Builder { - protected final Explicit subobjects; + protected Optional subobjects; protected Explicit enabled = Explicit.IMPLICIT_TRUE; protected Explicit storeArraySource = Defaults.STORE_ARRAY_SOURCE; protected Dynamic dynamic; protected final List mappersBuilders = new ArrayList<>(); - public Builder(String name, Explicit subobjects) { + public Builder(String name, Optional subobjects) { super(name); this.subobjects = subobjects; } @@ -131,24 +174,31 @@ public Mapper build(MapperBuilderContext context) { public final void addDynamic(String name, String prefix, Mapper mapper, DocumentParserContext context) { // If the mapper to add has no dots, or the current object mapper has subobjects set to false, // we just add it as it is for sure a leaf mapper - if (name.contains(".") == false || subobjects.value() == false) { + if (name.contains(".") == false || (subobjects.isPresent() && (subobjects.get() == Subobjects.DISABLED))) { add(name, mapper); - } - // otherwise we strip off the first object path of the mapper name, load or create - // the relevant object mapper, and then recurse down into it, passing the remainder - // of the mapper name. So for a mapper 'foo.bar.baz', we locate 'foo' and then - // call addDynamic on it with the name 'bar.baz', and next call addDynamic on 'bar' with the name 'baz'. - else { + } else { + // We strip off the first object path of the mapper name, load or create + // the relevant object mapper, and then recurse down into it, passing the remainder + // of the mapper name. So for a mapper 'foo.bar.baz', we locate 'foo' and then + // call addDynamic on it with the name 'bar.baz', and next call addDynamic on 'bar' with the name 'baz'. int firstDotIndex = name.indexOf('.'); String immediateChild = name.substring(0, firstDotIndex); String immediateChildFullName = prefix == null ? immediateChild : prefix + "." 
+ immediateChild; - ObjectMapper.Builder parentBuilder = findObjectBuilder(immediateChildFullName, context); - parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context); - add(parentBuilder); + Builder parentBuilder = findObjectBuilder(immediateChildFullName, context); + if (parentBuilder != null) { + parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context); + add(parentBuilder); + } else if (subobjects.isPresent() && subobjects.get() == Subobjects.AUTO) { + // No matching parent object was found, the mapper is added as a leaf - similar to subobjects false. + add(name, mapper); + } else { + // Expected to find a matching parent object but got null. + throw new IllegalStateException("Missing intermediate object " + immediateChildFullName); + } } } - private static ObjectMapper.Builder findObjectBuilder(String fullName, DocumentParserContext context) { + private static Builder findObjectBuilder(String fullName, DocumentParserContext context) { // does the object mapper already exist? if so, use that ObjectMapper objectMapper = context.mappingLookup().objectMappers().get(fullName); if (objectMapper != null) { @@ -159,7 +209,8 @@ private static ObjectMapper.Builder findObjectBuilder(String fullName, DocumentP if (objectMapper != null) { return objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated()); } - throw new IllegalStateException("Missing intermediate object " + fullName); + // no object mapper found + return null; } protected final Map buildMappers(MapperBuilderContext mapperBuilderContext) { @@ -175,7 +226,7 @@ protected final Map buildMappers(MapperBuilderContext mapperBuil // mix of object notation and dot notation. mapper = existing.merge(mapper, MapperMergeContext.from(mapperBuilderContext, Long.MAX_VALUE)); } - if (subobjects.value() == false && mapper instanceof ObjectMapper objectMapper) { + if (subobjects.isPresent() && subobjects.get() == Subobjects.DISABLED && mapper instanceof ObjectMapper objectMapper) { // We're parsing a mapping that has set `subobjects: false` but has defined sub-objects objectMapper.asFlattenedFieldMappers(mapperBuilderContext).forEach(m -> mappers.put(m.leafName(), m)); } else { @@ -214,8 +265,8 @@ public boolean supportsVersion(IndexVersion indexCreatedVersion) { public Mapper.Builder parse(String name, Map node, MappingParserContext parserContext) throws MapperParsingException { parserContext.incrementMappingObjectDepth(); // throws MapperParsingException if depth limit is exceeded - Explicit subobjects = parseSubobjects(node); - ObjectMapper.Builder builder = new Builder(name, subobjects); + Optional subobjects = parseSubobjects(node); + Builder builder = new Builder(name, subobjects); parseObjectFields(node, parserContext, builder); parserContext.decrementMappingObjectDepth(); return builder; @@ -237,7 +288,7 @@ protected static boolean parseObjectOrDocumentTypeProperties( String fieldName, Object fieldNode, MappingParserContext parserContext, - ObjectMapper.Builder builder + Builder builder ) { if (fieldName.equals("dynamic")) { String value = fieldNode.toString(); @@ -276,19 +327,15 @@ protected static boolean parseObjectOrDocumentTypeProperties( return false; } - protected static Explicit parseSubobjects(Map node) { + protected static Optional parseSubobjects(Map node) { Object subobjectsNode = node.remove("subobjects"); if (subobjectsNode != null) { - return Explicit.explicitBoolean(XContentMapValues.nodeBooleanValue(subobjectsNode, 
"subobjects.subobjects")); + return Optional.of(Subobjects.from(subobjectsNode)); } return Defaults.SUBOBJECTS; } - protected static void parseProperties( - ObjectMapper.Builder objBuilder, - Map propsNode, - MappingParserContext parserContext - ) { + protected static void parseProperties(Builder objBuilder, Map propsNode, MappingParserContext parserContext) { Iterator> iterator = propsNode.entrySet().iterator(); while (iterator.hasNext()) { Map.Entry entry = iterator.next(); @@ -320,7 +367,9 @@ protected static void parseProperties( } } - if (objBuilder.subobjects.value() == false && type.equals(NestedObjectMapper.CONTENT_TYPE)) { + if (objBuilder.subobjects.isPresent() + && objBuilder.subobjects.get() == Subobjects.DISABLED + && type.equals(NestedObjectMapper.CONTENT_TYPE)) { throw new MapperParsingException( "Tried to add nested object [" + fieldName @@ -334,7 +383,7 @@ protected static void parseProperties( throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + fieldName + "]"); } Mapper.Builder fieldBuilder; - if (objBuilder.subobjects.value() == false) { + if (objBuilder.subobjects.isPresent() && objBuilder.subobjects.get() != Subobjects.ENABLED) { fieldBuilder = typeParser.parse(fieldName, propNode, parserContext); } else { String[] fieldNameParts = fieldName.split("\\."); @@ -347,7 +396,7 @@ protected static void parseProperties( for (int i = fieldNameParts.length - 2; i >= 0; --i) { String intermediateObjectName = fieldNameParts[i]; validateFieldName(intermediateObjectName, parserContext.indexVersionCreated()); - ObjectMapper.Builder intermediate = new ObjectMapper.Builder(intermediateObjectName, Defaults.SUBOBJECTS); + Builder intermediate = new Builder(intermediateObjectName, Defaults.SUBOBJECTS); intermediate.add(fieldBuilder); fieldBuilder = intermediate; } @@ -382,7 +431,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate private final String fullPath; protected final Explicit enabled; - protected final Explicit subobjects; + protected final Optional subobjects; protected final Explicit storeArraySource; protected final Dynamic dynamic; @@ -392,7 +441,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate String name, String fullPath, Explicit enabled, - Explicit subobjects, + Optional subobjects, Explicit storeArraySource, Dynamic dynamic, Map mappers @@ -410,15 +459,17 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate } else { this.mappers = Map.copyOf(mappers); } - assert subobjects.value() || this.mappers.values().stream().noneMatch(m -> m instanceof ObjectMapper) + assert subobjects.isEmpty() + || subobjects.get() != Subobjects.DISABLED + || this.mappers.values().stream().noneMatch(m -> m instanceof ObjectMapper) : "When subobjects is false, mappers must not contain an ObjectMapper"; } /** * @return a Builder that will produce an empty ObjectMapper with the same configuration as this one */ - public ObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { - ObjectMapper.Builder builder = new ObjectMapper.Builder(leafName(), subobjects); + public Builder newBuilder(IndexVersion indexVersionCreated) { + Builder builder = new Builder(leafName(), subobjects); builder.enabled = this.enabled; builder.dynamic = this.dynamic; return builder; @@ -463,8 +514,8 @@ public final Dynamic dynamic() { return dynamic; } - public final boolean subobjects() { - return subobjects.value(); + public final Subobjects subobjects() { + return 
subobjects.orElse(Subobjects.ENABLED); } public final boolean storeArraySource() { @@ -473,9 +524,6 @@ public final boolean storeArraySource() { @Override public void validate(MappingLookup mappers) { - if (storeArraySource() && mappers.isSourceSynthetic() == false) { - throw new MapperParsingException("Parameter [" + STORE_ARRAY_SOURCE_PARAM + "] can only be set in synthetic source mode."); - } for (Mapper mapper : this.mappers.values()) { mapper.validate(mappers); } @@ -508,9 +556,9 @@ public ObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContex protected record MergeResult( Explicit enabled, - Explicit subObjects, + Optional subObjects, Explicit trackArraySource, - ObjectMapper.Dynamic dynamic, + Dynamic dynamic, Map mappers ) { static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, MapperMergeContext parentMergeContext) { @@ -529,11 +577,11 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma } else { enabled = existing.enabled; } - final Explicit subObjects; - if (mergeWithObject.subobjects.explicit()) { + final Optional subObjects; + if (mergeWithObject.subobjects.isPresent()) { if (reason == MergeReason.INDEX_TEMPLATE) { subObjects = mergeWithObject.subobjects; - } else if (existing.subobjects != mergeWithObject.subobjects) { + } else if (existing.subobjects() != mergeWithObject.subobjects()) { throw new MapperException( "the [subobjects] parameter can't be updated for the object mapping [" + existing.fullPath() + "]" ); @@ -558,7 +606,7 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma trackArraySource = existing.storeArraySource; } MapperMergeContext objectMergeContext = existing.createChildContext(parentMergeContext, existing.leafName()); - Map mergedMappers = buildMergedMappers(existing, mergeWithObject, objectMergeContext, subObjects.value()); + Map mergedMappers = buildMergedMappers(existing, mergeWithObject, objectMergeContext, subObjects); return new MergeResult( enabled, subObjects, @@ -572,11 +620,13 @@ private static Map buildMergedMappers( ObjectMapper existing, ObjectMapper mergeWithObject, MapperMergeContext objectMergeContext, - boolean subobjects + Optional subobjects ) { Map mergedMappers = new HashMap<>(); for (Mapper childOfExistingMapper : existing.mappers.values()) { - if (subobjects == false && childOfExistingMapper instanceof ObjectMapper objectMapper) { + if (subobjects.isPresent() + && subobjects.get() == Subobjects.DISABLED + && childOfExistingMapper instanceof ObjectMapper objectMapper) { // An existing mapping with sub-objects is merged with a mapping that has set `subobjects: false` objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) .forEach(m -> mergedMappers.put(m.leafName(), m)); @@ -587,7 +637,9 @@ private static Map buildMergedMappers( for (Mapper mergeWithMapper : mergeWithObject) { Mapper mergeIntoMapper = mergedMappers.get(mergeWithMapper.leafName()); if (mergeIntoMapper == null) { - if (subobjects == false && mergeWithMapper instanceof ObjectMapper objectMapper) { + if (subobjects.isPresent() + && subobjects.get() == Subobjects.DISABLED + && mergeWithMapper instanceof ObjectMapper objectMapper) { // An existing mapping that has set `subobjects: false` is merged with a mapping with sub-objects objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) .stream() @@ -599,7 +651,8 @@ private static Map buildMergedMappers( putMergedMapper(mergedMappers, 
truncateObjectMapper(objectMergeContext, om)); } } else if (mergeIntoMapper instanceof ObjectMapper objectMapper) { - assert subobjects : "existing object mappers are supposed to be flattened if subobjects is false"; + assert subobjects.isEmpty() || subobjects.get() != Subobjects.DISABLED + : "existing object mappers are supposed to be flattened if subobjects is false"; putMergedMapper(mergedMappers, objectMapper.merge(mergeWithMapper, objectMergeContext)); } else { assert mergeIntoMapper instanceof FieldMapper || mergeIntoMapper instanceof FieldAliasMapper; @@ -681,7 +734,7 @@ private void ensureFlattenable(MapperBuilderContext context, ContentPath path) { if (isEnabled() == false) { throwAutoFlatteningException(path, "the value of [enabled] is [false]"); } - if (subobjects.explicit() && subobjects()) { + if (subobjects.isPresent() && subobjects.get() == Subobjects.ENABLED) { throwAutoFlatteningException(path, "the value of [subobjects] is [true]"); } } @@ -716,8 +769,8 @@ void toXContent(XContentBuilder builder, Params params, ToXContent custom) throw if (isEnabled() != Defaults.ENABLED) { builder.field("enabled", enabled.value()); } - if (subobjects != Defaults.SUBOBJECTS) { - builder.field("subobjects", subobjects.value()); + if (subobjects.isPresent()) { + builder.field("subobjects", subobjects.get().printedValue); } if (storeArraySource != Defaults.STORE_ARRAY_SOURCE) { builder.field(STORE_ARRAY_SOURCE_PARAM, storeArraySource.value()); @@ -792,9 +845,9 @@ public Stream> storedFieldLoaders() { @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { - List loaders = new ArrayList<>(); + List loaders = new ArrayList<>(); for (SourceLoader.SyntheticFieldLoader field : fields) { - SourceLoader.SyntheticFieldLoader.DocValuesLoader loader = field.docValuesLoader(leafReader, docIdsInLeaf); + DocValuesLoader loader = field.docValuesLoader(leafReader, docIdsInLeaf); if (loader != null) { loaders.add(loader); } @@ -806,7 +859,7 @@ public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf } private class ObjectDocValuesLoader implements DocValuesLoader { - private final List loaders; + private final List loaders; private ObjectDocValuesLoader(List loaders) { this.loaders = loaders; @@ -815,7 +868,7 @@ private ObjectDocValuesLoader(List loaders) { @Override public boolean advanceToDoc(int docId) throws IOException { boolean anyLeafHasDocValues = false; - for (SourceLoader.SyntheticFieldLoader.DocValuesLoader docValueLoader : loaders) { + for (DocValuesLoader docValueLoader : loaders) { boolean leafHasValue = docValueLoader.advanceToDoc(docId); anyLeafHasDocValues |= leafHasValue; } @@ -843,28 +896,32 @@ public void write(XContentBuilder b) throws IOException { return; } - if (isFragment == false) { - if (isRoot()) { - b.startObject(); - } else { - b.startObject(leafName()); - } + if (isRoot() || isFragment) { + b.startObject(); + } else { + b.startObject(leafName()); } if (ignoredValues != null && ignoredValues.isEmpty() == false) { // Use an ordered map between field names and writer functions, to order writing by field name. 
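The hunk below replaces the per-field writer functions with dedicated FieldWriter objects collected in a TreeMap, so output is sorted by field name, values recovered from ignored source are merged into a single writer per field, and a regular loader is only used when that field is not already stored separately. A rough sketch of that collection step, using plain strings and lists in place of the real FieldWriter types (names here are placeholders):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Rough model: one entry per field name, sorted by name, ignored-source values merged,
// regular loaders skipped when the field was already stored (to avoid double-printing).
class OrderedFieldWritersSketch {
    static Map<String, List<String>> collect(List<Map.Entry<String, String>> ignoredValues, Map<String, String> loadedFields) {
        Map<String, List<String>> ordered = new TreeMap<>();
        for (Map.Entry<String, String> value : ignoredValues) {
            ordered.computeIfAbsent(value.getKey(), k -> new ArrayList<>()).add(value.getValue()); // merge same-name values
        }
        loadedFields.forEach((field, value) -> ordered.computeIfAbsent(field, k -> new ArrayList<>(List.of(value))));
        return ordered; // iteration order is field-name order
    }

    public static void main(String[] args) {
        System.out.println(collect(List.of(Map.entry("b", "1"), Map.entry("b", "2")), Map.of("a", "3", "b", "ignored")));
        // {a=[3], b=[1, 2]}
    }
}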
- Map> orderedFields = new TreeMap<>(); + Map orderedFields = new TreeMap<>(); for (IgnoredSourceFieldMapper.NameValue value : ignoredValues) { - orderedFields.put(value.name(), value::write); + var existing = orderedFields.get(value.name()); + if (existing == null) { + orderedFields.put(value.name(), new FieldWriter.IgnoredSource(value)); + } else if (existing instanceof FieldWriter.IgnoredSource isw) { + isw.mergeWith(value); + } } for (SourceLoader.SyntheticFieldLoader field : fields) { if (field.hasValue()) { // Skip if the field source is stored separately, to avoid double-printing. - orderedFields.putIfAbsent(field.fieldName(), field::write); + orderedFields.computeIfAbsent(field.fieldName(), k -> new FieldWriter.FieldLoader(field)); } } + for (var writer : orderedFields.values()) { - writer.accept(b); + writer.writeTo(b); } ignoredValues = null; } else { @@ -875,9 +932,7 @@ public void write(XContentBuilder b) throws IOException { } } hasValue = false; - if (isFragment == false) { - b.endObject(); - } + b.endObject(); } @Override @@ -897,6 +952,42 @@ public boolean setIgnoredValues(Map values; + + IgnoredSource(IgnoredSourceFieldMapper.NameValue initialValue) { + this.fieldName = initialValue.name(); + this.leafName = initialValue.getFieldName(); + this.values = new ArrayList<>(); + this.values.add(initialValue.value()); + } + + @Override + public void writeTo(XContentBuilder builder) throws IOException { + XContentDataHelper.writeMerged(builder, leafName, values); + } + + public FieldWriter mergeWith(IgnoredSourceFieldMapper.NameValue nameValue) { + assert Objects.equals(nameValue.name(), fieldName) : "IgnoredSource is merged with wrong field data"; + + values.add(nameValue.value()); + return this; + } + } + } } protected boolean isRoot() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java index 0b7f4de157bdc..7370fe3c61772 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java @@ -18,6 +18,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Optional; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue; @@ -52,7 +53,7 @@ public static class Builder extends ObjectMapper.Builder { public Builder(String name) { // Subobjects are not currently supported. - super(name, Explicit.IMPLICIT_FALSE); + super(name, Optional.of(Subobjects.DISABLED)); } @Override @@ -103,7 +104,7 @@ public PassThroughObjectMapper build(MapperBuilderContext context) { int priority ) { // Subobjects are not currently supported. 
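PassThroughObjectMapper keeps subobjects pinned to DISABLED in the hunk below; for other object mappers, the new ObjectMapper.Subobjects.from parser introduced earlier in this patch accepts booleans, their string forms, and "auto". A small usage sketch of that contract; only SubobjectsParsingSketch is invented, and it sits in the mapper package because from(Object) is package-private.

// Usage sketch only; assumes the classes from this patch are on the classpath.
package org.elasticsearch.index.mapper;

class SubobjectsParsingSketch {
    public static void main(String[] args) {
        System.out.println(ObjectMapper.Subobjects.from(true).name());    // ENABLED
        System.out.println(ObjectMapper.Subobjects.from("false").name()); // DISABLED
        System.out.println(ObjectMapper.Subobjects.from("auto").name());  // AUTO
        // Any other value, e.g. from(42), throws ElasticsearchParseException ("unknown subobjects value: ...").
    }
}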
- super(name, fullPath, enabled, Explicit.IMPLICIT_FALSE, Explicit.IMPLICIT_FALSE, dynamic, mappers); + super(name, fullPath, enabled, Optional.of(Subobjects.DISABLED), Explicit.IMPLICIT_FALSE, dynamic, mappers); this.timeSeriesDimensionSubFields = timeSeriesDimensionSubFields; this.priority = priority; if (priority < 0) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 11aabd8726f4f..6c178330e5c9e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -34,6 +34,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.function.BiConsumer; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; @@ -75,7 +76,7 @@ public static class Builder extends ObjectMapper.Builder { protected Explicit dateDetection = Defaults.DATE_DETECTION; protected Explicit numericDetection = Defaults.NUMERIC_DETECTION; - public Builder(String name, Explicit subobjects) { + public Builder(String name, Optional subobjects) { super(name, subobjects); } @@ -132,7 +133,7 @@ public RootObjectMapper build(MapperBuilderContext context) { RootObjectMapper( String name, Explicit enabled, - Explicit subobjects, + Optional subobjects, Explicit trackArraySource, Dynamic dynamic, Map mappers, @@ -442,7 +443,7 @@ protected boolean isRoot() { public static RootObjectMapper.Builder parse(String name, Map node, MappingParserContext parserContext) throws MapperParsingException { - Explicit subobjects = parseSubobjects(node); + Optional subobjects = parseSubobjects(node); RootObjectMapper.Builder builder = new Builder(name, subobjects); Iterator> iterator = node.entrySet().iterator(); while (iterator.hasNext()) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java index d97e03d3874ee..fefafbf13017b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java @@ -26,6 +26,8 @@ import java.math.BigInteger; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.List; +import java.util.Optional; /** * Helper class for processing field data of any type, as provided by the {@link XContentParser}. @@ -92,6 +94,70 @@ static void decodeAndWrite(XContentBuilder b, BytesRef r) throws IOException { } } + /** + * Writes encoded values to provided builder. If there are multiple values they are merged into + * a single resulting array. + * @param b destination + * @param fieldName name of the field that is written + * @param encodedParts subset of field data encoded using methods of this class. Can contain arrays which will be flattened. 
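The merge behaviour documented in this javadoc can be pictured with plain Java lists instead of encoded XContent: a single part is written as-is, several parts become one array, and a part that is itself an array is flattened into that array. The class below is a rough model under that simplification, not the actual implementation.

import java.util.ArrayList;
import java.util.List;

class WriteMergedSketch {
    static Object merged(List<Object> parts) {
        if (parts.size() == 1) {
            return parts.get(0); // single value: no wrapping array
        }
        List<Object> out = new ArrayList<>();
        for (Object part : parts) {
            if (part instanceof List<?> inner) {
                out.addAll(inner); // nested arrays are flattened, not nested
            } else {
                out.add(part);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(merged(List.of(1, List.of(2, 3), 4))); // [1, 2, 3, 4]
    }
}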
+ * @throws IOException + */ + static void writeMerged(XContentBuilder b, String fieldName, List encodedParts) throws IOException { + if (encodedParts.isEmpty()) { + return; + } + + if (encodedParts.size() == 1) { + b.field(fieldName); + XContentDataHelper.decodeAndWrite(b, encodedParts.get(0)); + return; + } + + b.startArray(fieldName); + + for (var encodedValue : encodedParts) { + Optional encodedXContentType = switch ((char) encodedValue.bytes[encodedValue.offset]) { + case CBOR_OBJECT_ENCODING, JSON_OBJECT_ENCODING, YAML_OBJECT_ENCODING, SMILE_OBJECT_ENCODING -> Optional.of( + getXContentType(encodedValue) + ); + default -> Optional.empty(); + }; + if (encodedXContentType.isEmpty()) { + // This is a plain value, we can just write it + XContentDataHelper.decodeAndWrite(b, encodedValue); + } else { + // Encoded value could be an array which needs to be flattened + // since we are already inside an array. + try ( + XContentParser parser = encodedXContentType.get() + .xContent() + .createParser( + XContentParserConfiguration.EMPTY, + encodedValue.bytes, + encodedValue.offset + 1, + encodedValue.length - 1 + ) + ) { + if (parser.currentToken() == null) { + parser.nextToken(); + } + + // It's an array, we will flatten it. + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + b.copyCurrentStructure(parser); + } + } else { + // It is a single complex structure (an object), write it as is. + b.copyCurrentStructure(parser); + } + } + } + } + + b.endArray(); + } + /** * Returns the {@link XContentType} to use for creating an XContentBuilder to decode the passed value. */ diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index a7fa88633b806..f90d8945857b7 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -84,7 +84,7 @@ public RetentionLeaseBackgroundSyncAction( threadPool.executor(ThreadPool.Names.MANAGEMENT), SyncGlobalCheckpointAfterOperation.DoNotSync, PrimaryActionExecution.RejectOnOverload, - ReplicaActionExecution.SubjectToCircuitBreaker + ReplicaActionExecution.BypassCircuitBreaker ); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index b7d632eab3bc5..67ed7c6e4c191 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -91,10 +91,10 @@ public RetentionLeaseSyncAction( RetentionLeaseSyncAction.Request::new, RetentionLeaseSyncAction.Request::new, new ManagementOnlyExecutorFunction(threadPool), - PrimaryActionExecution.RejectOnOverload, + PrimaryActionExecution.Force, indexingPressure, systemIndices, - ReplicaActionExecution.SubjectToCircuitBreaker + ReplicaActionExecution.BypassCircuitBreaker ); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java index 6d1456040c8fa..9420d923107e1 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java +++ 
b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java @@ -66,6 +66,15 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { digest.update(b, offset, len); } + @Override + public int read(byte[] b, int off, int len) throws IOException { + int read = delegate.read(b, off, len); + if (read > 0) { + digest.update(b, off, read); + } + return read; + } + private static final ThreadLocal buffer = ThreadLocal.withInitial(() -> new byte[8]); @Override diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java index 62166115820f5..f8330404c1538 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java @@ -9,12 +9,12 @@ package org.elasticsearch.inference; import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import java.util.List; import java.util.Map; -public interface InferenceServiceResults extends NamedWriteable, ToXContentFragment { +public interface InferenceServiceResults extends NamedWriteable, ChunkedToXContent { /** * Transform the result to match the format required for the TransportCoordinatedInferenceAction. diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 0b1a135a17214..20f97e1871483 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Strings; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; @@ -18,6 +19,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.DeletePipelineRequest; @@ -88,6 +90,7 @@ import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntConsumer; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -117,6 +120,7 @@ public class IngestService implements ClusterStateApplier, ReportingService pipelines = Map.of(); private final ThreadPool threadPool; private final IngestMetric totalMetrics = new IngestMetric(); + private final FailureStoreMetrics failureStoreMetrics; private final List> ingestClusterStateListeners = new CopyOnWriteArrayList<>(); private volatile ClusterState state; @@ -190,7 +194,8 @@ public IngestService( List ingestPlugins, Client client, MatcherWatchdog matcherWatchdog, - DocumentParsingProvider documentParsingProvider + DocumentParsingProvider documentParsingProvider, + FailureStoreMetrics failureStoreMetrics ) { this.clusterService = clusterService; this.scriptService = scriptService; @@ 
-212,6 +217,7 @@ public IngestService( ); this.threadPool = threadPool; this.taskQueue = clusterService.createTaskQueue("ingest-pipelines", Priority.NORMAL, PIPELINE_TASK_EXECUTOR); + this.failureStoreMetrics = failureStoreMetrics; } /** @@ -228,6 +234,7 @@ public IngestService( this.taskQueue = ingestService.taskQueue; this.pipelines = ingestService.pipelines; this.state = ingestService.state; + this.failureStoreMetrics = ingestService.failureStoreMetrics; } private static Map processorFactories(List ingestPlugins, Processor.Parameters parameters) { @@ -691,7 +698,7 @@ private static IngestPipelinesExecutionResult failAndStoreFor(String index, Exce * @param actionRequests The collection of requests to be processed. * @param onDropped A callback executed when a document is dropped by a pipeline. * Accepts the slot in the collection of requests that the document occupies. - * @param shouldStoreFailure A predicate executed on each ingest failure to determine if the + * @param resolveFailureStore A function executed on each ingest failure to determine if the * failure should be stored somewhere. * @param onStoreFailure A callback executed when a document fails ingest but the failure should * be persisted elsewhere. Accepts the slot in the collection of requests @@ -709,7 +716,7 @@ public void executeBulkRequest( final int numberOfActionRequests, final Iterable> actionRequests, final IntConsumer onDropped, - final Predicate shouldStoreFailure, + final Function resolveFailureStore, final TriConsumer onStoreFailure, final BiConsumer onFailure, final BiConsumer onCompletion, @@ -794,7 +801,7 @@ public void onFailure(Exception e) { } ); - executePipelines(pipelines, indexRequest, ingestDocument, shouldStoreFailure, documentListener); + executePipelines(pipelines, indexRequest, ingestDocument, resolveFailureStore, documentListener); indexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes()); assert actionRequest.index() != null; @@ -885,7 +892,7 @@ private void executePipelines( final PipelineIterator pipelines, final IndexRequest indexRequest, final IngestDocument ingestDocument, - final Predicate shouldStoreFailure, + final Function resolveFailureStore, final ActionListener listener ) { assert pipelines.hasNext(); @@ -898,9 +905,22 @@ private void executePipelines( ingestDocument.resetReroute(); final String originalIndex = indexRequest.indices()[0]; final Consumer exceptionHandler = (Exception e) -> { - if (shouldStoreFailure.test(originalIndex)) { + String errorType = ElasticsearchException.getExceptionName(ExceptionsHelper.unwrapCause(e)); + // If `failureStoreResolution` is true, we store the failure. If it's false, the target is a data stream, + // but it doesn't have the failure store enabled. If it's null, the target wasn't a data stream. + Boolean failureStoreResolution = resolveFailureStore.apply(originalIndex); + if (failureStoreResolution != null && failureStoreResolution) { + failureStoreMetrics.incrementFailureStore(originalIndex, errorType, FailureStoreMetrics.ErrorLocation.PIPELINE); listener.onResponse(IngestPipelinesExecutionResult.failAndStoreFor(originalIndex, e)); } else { + if (failureStoreResolution != null) { + // If this document targeted a data stream that didn't have the failure store enabled, we increment + // the rejected counter. + // We also increment the total counter because this request will not reach the code that increments + // the total counter for non-rejected documents. 
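A condensed sketch of the accounting rules these comments describe; FailureAccountingSketch and its counters are placeholders, not the FailureStoreMetrics API.

class FailureAccountingSketch {
    long total, storedInFailureStore, rejected;

    // resolution == null  -> target is not a data stream: no failure-store metrics at all
    // resolution == true  -> the failure is redirected to the failure store
    // resolution == false -> data stream without a failure store: count it as total + rejected
    void onPipelineFailure(Boolean resolution) {
        if (resolution == null) {
            return;
        }
        if (resolution) {
            storedInFailureStore++;
        } else {
            total++;   // counted here because the document never reaches the indexing path
            rejected++;
        }
    }

    public static void main(String[] args) {
        FailureAccountingSketch metrics = new FailureAccountingSketch();
        metrics.onPipelineFailure(false); // data stream without failure store
        System.out.println(metrics.total + " " + metrics.rejected + " " + metrics.storedInFailureStore); // 1 1 0
    }
}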
+ failureStoreMetrics.incrementTotal(originalIndex); + failureStoreMetrics.incrementRejected(originalIndex, errorType, FailureStoreMetrics.ErrorLocation.PIPELINE, false); + } listener.onFailure(e); } }; @@ -928,6 +948,20 @@ private void executePipelines( } if (keep == false) { + // We only increment the total counter for dropped docs here, because these docs don't reach the code + // that ordinarily take care of that. + // We reuse `resolveFailureStore` here to determine whether the index request targets a data stream, + // because we only want to track these metrics for data streams. + Boolean failureStoreResolution = resolveFailureStore.apply(originalIndex); + if (failureStoreResolution != null) { + // Get index abstraction, resolving date math if it exists + IndexAbstraction indexAbstraction = state.metadata() + .getIndicesLookup() + .get(IndexNameExpressionResolver.resolveDateMathExpression(originalIndex, threadPool.absoluteTimeInMillis())); + DataStream dataStream = DataStream.resolveDataStream(indexAbstraction, state.metadata()); + String dataStreamName = dataStream != null ? dataStream.getName() : originalIndex; + failureStoreMetrics.incrementTotal(dataStreamName); + } listener.onResponse(IngestPipelinesExecutionResult.DISCARD_RESULT); return; // document dropped! } @@ -1019,7 +1053,7 @@ private void executePipelines( } if (newPipelines.hasNext()) { - executePipelines(newPipelines, indexRequest, ingestDocument, shouldStoreFailure, listener); + executePipelines(newPipelines, indexRequest, ingestDocument, resolveFailureStore, listener); } else { // update the index request's source and (potentially) cache the timestamp for TSDB updateIndexRequestSource(indexRequest, ingestDocument); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 27a82cf6a2501..9c5b72a573d44 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; import org.elasticsearch.action.admin.indices.template.reservedstate.ReservedComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; import org.elasticsearch.action.ingest.ReservedPipelineAction; import org.elasticsearch.action.search.SearchExecutionStatsCollector; @@ -42,7 +43,7 @@ import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; import org.elasticsearch.cluster.features.NodeFeaturesFixupListener; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; @@ -588,25 +589,27 @@ private ScriptService createScriptService(SettingsModule settingsModule, ThreadP return scriptService; } - private DataStreamGlobalRetentionProvider createDataStreamServicesAndGlobalRetentionResolver( + private DataStreamGlobalRetentionSettings createDataStreamServicesAndGlobalRetentionResolver( + Settings settings, 
ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, MetadataCreateIndexService metadataCreateIndexService ) { - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterService.getClusterSettings(), DataStreamFactoryRetention.load(pluginsService, clusterService.getClusterSettings()) ); - modules.bindToInstance(DataStreamGlobalRetentionProvider.class, dataStreamGlobalRetentionProvider); + modules.bindToInstance(DataStreamGlobalRetentionSettings.class, dataStreamGlobalRetentionSettings); modules.bindToInstance( MetadataCreateDataStreamService.class, new MetadataCreateDataStreamService(threadPool, clusterService, metadataCreateIndexService) ); modules.bindToInstance( MetadataDataStreamsService.class, - new MetadataDataStreamsService(clusterService, indicesService, dataStreamGlobalRetentionProvider) + new MetadataDataStreamsService(clusterService, indicesService, dataStreamGlobalRetentionSettings) ); - return dataStreamGlobalRetentionProvider; + return dataStreamGlobalRetentionSettings; } private UpdateHelper createUpdateHelper(DocumentParsingProvider documentParsingProvider, ScriptService scriptService) { @@ -657,6 +660,7 @@ private void construct( modules.bindToInstance(DocumentParsingProvider.class, documentParsingProvider); + FailureStoreMetrics failureStoreMetrics = new FailureStoreMetrics(telemetryProvider.getMeterRegistry()); final IngestService ingestService = new IngestService( clusterService, threadPool, @@ -666,7 +670,8 @@ private void construct( pluginsService.filterPlugins(IngestPlugin.class).toList(), client, IngestService.createGrokThreadWatchdog(environment, threadPool), - documentParsingProvider + documentParsingProvider, + failureStoreMetrics ); SystemIndices systemIndices = createSystemIndices(settings); @@ -815,7 +820,8 @@ private void construct( threadPool ); - final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = createDataStreamServicesAndGlobalRetentionResolver( + final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = createDataStreamServicesAndGlobalRetentionResolver( + settings, threadPool, clusterService, indicesService, @@ -840,7 +846,7 @@ record PluginServiceInstances( IndicesService indicesService, FeatureService featureService, SystemIndices systemIndices, - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider, + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings, DocumentParsingProvider documentParsingProvider ) implements Plugin.PluginServices {} PluginServiceInstances pluginServices = new PluginServiceInstances( @@ -861,7 +867,7 @@ record PluginServiceInstances( indicesService, featureService, systemIndices, - dataStreamGlobalRetentionProvider, + dataStreamGlobalRetentionSettings, documentParsingProvider ); @@ -895,7 +901,7 @@ record PluginServiceInstances( systemIndices, indexSettingProviders, metadataCreateIndexService, - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ), pluginsService.loadSingletonServiceProvider(RestExtension.class, RestExtension::allowAll) ); @@ -1151,6 +1157,7 @@ record PluginServiceInstances( b.bind(FileSettingsService.class).toInstance(fileSettingsService); b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); b.bind(DataStreamAutoShardingService.class).toInstance(dataStreamAutoShardingService); + 
b.bind(FailureStoreMetrics.class).toInstance(failureStoreMetrics); }); if (ReadinessService.enabled(environment)) { @@ -1465,7 +1472,7 @@ private List> buildReservedStateHandlers( SystemIndices systemIndices, IndexSettingProviders indexSettingProviders, MetadataCreateIndexService metadataCreateIndexService, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { List> reservedStateHandlers = new ArrayList<>(); @@ -1480,7 +1487,7 @@ private List> buildReservedStateHandlers( xContentRegistry, systemIndices, indexSettingProviders, - globalRetentionResolver + globalRetentionSettings ); reservedStateHandlers.add(new ReservedComposableIndexTemplateAction(templateService, settingsModule.getIndexScopedSettings())); diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index 1815f4403019f..a8bfda54b0646 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -10,7 +10,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.routing.RerouteService; @@ -156,10 +156,10 @@ public interface PluginServices { SystemIndices systemIndices(); /** - * A service that resolves the data stream global retention that applies to + * A service that holds the data stream global retention settings that applies to * data streams managed by the data stream lifecycle. 
*/ - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider(); + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings(); /** * A provider of utilities to observe and report parsing of documents diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index 37f1850c1fb2d..67d59924652db 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -22,7 +22,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -85,9 +84,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - in.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, listener); + in.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, repositoryDataUpdateListener, onCompletion); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java index 948ae747e11a9..2aba6fbbebce2 100644 --- a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -92,9 +91,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(createCreationException()); + repositoryDataUpdateListener.onFailure(createCreationException()); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 06a53053bca88..fd52c21cad3f8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -22,7 +22,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -161,13 +160,19 @@ public void onFailure(Exception e) { * @param repositoryDataGeneration the generation of the {@link RepositoryData} in the repository at the start of the deletion * @param minimumNodeVersion the minimum {@link IndexVersion} across the nodes in the cluster, with which the repository * format must remain 
compatible - * @param listener completion listener, see {@link SnapshotDeleteListener}. + * @param repositoryDataUpdateListener listener completed when the {@link RepositoryData} is updated, or when the process fails + * without changing the repository contents - in either case, it is now safe for the next operation + * on this repository to proceed. + * @param onCompletion action executed on completion of the cleanup actions that follow a successful + * {@link RepositoryData} update; not called if {@code repositoryDataUpdateListener} completes + * exceptionally. */ void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ); /** diff --git a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java index 7821c865e166c..853de48a483a1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -90,9 +89,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(createUnknownTypeException()); + repositoryDataUpdateListener.onFailure(createUnknownTypeException()); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index ddef1e1b808fe..e8af752bec179 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -123,7 +123,6 @@ import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.snapshots.AbortedSnapshotException; import org.elasticsearch.snapshots.PausedSnapshotException; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -847,8 +846,8 @@ private RepositoryData safeRepositoryData(long repositoryDataGeneration, Map wrapWithWeakConsistencyProtection(ActionListener listener) { + return listener; } @Override @@ -856,19 +855,15 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - createSnapshotsDeletion(snapshotIds, repositoryDataGeneration, minimumNodeVersion, new ActionListener<>() { - @Override - public void onResponse(SnapshotsDeletion snapshotsDeletion) { - snapshotsDeletion.runDelete(listener); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + createSnapshotsDeletion( + snapshotIds, + 
repositoryDataGeneration, + minimumNodeVersion, + repositoryDataUpdateListener.delegateFailureAndWrap((l, snapshotsDeletion) -> snapshotsDeletion.runDelete(l, onCompletion)) + ); } /** @@ -933,7 +928,7 @@ private void createSnapshotsDeletion( * *

* Until the {@link RepositoryData} is updated there should be no other activities in the repository, and in particular the root - * blob must not change until it is updated by this deletion and {@link SnapshotDeleteListener#onRepositoryDataWritten} is called. + * blob must not change until it is updated by this deletion and the {@code repositoryDataUpdateListener} is completed. *

*/ class SnapshotsDeletion { @@ -1027,40 +1022,29 @@ class SnapshotsDeletion { // --------------------------------------------------------------------------------------------------------------------------------- // The overall flow of execution - void runDelete(SnapshotDeleteListener listener) { - final var releasingListener = new SnapshotDeleteListener() { - @Override - public void onDone() { - try { - shardBlobsToDelete.close(); - } finally { - listener.onDone(); - } - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - listener.onRepositoryDataWritten(repositoryData); + void runDelete(ActionListener repositoryDataUpdateListener, Runnable onCompletion) { + final var releasingListener = repositoryDataUpdateListener.delegateResponse((l, e) -> { + try { + shardBlobsToDelete.close(); + } finally { + l.onFailure(e); } - - @Override - public void onFailure(Exception e) { - try { - shardBlobsToDelete.close(); - } finally { - listener.onFailure(e); - } - + }); + final Runnable releasingOnCompletion = () -> { + try { + shardBlobsToDelete.close(); + } finally { + onCompletion.run(); } }; if (useShardGenerations) { - runWithUniqueShardMetadataNaming(releasingListener); + runWithUniqueShardMetadataNaming(releasingListener, releasingOnCompletion); } else { - runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(releasingListener)); + runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(releasingListener), releasingOnCompletion); } } - private void runWithUniqueShardMetadataNaming(SnapshotDeleteListener listener) { + private void runWithUniqueShardMetadataNaming(ActionListener repositoryDataUpdateListener, Runnable onCompletion) { SubscribableListener // First write the new shard state metadata (without the removed snapshots) and compute deletion targets @@ -1082,30 +1066,29 @@ private void runWithUniqueShardMetadataNaming(SnapshotDeleteListener listener) { ); }) - .addListener( - ActionListener.wrap( - // Once we have updated the repository, run the clean-ups - newRepositoryData -> { - listener.onRepositoryDataWritten(newRepositoryData); - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - try (var refs = new RefCountingRunnable(listener::onDone)) { - cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); - cleanupUnlinkedShardLevelBlobs(refs.acquireListener()); - } - }, - listener::onFailure - ) - ); + .andThen((l, newRepositoryData) -> { + l.onResponse(newRepositoryData); + // Once we have updated the repository, run the unreferenced blobs cleanup in parallel to shard-level snapshot deletion + try (var refs = new RefCountingRunnable(onCompletion)) { + cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); + cleanupUnlinkedShardLevelBlobs(refs.acquireListener()); + } + }) + + .addListener(repositoryDataUpdateListener); } - private void runWithLegacyNumericShardMetadataNaming(SnapshotDeleteListener listener) { + private void runWithLegacyNumericShardMetadataNaming( + ActionListener repositoryDataUpdateListener, + Runnable onCompletion + ) { // Write the new repository data first (with the removed snapshot), using no shard generations updateRepositoryData( originalRepositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY), - ActionListener.wrap(newRepositoryData -> { + repositoryDataUpdateListener.delegateFailure((delegate, newRepositoryData) -> { try (var refs = new RefCountingRunnable(() -> { - 
listener.onRepositoryDataWritten(newRepositoryData); - listener.onDone(); + delegate.onResponse(newRepositoryData); + onCompletion.run(); })) { // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); @@ -1120,7 +1103,7 @@ private void runWithLegacyNumericShardMetadataNaming(SnapshotDeleteListener list ) ); } - }, listener::onFailure) + }) ); } diff --git a/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java b/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java index 02834f03f54ab..9faa5e4e4450c 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java @@ -74,16 +74,12 @@ public void computeRankScoresForGlobalResults( RankFeatureDoc[] featureDocs = extractFeatureDocs(rankSearchResults); // generate the final `topResults` results, and pass them to fetch phase through the `rankListener` - if (featureDocs.length == 0) { - rankListener.onResponse(new RankFeatureDoc[0]); - } else { - computeScores(featureDocs, rankListener.delegateFailureAndWrap((listener, scores) -> { - for (int i = 0; i < featureDocs.length; i++) { - featureDocs[i].score = scores[i]; - } - listener.onResponse(featureDocs); - })); - } + computeScores(featureDocs, rankListener.delegateFailureAndWrap((listener, scores) -> { + for (int i = 0; i < featureDocs.length; i++) { + featureDocs[i].score = scores[i]; + } + listener.onResponse(featureDocs); + })); } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java deleted file mode 100644 index 324ad736d7248..0000000000000 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.snapshots; - -import org.elasticsearch.repositories.RepositoryData; - -public interface SnapshotDeleteListener { - - /** - * Invoked once the snapshots have been fully deleted from the repository, including all async cleanup operations, indicating that - * listeners waiting for the end of the deletion can now be notified. - */ - void onDone(); - - /** - * Invoked once the updated {@link RepositoryData} has been written to the repository and it is safe for the next repository operation - * to proceed. - * - * @param repositoryData updated repository data - */ - void onRepositoryDataWritten(RepositoryData repositoryData); - - /** - * Invoked if writing updated {@link RepositoryData} to the repository failed. Once {@link #onRepositoryDataWritten(RepositoryData)} has - * been invoked this method will never be invoked. 
- * - * @param e exception during metadata steps of snapshot delete - */ - void onFailure(Exception e); -} diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 6d7404d7472e5..ed88b7272245f 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -2491,19 +2492,11 @@ private void deleteSnapshotsFromRepository( ); return; } + final SubscribableListener doneFuture = new SubscribableListener<>(); repositoriesService.repository(deleteEntry.repository()) - .deleteSnapshots(snapshotIds, repositoryData.getGenId(), minNodeVersion, new SnapshotDeleteListener() { - - private final ListenableFuture doneFuture = new ListenableFuture<>(); - - @Override - public void onDone() { - logger.info("snapshots {} deleted", snapshotIds); - doneFuture.onResponse(null); - } - + .deleteSnapshots(snapshotIds, repositoryData.getGenId(), minNodeVersion, new ActionListener<>() { @Override - public void onRepositoryDataWritten(RepositoryData updatedRepoData) { + public void onResponse(RepositoryData updatedRepoData) { removeSnapshotDeletionFromClusterState( deleteEntry, updatedRepoData, @@ -2549,6 +2542,9 @@ protected void handleListeners(List> deleteListeners) { } ); } + }, () -> { + logger.info("snapshots {} deleted", snapshotIds); + doneFuture.onResponse(null); }); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index e541fef65a0f9..64b9b4f0b69d8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -158,15 +158,11 @@ protected NodeResponse nodeOperation(CancellableNodeRequest request, Task task) if (shouldBlock) { // Simulate a job that takes forever to finish // Using periodic checks method to identify that the task was cancelled - try { - waitUntil(() -> { - ((CancellableTask) task).ensureNotCancelled(); - return false; - }); - fail("It should have thrown an exception"); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + ((CancellableTask) task).ensureNotCancelled(); + return false; + }); + fail("It should have thrown an exception"); } debugDelay("op4"); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 16392b3f59baa..903ecfe2b2aa7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -283,16 +283,12 @@ protected void doExecute(Task task, NodesRequest request, ActionListener { - if 
(((CancellableTask) task).isCancelled()) { - throw new RuntimeException("Cancelled!"); - } - return ((TestTask) task).isBlocked() == false; - }); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + if (((CancellableTask) task).isCancelled()) { + throw new RuntimeException("Cancelled!"); + } + return ((TestTask) task).isBlocked() == false; + }); } logger.info("Test task finished on the node {}", clusterService.localNode()); return new NodeResponse(clusterService.localNode()); @@ -301,9 +297,7 @@ protected NodeResponse nodeOperation(NodeRequest request, Task task) { public static class UnblockTestTaskResponse implements Writeable { - UnblockTestTaskResponse() { - - } + UnblockTestTaskResponse() {} UnblockTestTaskResponse(StreamInput in) {} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java index b2a29e2bcfeb7..32a74fef61209 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -75,7 +76,7 @@ public class ReservedComposableIndexTemplateActionTests extends ESTestCase { ClusterService clusterService; IndexScopedSettings indexScopedSettings; IndicesService indicesService; - private DataStreamGlobalRetentionProvider globalRetentionResolver; + private DataStreamGlobalRetentionSettings globalRetentionSettings; @Before public void setup() throws IOException { @@ -92,7 +93,10 @@ public void setup() throws IOException { doReturn(mapperService).when(indexService).mapperService(); doReturn(indexService).when(indicesService).createIndex(any(), any(), anyBoolean()); - globalRetentionResolver = new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()); + globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ); templateService = new MetadataIndexTemplateService( clusterService, mock(MetadataCreateIndexService.class), @@ -101,7 +105,7 @@ public void setup() throws IOException { mock(NamedXContentRegistry.class), mock(SystemIndices.class), new IndexSettingProviders(Set.of()), - globalRetentionResolver + 
globalRetentionSettings ); } @@ -896,7 +900,7 @@ public void testTemplatesWithReservedPrefix() throws Exception { mock(NamedXContentRegistry.class), mock(SystemIndices.class), new IndexSettingProviders(Set.of()), - globalRetentionResolver + globalRetentionSettings ); ClusterState state = ClusterState.builder(new ClusterName("elasticsearch")).metadata(metadata).build(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java index 76bf8dc79b855..e950901a538b4 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java @@ -1164,7 +1164,8 @@ private BulkOperation newBulkOperation( timeZero, listener, observer, - failureStoreDocumentConverter + failureStoreDocumentConverter, + FailureStoreMetrics.NOOP ); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 4ca4e7158e454..1d3d514da13a3 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -130,7 +130,8 @@ public boolean hasIndexAbstraction(String indexAbstraction, ClusterState state) mock(ActionFilters.class), indexNameExpressionResolver, new IndexingPressure(Settings.EMPTY), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + FailureStoreMetrics.NOOP ) { @Override void executeBulk( diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 3683c2c271739..609237f268807 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -69,7 +69,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; -import java.util.function.Predicate; +import java.util.function.Function; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.sameInstance; @@ -110,7 +110,7 @@ public class TransportBulkActionIngestTests extends ESTestCase { /** Arguments to callbacks we want to capture, but which require generics, so we must use @Captor */ @Captor - ArgumentCaptor> redirectPredicate; + ArgumentCaptor> redirectPredicate; @Captor ArgumentCaptor> redirectHandler; @Captor @@ -155,7 +155,8 @@ class TestTransportBulkAction extends TransportBulkAction { new ActionFilters(Collections.emptySet()), TestIndexNameExpressionResolver.newInstance(), new IndexingPressure(SETTINGS), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + FailureStoreMetrics.NOOP ); } @@ -410,9 +411,10 @@ public void testIngestLocal() throws Exception { Iterator> req = bulkDocsItr.getValue().iterator(); failureHandler.getValue().accept(0, exception); // have an exception for our one index request indexRequest2.setPipeline(IngestService.NOOP_PIPELINE_NAME); // this is done by the real pipeline execution service when processing - assertTrue(redirectPredicate.getValue().test(WITH_FAILURE_STORE_ENABLED + "-1")); // ensure redirects on failure 
store data stream - assertFalse(redirectPredicate.getValue().test(WITH_DEFAULT_PIPELINE)); // no redirects for random existing indices - assertFalse(redirectPredicate.getValue().test("index")); // no redirects for non-existant indices with no templates + // ensure redirects on failure store data stream + assertTrue(redirectPredicate.getValue().apply(WITH_FAILURE_STORE_ENABLED + "-1")); + assertNull(redirectPredicate.getValue().apply(WITH_DEFAULT_PIPELINE)); // no redirects for random existing indices + assertNull(redirectPredicate.getValue().apply("index")); // no redirects for non-existent indices with no templates redirectHandler.getValue().apply(2, WITH_FAILURE_STORE_ENABLED + "-1", exception); // exception and redirect for request 3 (slot 2) completionHandler.getValue().accept(DUMMY_WRITE_THREAD, null); // all ingestion completed assertTrue(action.isExecuted); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index db3a985c00ad0..ed7cc93f0ab43 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -71,6 +71,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.junit.Assume.assumeThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -103,7 +104,8 @@ class TestTransportBulkAction extends TransportBulkAction { new ActionFilters(Collections.emptySet()), new Resolver(), new IndexingPressure(Settings.EMPTY), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + FailureStoreMetrics.NOOP ); } @@ -417,13 +419,16 @@ public void testResolveFailureStoreFromMetadata() throws Exception { .build(); // Data stream with failure store should store failures - assertThat(TransportBulkAction.shouldStoreFailureInternal(dataStreamWithFailureStore, metadata, testTime), is(true)); + assertThat(TransportBulkAction.resolveFailureInternal(dataStreamWithFailureStore, metadata, testTime), is(true)); // Data stream without failure store should not - assertThat(TransportBulkAction.shouldStoreFailureInternal(dataStreamWithoutFailureStore, metadata, testTime), is(false)); + assertThat(TransportBulkAction.resolveFailureInternal(dataStreamWithoutFailureStore, metadata, testTime), is(false)); // An index should not be considered for failure storage - assertThat(TransportBulkAction.shouldStoreFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(false)); + assertThat(TransportBulkAction.resolveFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(nullValue())); // even if that index is itself a failure store - assertThat(TransportBulkAction.shouldStoreFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime), is(false)); + assertThat( + TransportBulkAction.resolveFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime), + is(nullValue()) + ); } public void testResolveFailureStoreFromTemplate() throws Exception { @@ -454,11 +459,11 @@ public void testResolveFailureStoreFromTemplate() throws Exception { .build(); // Data stream with failure store should store failures - assertThat(TransportBulkAction.shouldStoreFailureInternal(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true)); 
+ assertThat(TransportBulkAction.resolveFailureInternal(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true)); // Data stream without failure store should not - assertThat(TransportBulkAction.shouldStoreFailureInternal(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false)); + assertThat(TransportBulkAction.resolveFailureInternal(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false)); // An index template should not be considered for failure storage - assertThat(TransportBulkAction.shouldStoreFailureInternal(indexTemplate + "-1", metadata, testTime), is(false)); + assertThat(TransportBulkAction.resolveFailureInternal(indexTemplate + "-1", metadata, testTime), is(nullValue())); } private BulkRequest buildBulkRequest(List indices) { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index 09513351652b8..626f07fe61216 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -254,7 +254,8 @@ static class TestTransportBulkAction extends TransportBulkAction { indexNameExpressionResolver, new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE, - relativeTimeProvider + relativeTimeProvider, + FailureStoreMetrics.NOOP ); } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index aea3359e18bf6..b620495472e28 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -90,7 +91,8 @@ public void setup() { Collections.singletonList(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); Map factories = ingestService.getProcessorFactories(); assertTrue(factories.containsKey("set")); diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index a45730a82dbc2..67c8599f47029 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -45,6 +45,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -582,11 +583,12 @@ public void testFailOnExtraCharacters() throws IOException { """, null); fail("should have caught second line; extra closing 
brackets"); } catch (XContentParseException e) { - assertEquals( - "[1:31] Unexpected close marker '}': expected ']' (for root starting at " - + "[Source: (byte[])\"{ \"query\": {\"match_all\": {}}}}}}different error message\"; line: 1, column: 0])\n " - + "at [Source: (byte[])\"{ \"query\": {\"match_all\": {}}}}}}different error message\"; line: 1, column: 31]", - e.getMessage() + assertThat( + e.getMessage(), + containsString( + "Unexpected close marker '}': expected ']' (for root starting at " + + "[Source: (byte[])\"{ \"query\": {\"match_all\": {}}}}}}different error message\"" + ) ); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java deleted file mode 100644 index f22664ea5b7d0..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class DataStreamGlobalRetentionProviderTests extends ESTestCase { - - public void testOnlyFactoryRetentionFallback() { - DataStreamFactoryRetention factoryRetention = randomNonEmptyFactoryRetention(); - DataStreamGlobalRetentionProvider resolver = new DataStreamGlobalRetentionProvider(factoryRetention); - DataStreamGlobalRetention globalRetention = resolver.provide(); - assertThat(globalRetention, notNullValue()); - assertThat(globalRetention.defaultRetention(), equalTo(factoryRetention.getDefaultRetention())); - assertThat(globalRetention.maxRetention(), equalTo(factoryRetention.getMaxRetention())); - } - - private static DataStreamFactoryRetention randomNonEmptyFactoryRetention() { - boolean withDefault = randomBoolean(); - TimeValue defaultRetention = withDefault ? TimeValue.timeValueDays(randomIntBetween(10, 20)) : null; - TimeValue maxRetention = withDefault && randomBoolean() ? 
null : TimeValue.timeValueDays(randomIntBetween(50, 200)); - return new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return maxRetention; - } - - @Override - public TimeValue getDefaultRetention() { - return defaultRetention; - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - }; - } - - public void testNoRetentionConfiguration() { - DataStreamGlobalRetentionProvider resolver = new DataStreamGlobalRetentionProvider( - DataStreamFactoryRetention.emptyFactoryRetention() - ); - assertThat(resolver.provide(), nullValue()); - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java new file mode 100644 index 0000000000000..78184fd7568e5 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class DataStreamGlobalRetentionSettingsTests extends ESTestCase { + + public void testDefaults() { + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + assertThat(globalRetentionSettings.getDefaultRetention(), nullValue()); + assertThat(globalRetentionSettings.getMaxRetention(), nullValue()); + + // Fallback to factory settings + TimeValue maxFactoryValue = randomPositiveTimeValue(); + TimeValue defaultFactoryValue = randomPositiveTimeValue(); + DataStreamGlobalRetentionSettings withFactorySettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + new DataStreamFactoryRetention() { + @Override + public TimeValue getMaxRetention() { + return maxFactoryValue; + } + + @Override + public TimeValue getDefaultRetention() { + return defaultFactoryValue; + } + + @Override + public void init(ClusterSettings clusterSettings) { + + } + } + ); + + assertThat(withFactorySettings.getDefaultRetention(), equalTo(defaultFactoryValue)); + assertThat(withFactorySettings.getMaxRetention(), equalTo(maxFactoryValue)); + } + + public void testMonitorsDefaultRetention() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterSettings, + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + // Test valid update + TimeValue newDefaultRetention = TimeValue.timeValueDays(randomIntBetween(1, 10)); + Settings newSettings = Settings.builder() + .put( + 
DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + newDefaultRetention.toHumanReadableString(0) + ) + .build(); + clusterSettings.applySettings(newSettings); + + assertThat(newDefaultRetention, equalTo(globalRetentionSettings.getDefaultRetention())); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), TimeValue.ZERO) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + exception.getCause().getMessage(), + containsString("Setting 'data_streams.lifecycle.retention.default' should be greater than") + ); + } + + public void testMonitorsMaxRetention() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterSettings, + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + // Test valid update + TimeValue newMaxRetention = TimeValue.timeValueDays(randomIntBetween(10, 30)); + Settings newSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), newMaxRetention.toHumanReadableString(0)) + .build(); + clusterSettings.applySettings(newSettings); + + assertThat(newMaxRetention, equalTo(globalRetentionSettings.getMaxRetention())); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), TimeValue.ZERO) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + exception.getCause().getMessage(), + containsString("Setting 'data_streams.lifecycle.retention.max' should be greater than") + ); + } + + public void testCombinationValidation() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings.create(clusterSettings, DataStreamFactoryRetention.emptyFactoryRetention()); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), TimeValue.timeValueDays(90)) + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), TimeValue.timeValueDays(30)) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + exception.getCause().getMessage(), + containsString( + "Setting [data_streams.lifecycle.retention.default=90d] cannot be greater than [data_streams.lifecycle.retention.max=30d]" + ) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java index 50ab76ed794d8..a6ced9185dbad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java @@ -39,7 +39,6 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DATA_STREAM_CONFIGURATION; import static 
org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DEFAULT_GLOBAL_RETENTION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.MAX_GLOBAL_RETENTION; -import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -348,21 +347,11 @@ public void testEffectiveRetention() { } public void testEffectiveRetentionParams() { - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams(new ToXContent.MapParams(Map.of())); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); - } - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( - new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "not-serverless")) - ); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); - } - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( - new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "serverless")) - ); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + Map initialParams = randomMap(0, 10, () -> Tuple.tuple(randomAlphaOfLength(10), randomAlphaOfLength(10))); + ToXContent.Params params = DataStreamLifecycle.addEffectiveRetentionParams(new ToXContent.MapParams(initialParams)); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + for (String key : initialParams.keySet()) { + assertThat(initialParams.get(key), equalTo(params.param(key))); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java index acfe2b4f847c4..f6417da4fa2da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java @@ -128,16 +128,22 @@ public void testUpdatingLifecycleOnADataStream() { HeaderWarning.setThreadContext(threadContext); String dataStream = randomAlphaOfLength(5); TimeValue defaultRetention = randomTimeValue(2, 100, TimeUnit.DAYS); - - DataStreamFactoryRetention factoryRetention = getDefaultFactoryRetention(defaultRetention); ClusterState before = ClusterState.builder( DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new Tuple<>(dataStream, 2)), List.of()) ).build(); + Settings settingsWithDefaultRetention = builder().put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + defaultRetention + ).build(); + MetadataDataStreamsService metadataDataStreamsService = new MetadataDataStreamsService( mock(ClusterService.class), mock(IndicesService.class), - new DataStreamGlobalRetentionProvider(factoryRetention) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(settingsWithDefaultRetention), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); ClusterState after = metadataDataStreamsService.updateDataLifecycle(before, List.of(dataStream), DataStreamLifecycle.DEFAULT); @@ -245,7 +251,9 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { 
new IndexSettingProviders(Set.of()) ); TimeValue defaultRetention = randomTimeValue(2, 100, TimeUnit.DAYS); - DataStreamFactoryRetention factoryRetention = getDefaultFactoryRetention(defaultRetention); + Settings settingsWithDefaultRetention = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), defaultRetention) + .build(); ClusterState state = ClusterState.EMPTY_STATE; MetadataIndexTemplateService metadataIndexTemplateService = new MetadataIndexTemplateService( clusterService, @@ -255,7 +263,10 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { xContentRegistry(), EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - new DataStreamGlobalRetentionProvider(factoryRetention) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(settingsWithDefaultRetention), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); @@ -283,23 +294,4 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { ) ); } - - private DataStreamFactoryRetention getDefaultFactoryRetention(TimeValue defaultRetention) { - return new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return null; - } - - @Override - public TimeValue getDefaultRetention() { - return defaultRetention; - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - }; - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java index 7ce418301a352..e0f4936300c0e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -400,7 +401,10 @@ public void testUpdateLifecycle() { MetadataDataStreamsService service = new MetadataDataStreamsService( mock(ClusterService.class), mock(IndicesService.class), - new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); { // Remove lifecycle diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index f5daac8ecd090..e66dd32b718b7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.PutRequest; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -2501,7 +2502,10 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr xContentRegistry, EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); final List throwables = new ArrayList<>(); @@ -2543,9 +2547,6 @@ public void onFailure(Exception e) { private MetadataIndexTemplateService getMetadataIndexTemplateService() { IndicesService indicesService = getInstanceFromNode(IndicesService.class); ClusterService clusterService = getInstanceFromNode(ClusterService.class); - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( - DataStreamFactoryRetention.emptyFactoryRetention() - ); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, @@ -2568,7 +2569,10 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { xContentRegistry(), EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java index b4aa58ae13f7b..8451d2fd64b9c 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -53,6 +52,8 @@ import static java.time.Instant.ofEpochSecond; import static java.time.ZonedDateTime.ofInstant; +import static org.elasticsearch.TransportVersions.ZDT_NANOS_SUPPORT; +import static org.elasticsearch.TransportVersions.ZDT_NANOS_SUPPORT_BROKEN; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; @@ -726,11 +727,15 @@ public void testReadAfterReachingEndOfStream() throws IOException { } public void testZonedDateTimeSerialization() throws IOException { - checkZonedDateTimeSerialization(TransportVersions.ZDT_NANOS_SUPPORT); + checkZonedDateTimeSerialization(ZDT_NANOS_SUPPORT); + } + + public void testZonedDateTimeMillisBwcSerializationV1() throws IOException { + checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(ZDT_NANOS_SUPPORT_BROKEN)); } public void testZonedDateTimeMillisBwcSerialization() throws IOException { - checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(TransportVersions.ZDT_NANOS_SUPPORT)); + checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(ZDT_NANOS_SUPPORT)); } public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOException { @@ -738,14 +743,18 @@ public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOExcept 
assertGenericRoundtrip(ofInstant(ofEpochSecond(1), randomZone()), tv); // just want to test a large number that will use 5+ bytes long maxEpochSecond = Integer.MAX_VALUE; + long minEpochSecond = tv.between(ZDT_NANOS_SUPPORT_BROKEN, ZDT_NANOS_SUPPORT) ? 0 : Integer.MIN_VALUE; assertGenericRoundtrip(ofInstant(ofEpochSecond(maxEpochSecond), randomZone()), tv); - assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond)), randomZone()), tv); - assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 1_000_000), randomZone()), tv); - assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 999_000_000), randomZone()), tv); - if (tv.onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) { - assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 999_999_999), randomZone()), tv); + assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond)), randomZone()), tv); + assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 1_000_000), randomZone()), tv); + assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 999_000_000), randomZone()), tv); + if (tv.onOrAfter(ZDT_NANOS_SUPPORT)) { + assertGenericRoundtrip( + ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 999_999_999), randomZone()), + tv + ); assertGenericRoundtrip( - ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), randomIntBetween(0, 999_999_999)), randomZone()), + ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), randomIntBetween(0, 999_999_999)), randomZone()), tv ); } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java index 645461778f637..cda1f9b0e29de 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java @@ -24,6 +24,15 @@ public class StreamInputTests extends ESTestCase { private StreamInput in = Mockito.spy(StreamInput.class); + + { + try { + Mockito.when(in.skip(anyLong())).thenAnswer(a -> a.getArguments()[0]); + } catch (IOException e) { + throw new AssertionError(e); + } + } + byte[] bytes = "0123456789".getBytes(UTF_8); public void testCalculateByteLengthOfAscii() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java b/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java index e10898da978be..8672189220a9f 100644 --- a/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java +++ b/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java @@ -166,7 +166,7 @@ public void testCancellingDuringRunPreventsRescheduling() throws Exception { final String jobId = randomAlphaOfLength(4); try { engine.register(event -> { - assertThat(event.getJobName(), is(jobId)); + assertThat(event.jobName(), is(jobId)); calledCount.incrementAndGet(); jobRunningLatch.countDown(); try { diff --git a/server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecIntegrationTests.java similarity index 51% rename from server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java rename to 
server/src/test/java/org/elasticsearch/index/codec/CodecIntegrationTests.java index dbe83af1a0cfb..05b9cf42e6236 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecIntegrationTests.java @@ -9,11 +9,12 @@ package org.elasticsearch.index.codec; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.codec.zstd.Zstd814StoredFieldsFormat; import org.elasticsearch.test.ESSingleNodeTestCase; import static org.hamcrest.Matchers.equalTo; -public class LegacyCodecTests extends ESSingleNodeTestCase { +public class CodecIntegrationTests extends ESSingleNodeTestCase { public void testCanConfigureLegacySettings() { assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG.isEnabled()); @@ -26,4 +27,28 @@ public void testCanConfigureLegacySettings() { codec = client().admin().indices().prepareGetSettings("index2").execute().actionGet().getSetting("index2", "index.codec"); assertThat(codec, equalTo("legacy_best_compression")); } + + public void testDefaultCodecLogsdb() { + assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG.isEnabled()); + + var indexService = createIndex("index1", Settings.builder().put("index.mode", "logsdb").build()); + var storedFieldsFormat = (Zstd814StoredFieldsFormat) indexService.getShard(0) + .getEngineOrNull() + .config() + .getCodec() + .storedFieldsFormat(); + assertThat(storedFieldsFormat.getMode(), equalTo(Zstd814StoredFieldsFormat.Mode.BEST_COMPRESSION)); + } + + public void testDefaultCodec() { + assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG.isEnabled()); + + var indexService = createIndex("index1"); + var storedFieldsFormat = (Zstd814StoredFieldsFormat) indexService.getShard(0) + .getEngineOrNull() + .config() + .getCodec() + .storedFieldsFormat(); + assertThat(storedFieldsFormat.getMode(), equalTo(Zstd814StoredFieldsFormat.Mode.BEST_SPEED)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java index 9b58e785131c9..20ae59e113c33 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java @@ -141,6 +141,9 @@ private void assertSortedDocValues(LeafReader baselineReader, LeafReader contend for (int i = 0; i < docIdsToAdvanceTo.length; i++) { int docId = docIdsToAdvanceTo[i]; int baselineTarget = assertAdvance(docId, baselineReader, contenderReader, baseline, contender); + if (baselineTarget == NO_MORE_DOCS) { + break; + } assertEquals(baseline.ordValue(), contender.ordValue()); assertEquals(baseline.lookupOrd(baseline.ordValue()), contender.lookupOrd(contender.ordValue())); i = shouldSkipDocIds(i, docId, baselineTarget, docIdsToAdvanceTo); diff --git a/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java b/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java new file mode 100644 index 0000000000000..93e9911746d18 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.codec.zstd; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.tests.index.ForceMergePolicy; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.codec.LegacyPerFieldMapperCodec; +import org.elasticsearch.index.codec.PerFieldMapperCodec; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class StoredFieldCodecDuelTests extends ESTestCase { + + private static final String STRING_FIELD = "string_field_1"; + private static final String BINARY_FIELD = "binary_field_2"; + private static final String INT_FIELD = "int_field_3"; + private static final String LONG_FIELD = "long_field_4"; + private static final String FLOAT_FIELD = "float_field_5"; + private static final String DOUBLE_FIELD = "double_field_5"; + + public void testDuelBestSpeed() throws IOException { + var baseline = new LegacyPerFieldMapperCodec(Lucene99Codec.Mode.BEST_SPEED, null, BigArrays.NON_RECYCLING_INSTANCE); + var contender = new PerFieldMapperCodec(Zstd814StoredFieldsFormat.Mode.BEST_SPEED, null, BigArrays.NON_RECYCLING_INSTANCE); + doTestDuel(baseline, contender); + } + + public void testDuelBestCompression() throws IOException { + var baseline = new LegacyPerFieldMapperCodec(Lucene99Codec.Mode.BEST_COMPRESSION, null, BigArrays.NON_RECYCLING_INSTANCE); + var contender = new PerFieldMapperCodec(Zstd814StoredFieldsFormat.Mode.BEST_COMPRESSION, null, BigArrays.NON_RECYCLING_INSTANCE); + doTestDuel(baseline, contender); + } + + static void doTestDuel(Codec baslineCodec, Codec contenderCodec) throws IOException { + try (var baselineDirectory = newDirectory(); var contenderDirectory = newDirectory()) { + int numDocs = randomIntBetween(256, 8096); + + var mergePolicy = new ForceMergePolicy(newLogMergePolicy()); + var baselineConfig = newIndexWriterConfig(); + baselineConfig.setMergePolicy(mergePolicy); + baselineConfig.setCodec(baslineCodec); + var contenderConf = newIndexWriterConfig(); + contenderConf.setCodec(contenderCodec); + contenderConf.setMergePolicy(mergePolicy); + + try ( + var baselineIw = new RandomIndexWriter(random(), baselineDirectory, baselineConfig); + var contenderIw = new RandomIndexWriter(random(), contenderDirectory, contenderConf) + ) { + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StoredField(STRING_FIELD, randomAlphaOfLength(randomIntBetween(1, 4096)))); + doc.add(new StoredField(BINARY_FIELD, randomByteArrayOfLength(randomIntBetween(1, 4096)))); + doc.add(new StoredField(INT_FIELD, randomInt())); + doc.add(new StoredField(LONG_FIELD, randomLong())); + doc.add(new StoredField(FLOAT_FIELD, randomFloat())); + doc.add(new StoredField(DOUBLE_FIELD, randomDouble())); + baselineIw.addDocument(doc); + contenderIw.addDocument(doc); + } + baselineIw.forceMerge(1); + contenderIw.forceMerge(1); + } + try (var baselineIr = DirectoryReader.open(baselineDirectory); var contenderIr = 
DirectoryReader.open(contenderDirectory)) { + assertEquals(1, baselineIr.leaves().size()); + assertEquals(1, contenderIr.leaves().size()); + + var baseLeafReader = baselineIr.leaves().get(0).reader(); + var contenderLeafReader = contenderIr.leaves().get(0).reader(); + assertEquals(baseLeafReader.maxDoc(), contenderLeafReader.maxDoc()); + + for (int docId = 0; docId < contenderLeafReader.maxDoc(); docId++) { + Document baselineDoc = baseLeafReader.storedFields().document(docId); + Document contenderDoc = contenderLeafReader.storedFields().document(docId); + assertThat(contenderDoc.getFields().size(), equalTo(baselineDoc.getFields().size())); + for (int i = 0; i < baselineDoc.getFields().size(); i++) { + var baselineField = baselineDoc.getFields().get(i); + var contenderField = contenderDoc.getFields().get(i); + assertThat(contenderField.name(), equalTo(baselineField.name())); + switch (baselineField.name()) { + case STRING_FIELD -> assertThat(contenderField.stringValue(), equalTo(baselineField.stringValue())); + case BINARY_FIELD -> assertThat(contenderField.binaryValue(), equalTo(baselineField.binaryValue())); + case INT_FIELD, LONG_FIELD, FLOAT_FIELD, DOUBLE_FIELD -> assertThat( + contenderField.numericValue(), + equalTo(baselineField.numericValue()) + ); + default -> fail("unexpected field [" + baselineField.name() + "]"); + } + } + } + } + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 7fa08acd53882..1a0e2376797b8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -2642,7 +2642,7 @@ same name need to be part of the same mappings (hence the same document). 
If th } public void testDeeplyNestedDocument() throws Exception { - int depth = 10000; + int depth = 20; DocumentMapper docMapper = createMapperService(Settings.builder().put(getIndexSettings()).build(), mapping(b -> {})) .documentMapper(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java index a138f0910e6ec..878bdc91bba06 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -19,6 +18,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Optional; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -69,7 +69,7 @@ public void testCreateDynamicStringFieldAsKeywordForDimension() throws IOExcepti SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON); SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false).setSynthetic().build(); - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( new PassThroughObjectMapper.Builder("labels").setPriority(0).setContainsDimensions().dynamic(ObjectMapper.Dynamic.TRUE) ).build(MapperBuilderContext.root(false, false)); Mapping mapping = new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 61926d72982d8..a5a5d9726f233 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -1132,6 +1132,14 @@ public void testDynamicRuntimeWithDynamicTemplate() throws IOException { } private MapperService createDynamicTemplateNoSubobjects() throws IOException { + return createDynamicTemplateWithSubobjects("false"); + } + + private MapperService createDynamicTemplateAutoSubobjects() throws IOException { + return createDynamicTemplateWithSubobjects("auto"); + } + + private MapperService createDynamicTemplateWithSubobjects(String subobjects) throws IOException { return createMapperService(topMapping(b -> { b.startArray("dynamic_templates"); { @@ -1141,7 +1149,7 @@ private MapperService createDynamicTemplateNoSubobjects() throws IOException { { b.field("match_mapping_type", "object"); b.field("match", "metric"); - b.startObject("mapping").field("type", "object").field("subobjects", false).endObject(); + b.startObject("mapping").field("type", "object").field("subobjects", subobjects).endObject(); } b.endObject(); } @@ -1388,7 +1396,7 @@ public void testDynamicSubobjectsFalseDynamicFalse() throws Exception { assertEquals(ObjectMapper.Dynamic.FALSE, metrics.dynamic()); assertEquals(1, metrics.mappers.size()); ObjectMapper service = (ObjectMapper) metrics.getMapper("service"); - assertFalse(service.subobjects()); + assertEquals(ObjectMapper.Subobjects.DISABLED, service.subobjects()); 
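// Editorial aside, not part of the patch: the DynamicTemplatesTests hunks that follow
// (testSubobjectsAutoFlatPaths / testSubobjectsAutoStructuredPaths) assert that with
// "subobjects": "auto" a document written with flat dotted keys and one written with
// nested objects end up with the same leaf field mappings. The self-contained sketch
// below only illustrates that equivalence by flattening nested maps into dotted leaf
// paths; the class and method names are invented for illustration and are not
// Elasticsearch APIs.
import java.util.LinkedHashMap;
import java.util.Map;

class DottedPathSketch {

    // Recursively flattens nested maps into dotted leaf paths, e.g. {"foo":{"bar":1}} -> {"foo.bar":1}.
    static Map<String, Object> flatten(String prefix, Map<String, Object> source, Map<String, Object> out) {
        for (Map.Entry<String, Object> entry : source.entrySet()) {
            String path = prefix.isEmpty() ? entry.getKey() : prefix + "." + entry.getKey();
            if (entry.getValue() instanceof Map<?, ?> nested) {
                @SuppressWarnings("unchecked")
                Map<String, Object> child = (Map<String, Object>) nested;
                flatten(path, child, out);
            } else {
                out.put(path, entry.getValue());
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // A structured document and its flat, dotted-key equivalent describe the same leaf paths.
        Map<String, Object> structured = Map.of("foo", Map.of("metric", Map.of("count", 10, "count.min", 4)));
        Map<String, Object> flat = Map.of("foo.metric.count", 10, "foo.metric.count.min", 4);
        System.out.println(flatten("", structured, new LinkedHashMap<>())); // same leaf paths as 'flat' (iteration order may vary)
        System.out.println(flat);
    }
}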
assertEquals(1, service.mappers.size()); assertNotNull(service.getMapper("time")); } @@ -1434,6 +1442,255 @@ public void testSubobjectsFalseWithInnerNestedFromDynamicTemplate() { ); } + public void testSubobjectsAutoFlatPaths() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.field("foo.metric.count", 10); + b.field("foo.bar.baz", 10); + b.field("foo.metric.count.min", 4); + b.field("foo.metric.count.max", 15); + })); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectsAutoStructuredPaths() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.startObject("foo"); + { + b.startObject("metric"); + { + b.field("count", 10); + b.field("count.min", 4); + b.field("count.max", 15); + } + b.endObject(); + b.startObject("bar"); + b.field("baz", 10); + b.endObject(); + } + b.endObject(); + })); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectsAutoArrayOfObjects() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.startObject("foo"); + { + b.startArray("metric"); + { + b.startObject(); + { + b.field("count", 10); + b.field("count.min", 4); + b.field("count.max", 15); + } + b.endObject(); + b.startObject(); + { + b.field("count", 5); + b.field("count.min", 3); + b.field("count.max", 50); + } + b.endObject(); + } + b.endArray(); + b.startObject("bar"); + b.field("baz", 10); + b.endObject(); + } + b.endObject(); + })); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectAutoDynamicNested() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("nested"); + { + b.field("match", "object"); + b.startObject("mapping"); + { + b.field("type", "nested"); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + b.startObject("properties"); + b.startObject("metrics").field("type", "object").field("subobjects", "auto").endObject(); + b.endObject(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics.object" : { + "foo" : "bar" + } + } + """)); + + assertNotNull(doc.docs().get(0).get("metrics.object.foo")); + assertThat( + ((ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics")).getMapper("object"), + instanceOf(NestedObjectMapper.class) + ); + } + + public void testRootSubobjectAutoDynamicNested() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("nested"); + { + b.field("match", "object"); + b.startObject("mapping"); + { + b.field("type", "nested"); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + b.field("subobjects", "auto"); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "object" : { + "foo" : "bar" + } + } + """)); + + assertNotNull(doc.docs().get(0).get("object.foo")); + assertThat(doc.dynamicMappingsUpdate().getRoot().getMapper("object"), 
instanceOf(NestedObjectMapper.class)); + } + + public void testDynamicSubobjectsAutoDynamicFalse() throws Exception { + // verify that we read the dynamic value properly from the parent mapper. DocumentParser#dynamicOrDefault splits the field + // name where dots are found, but it does that only for the parent prefix e.g. metrics.service and not for the leaf suffix time.max + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("metrics"); + { + b.field("match", "metrics"); + b.startObject("mapping"); + { + b.field("type", "object"); + b.field("dynamic", "false"); + b.startObject("properties"); + { + b.startObject("service"); + { + b.field("type", "object"); + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "keyword"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics": { + "service": { + "time" : 10, + "time.max" : 500 + } + } + } + """)); + + assertNotNull(doc.rootDoc().getField("metrics.service.time")); + assertNull(doc.rootDoc().getField("metrics.service.time.max")); + assertNotNull(doc.dynamicMappingsUpdate()); + ObjectMapper metrics = (ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics"); + assertEquals(ObjectMapper.Dynamic.FALSE, metrics.dynamic()); + assertEquals(1, metrics.mappers.size()); + ObjectMapper service = (ObjectMapper) metrics.getMapper("service"); + assertEquals(ObjectMapper.Subobjects.AUTO, service.subobjects()); + assertEquals(1, service.mappers.size()); + assertNotNull(service.getMapper("time")); + } + + public void testSubobjectsAutoWithInnerNestedFromDynamicTemplate() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + { + b.startObject("test"); + { + b.field("match", "metrics"); + b.startObject("mapping"); + { + b.field("type", "object").field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "nested"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endArray(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics": { + "time" : { + "foo" : "bar" + }, + "time.max" : 500 + } + } + """)); + + assertNotNull(doc.rootDoc().get("metrics.time.max")); + assertNotNull(doc.docs().get(0).get("metrics.time.foo")); + assertThat( + ((ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics")).getMapper("time"), + instanceOf(NestedObjectMapper.class) + ); + } + public void testDynamicSubobject() throws IOException { MapperService mapperService = createMapperService(topMapping(b -> { b.startArray("dynamic_templates"); @@ -1803,7 +2060,7 @@ public void testSubobjectsFalseDocWithEmptyObject() throws IOException { Mapping mapping = doc.dynamicMappingsUpdate(); ObjectMapper artifacts = (ObjectMapper) mapping.getRoot().getMapper("artifacts"); ObjectMapper leaf = (ObjectMapper) artifacts.getMapper("leaf"); - assertFalse(leaf.subobjects()); + assertEquals(ObjectMapper.Subobjects.DISABLED, leaf.subobjects()); } public void testSubobjectsFalseFlattened() throws IOException { @@ -1853,6 +2110,53 @@ public void testSubobjectsFalseFlattened() throws IOException { assertEquals("flattened", 
fooStructuredMapper.typeName()); } + public void testSubobjectsAutoFlattened() throws IOException { + String mapping = """ + { + "_doc": { + "properties": { + "attributes": { + "type": "object", + "subobjects": "auto" + } + }, + "dynamic_templates": [ + { + "test": { + "path_match": "attributes.resource.*", + "match_mapping_type": "object", + "mapping": { + "type": "flattened" + } + } + } + ] + } + } + """; + String docJson = """ + { + "attributes.resource": { + "complex.attribute": { + "a": "b" + }, + "foo.bar": "baz" + } + } + """; + + MapperService mapperService = createMapperService(mapping); + ParsedDocument parsedDoc = mapperService.documentMapper().parse(source(docJson)); + merge(mapperService, dynamicMapping(parsedDoc.dynamicMappingsUpdate())); + + Mapper fooBarMapper = mapperService.documentMapper().mappers().getMapper("attributes.resource.foo.bar"); + assertNotNull(fooBarMapper); + assertEquals("text", fooBarMapper.typeName()); + Mapper fooStructuredMapper = mapperService.documentMapper().mappers().getMapper("attributes.resource.complex.attribute"); + assertNotNull(fooStructuredMapper); + assertEquals("flattened", fooStructuredMapper.typeName()); + } + public void testMatchWithArrayOfFieldNames() throws IOException { String mapping = """ { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index d913b86aed2d5..a8669a0befd0d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -16,6 +16,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -176,7 +177,7 @@ private static ObjectMapper createObjectMapper(String name) { name, name, Explicit.IMPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, ObjectMapper.Dynamic.FALSE, emptyMap() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index fc30b9b6677f1..dcb5cd1711c8c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -8,8 +8,10 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.xcontent.XContentBuilder; import org.hamcrest.Matchers; @@ -19,6 +21,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; public class IgnoredSourceFieldMapperTests extends MapperServiceTestCase { @@ -633,6 +636,132 @@ public void testArrayWithinArray() throws IOException { {"path":[{"to":[{"name":"A"},{"name":"B"}]},{"to":[{"name":"C"},{"name":"D"}]}]}""", booleanValue), syntheticSource); } + public void testDisabledObjectWithinHigherLevelArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("to").field("type", 
"object").field("enabled", false); + { + b.startObject("properties"); + { + b.startObject("name").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + + boolean booleanValue = randomBoolean(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject(); + { + b.startObject("to").field("name", "A").endObject(); + } + b.endObject(); + b.startObject(); + { + b.startObject("to").field("name", "B").endObject(); + } + b.endObject(); + } + b.endArray(); + }); + assertEquals(String.format(Locale.ROOT, """ + {"path":{"to":[{"name":"A"},{"name":"B"}]}}""", booleanValue), syntheticSource); + } + + public void testStoredArrayWithinHigherLevelArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("to").field("type", "object").field("store_array_source", true); + { + b.startObject("properties"); + { + b.startObject("name").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + + boolean booleanValue = randomBoolean(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject(); + { + b.startArray("to"); + { + b.startObject().field("name", "A").endObject(); + b.startObject().field("name", "B").endObject(); + } + b.endArray(); + } + b.endObject(); + b.startObject(); + { + b.startArray("to"); + { + b.startObject().field("name", "C").endObject(); + b.startObject().field("name", "D").endObject(); + } + b.endArray(); + } + b.endObject(); + } + b.endArray(); + }); + assertEquals(String.format(Locale.ROOT, """ + {"path":{"to":[{"name":"A"},{"name":"B"},{"name":"C"},{"name":"D"}]}}""", booleanValue), syntheticSource); + } + + public void testFallbackFieldWithinHigherLevelArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("name").field("type", "keyword").field("doc_values", false).endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + + boolean booleanValue = randomBoolean(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + + b.startObject().field("name", "A").endObject(); + b.startObject().field("name", "B").endObject(); + b.startObject().field("name", "C").endObject(); + b.startObject().field("name", "D").endObject(); + } + b.endArray(); + }); + assertEquals(String.format(Locale.ROOT, """ + {"path":{"name":["A","B","C","D"]}}""", booleanValue), syntheticSource); + } + public void testFieldOrdering() throws IOException { DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { b.startObject("A").field("type", "integer").endObject(); @@ -1055,4 +1184,58 @@ public void testRuntimeDynamicObjectNestedArray() throws IOException { assertEquals(""" {"path":[{"to":{"foo":"A","bar":"B"}},{"to":{"foo":"C","bar":"D"}}]}""", syntheticSource); } + + public void testDisabledSubObjectWithNameOverlappingParentName() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + b.startObject("properties"); + { + b.startObject("at").field("type", 
"object").field("enabled", "false").endObject(); + } + b.endObject(); + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("at").field("foo", "A").endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"at":{"foo":"A"}}}""", syntheticSource); + } + + public void testStoredNestedSubObjectWithNameOverlappingParentName() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + b.startObject("properties"); + { + b.startObject("at").field("type", "nested").field("store_array_source", "true").endObject(); + } + b.endObject(); + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("at").field("foo", "A").endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"at":{"foo":"A"}}}""", syntheticSource); + } + + protected void validateRoundTripReader(String syntheticSource, DirectoryReader reader, DirectoryReader roundTripReader) + throws IOException { + // We exclude ignored source field since in some cases it contains an exact copy of a part of document source. + // Sometime synthetic source is different in this case (structurally but not logically) + // and since the copy is exact, contents of ignored source are different. + assertReaderEquals( + "round trip " + syntheticSource, + new FieldMaskingReader(Set.of(SourceFieldMapper.RECOVERY_SOURCE_NAME, IgnoredSourceFieldMapper.NAME), reader), + new FieldMaskingReader(Set.of(SourceFieldMapper.RECOVERY_SOURCE_NAME, IgnoredSourceFieldMapper.NAME), roundTripReader) + ); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java index 251b0ae62f3c5..6a790f7e91118 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -81,7 +82,7 @@ public void testSubfieldOverride() { "object", "object", Explicit.EXPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, ObjectMapper.Dynamic.TRUE, Collections.singletonMap("object.subfield", fieldMapper) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 306887099849b..13bd5955d67a5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1575,11 +1575,11 @@ public void testStoreArraySourceinSyntheticSourceMode() throws IOException { assertNotNull(mapper.mapping().getRoot().getMapper("o")); } - public void testStoreArraySourceThrowsInNonSyntheticSourceMode() { - var exception = expectThrows(MapperParsingException.class, () -> createDocumentMapper(mapping(b -> { + public void testStoreArraySourceNoopInNonSyntheticSourceMode() throws IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { b.startObject("o").field("type", "nested").field(ObjectMapper.STORE_ARRAY_SOURCE_PARAM, true).endObject(); - }))); - 
assertEquals("Parameter [store_array_source] can only be set in synthetic source mode.", exception.getMessage()); + })); + assertNotNull(mapper.mapping().getRoot().getMapper("o")); } public void testSyntheticNestedWithObject() throws IOException { @@ -1737,6 +1737,97 @@ public void testSyntheticNestedWithIncludeInRoot() throws IOException { {"path":{"bar":"B","foo":"A"}}""", syntheticSource); } + public void testSyntheticNestedWithEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { b.startObject("path").nullField("foo").endObject(); }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + + public void testSyntheticNestedWithEmptySubObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("to").startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject().endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("to").nullField("foo").endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + + public void testSyntheticNestedWithArrayContainingEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject().field("foo", "A").endObject(); + b.startObject().nullField("foo").endObject(); + } + b.endArray(); + }); + assertEquals(""" + {"path":[{"foo":"A"},{}]}""", syntheticSource); + } + + public void testSyntheticNestedWithArrayContainingOnlyEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject().nullField("foo").endObject(); + } + b.endArray(); + }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + private NestedObjectMapper createNestedObjectMapperWithAllParametersSet(CheckedConsumer propertiesBuilder) throws IOException { DocumentMapper mapper = createDocumentMapper(mapping(b -> { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index b3bb8cbe697a5..ea6ddf0257d6f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -7,11 +7,11 @@ */ package 
org.elasticsearch.index.mapper; -import org.elasticsearch.common.Explicit; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import java.util.Collections; +import java.util.Optional; import static org.elasticsearch.index.mapper.MapperService.MergeReason.INDEX_TEMPLATE; import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; @@ -26,9 +26,9 @@ private RootObjectMapper createMapping( boolean includeBarField, boolean includeBazField ) { - RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("type1", Explicit.IMPLICIT_TRUE); - rootBuilder.add(new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE).enabled(disabledFieldEnabled)); - ObjectMapper.Builder fooBuilder = new ObjectMapper.Builder("foo", Explicit.IMPLICIT_TRUE).enabled(fooFieldEnabled); + RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("type1", Optional.empty()); + rootBuilder.add(new ObjectMapper.Builder("disabled", Optional.empty()).enabled(disabledFieldEnabled)); + ObjectMapper.Builder fooBuilder = new ObjectMapper.Builder("foo", Optional.empty()).enabled(fooFieldEnabled); if (includeBarField) { fooBuilder.add(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers(), false)); } @@ -77,8 +77,8 @@ public void testMergeWhenDisablingField() { public void testMergeDisabledField() { // GIVEN a mapping with "foo" field disabled // the field is disabled, and we are not trying to re-enable it, hence merge should work - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE) + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("disabled", Optional.empty()) ).build(MapperBuilderContext.root(false, false)); RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -100,10 +100,8 @@ public void testMergeEnabled() { public void testMergeEnabledForRootMapper() { String type = MapperService.SINGLE_MAPPING_NAME; - ObjectMapper firstMapper = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).build( - MapperBuilderContext.root(false, false) - ); - ObjectMapper secondMapper = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).enabled(false) + ObjectMapper firstMapper = new RootObjectMapper.Builder("_doc", Optional.empty()).build(MapperBuilderContext.root(false, false)); + ObjectMapper secondMapper = new RootObjectMapper.Builder("_doc", Optional.empty()).enabled(false) .build(MapperBuilderContext.root(false, false)); MapperException e = expectThrows( @@ -144,12 +142,10 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalseAtRoot() { } public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - createObjectSubobjectsFalseLeafWithDots() - ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - createObjectSubobjectsFalseLeafWithDots() - ).build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createObjectSubobjectsFalseLeafWithDots()) + .build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", 
Optional.empty()).add(createObjectSubobjectsFalseLeafWithDots()) + .build(MapperBuilderContext.root(false, false)); final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -161,9 +157,9 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { } public void testMergedFieldNamesMultiFields() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(createTextKeywordMultiField("text")) + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createTextKeywordMultiField("text")) .build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(createTextKeywordMultiField("text")) + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createTextKeywordMultiField("text")) .build(MapperBuilderContext.root(false, false)); final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -177,10 +173,10 @@ public void testMergedFieldNamesMultiFields() { } public void testMergedFieldNamesMultiFieldsWithinSubobjectsFalse() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createObjectSubobjectsFalseLeafWithMultiField() ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createObjectSubobjectsFalseLeafWithMultiField() ).build(MapperBuilderContext.root(false, false)); @@ -212,9 +208,9 @@ public void testMergeWithLimit() { } public void testMergeWithLimitTruncatedObjectField() { - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_FALSE).add( + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)).add( new KeywordFieldMapper.Builder("child1", IndexVersion.current()) ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); @@ -243,11 +239,11 @@ public void testMergeWithLimitTruncatedObjectField() { } public void testMergeSameObjectDifferentFields() { - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add(new KeywordFieldMapper.Builder("child1", IndexVersion.current())) + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add(new KeywordFieldMapper.Builder("child1", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new 
RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add( new KeywordFieldMapper.Builder("child1", IndexVersion.current()).ignoreAbove(42) ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); @@ -270,10 +266,10 @@ public void testMergeSameObjectDifferentFields() { } public void testMergeWithLimitMultiField() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createTextKeywordMultiField("text", "keyword1") ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createTextKeywordMultiField("text", "keyword2") ).build(MapperBuilderContext.root(false, false)); @@ -287,10 +283,10 @@ public void testMergeWithLimitMultiField() { } public void testMergeWithLimitRuntimeField() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).addRuntimeField( + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).addRuntimeField( new TestRuntimeField("existing_runtime_field", "keyword") ).add(createTextKeywordMultiField("text", "keyword1")).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).addRuntimeField( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).addRuntimeField( new TestRuntimeField("existing_runtime_field", "keyword") ).addRuntimeField(new TestRuntimeField("new_runtime_field", "keyword")).build(MapperBuilderContext.root(false, false)); @@ -304,12 +300,12 @@ public void testMergeWithLimitRuntimeField() { } public void testMergeSubobjectsFalseWithObject() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_FALSE) + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add( + new ObjectMapper.Builder("child", Optional.empty()).add( new KeywordFieldMapper.Builder("grandchild", IndexVersion.current()) ) ) @@ -326,7 +322,7 @@ private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { FieldMapper fieldMapper = fieldBuilder.build(MapperBuilderContext.root(false, false)); assertEquals("host.name", fieldMapper.leafName()); assertEquals("host.name", fieldMapper.fullPath()); - return new RootObjectMapper.Builder("_doc", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + return new RootObjectMapper.Builder("_doc", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) .build(MapperBuilderContext.root(false, false)); } @@ -346,7 +342,7 @@ private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { 
assertEquals("host.name", fieldMapper.leafName()); assertEquals("foo.metrics.host.name", fieldMapper.fullPath()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( - new ObjectMapper.Builder("metrics", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + new ObjectMapper.Builder("metrics", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) ); } @@ -369,7 +365,7 @@ private ObjectMapper.Builder createObjectSubobjectsFalseLeafWithMultiField() { assertEquals("keyword", fieldMapper.leafName()); assertEquals("foo.metrics.host.name.keyword", fieldMapper.fullPath()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( - new ObjectMapper.Builder("metrics", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + new ObjectMapper.Builder("metrics", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 6687a28883716..49d8ba9c2ca29 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -21,9 +20,11 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.core.IsInstanceOf; import java.io.IOException; import java.util.List; +import java.util.Optional; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -164,7 +165,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { ObjectMapper objectMapper = mapper.mappers().objectMappers().get("object"); assertNotNull(objectMapper); assertFalse(objectMapper.isEnabled()); - assertTrue(objectMapper.subobjects()); + assertEquals(ObjectMapper.Subobjects.ENABLED, objectMapper.subobjects()); assertFalse(objectMapper.storeArraySource()); // Setting 'enabled' to true is allowed, and updates the mapping. 
@@ -175,7 +176,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { .startObject("object") .field("type", "object") .field("enabled", true) - .field("subobjects", false) + .field("subobjects", "auto") .field(ObjectMapper.STORE_ARRAY_SOURCE_PARAM, true) .endObject() .endObject() @@ -186,7 +187,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { objectMapper = mapper.mappers().objectMappers().get("object"); assertNotNull(objectMapper); assertTrue(objectMapper.isEnabled()); - assertFalse(objectMapper.subobjects()); + assertEquals(ObjectMapper.Subobjects.AUTO, objectMapper.subobjects()); assertTrue(objectMapper.storeArraySource()); } @@ -500,6 +501,141 @@ public void testSubobjectsCannotBeUpdatedOnRoot() throws IOException { assertEquals("the [subobjects] parameter can't be updated for the object mapping [_doc]", exception.getMessage()); } + public void testSubobjectsAuto() throws Exception { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "long"); + b.endObject(); + b.startObject("time.max"); + b.field("type", "long"); + b.endObject(); + b.startObject("attributes"); + { + b.field("type", "object"); + b.field("enabled", "false"); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertNotNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.attributes")); + } + + public void testSubobjectsAutoWithInnerObject() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + { + b.startObject("properties"); + { + b.startObject("max"); + b.field("type", "long"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + b.startObject("foo"); + b.field("type", "keyword"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.fieldType("metrics.service.foo")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time")); + assertNotNull(mapperService.documentMapper().mappers().getMapper("metrics.service.foo")); + } + + public void testSubobjectsAutoWithInnerNested() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "nested"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertThat( + mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time"), + IsInstanceOf.instanceOf(NestedObjectMapper.class) + ); + } + + public void testSubobjectsAutoRoot() throws Exception { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service.time"); + b.field("type", "long"); + b.endObject(); + b.startObject("metrics.service.time.max"); + b.field("type", "long"); + b.endObject(); + b.startObject("metrics.attributes"); + { + b.field("type", "object"); + b.field("enabled", 
"false"); + } + b.endObject(); + }, "auto")); + assertNotNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.attributes")); + } + + public void testSubobjectsAutoRootWithInnerObject() throws IOException { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service.time"); + { + b.startObject("properties"); + { + b.startObject("max"); + b.field("type", "long"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + }, "auto")); + assertNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time")); + assertNotNull(mapperService.documentMapper().mappers().getMapper("metrics.service.time.max")); + } + + public void testSubobjectsAutoRootWithInnerNested() throws IOException { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service"); + b.field("type", "nested"); + b.endObject(); + }, "auto")); + assertThat( + mapperService.documentMapper().mappers().objectMappers().get("metrics.service"), + IsInstanceOf.instanceOf(NestedObjectMapper.class) + ); + } + /** * Makes sure that an empty object mapper returns {@code null} from * {@link SourceLoader.SyntheticFieldLoader#docValuesLoader}. This @@ -546,16 +682,16 @@ public void testStoreArraySourceinSyntheticSourceMode() throws IOException { assertNotNull(mapper.mapping().getRoot().getMapper("o")); } - public void testStoreArraySourceThrowsInNonSyntheticSourceMode() { - var exception = expectThrows(MapperParsingException.class, () -> createDocumentMapper(mapping(b -> { + public void testStoreArraySourceNoopInNonSyntheticSourceMode() throws IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { b.startObject("o").field("type", "object").field(ObjectMapper.STORE_ARRAY_SOURCE_PARAM, true).endObject(); - }))); - assertEquals("Parameter [store_array_source] can only be set in synthetic source mode.", exception.getMessage()); + })); + assertNotNull(mapper.mapping().getRoot().getMapper("o")); } public void testNestedObjectWithMultiFieldsgetTotalFieldsCount() { - ObjectMapper.Builder mapperBuilder = new ObjectMapper.Builder("parent_size_1", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child_size_2", Explicit.IMPLICIT_TRUE).add( + ObjectMapper.Builder mapperBuilder = new ObjectMapper.Builder("parent_size_1", Optional.empty()).add( + new ObjectMapper.Builder("child_size_2", Optional.empty()).add( new TextFieldMapper.Builder("grand_child_size_3", createDefaultIndexAnalyzers(), false).addMultiField( new KeywordFieldMapper.Builder("multi_field_size_4", IndexVersion.current()) ) @@ -602,10 +738,26 @@ private ObjectMapper createObjectMapperWithAllParametersSet(CheckedConsumer fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); + assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); + } + + public void testFlattenSubobjectsAuto() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.AUTO)).add( + new ObjectMapper.Builder("child", Optional.empty()).add(new KeywordFieldMapper.Builder("keyword2", 
IndexVersion.current())) + ).add(new KeywordFieldMapper.Builder("keyword1", IndexVersion.current())).build(rootContext); + List fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); + assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); + } + + public void testFlattenSubobjectsFalse() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)).add( + new ObjectMapper.Builder("child", Optional.empty()).add(new KeywordFieldMapper.Builder("keyword2", IndexVersion.current())) ).add(new KeywordFieldMapper.Builder("keyword1", IndexVersion.current())).build(rootContext); List fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); @@ -613,8 +765,8 @@ public void testFlatten() { public void testFlattenDynamicIncompatible() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child", Explicit.IMPLICIT_TRUE).dynamic(Dynamic.FALSE) + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.empty()).add( + new ObjectMapper.Builder("child", Optional.empty()).dynamic(Dynamic.FALSE) ).build(rootContext); IllegalArgumentException exception = expectThrows( @@ -631,7 +783,7 @@ public void testFlattenDynamicIncompatible() { public void testFlattenEnabledFalse() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).enabled(false).build(rootContext); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.empty()).enabled(false).build(rootContext); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, @@ -646,7 +798,7 @@ public void testFlattenEnabledFalse() { public void testFlattenExplicitSubobjectsTrue() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.EXPLICIT_TRUE).build(rootContext); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.ENABLED)).build(rootContext); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/XContentDataHelperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/XContentDataHelperTests.java index cd5b43d0af771..a4532bca67778 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/XContentDataHelperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/XContentDataHelperTests.java @@ -8,9 +8,11 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -21,8 +23,12 @@ import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; +import 
java.util.Arrays; import java.util.Base64; +import java.util.Collection; import java.util.List; +import java.util.Map; +import java.util.stream.Stream; import static org.hamcrest.Matchers.equalTo; @@ -168,4 +174,94 @@ public void testCloneSubContextWithParser() throws IOException { assertEquals(data, dataInParser(tuple.v2())); assertTrue(tuple.v1().getClonedSource()); } + + public void testWriteMergedWithSingleValue() throws IOException { + testWriteMergedWithSingleValue(randomLong()); + testWriteMergedWithSingleValue(randomDouble()); + testWriteMergedWithSingleValue(randomBoolean()); + testWriteMergedWithSingleValue(randomAlphaOfLength(5)); + testWriteMergedWithSingleValue(null); + testWriteMergedWithSingleValue(Map.of("object_field", randomAlphaOfLength(5))); + testWriteMergedWithSingleValue(Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5)))); + } + + private void testWriteMergedWithSingleValue(Object value) throws IOException { + var map = executeWriteMergeOnRepeated(value); + assertEquals(Arrays.asList(value, value), map.get("foo")); + } + + public void testWriteMergedWithMultipleValues() throws IOException { + testWriteMergedWithMultipleValues(List.of(randomLong(), randomLong())); + testWriteMergedWithMultipleValues(List.of(randomDouble(), randomDouble())); + testWriteMergedWithMultipleValues(List.of(randomBoolean(), randomBoolean())); + testWriteMergedWithMultipleValues(List.of(randomAlphaOfLength(5), randomAlphaOfLength(5))); + testWriteMergedWithMultipleValues(Arrays.asList(null, null)); + testWriteMergedWithMultipleValues( + List.of(Map.of("object_field", randomAlphaOfLength(5)), Map.of("object_field", randomAlphaOfLength(5))) + ); + testWriteMergedWithMultipleValues( + List.of( + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))), + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))) + ) + ); + } + + private void testWriteMergedWithMultipleValues(List value) throws IOException { + var map = executeWriteMergeOnRepeated(value); + var expected = Stream.of(value, value).flatMap(Collection::stream).toList(); + assertEquals(expected, map.get("foo")); + } + + public void testWriteMergedWithMixedValues() throws IOException { + testWriteMergedWithMixedValues(randomLong(), List.of(randomLong(), randomLong())); + testWriteMergedWithMixedValues(randomDouble(), List.of(randomDouble(), randomDouble())); + testWriteMergedWithMixedValues(randomBoolean(), List.of(randomBoolean(), randomBoolean())); + testWriteMergedWithMixedValues(randomAlphaOfLength(5), List.of(randomAlphaOfLength(5), randomAlphaOfLength(5))); + testWriteMergedWithMixedValues(null, Arrays.asList(null, null)); + testWriteMergedWithMixedValues( + Map.of("object_field", randomAlphaOfLength(5)), + List.of(Map.of("object_field", randomAlphaOfLength(5)), Map.of("object_field", randomAlphaOfLength(5))) + ); + testWriteMergedWithMixedValues( + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))), + List.of( + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))), + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))) + ) + ); + } + + private void testWriteMergedWithMixedValues(Object value, List multipleValues) throws IOException { + var map = executeWriteMergeOnTwoEncodedValues(value, multipleValues); + var expected = Stream.concat(Stream.of(value), multipleValues.stream()).toList(); + assertEquals(expected, map.get("foo")); + } + + private Map executeWriteMergeOnRepeated(Object 
value) throws IOException { + return executeWriteMergeOnTwoEncodedValues(value, value); + } + + private Map executeWriteMergeOnTwoEncodedValues(Object first, Object second) throws IOException { + var xContentType = randomFrom(XContentType.values()); + + var firstEncoded = encodeSingleValue(first, xContentType); + var secondEncoded = encodeSingleValue(second, xContentType); + + var destination = XContentFactory.contentBuilder(xContentType); + destination.startObject(); + XContentDataHelper.writeMerged(destination, "foo", List.of(firstEncoded, secondEncoded)); + destination.endObject(); + + return XContentHelper.convertToMap(BytesReference.bytes(destination), false, xContentType).v2(); + } + + private BytesRef encodeSingleValue(Object value, XContentType xContentType) throws IOException { + var builder = XContentFactory.contentBuilder(xContentType); + builder.value(value); + + XContentParser parser = createParser(builder); + parser.nextToken(); + return XContentDataHelper.encodeToken(parser); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 9cd1df700a618..ffca4352f0ae6 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -90,6 +89,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.BiFunction; import java.util.function.Function; @@ -384,7 +384,7 @@ public void testSearchRequestRuntimeFieldsAndMultifieldDetection() { public void testSyntheticSourceSearchLookup() throws IOException { // Build a mapping using synthetic source SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false).setSynthetic().build(); - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( new KeywordFieldMapper.Builder("cat", IndexVersion.current()).ignoreAbove(100) ).build(MapperBuilderContext.root(true, false)); Mapping mapping = new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of()); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index bc81614c9e237..5c07c2344cf13 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -88,9 +89,9 @@ import 
java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntConsumer; import java.util.function.LongSupplier; -import java.util.function.Predicate; import java.util.stream.Collectors; import static org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils.executeAndAssertSuccessful; @@ -152,7 +153,8 @@ public void testIngestPlugin() { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); Map factories = ingestService.getProcessorFactories(); assertTrue(factories.containsKey("foo")); @@ -172,7 +174,8 @@ public void testIngestPluginDuplicate() { List.of(DUMMY_PLUGIN, DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ) ); assertTrue(e.getMessage(), e.getMessage().contains("already registered")); @@ -189,7 +192,8 @@ public void testExecuteIndexPipelineDoesNotExist() { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); final IndexRequest indexRequest = new IndexRequest("_index").id("_id") .source(Map.of()) @@ -1665,7 +1669,7 @@ public void testExecuteFailureRedirection() throws Exception { .setFinalPipeline("_id2"); doThrow(new RuntimeException()).when(processor) .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any()); - final Predicate redirectCheck = (idx) -> indexRequest.index().equals(idx); + final Function redirectCheck = (idx) -> indexRequest.index().equals(idx); @SuppressWarnings("unchecked") final TriConsumer redirectHandler = mock(TriConsumer.class); @SuppressWarnings("unchecked") @@ -1722,7 +1726,7 @@ public void testExecuteFailureRedirectionWithNestedOnFailure() throws Exception .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any()); doThrow(new RuntimeException()).when(processor) .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any()); - final Predicate redirectPredicate = (idx) -> indexRequest.index().equals(idx); + final Function redirectCheck = (idx) -> indexRequest.index().equals(idx); @SuppressWarnings("unchecked") final TriConsumer redirectHandler = mock(TriConsumer.class); @SuppressWarnings("unchecked") @@ -1733,7 +1737,7 @@ public void testExecuteFailureRedirectionWithNestedOnFailure() throws Exception 1, List.of(indexRequest), indexReq -> {}, - redirectPredicate, + redirectCheck, redirectHandler, failureHandler, completionHandler, @@ -1826,9 +1830,9 @@ public void testBulkRequestExecution() throws Exception { for (int i = 0; i < numRequest; i++) { IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none"); indexRequest.source(xContentType, "field1", "value1"); - boolean shouldListExecutedPipelines = randomBoolean(); - executedPipelinesExpected.add(shouldListExecutedPipelines); - indexRequest.setListExecutedPipelines(shouldListExecutedPipelines); + boolean shouldListExecutedPiplines = randomBoolean(); + executedPipelinesExpected.add(shouldListExecutedPiplines); + indexRequest.setListExecutedPipelines(shouldListExecutedPiplines); bulkRequest.add(indexRequest); } @@ -2320,7 +2324,8 @@ public Map getProcessors(Processor.Parameters paramet List.of(testPlugin), client, null, - 
DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); ingestService.addIngestClusterStateListener(ingestClusterStateListener); @@ -2675,7 +2680,8 @@ private void testUpdatingPipeline(String pipelineString) throws Exception { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, clusterState)); @@ -2974,7 +2980,8 @@ public Map getProcessors(final Processor.Parameters p }), client, null, - documentParsingProvider + documentParsingProvider, + FailureStoreMetrics.NOOP ); if (randomBoolean()) { /* diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java index 30145ab37c322..18f66676cfd1f 100644 --- a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.SimulateBulkRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; @@ -115,11 +116,23 @@ private static IngestService createWithProcessors(Map ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); - return new IngestService(mock(ClusterService.class), threadPool, null, null, null, List.of(new IngestPlugin() { + var ingestPlugin = new IngestPlugin() { @Override public Map getProcessors(final Processor.Parameters parameters) { return processors; } - }), client, null, DocumentParsingProvider.EMPTY_INSTANCE); + }; + return new IngestService( + mock(ClusterService.class), + threadPool, + null, + null, + null, + List.of(ingestPlugin), + client, + null, + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP + ); } } diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 83cb189415f7e..59e0b955d1cff 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.test.ClusterServiceUtils; @@ -454,9 +453,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException()); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException()); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java 
b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index b54a786e05c9d..c6086a8259fbb 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.index.IndexRequest; @@ -2395,14 +2396,16 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { Collections.emptyList(), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ), mockFeatureService, client, actionFilters, indexNameExpressionResolver, new IndexingPressure(settings), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + FailureStoreMetrics.NOOP ) ); final TransportShardBulkAction transportShardBulkAction = new TransportShardBulkAction( diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java index 7f363fe0b87c3..447e225005b58 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java @@ -61,6 +61,7 @@ public class S3HttpHandler implements HttpHandler { private final String bucket; private final String path; + private final String basePrefix; private final ConcurrentMap blobs = new ConcurrentHashMap<>(); private final ConcurrentMap uploads = new ConcurrentHashMap<>(); @@ -71,6 +72,7 @@ public S3HttpHandler(final String bucket) { public S3HttpHandler(final String bucket, @Nullable final String basePath) { this.bucket = Objects.requireNonNull(bucket); + this.basePrefix = Objects.requireNonNullElse(basePath, ""); this.path = bucket + (basePath != null && basePath.isEmpty() == false ? 
"/" + basePath : ""); } @@ -96,7 +98,9 @@ public void handle(final HttpExchange exchange) throws IOException { } else { exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); } - } else if (Regex.simpleMatch("GET /" + bucket + "/?uploads&prefix=*", request)) { + } else if (isListMultipartUploadsRequest(request)) { + assert request.contains("prefix=" + basePrefix) : basePrefix + " vs " + request; + final Map params = new HashMap<>(); RestUtils.decodeQueryString(request, request.indexOf('?') + 1, params); final var prefix = params.get("prefix"); @@ -329,6 +333,11 @@ public void handle(final HttpExchange exchange) throws IOException { } } + private boolean isListMultipartUploadsRequest(String request) { + return Regex.simpleMatch("GET /" + bucket + "/?uploads&prefix=*", request) + || Regex.simpleMatch("GET /" + bucket + "/?uploads&max-uploads=*&prefix=*", request); + } + public Map blobs() { return blobs; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index c5aa03d5548f6..7c11e7446e5c5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -426,8 +426,13 @@ protected static XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { + return mappingWithSubobjects(buildFields, "false"); + } + + protected static XContentBuilder mappingWithSubobjects(CheckedConsumer buildFields, String subobjects) + throws IOException { return topMapping(xContentBuilder -> { - xContentBuilder.field("subobjects", false); + xContentBuilder.field("subobjects", subobjects); xContentBuilder.startObject("properties"); buildFields.accept(xContentBuilder); xContentBuilder.endObject(); @@ -783,7 +788,8 @@ protected TriFunction, MappedFieldType.F } protected RandomIndexWriter indexWriterForSyntheticSource(Directory directory) throws IOException { - return new RandomIndexWriter(random(), directory); + // MockAnalyzer (rarely) produces random payloads that lead to failures during assertReaderEquals. 
+ return new RandomIndexWriter(random(), directory, new StandardAnalyzer()); } protected final String syntheticSource(DocumentMapper mapper, CheckedConsumer build) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index 26e887338158d..92ce7e083df3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -27,7 +27,6 @@ import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.repositories.SnapshotShardContext; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -110,9 +109,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException()); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException()); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java index aeb34ad2e7049..9eea4e6ae932f 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java @@ -31,10 +31,15 @@ private Supplier> keywordMapping() { return () -> Map.of("store", ESTestCase.randomBoolean(), "index", ESTestCase.randomBoolean()); } - // TODO enable doc_values: false - // It is disabled because it hits a bug in synthetic source. private Supplier> numberMapping() { - return () -> Map.of("store", ESTestCase.randomBoolean(), "index", ESTestCase.randomBoolean()); + return () -> Map.of( + "store", + ESTestCase.randomBoolean(), + "index", + ESTestCase.randomBoolean(), + "doc_values", + ESTestCase.randomBoolean() + ); } private Supplier> unsignedLongMapping() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ByteSizeEqualsMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/ByteSizeEqualsMatcher.java new file mode 100644 index 0000000000000..172d5f2076a0f --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/ByteSizeEqualsMatcher.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; + +/** + * Equality matcher for {@link ByteSizeValue} that has a nice description of failures. 
+ */ +public class ByteSizeEqualsMatcher extends TypeSafeMatcher { + public static ByteSizeEqualsMatcher byteSizeEquals(ByteSizeValue expected) { + return new ByteSizeEqualsMatcher(expected); + } + + private final ByteSizeValue expected; + + private ByteSizeEqualsMatcher(ByteSizeValue expected) { + this.expected = expected; + } + + @Override + protected boolean matchesSafely(ByteSizeValue byteSizeValue) { + return expected.equals(byteSizeValue); + } + + @Override + public void describeTo(Description description) { + description.appendValue(expected.toString()).appendText(" (").appendValue(expected.getBytes()).appendText(" bytes)"); + } + + @Override + protected void describeMismatchSafely(ByteSizeValue item, Description mismatchDescription) { + mismatchDescription.appendValue(item.toString()).appendText(" (").appendValue(item.getBytes()).appendText(" bytes)"); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index fa686a0bc753a..cf469546b6f63 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1192,23 +1192,19 @@ public static List> findTasks(Cl @Nullable public static DiscoveryNode waitAndGetHealthNode(InternalTestCluster internalCluster) { DiscoveryNode[] healthNode = new DiscoveryNode[1]; - try { - waitUntil(() -> { - ClusterState state = internalCluster.client() - .admin() - .cluster() - .prepareState() - .clear() - .setMetadata(true) - .setNodes(true) - .get() - .getState(); - healthNode[0] = HealthNode.findHealthNode(state); - return healthNode[0] != null; - }, 15, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + ClusterState state = internalCluster.client() + .admin() + .cluster() + .prepareState() + .clear() + .setMetadata(true) + .setNodes(true) + .get() + .getState(); + healthNode[0] = HealthNode.findHealthNode(state); + return healthNode[0] != null; + }, 15, TimeUnit.SECONDS); return healthNode[0]; } @@ -1640,7 +1636,7 @@ protected static IndicesAdminClient indicesAdmin() { return admin().indices(); } - public void indexRandom(boolean forceRefresh, String index, int numDocs) throws InterruptedException { + public void indexRandom(boolean forceRefresh, String index, int numDocs) { IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { builders[i] = prepareIndex(index).setSource("field", "value"); @@ -1651,11 +1647,11 @@ public void indexRandom(boolean forceRefresh, String index, int numDocs) throws /** * Convenience method that forwards to {@link #indexRandom(boolean, List)}. */ - public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) { indexRandom(forceRefresh, Arrays.asList(builders)); } - public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) { indexRandom(forceRefresh, dummyDocuments, Arrays.asList(builders)); } @@ -1674,7 +1670,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexReque * @param builders the documents to index. 
* @see #indexRandom(boolean, boolean, java.util.List) */ - public void indexRandom(boolean forceRefresh, List builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, List builders) { indexRandom(forceRefresh, forceRefresh, builders); } @@ -1690,7 +1686,7 @@ public void indexRandom(boolean forceRefresh, List builders * all documents are indexed. This is useful to produce deleted documents on the server side. * @param builders the documents to index. */ - public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) { indexRandom(forceRefresh, dummyDocuments, true, builders); } @@ -1707,8 +1703,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) - throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List builders) { Random random = random(); Set indices = new HashSet<>(); builders = new ArrayList<>(builders); @@ -1822,8 +1817,7 @@ private static CountDownLatch newLatch(List latches) { /** * Maybe refresh, force merge, or flush then always make sure there aren't too many in flight async operations. */ - private void postIndexAsyncActions(String[] indices, List inFlightAsyncOperations, boolean maybeFlush) - throws InterruptedException { + private void postIndexAsyncActions(String[] indices, List inFlightAsyncOperations, boolean maybeFlush) { if (rarely()) { if (rarely()) { indicesAdmin().prepareRefresh(indices) @@ -1843,7 +1837,7 @@ private void postIndexAsyncActions(String[] indices, List inFlig } while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) { int waitFor = between(0, inFlightAsyncOperations.size() - 1); - inFlightAsyncOperations.remove(waitFor).await(); + safeAwait(inFlightAsyncOperations.remove(waitFor)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 08709ff6459ce..58487d6552bcd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -213,6 +213,7 @@ import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.startsWith; /** @@ -1420,9 +1421,8 @@ public static void assertBusy(CheckedRunnable codeBlock, long maxWait * * @param breakSupplier determines whether to return immediately or continue waiting. * @return the last value returned by breakSupplier - * @throws InterruptedException if any sleep calls were interrupted. */ - public static boolean waitUntil(BooleanSupplier breakSupplier) throws InterruptedException { + public static boolean waitUntil(BooleanSupplier breakSupplier) { return waitUntil(breakSupplier, 10, TimeUnit.SECONDS); } @@ -1438,9 +1438,8 @@ public static boolean waitUntil(BooleanSupplier breakSupplier) throws Interrupte * @param maxWaitTime the maximum amount of time to wait * @param unit the unit of tie for maxWaitTime * @return the last value returned by breakSupplier - * @throws InterruptedException if any sleep calls were interrupted. 
*/ - public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, TimeUnit unit) throws InterruptedException { + public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, TimeUnit unit) { long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit); long timeInMillis = 1; long sum = 0; @@ -1448,12 +1447,12 @@ public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, if (breakSupplier.getAsBoolean()) { return true; } - Thread.sleep(timeInMillis); + safeSleep(timeInMillis); sum += timeInMillis; timeInMillis = Math.min(AWAIT_BUSY_THRESHOLD, timeInMillis * 2); } timeInMillis = maxTimeInMillis - sum; - Thread.sleep(Math.max(timeInMillis, 0)); + safeSleep(Math.max(timeInMillis, 0)); return breakSupplier.getAsBoolean(); } @@ -2505,7 +2504,7 @@ public static T expectThrows(Class expectedType, Reques * Same as {@link #runInParallel(int, IntConsumer)} but also attempts to start all tasks at the same time by blocking execution on a * barrier until all threads are started and ready to execute their task. */ - public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) throws InterruptedException { + public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) { final CyclicBarrier barrier = new CyclicBarrier(numberOfTasks); runInParallel(numberOfTasks, i -> { safeAwait(barrier); @@ -2519,7 +2518,7 @@ public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) t * @param numberOfTasks number of tasks to run in parallel * @param taskFactory task factory */ - public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) throws InterruptedException { + public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) { final ArrayList> futures = new ArrayList<>(numberOfTasks); final Thread[] threads = new Thread[numberOfTasks - 1]; for (int i = 0; i < numberOfTasks; i++) { @@ -2534,16 +2533,26 @@ public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) thr threads[i].start(); } } - for (Thread thread : threads) { - thread.join(); - } Exception e = null; - for (Future future : futures) { - try { - future.get(); - } catch (Exception ex) { - e = ExceptionsHelper.useOrSuppress(e, ex); + try { + for (Thread thread : threads) { + // no sense in waiting for the rest of the threads, nor any futures, if interrupted, just bail out and fail + thread.join(); + } + for (Future future : futures) { + try { + future.get(); + } catch (InterruptedException interruptedException) { + // no sense in waiting for the rest of the futures if interrupted, just bail out and fail + Thread.currentThread().interrupt(); + throw interruptedException; + } catch (Exception executionException) { + e = ExceptionsHelper.useOrSuppress(e, executionException); + } } + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); + e = ExceptionsHelper.useOrSuppress(e, interruptedException); } if (e != null) { throw new AssertionError(e); diff --git a/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java b/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java index 0db85e4e67711..e0c2456db144e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java +++ b/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java @@ -14,16 +14,20 @@ import org.apache.lucene.tests.index.FieldFilterLeafReader; import java.io.IOException; -import java.util.Collections; 
+import java.util.Set; public class FieldMaskingReader extends FilterDirectoryReader { - private final String field; + private final Set fields; public FieldMaskingReader(String field, DirectoryReader in) throws IOException { + this(Set.of(field), in); + } + + public FieldMaskingReader(Set fields, DirectoryReader in) throws IOException { super(in, new FilterDirectoryReader.SubReaderWrapper() { @Override public LeafReader wrap(LeafReader reader) { - return new FilterLeafReader(new FieldFilterLeafReader(reader, Collections.singleton(field), true)) { + return new FilterLeafReader(new FieldFilterLeafReader(reader, fields, true)) { // FieldFilterLeafReader does not forward cache helpers // since it considers it is illegal because of the fact @@ -43,13 +47,13 @@ public CacheHelper getCoreCacheHelper() { }; } }); - this.field = field; + this.fields = fields; } @Override protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { - return new FieldMaskingReader(field, in); + return new FieldMaskingReader(fields, in); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 0b69245177c7a..332df7123fd1b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1744,11 +1744,7 @@ private synchronized void startAndPublishNodesAndClients(List nod .filter(nac -> nodes.containsKey(nac.name) == false) // filter out old masters .count(); rebuildUnicastHostFiles(nodeAndClients); // ensure that new nodes can find the existing nodes when they start - try { - runInParallel(nodeAndClients.size(), i -> nodeAndClients.get(i).startNode()); - } catch (InterruptedException e) { - throw new AssertionError("interrupted while starting nodes", e); - } + runInParallel(nodeAndClients.size(), i -> nodeAndClients.get(i).startNode()); nodeAndClients.forEach(this::publishNode); if (autoManageMasterNodes && newMasters > 0) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java index 7a788eaacc6d4..b702809de5bed 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java @@ -14,6 +14,8 @@ import org.hamcrest.StringDescription; import org.hamcrest.TypeSafeMatcher; +import java.io.PrintWriter; +import java.io.StringWriter; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; @@ -292,17 +294,24 @@ static void describeEntryUnexepectedButOk(Object value, Description description) } static void describeEntryValue(int keyWidth, Matcher matcher, Object v, Description description) { - if (v instanceof Map && matcher instanceof MapMatcher) { - ((MapMatcher) matcher).describePotentialMismatch(keyWidth + INDENT, (Map) v, description); + if (v instanceof Map && matcher instanceof MapMatcher mm) { + mm.describePotentialMismatch(keyWidth + INDENT, (Map) v, description); return; } - if (v instanceof List && matcher instanceof ListMatcher) { - ((ListMatcher) matcher).describePotentialMismatch(keyWidth + INDENT, (List) v, description); + if (v instanceof List && matcher instanceof ListMatcher lm) { + lm.describePotentialMismatch(keyWidth + INDENT, (List) v, description); return; } if (false == matcher.matches(v)) { - description.appendText("expected 
").appendDescriptionOf(matcher).appendText(" but "); - matcher.describeMismatch(v, description); + try { + description.appendText("expected ").appendDescriptionOf(matcher).appendText(" but "); + matcher.describeMismatch(v, description); + } catch (Exception e) { + description.appendText("error describing "); + StringWriter trace = new StringWriter(); + e.printStackTrace(new PrintWriter(trace)); + description.appendValue(trace); + } return; } description.appendValue(v); diff --git a/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java b/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java index 48c9fcab3898a..3822c0d93d28d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java @@ -11,8 +11,10 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; +import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.StringDescription; +import org.hamcrest.TypeSafeMatcher; import java.io.IOException; import java.io.InputStream; @@ -24,7 +26,9 @@ import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -395,6 +399,45 @@ public void testSubMapDescribeTo() { baz: <0>""")); } + public void testSubMatcherDescribeFails() { + assertMismatch(Map.of("foo", 2.0, "bar", 2), matchesMap().entry("foo", new TypeSafeMatcher() { + @Override + public void describeTo(Description description) { + throw new IllegalStateException("intentional failure"); + } + + @Override + protected boolean matchesSafely(Object o) { + return false; + } + }).entry("bar", 2), both(containsString(""" + a map containing + foo: expected error describing """))); + } + + public void testSubMatcherMismatchFails() { + assertMismatch(Map.of("foo", 2.0, "bar", 2), matchesMap().entry("foo", new TypeSafeMatcher() { + @Override + protected void describeMismatchSafely(Object item, Description mismatchDescription) { + throw new IllegalStateException("intentional failure"); + } + + @Override + public void describeTo(Description description) { + description.appendValue("foo"); + } + + @Override + protected boolean matchesSafely(Object o) { + return false; + } + }).entry("bar", 2), both(containsString(""" + a map containing + foo: expected "foo" but error describing """))); + } + static void assertMismatch(T v, Matcher matcher, Matcher mismatchDescriptionMatcher) { assertMap(v, not(matcher)); StringDescription description = new StringDescription(); diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml index 1e2a6a679dc30..c1d004b4e7bf4 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml @@ -28,9 +28,14 @@ template: return; } def exception = params['_source'].error?.exception; - def exceptionMessage = exception != null && exception.length > 0 ? 
exception[0]?.message : null; - if (exceptionMessage != null && exceptionMessage != "") { - emit(exception[0].message); + if (exception != null && exception.isEmpty() == false) { + def exceptionMessage = exception instanceof Map ? exception?.message : exception[0]?.message; + if (exceptionMessage instanceof List) { + exceptionMessage = exceptionMessage[0] + } + if (exceptionMessage != null && exceptionMessage != "") { + emit(exceptionMessage); + } } # http.* diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index fa38fda679e49..3e66769d939ad 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 6 +version: 8 component-templates: # Data lifecycle. diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml index f7cd386227fe8..37a1651da562b 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml @@ -39,6 +39,10 @@ setup: - create: {} - '{"@timestamp": "2017-06-22", "error": {"log": {"message": ""}, "exception": [{"message": "exception_used"}]}}' + # Non-empty error.exception.message used from array + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"log": {"message": ""}, "exception": [{"message": "first_exception_used"}, {"message": "2_ignored"}]}}' + - is_false: errors - do: @@ -46,7 +50,7 @@ setup: index: logs-apm.error-testing body: fields: ["error.grouping_name"] - - length: { hits.hits: 7 } + - length: { hits.hits: 8 } - match: { hits.hits.0.fields: null } - match: { hits.hits.1.fields: null } - match: { hits.hits.2.fields: null } @@ -54,3 +58,4 @@ setup: - match: { hits.hits.4.fields: null } - match: { hits.hits.5.fields: {"error.grouping_name": ["log_used"]} } - match: { hits.hits.6.fields: {"error.grouping_name": ["exception_used"]} } + - match: { hits.hits.7.fields: {"error.grouping_name": ["first_exception_used"]} } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 3242a02dff525..584e551f1cf6b 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; -import org.elasticsearch.action.support.UnsafePlainActionFuture; import org.elasticsearch.blobcache.BlobCacheMetrics; import org.elasticsearch.blobcache.BlobCacheUtils; import org.elasticsearch.blobcache.common.ByteRange; @@ -39,7 +38,6 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.monitor.fs.FsProbe; import org.elasticsearch.node.NodeRoleSettings; -import 
org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -650,13 +648,14 @@ private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, int wri // no need to allocate a new capturing lambda if the offset isn't adjusted return writer; } - return (channel, channelPos, streamFactory, relativePos, len, progressUpdater) -> writer.fillCacheRange( + return (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> writer.fillCacheRange( channel, channelPos, streamFactory, relativePos - writeOffset, len, - progressUpdater + progressUpdater, + completionListener ); } @@ -991,16 +990,17 @@ void populateAndRead( executor.execute(fillGapRunnable(gap, writer, null, refs.acquireListener())); } } else { - final List gapFillingTasks = gaps.stream() - .map(gap -> fillGapRunnable(gap, writer, streamFactory, refs.acquireListener())) - .toList(); - executor.execute(() -> { - try (streamFactory) { + var gapFillingListener = refs.acquireListener(); + try (var gfRefs = new RefCountingRunnable(ActionRunnable.run(gapFillingListener, streamFactory::close))) { + final List gapFillingTasks = gaps.stream() + .map(gap -> fillGapRunnable(gap, writer, streamFactory, gfRefs.acquireListener())) + .toList(); + executor.execute(() -> { // Fill the gaps in order. If a gap fails to fill for whatever reason, the task for filling the next // gap will still be executed. gapFillingTasks.forEach(Runnable::run); - } - }); + }); + } } } } @@ -1009,13 +1009,13 @@ void populateAndRead( } } - private AbstractRunnable fillGapRunnable( + private Runnable fillGapRunnable( SparseFileTracker.Gap gap, RangeMissingHandler writer, @Nullable SourceInputStreamFactory streamFactory, ActionListener listener ) { - return ActionRunnable.run(listener.delegateResponse((l, e) -> failGapAndListener(gap, l, e)), () -> { + return () -> ActionListener.run(listener, l -> { var ioRef = io; assert regionOwners.get(ioRef) == CacheFileRegion.this; assert CacheFileRegion.this.hasReferences() : CacheFileRegion.this; @@ -1026,10 +1026,15 @@ private AbstractRunnable fillGapRunnable( streamFactory, start, Math.toIntExact(gap.end() - start), - progress -> gap.onProgress(start + progress) + progress -> gap.onProgress(start + progress), + l.map(unused -> { + assert regionOwners.get(ioRef) == CacheFileRegion.this; + assert CacheFileRegion.this.hasReferences() : CacheFileRegion.this; + writeCount.increment(); + gap.onCompletion(); + return null; + }).delegateResponse((delegate, e) -> failGapAndListener(gap, delegate, e)) ); - writeCount.increment(); - gap.onCompletion(); }); } @@ -1117,12 +1122,23 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - writer.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); - var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); - SharedBlobCacheService.this.blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); - SharedBlobCacheService.this.blobCacheMetrics.getCacheMissCounter().increment(); + writer.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos, + length, + progressUpdater, + completionListener.map(unused -> { + var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); + 
blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); + blobCacheMetrics.getCacheMissCounter().increment(); + return null; + }) + ); } }; if (rangeToRead.isEmpty()) { @@ -1144,9 +1160,7 @@ private int readSingleRegion( RangeMissingHandler writer, int region ) throws InterruptedException, ExecutionException { - final PlainActionFuture readFuture = new UnsafePlainActionFuture<>( - BlobStoreRepository.STATELESS_SHARD_PREWARMING_THREAD_NAME - ); + final PlainActionFuture readFuture = new PlainActionFuture<>(); final CacheFileRegion fileRegion = get(cacheKey, length, region); final long regionStart = getRegionStart(region); fileRegion.populateAndRead( @@ -1168,9 +1182,7 @@ private int readMultiRegions( int startRegion, int endRegion ) throws InterruptedException, ExecutionException { - final PlainActionFuture readsComplete = new UnsafePlainActionFuture<>( - BlobStoreRepository.STATELESS_SHARD_PREWARMING_THREAD_NAME - ); + final PlainActionFuture readsComplete = new PlainActionFuture<>(); final AtomicInteger bytesRead = new AtomicInteger(); try (var listeners = new RefCountingListener(1, readsComplete)) { for (int region = startRegion; region <= endRegion; region++) { @@ -1215,9 +1227,18 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int len, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos - writeOffset, len, progressUpdater); + delegate.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos - writeOffset, + len, + progressUpdater, + completionListener + ); } }; } @@ -1230,14 +1251,25 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int len, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { assert assertValidRegionAndLength(fileRegion, channelPos, len); - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, len, progressUpdater); - assert regionOwners.get(fileRegion.io) == fileRegion - : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; + delegate.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos, + len, + progressUpdater, + Assertions.ENABLED ? ActionListener.runBefore(completionListener, () -> { + assert regionOwners.get(fileRegion.io) == fileRegion + : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; + }) : completionListener + ); } }; + } return adjustedWriter; } @@ -1324,6 +1356,7 @@ default SourceInputStreamFactory sharedInputStreamFactory(List completionListener ) throws IOException; } @@ -1343,9 +1377,9 @@ public interface SourceInputStreamFactory extends Releasable { /** * Create the input stream at the specified position. * @param relativePos the relative position in the remote storage to read from. - * @return the input stream ready to be read from. + * @param listener listener for the input stream ready to be read from. 
*/ - InputStream create(int relativePos) throws IOException; + void create(int relativePos, ActionListener listener) throws IOException; } private abstract static class DelegatingRangeMissingHandler implements RangeMissingHandler { @@ -1367,9 +1401,10 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); + delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener); } } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index e477673c90d6d..6c49b50c06e82 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.StoppableExecutorServiceWrapper; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.TestEnvironment; @@ -72,6 +73,13 @@ private static long size(long numPages) { return numPages * SharedBytes.PAGE_SIZE; } + private static void completeWith(ActionListener listener, CheckedRunnable runnable) { + ActionListener.completeWith(listener, () -> { + runnable.run(); + return null; + }); + } + public void testBasicEviction() throws IOException { Settings settings = Settings.builder() .put(NODE_NAME_SETTING.getKey(), "node") @@ -115,7 +123,10 @@ public void testBasicEviction() throws IOException { ByteRange.of(0L, 1L), ByteRange.of(0L, 1L), (channel, channelPos, relativePos, length) -> 1, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), taskQueue.getThreadPool().generic(), bytesReadFuture ); @@ -552,11 +563,14 @@ public void execute(Runnable command) { cacheService.maybeFetchFullEntry( cacheKey, size, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(-length); - progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(-length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -570,9 +584,15 @@ public void execute(Runnable command) { // a download that would use up all regions should not run final var cacheKey = generateCacheKey(); assertEquals(2, cacheService.freeRegionCount()); - var configured = cacheService.maybeFetchFullEntry(cacheKey, size(500), (ch, chPos, streamFactory, relPos, len, update) -> { - throw new AssertionError("Should never reach here"); - }, bulkExecutor, 
ActionListener.noop()); + var configured = cacheService.maybeFetchFullEntry( + cacheKey, + size(500), + (ch, chPos, streamFactory, relPos, len, update, completionListener) -> completeWith(completionListener, () -> { + throw new AssertionError("Should never reach here"); + }), + bulkExecutor, + ActionListener.noop() + ); assertFalse(configured); assertEquals(2, cacheService.freeRegionCount()); } @@ -613,9 +633,14 @@ public void testFetchFullCacheEntryConcurrently() throws Exception { (ActionListener listener) -> cacheService.maybeFetchFullEntry( cacheKey, size, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept( - length - ), + ( + channel, + channelPos, + streamFactory, + relativePos, + length, + progressUpdater, + completionListener) -> completeWith(completionListener, () -> progressUpdater.accept(length)), bulkExecutor, listener ) @@ -859,7 +884,10 @@ public void testMaybeEvictLeastUsed() throws Exception { var entry = cacheService.get(cacheKey, regionSize, 0); entry.populate( ByteRange.of(0L, regionSize), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), taskQueue.getThreadPool().generic(), ActionListener.noop() ); @@ -954,11 +982,14 @@ public void execute(Runnable command) { cacheKey, 0, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -985,11 +1016,14 @@ public void execute(Runnable command) { cacheKey, region, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, listener ); @@ -1010,9 +1044,12 @@ public void execute(Runnable command) { cacheKey, randomIntBetween(0, 10), randomLongBetween(1L, regionSize), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - throw new AssertionError("should not be executed"); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + throw new AssertionError("should not be executed"); + } + ), bulkExecutor, future ); @@ -1032,11 +1069,14 @@ public void execute(Runnable command) { cacheKey, 0, blobLength, - (channel, channelPos, ignore, relativePos, length, progressUpdater) -> { - assert ignore == null : ignore; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - }, + (channel, channelPos, ignore, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert ignore == null : ignore; + 
bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -1110,12 +1150,15 @@ public void execute(Runnable command) { region, range, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start())); - assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start()))); - assertThat(length, equalTo(Math.toIntExact(regionRange.length()))); - bytesCopied.addAndGet(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start())); + assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start()))); + assertThat(length, equalTo(Math.toIntExact(regionRange.length()))); + bytesCopied.addAndGet(length); + } + ), bulkExecutor, future ); @@ -1150,7 +1193,10 @@ public void execute(Runnable command) { region, ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> bytesCopied.addAndGet(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> bytesCopied.addAndGet(length) + ), bulkExecutor, listener ); @@ -1173,9 +1219,12 @@ public void execute(Runnable command) { randomIntBetween(0, 10), ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - throw new AssertionError("should not be executed"); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + throw new AssertionError("should not be executed"); + } + ), bulkExecutor, future ); @@ -1196,7 +1245,10 @@ public void execute(Runnable command) { 0, ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> bytesCopied.addAndGet(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> bytesCopied.addAndGet(length) + ), bulkExecutor, future ); @@ -1237,10 +1289,18 @@ public void testPopulate() throws Exception { var entry = cacheService.get(cacheKey, blobLength, 0); AtomicLong bytesWritten = new AtomicLong(0L); final PlainActionFuture future1 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, taskQueue.getThreadPool().generic(), future1); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future1 + ); assertThat(future1.isDone(), is(false)); assertThat(taskQueue.hasRunnableTasks(), is(true)); @@ -1248,18 +1308,34 @@ public void testPopulate() throws Exception { // start populating the second region entry = cacheService.get(cacheKey, blobLength, 1); final PlainActionFuture future2 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 
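The factory side follows the same pattern: create no longer returns an InputStream, it hands one to a listener, so a factory may produce its stream asynchronously. A hypothetical factory under that contract (the inline ByteArrayInputStream stands in for a real, possibly asynchronous source, and the Releasable-style close() is assumed from the dummy factory later in this test file):

    SourceInputStreamFactory factory = new SourceInputStreamFactory() {
        @Override
        public void create(int relativePos, ActionListener<InputStream> listener) {
            // the listener may be completed from another thread; completing it inline
            // with an empty in-memory stream here only demonstrates the contract
            listener.onResponse(new ByteArrayInputStream(new byte[0]));
        }

        @Override
        public void close() {
            // release any resource shared by the streams handed out above
        }
    };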
1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, taskQueue.getThreadPool().generic(), future2); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future2 + ); // start populating again the first region, listener should be called immediately entry = cacheService.get(cacheKey, blobLength, 0); final PlainActionFuture future3 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, taskQueue.getThreadPool().generic(), future3); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future3 + ); assertThat(future3.isDone(), is(true)); var written = future3.get(10L, TimeUnit.SECONDS); @@ -1377,7 +1453,10 @@ public void testSharedSourceInputStreamFactory() throws Exception { range, range, (channel, channelPos, relativePos, length) -> length, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), EsExecutors.DIRECT_EXECUTOR_SERVICE, future ); @@ -1394,8 +1473,8 @@ public void testSharedSourceInputStreamFactory() throws Exception { final var factoryClosed = new AtomicBoolean(false); final var dummyStreamFactory = new SourceInputStreamFactory() { @Override - public InputStream create(int relativePos) { - return null; + public void create(int relativePos, ActionListener listener) { + listener.onResponse(null); } @Override @@ -1420,17 +1499,20 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completion ) throws IOException { - if (invocationCounter.incrementAndGet() == 1) { - final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread()); - assertThat(witness, nullValue()); - } else { - assertThat(invocationThread.get(), sameInstance(Thread.currentThread())); - } - assertThat(streamFactory, sameInstance(dummyStreamFactory)); - assertThat(position.getAndSet(relativePos), lessThan(relativePos)); - progressUpdater.accept(length); + completeWith(completion, () -> { + if (invocationCounter.incrementAndGet() == 1) { + final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread()); + assertThat(witness, nullValue()); + } else { + assertThat(invocationThread.get(), sameInstance(Thread.currentThread())); + } + assertThat(streamFactory, sameInstance(dummyStreamFactory)); + assertThat(position.getAndSet(relativePos), lessThan(relativePos)); + progressUpdater.accept(length); + }); } }; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index d5a6e3c7e65c8..97e3a409d590d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -82,7 +82,6 @@ import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.blobstore.FileRestoreContext; import org.elasticsearch.snapshots.Snapshot; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -371,9 +370,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException("Unsupported for repository of type: " + TYPE)); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException("Unsupported for repository of type: " + TYPE)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java index b352a9abce886..f5123b9352fe3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java @@ -289,11 +289,11 @@ public void triggered(SchedulerEngine.Event event) { final LicensesMetadata licensesMetadata = getLicensesMetadata(); if (licensesMetadata != null) { final License license = licensesMetadata.getLicense(); - if (event.getJobName().equals(LICENSE_JOB)) { + if (event.jobName().equals(LICENSE_JOB)) { updateXPackLicenseState(license); - } else if (event.getJobName().startsWith(ExpirationCallback.EXPIRATION_JOB_PREFIX)) { + } else if (event.jobName().startsWith(ExpirationCallback.EXPIRATION_JOB_PREFIX)) { expirationCallbacks.stream() - .filter(expirationCallback -> expirationCallback.getId().equals(event.getJobName())) + .filter(expirationCallback -> expirationCallback.getId().equals(event.jobName())) .forEach(expirationCallback -> expirationCallback.on(license)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncExecutionId.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncExecutionId.java index b4cc8bc0d3f30..8316b4cfa605a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncExecutionId.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncExecutionId.java @@ -21,6 +21,9 @@ * A class that contains all information related to a submitted async execution. 
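The deleteSnapshots change above replaces SnapshotDeleteListener with two plainer callbacks: a listener resolved once the updated repository data has been written, and a Runnable invoked when the whole deletion, including blob cleanup, has finished. A sketch of the two-stage contract (the RepositoryData payload is an assumption based on the removed listener's onRepositoryDataWritten step, not code from this diff):

    static void notifyDeletionStages(
        RepositoryData updatedRepositoryData,
        ActionListener<RepositoryData> repositoryDataUpdateListener,
        Runnable onCompletion
    ) {
        // stage 1: the new repository metadata is durable, waiting callers may proceed
        repositoryDataUpdateListener.onResponse(updatedRepositoryData);
        // stage 2: stale blobs are gone, the delete operation is fully complete
        onCompletion.run();
    }

CcrRepository only needs the first callback, failing it immediately, since snapshot deletion is unsupported for that repository type.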
*/ public final class AsyncExecutionId { + public static final String ASYNC_EXECUTION_ID_HEADER = "X-Elasticsearch-Async-Id"; + public static final String ASYNC_EXECUTION_IS_RUNNING_HEADER = "X-Elasticsearch-Async-Is-Running"; + private final String docId; private final TaskId taskId; private final String encoded; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java index 53e404b48dc2e..7ecb5aef4ce8d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java @@ -14,8 +14,11 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceServiceResults; @@ -24,8 +27,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.inference.results.LegacyTextEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; @@ -34,6 +36,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.EnumSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -318,7 +321,7 @@ public String toString() { } } - public static class Response extends ActionResponse implements ToXContentObject { + public static class Response extends ActionResponse implements ChunkedToXContentObject { private final InferenceServiceResults results; @@ -398,11 +401,12 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - results.toXContent(builder, params); - builder.endObject(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat( + ChunkedToXContentHelper.startObject(), + results.toXContentChunked(params), + ChunkedToXContentHelper.endObject() + ); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java index bbd4d026f0d55..902c69cef558e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java @@ -10,12 +10,15 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
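The Response change above, and the inference result classes converted in the hunks that follow, move from building one monolithic XContentBuilder to emitting an Iterator of small ToXContent chunks, so large embedding lists can be streamed out instead of materialised at once. A standalone sketch of the pattern (the record and the "items" field name are illustrative, not part of this change):

    record ExampleResults(List<? extends ToXContentObject> items) implements ChunkedToXContentObject {
        @Override
        public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
            // one chunk for the opening brace, one per element, one for the closing brace
            return Iterators.concat(
                ChunkedToXContentHelper.startObject(),
                ChunkedToXContentHelper.array("items", items.iterator()),
                ChunkedToXContentHelper.endObject()
            );
        }
    }

ChunkedToXContentHelper.array is what the individual result classes below reduce to, one line each, in place of their old startArray/endArray loops.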
org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; @@ -46,13 +49,8 @@ public ChatCompletionResults(StreamInput in) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(COMPLETION); - for (Result result : results) { - result.toXContent(builder, params); - } - builder.endArray(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return ChunkedToXContentHelper.array(COMPLETION, results.iterator()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java index 376b8763a5eb9..18f88a8ff022a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java @@ -11,10 +11,11 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContent; -import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Iterator; @@ -89,9 +90,8 @@ public String toString() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(NAME, exception.getMessage()); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return ChunkedToXContentHelper.field(NAME, exception.getMessage()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedSparseEmbeddingResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedSparseEmbeddingResults.java index f1265873ad6dd..187b186fcd91d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedSparseEmbeddingResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedSparseEmbeddingResults.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.xcontent.ToXContent; @@ -77,13 +78,8 @@ public List getChunkedResults() { } @Override - public XContentBuilder toXContent(XContentBuilder 
builder, Params params) throws IOException { - builder.startArray(FIELD_NAME); - for (MlChunkedTextExpansionResults.ChunkedResult chunk : chunkedResults) { - chunk.toXContent(builder, params); - } - builder.endArray(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return ChunkedToXContentHelper.array(FIELD_NAME, chunkedResults.iterator()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingByteResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingByteResults.java index b78bce8c5c2cd..cc245c40c51e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingByteResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingByteResults.java @@ -12,8 +12,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -61,14 +63,8 @@ public InferenceChunkedTextEmbeddingByteResults(StreamInput in) throws IOExcepti } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - // TODO add isTruncated flag - builder.startArray(FIELD_NAME); - for (var embedding : chunks) { - embedding.toXContent(builder, params); - } - builder.endArray(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return ChunkedToXContentHelper.array(FIELD_NAME, chunks.iterator()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingFloatResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingFloatResults.java index 9fead334dcbc0..4b4d77cd3f043 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingFloatResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingFloatResults.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.xcontent.ToXContent; @@ -74,14 +75,9 @@ public static InferenceChunkedTextEmbeddingFloatResults ofMlResults(MlChunkedTex } @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + public Iterator toXContentChunked(ToXContent.Params params) { // TODO add isTruncated flag - builder.startArray(FIELD_NAME); - for (var embedding : chunks) { - embedding.toXContent(builder, params); - } - builder.endArray(); - return builder; + return 
ChunkedToXContentHelper.array(FIELD_NAME, chunks.iterator()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingByteResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingByteResults.java index 8d94083bf3241..16dca7b04d526 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingByteResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingByteResults.java @@ -13,8 +13,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.inference.results.MlTextEmbeddingResults; @@ -22,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -58,13 +61,8 @@ public int getFirstEmbeddingSize() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(TEXT_EMBEDDING_BYTES); - for (InferenceByteEmbedding embedding : embeddings) { - embedding.toXContent(builder, params); - } - builder.endArray(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return ChunkedToXContentHelper.array(TEXT_EMBEDDING_BYTES, embeddings.iterator()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingFloatResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingFloatResults.java index 1822e3af28c2d..9f9bdfec7cfae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingFloatResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingFloatResults.java @@ -14,10 +14,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.inference.results.MlTextEmbeddingResults; @@ -25,6 +27,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -99,13 +102,8 @@ public int getFirstEmbeddingSize() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - 
builder.startArray(TEXT_EMBEDDING); - for (InferenceFloatEmbedding embedding : embeddings) { - embedding.toXContent(builder, params); - } - builder.endArray(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return ChunkedToXContentHelper.array(TEXT_EMBEDDING, embeddings.iterator()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java index 9196a57c868ba..6ebf15bf34937 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java @@ -11,17 +11,20 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.TaskType; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -172,13 +175,8 @@ public List getRankedDocs() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(RERANK); - for (RankedDoc rankedDoc : rankedDocs) { - rankedDoc.toXContent(builder, params); - } - builder.endArray(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return ChunkedToXContentHelper.array(RERANK, rankedDocs.iterator()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/SparseEmbeddingResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/SparseEmbeddingResults.java index 1db6dcc802d00..dd8229c604ecb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/SparseEmbeddingResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/SparseEmbeddingResults.java @@ -12,10 +12,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; @@ -23,6 +25,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -68,15 +71,8 @@ public static 
SparseEmbeddingResults of(List results } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(SPARSE_EMBEDDING); - - for (Embedding embedding : embeddings) { - embedding.toXContent(builder, params); - } - - builder.endArray(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return ChunkedToXContentHelper.array(SPARSE_EMBEDDING, embeddings.iterator()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java index 33e510fcb227c..9929e59a9c803 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -40,7 +41,7 @@ public class LearningToRankConfig extends RegressionConfig implements Rewriteable { public static final ParseField NAME = new ParseField("learning_to_rank"); - static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersion.current(); + static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersions.LTR_SERVERLESS_RELEASE; public static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values"); public static final ParseField FEATURE_EXTRACTORS = new ParseField("feature_extractors"); public static final ParseField DEFAULT_PARAMS = new ParseField("default_params"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index fb892a318f07c..23bf21004040a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.slm; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.cluster.SimpleDiffable; @@ -15,6 +14,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.scheduler.SchedulerEngine; +import org.elasticsearch.common.scheduler.TimeValueSchedule; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.snapshots.SnapshotsService; @@ -24,9 +25,11 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.scheduler.Cron; +import org.elasticsearch.xpack.core.scheduler.CronSchedule; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.time.Clock; import java.util.HashMap; import 
java.util.Map; import java.util.Objects; @@ -48,6 +51,7 @@ public class SnapshotLifecyclePolicy implements SimpleDiffable configuration; private final SnapshotRetentionConfiguration retentionPolicy; + private final boolean isCronSchedule; private static final ParseField NAME = new ParseField("name"); private static final ParseField SCHEDULE = new ParseField("schedule"); @@ -92,6 +96,7 @@ public SnapshotLifecyclePolicy( this.repository = Objects.requireNonNull(repository, "policy snapshot repository is required"); this.configuration = configuration; this.retentionPolicy = retentionPolicy; + this.isCronSchedule = isCronSchedule(schedule); } public SnapshotLifecyclePolicy(StreamInput in) throws IOException { @@ -101,6 +106,7 @@ public SnapshotLifecyclePolicy(StreamInput in) throws IOException { this.repository = in.readString(); this.configuration = in.readGenericMap(); this.retentionPolicy = in.readOptionalWriteable(SnapshotRetentionConfiguration::new); + this.isCronSchedule = isCronSchedule(schedule); } public String getId() { @@ -129,9 +135,43 @@ public SnapshotRetentionConfiguration getRetentionPolicy() { return this.retentionPolicy; } - public long calculateNextExecution() { - final Cron scheduleEvaluator = new Cron(this.schedule); - return scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); + boolean isCronSchedule() { + return this.isCronSchedule; + } + + /** + * @return whether `schedule` is a cron expression + */ + static boolean isCronSchedule(String schedule) { + try { + new Cron(schedule); + return true; + } catch (IllegalArgumentException e) { + return false; + } + } + + /** + * @return whether `schedule` is an interval time unit expression + */ + public static boolean isIntervalSchedule(String schedule) { + try { + TimeValue.parseTimeValue(schedule, "schedule"); + return true; + } catch (IllegalArgumentException e) { + return false; + } + } + + public long calculateNextExecution(long modifiedDate, Clock clock) { + if (isCronSchedule()) { + final Cron scheduleEvaluator = new Cron(this.schedule); + return scheduleEvaluator.getNextValidTimeAfter(clock.millis()); + } else { + final TimeValue interval = TimeValue.parseTimeValue(this.schedule, SCHEDULE.getPreferredName()); + final TimeValueSchedule timeValueSchedule = new TimeValueSchedule(interval); + return timeValueSchedule.nextScheduledTimeAfter(modifiedDate, clock.millis()); + } } /** @@ -139,13 +179,17 @@ public long calculateNextExecution() { *

* In ordinary cases, this can be treated as the interval between executions of the schedule (for schedules like 'twice an hour' or * 'every five minutes'). - * + * @param clock a clock to provide current time * @return a {@link TimeValue} representing the difference between the next two valid times after now, or {@link TimeValue#MINUS_ONE} * if either of the next two times after now is unsupported according to @{@link Cron#getNextValidTimeAfter(long)} */ - public TimeValue calculateNextInterval() { + public TimeValue calculateNextInterval(Clock clock) { + if (isCronSchedule() == false) { + return TimeValue.parseTimeValue(schedule, SCHEDULE.getPreferredName()); + } + final Cron scheduleEvaluator = new Cron(this.schedule); - long next1 = scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); + long next1 = scheduleEvaluator.getNextValidTimeAfter(clock.millis()); long next2 = scheduleEvaluator.getNextValidTimeAfter(next1); if (next1 > 0 && next2 > 0) { return TimeValue.timeValueMillis(next2 - next1); @@ -154,6 +198,15 @@ public TimeValue calculateNextInterval() { } } + public SchedulerEngine.Job buildSchedulerJob(String jobId, long modifiedDate) { + if (isCronSchedule()) { + return new SchedulerEngine.Job(jobId, new CronSchedule(schedule)); + } else { + TimeValue timeValue = TimeValue.parseTimeValue(schedule, "schedule"); + return new SchedulerEngine.Job(jobId, new TimeValueSchedule(timeValue), modifiedDate); + } + } + public ActionRequestValidationException validate() { ActionRequestValidationException err = new ActionRequestValidationException(); @@ -182,13 +235,19 @@ public ActionRequestValidationException validate() { } // Schedule validation + // n.b. there's more validation beyond this in SnapshotLifecycleService#validateMinimumInterval if (Strings.hasText(schedule) == false) { err.addValidationError("invalid schedule [" + schedule + "]: must not be empty"); } else { try { - new Cron(schedule); - } catch (IllegalArgumentException e) { - err.addValidationError("invalid schedule: " + ExceptionsHelper.unwrapCause(e).getMessage()); + var intervalTimeValue = TimeValue.parseTimeValue(schedule, SCHEDULE.getPreferredName()); + if (intervalTimeValue.millis() == 0) { + err.addValidationError("invalid schedule [" + schedule + "]: time unit must be at least 1 millisecond"); + } + } catch (IllegalArgumentException e1) { + if (isCronSchedule(schedule) == false) { + err.addValidationError("invalid schedule [" + schedule + "]: must be a valid cron expression or time unit"); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java index 6a352461c2e1e..c3c70e595eb75 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.time.Clock; import java.util.Objects; /** @@ -171,7 +172,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.timeField( SnapshotLifecyclePolicyMetadata.NEXT_EXECUTION_MILLIS.getPreferredName(), SnapshotLifecyclePolicyMetadata.NEXT_EXECUTION.getPreferredName(), - policy.calculateNextExecution() + policy.calculateNextExecution(modifiedDate, Clock.systemUTC()) ); if (snapshotInProgress != null) { 
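calculateNextExecution now branches on the schedule kind: cron schedules keep asking the Cron evaluator for the next valid time after now, while interval schedules are anchored on the policy's modification time and advance in fixed steps from it. A small usage sketch (schedules and values are illustrative):

    static void describeSchedule(SnapshotLifecyclePolicy policy, long modifiedDate) {
        Clock clock = Clock.systemUTC();
        // cron, e.g. "0 30 1 * * ?": next valid cron time after now, modifiedDate is ignored
        // interval, e.g. "30m": modifiedDate + k * 30 minutes, for the smallest k landing after now
        long nextRunMillis = policy.calculateNextExecution(modifiedDate, clock);
        // for an interval schedule this is simply the parsed TimeValue; for cron it is the
        // gap between the next two valid times
        TimeValue betweenRuns = policy.calculateNextInterval(clock);
        System.out.println("next run at " + nextRunMillis + ", roughly every " + betweenRuns);
    }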
builder.field(SNAPSHOT_IN_PROGRESS.getPreferredName(), snapshotInProgress); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java index 0a97810fadacf..672578787762e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.scheduler.SchedulerEngine; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -181,6 +182,10 @@ public long getInvocationsSinceLastSuccess() { return invocationsSinceLastSuccess; } + public SchedulerEngine.Job buildSchedulerJob(String jobId) { + return policy.buildSchedulerJob(jobId, modifiedDate); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java index be43705984435..e2218dfab1f1c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.license; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; @@ -121,13 +120,12 @@ public void testLicenseTombstoneFromXContext() throws Exception { assertThat(metadataFromXContent.getLicense(), equalTo(LicensesMetadata.LICENSE_TOMBSTONE)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103093") public void testLicenseTombstoneWithUsedTrialFromXContext() throws Exception { final XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); builder.startObject("licenses"); builder.nullField("license"); - builder.field("trial_license", Version.CURRENT.toString()); + builder.field("trial_license", TrialLicenseVersion.CURRENT); builder.endObject(); builder.endObject(); LicensesMetadata metadataFromXContent = getLicensesMetadataFromXContent(createParser(builder)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java index 603531f0aedf9..b84aaa2bcfc1b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; +import 
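The metadata-level buildSchedulerJob added above is what hands these policies to the scheduler: a cron policy is registered with a CronSchedule, an interval policy with a TimeValueSchedule whose start time is the policy's modification date, so its first run lands one interval after the modification. Sketch of the registration step (the job id scheme is illustrative):

    static void register(SchedulerEngine schedulerEngine, SnapshotLifecyclePolicyMetadata policyMetadata) {
        // cron policy     -> Job(jobId, new CronSchedule(schedule))
        // interval policy -> Job(jobId, new TimeValueSchedule(interval), modifiedDate)
        SchedulerEngine.Job job = policyMetadata.buildSchedulerJob("slm-" + policyMetadata.getPolicy().getId());
        schedulerEngine.add(job);
    }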
org.elasticsearch.xpack.core.ml.AbstractChunkedBWCSerializationTestCase; import java.io.IOException; import java.util.ArrayList; @@ -18,7 +18,7 @@ import static org.elasticsearch.TransportVersions.ML_RERANK_DOC_OPTIONAL; -public class RankedDocsResultsTests extends AbstractBWCSerializationTestCase { +public class RankedDocsResultsTests extends AbstractChunkedBWCSerializationTestCase { @Override protected Writeable.Reader instanceReader() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractChunkedBWCSerializationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractChunkedBWCSerializationTestCase.java new file mode 100644 index 0000000000000..a23ce2c107fe3 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractChunkedBWCSerializationTestCase.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.ml; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.test.AbstractChunkedSerializingTestCase; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase.DEFAULT_BWC_VERSIONS; + +public abstract class AbstractChunkedBWCSerializationTestCase extends + AbstractChunkedSerializingTestCase { + + /** + * Returns the expected instance if serialized from the given version. + */ + protected abstract T mutateInstanceForVersion(T instance, TransportVersion version); + + /** + * The bwc versions to test serialization against + */ + protected List bwcVersions() { + return DEFAULT_BWC_VERSIONS; + } + + /** + * Test serialization and deserialization of the test instance across versions + */ + public final void testBwcSerialization() throws IOException { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + T testInstance = createTestInstance(); + for (TransportVersion bwcVersion : bwcVersions()) { + assertBwcSerialization(testInstance, bwcVersion); + } + } + } + + /** + * Assert that instances copied at a particular version are equal. The version is useful + * for sanity checking the backwards compatibility of the wire. It isn't a substitute for + * real backwards compatibility tests but it is *so* much faster. 
+ */ + protected final void assertBwcSerialization(T testInstance, TransportVersion version) throws IOException { + T deserializedInstance = copyWriteable(testInstance, getNamedWriteableRegistry(), instanceReader(), version); + assertOnBWCObject(deserializedInstance, mutateInstanceForVersion(testInstance, version), version); + } + + /** + * @param bwcSerializedObject The object deserialized from the previous version + * @param testInstance The original test instance + * @param version The version which serialized + */ + protected void assertOnBWCObject(T bwcSerializedObject, T testInstance, TransportVersion version) { + assertNotSame(version.toString(), bwcSerializedObject, testInstance); + assertEquals(version.toString(), bwcSerializedObject, testInstance); + assertEquals(version.toString(), bwcSerializedObject.hashCode(), testInstance.hashCode()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java index 3eeaa18f0a81e..2dd1d8d4ec13a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java @@ -67,7 +67,7 @@ protected SnapshotLifecyclePolicyItem mutateInstance(SnapshotLifecyclePolicyItem return new SnapshotLifecyclePolicyItem( instance.getPolicy(), instance.getVersion(), - randomValueOtherThan(instance.getModifiedDate(), ESTestCase::randomNonNegativeLong), + randomValueOtherThan(instance.getModifiedDate(), SnapshotLifecyclePolicyMetadataTests::randomModifiedTime), instance.getLastSuccess(), instance.getLastFailure(), instance.getSnapshotInProgress(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java index 090b4fe78253d..66e25c3b91db2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java @@ -13,6 +13,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.time.Clock; +import java.time.Duration; import java.util.HashMap; import java.util.Map; @@ -79,7 +81,7 @@ public static SnapshotLifecyclePolicyMetadata createRandomPolicyMetadata(String SnapshotLifecyclePolicyMetadata.Builder builder = SnapshotLifecyclePolicyMetadata.builder() .setPolicy(randomSnapshotLifecyclePolicy(policyId)) .setVersion(randomNonNegativeLong()) - .setModifiedDate(randomNonNegativeLong()); + .setModifiedDate(randomModifiedTime()); if (randomBoolean()) { builder.setHeaders(randomHeaders()); } @@ -102,6 +104,7 @@ public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String polic for (int i = 0; i < randomIntBetween(2, 5); i++) { config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); } + return new SnapshotLifecyclePolicy( policyId, randomAlphaOfLength(4), @@ -122,7 +125,41 @@ public static SnapshotRetentionConfiguration randomRetention() { ); } - public static String randomSchedule() { + public static String randomCronSchedule() { return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; } + + public static String 
randomTimeValueString() { + // restrict to intervals greater than slm.minimum_interval value of 15 minutes + Duration minInterval = Duration.ofMinutes(15); + Map unitMinVal = Map.of( + "nanos", + minInterval.toNanos(), + "micros", + minInterval.toNanos() * 1000, + "ms", + minInterval.toMillis(), + "s", + minInterval.toSeconds(), + "m", + minInterval.toMinutes(), + "h", + minInterval.toHours(), + "d", + minInterval.toDays() + ); + var unit = randomFrom(unitMinVal.keySet()); + long minVal = Math.max(1, unitMinVal.get(unit)); + long value = randomLongBetween(minVal, 1000 * minVal); + return value + unit; + } + + public static String randomSchedule() { + return randomBoolean() ? randomCronSchedule() : randomTimeValueString(); + } + + public static long randomModifiedTime() { + // if modified time is after the current time, validation will fail + return randomLongBetween(0, Clock.systemUTC().millis()); + } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json index 9b90f97682306..8f50ebd334f16 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json @@ -77,6 +77,9 @@ }, "service.name": { "type": "keyword" + }, + "container.id": { + "type": "keyword" } } } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java index 35c2071188864..0130bd5537a11 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -29,6 +30,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.LongSupplier; +import java.util.function.ToLongBiFunction; /** * A simple cache for enrich that uses {@link Cache}. 
There is one instance of this cache and @@ -61,12 +63,29 @@ public final class EnrichCache { this(maxSize, System::nanoTime); } + EnrichCache(ByteSizeValue maxByteSize) { + this(maxByteSize, System::nanoTime); + } + // non-private for unit testing only EnrichCache(long maxSize, LongSupplier relativeNanoTimeProvider) { this.relativeNanoTimeProvider = relativeNanoTimeProvider; - this.cache = CacheBuilder.builder().setMaximumWeight(maxSize).removalListener(notification -> { + this.cache = createCache(maxSize, null); + } + + EnrichCache(ByteSizeValue maxByteSize, LongSupplier relativeNanoTimeProvider) { + this.relativeNanoTimeProvider = relativeNanoTimeProvider; + this.cache = createCache(maxByteSize.getBytes(), (key, value) -> value.sizeInBytes); + } + + private Cache createCache(long maxWeight, ToLongBiFunction weigher) { + var builder = CacheBuilder.builder().setMaximumWeight(maxWeight).removalListener(notification -> { sizeInBytes.getAndAdd(-1 * notification.getValue().sizeInBytes); - }).build(); + }); + if (weigher != null) { + builder.weigher(weigher); + } + return builder.build(); } /** diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java index 868ec49ff1d97..1a68ada60b6f1 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java @@ -12,17 +12,22 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.ingest.Processor; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SystemIndexPlugin; @@ -121,14 +126,29 @@ public class EnrichPlugin extends Plugin implements SystemIndexPlugin, IngestPlu return String.valueOf(maxConcurrentRequests * maxLookupsPerRequest); }, val -> Setting.parseInt(val, 1, Integer.MAX_VALUE, QUEUE_CAPACITY_SETTING_NAME), Setting.Property.NodeScope); - public static final Setting CACHE_SIZE = Setting.longSetting("enrich.cache_size", 1000, 0, Setting.Property.NodeScope); + public static final String CACHE_SIZE_SETTING_NAME = "enrich.cache.size"; + public static final Setting CACHE_SIZE = new Setting<>( + "enrich.cache.size", + (String) null, + (String s) -> FlatNumberOrByteSizeValue.parse( + s, + CACHE_SIZE_SETTING_NAME, + new FlatNumberOrByteSizeValue(ByteSizeValue.ofBytes((long) (0.01 * JvmInfo.jvmInfo().getConfiguredMaxHeapSize()))) + ), + Setting.Property.NodeScope + ); private final Settings settings; private final EnrichCache 
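The new EnrichCache constructor above switches the cache from counting entries to weighing them: with a ByteSizeValue limit, each entry's weight is its size in bytes, so eviction tracks memory use rather than entry count. The underlying CacheBuilder usage looks roughly like this (standalone sketch with simplified types, not the enrich code itself):

    static void cacheWeighingExamples() {
        // count-bounded: each entry weighs 1 (the builder's default), so the maximum
        // weight is effectively a maximum number of entries
        Cache<String, byte[]> byCount = CacheBuilder.<String, byte[]>builder()
            .setMaximumWeight(1000)
            .build();

        // memory-bounded: weigh each entry by its payload size, so the maximum weight is a byte budget
        Cache<String, byte[]> byBytes = CacheBuilder.<String, byte[]>builder()
            .setMaximumWeight(ByteSizeValue.ofMb(64).getBytes())
            .weigher((key, value) -> value.length)
            .build();
    }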
enrichCache; public EnrichPlugin(final Settings settings) { this.settings = settings; - this.enrichCache = new EnrichCache(CACHE_SIZE.get(settings)); + FlatNumberOrByteSizeValue maxSize = CACHE_SIZE.get(settings); + if (maxSize.byteSizeValue() != null) { + this.enrichCache = new EnrichCache(maxSize.byteSizeValue()); + } else { + this.enrichCache = new EnrichCache(maxSize.flatNumber()); + } } @Override @@ -265,4 +285,45 @@ public String getFeatureName() { public String getFeatureDescription() { return "Manages data related to Enrich policies"; } + + /** + * A class that specifies either a flat (unit-less) number or a byte size value. + */ + public static class FlatNumberOrByteSizeValue { + + @Nullable + private final Long flatNumber; + @Nullable + private final ByteSizeValue byteSizeValue; + + public FlatNumberOrByteSizeValue(ByteSizeValue byteSizeValue) { + this.byteSizeValue = byteSizeValue; + this.flatNumber = null; + } + + public FlatNumberOrByteSizeValue(Long flatNumber) { + this.flatNumber = flatNumber; + this.byteSizeValue = null; + } + + public static FlatNumberOrByteSizeValue parse(String value, String settingName, FlatNumberOrByteSizeValue defaultValue) { + if (Strings.hasText(value) == false) { + return defaultValue; + } + if (Character.isDigit(value.charAt(value.length() - 1)) == false) { + return new FlatNumberOrByteSizeValue(MemorySizeValue.parseBytesSizeValueOrHeapRatio(value, settingName)); + } + return new FlatNumberOrByteSizeValue(Long.parseLong(value)); + } + + @Nullable + public ByteSizeValue byteSizeValue() { + return byteSizeValue; + } + + @Nullable + public Long flatNumber() { + return flatNumber; + } + } } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java new file mode 100644 index 0000000000000..809b78c50b35a --- /dev/null +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
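With FlatNumberOrByteSizeValue, the enrich.cache.size setting accepts either a plain entry count (the old behaviour) or a memory limit, absolute or as a share of the heap; left unset it defaults to 1% of the configured max heap. A sketch of the three accepted forms (values are illustrative):

    static void cacheSizeSettingExamples() {
        // plain number: maximum number of cached entries
        Settings byEntries = Settings.builder().put("enrich.cache.size", 5000).build();
        // absolute byte size: maximum memory taken by cached entries
        Settings byBytes = Settings.builder().put("enrich.cache.size", "256mb").build();
        // heap percentage: resolved against the JVM's configured max heap at startup
        Settings byHeapShare = Settings.builder().put("enrich.cache.size", "2%").build();

        EnrichPlugin.FlatNumberOrByteSizeValue parsed = EnrichPlugin.CACHE_SIZE.get(byBytes);
        assert parsed.byteSizeValue() != null && parsed.flatNumber() == null;
    }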
+ */ + +package org.elasticsearch.xpack.enrich; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.enrich.EnrichPlugin.FlatNumberOrByteSizeValue; + +public class FlatNumberOrByteSizeValueTests extends ESTestCase { + + private static final String SETTING_NAME = "test.setting"; + + public void testParse() { + int number = randomIntBetween(1, Integer.MAX_VALUE); + assertEquals( + new FlatNumberOrByteSizeValue((long) number), + FlatNumberOrByteSizeValue.parse(Integer.toString(number), SETTING_NAME, null) + ); + assertEquals( + new FlatNumberOrByteSizeValue(ByteSizeValue.ofGb(number)), + FlatNumberOrByteSizeValue.parse(number + "GB", SETTING_NAME, null) + ); + assertEquals( + new FlatNumberOrByteSizeValue(ByteSizeValue.ofGb(number)), + FlatNumberOrByteSizeValue.parse(number + "g", SETTING_NAME, null) + ); + int percentage = randomIntBetween(0, 100); + assertEquals( + new FlatNumberOrByteSizeValue( + ByteSizeValue.ofBytes((long) ((double) percentage / 100 * JvmInfo.jvmInfo().getConfiguredMaxHeapSize())) + ), + FlatNumberOrByteSizeValue.parse(percentage + "%", SETTING_NAME, null) + ); + assertEquals(new FlatNumberOrByteSizeValue(0L), FlatNumberOrByteSizeValue.parse("0", SETTING_NAME, null)); + assertEquals(new FlatNumberOrByteSizeValue(ByteSizeValue.ZERO), FlatNumberOrByteSizeValue.parse("0GB", SETTING_NAME, null)); + assertEquals(new FlatNumberOrByteSizeValue(ByteSizeValue.ZERO), FlatNumberOrByteSizeValue.parse("0%", SETTING_NAME, null)); + // Assert default value. + assertEquals( + new FlatNumberOrByteSizeValue((long) number), + FlatNumberOrByteSizeValue.parse(null, SETTING_NAME, new FlatNumberOrByteSizeValue((long) number)) + ); + assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5GB%", SETTING_NAME, null)); + assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5%GB", SETTING_NAME, null)); + assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5GBX", SETTING_NAME, null)); + } + + private void assertEquals(FlatNumberOrByteSizeValue expected, FlatNumberOrByteSizeValue actual) { + assertEquals(expected.byteSizeValue(), actual.byteSizeValue()); + assertEquals(expected.flatNumber(), actual.flatNumber()); + } +} diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index 943d1275364fb..1652495197fc0 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -260,6 +260,11 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { } + @Override + public int read(byte[] b, int off, int len) throws IOException { + return 0; + } + @Override public void close() throws IOException { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 065ada06bfa1e..979368c300e00 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ 
b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -447,6 +447,14 @@ public String esType() { return esType; } + /** + * Return the Elasticsearch field name of this type if there is one, + * otherwise return the ESQL specific name. + */ + public String esNameIfPossible() { + return esType != null ? esType : typeName; + } + /** * The name we give to types on the response. */ diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java index 9b3d7950c2a01..8b15893f8a056 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java @@ -130,7 +130,13 @@ private static String makeErrorMessage(Map> typesToIndices) errorMessage.append("["); errorMessage.append(e.getKey()); errorMessage.append("] in "); - errorMessage.append(e.getValue()); + if (e.getValue().size() <= 3) { + errorMessage.append(e.getValue()); + } else { + errorMessage.append(e.getValue().stream().sorted().limit(3).collect(Collectors.toList())); + errorMessage.append(" and [" + (e.getValue().size() - 3) + "] other "); + errorMessage.append(e.getValue().size() == 4 ? "index" : "indices"); + } } return errorMessage.toString(); } diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Aggregator.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Aggregator.java index 69db6a1310c9e..444dbcc1b9e58 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Aggregator.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Aggregator.java @@ -57,4 +57,10 @@ IntermediateState[] value() default {}; + /** + * Exceptions thrown by the `combine*(...)` methods to catch and convert + * into a warning and turn into a null value. + */ + Class[] warnExceptions() default {}; + } diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java index 0216ea07e5c7c..8d81b60e20e4d 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java @@ -22,6 +22,12 @@ IntermediateState[] value() default {}; + /** + * Exceptions thrown by the `combine*(...)` methods to catch and convert + * into a warning and turn into a null value. + */ + Class[] warnExceptions() default {}; + /** * If {@code true} then the @timestamp LongVector will be appended to the input blocks of the aggregation function. 
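Note: the warnExceptions attribute introduced on @Aggregator and @GroupingAggregator above is what the generator changes further down consume: when the list is non-empty, the generated code wraps the combine*(...) calls in try/catch, registers the caught exception as a warning and marks the row or group as failed so the result becomes null instead of failing the request. A hedged sketch of a declaration that would opt in; the class name, method bodies and intermediate-state names below are illustrative only and not part of this change:

import org.elasticsearch.compute.ann.Aggregator;
import org.elasticsearch.compute.ann.IntermediateState;

// Illustrative aggregator: an exact long sum whose overflow becomes a warning plus a
// null result. Note the extra "failed" intermediate state that the generated fallible
// code expects next to "seen".
@Aggregator(
    value = {
        @IntermediateState(name = "sum", type = "LONG"),
        @IntermediateState(name = "seen", type = "BOOLEAN"),
        @IntermediateState(name = "failed", type = "BOOLEAN") },
    warnExceptions = { ArithmeticException.class }
)
class ExactSumLongAggregator {

    public static long init() {
        return 0;
    }

    public static long combine(long current, long v) {
        return Math.addExact(current, v); // throws ArithmeticException on overflow
    }
}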
*/ diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index ccf93a277a50d..971bfd39c231f 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -446,6 +446,32 @@ tasks.named('stringTemplates').configure { it.inputFile = stateInputFile it.outputFile = "org/elasticsearch/compute/aggregation/DoubleState.java" } + File fallibleStateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st") + template { + it.properties = booleanProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/BooleanFallibleState.java" + } + template { + it.properties = intProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IntFallibleState.java" + } + template { + it.properties = longProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/LongFallibleState.java" + } + template { + it.properties = floatProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/FloatFallibleState.java" + } + template { + it.properties = doubleProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/DoubleFallibleState.java" + } // block lookups File lookupInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st") template { @@ -504,6 +530,32 @@ tasks.named('stringTemplates').configure { it.inputFile = arrayStateInputFile it.outputFile = "org/elasticsearch/compute/aggregation/FloatArrayState.java" } + File fallibleArrayStateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st") + template { + it.properties = booleanProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java" + } + template { + it.properties = intProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IntFallibleArrayState.java" + } + template { + it.properties = longProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/LongFallibleArrayState.java" + } + template { + it.properties = doubleProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java" + } + template { + it.properties = floatProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java" + } File valuesAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-ValuesAggregator.java.st") template { it.properties = intProperties diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java index 3f031db2978f9..f11ccbced6fbe 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java @@ -10,6 +10,7 @@ import com.squareup.javapoet.ClassName; 
import com.squareup.javapoet.JavaFile; import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; import org.elasticsearch.compute.ann.Aggregator; @@ -31,6 +32,7 @@ import static org.elasticsearch.compute.gen.Types.AGGREGATOR_FUNCTION_SUPPLIER; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.LIST_INTEGER; +import static org.elasticsearch.compute.gen.Types.STRING; /** * Implements "AggregationFunctionSupplier" from a class annotated with both @@ -40,6 +42,7 @@ public class AggregatorFunctionSupplierImplementer { private final TypeElement declarationType; private final AggregatorImplementer aggregatorImplementer; private final GroupingAggregatorImplementer groupingAggregatorImplementer; + private final boolean hasWarnings; private final List createParameters; private final ClassName implementation; @@ -47,11 +50,13 @@ public AggregatorFunctionSupplierImplementer( Elements elements, TypeElement declarationType, AggregatorImplementer aggregatorImplementer, - GroupingAggregatorImplementer groupingAggregatorImplementer + GroupingAggregatorImplementer groupingAggregatorImplementer, + boolean hasWarnings ) { this.declarationType = declarationType; this.aggregatorImplementer = aggregatorImplementer; this.groupingAggregatorImplementer = groupingAggregatorImplementer; + this.hasWarnings = hasWarnings; Set createParameters = new LinkedHashSet<>(); if (aggregatorImplementer != null) { @@ -86,6 +91,11 @@ private TypeSpec type() { builder.addModifiers(Modifier.PUBLIC, Modifier.FINAL); builder.addSuperinterface(AGGREGATOR_FUNCTION_SUPPLIER); + if (hasWarnings) { + builder.addField(TypeName.INT, "warningsLineNumber"); + builder.addField(TypeName.INT, "warningsColumnNumber"); + builder.addField(STRING, "warningsSourceText"); + } createParameters.stream().forEach(p -> p.declareField(builder)); builder.addMethod(ctor()); if (aggregatorImplementer != null) { @@ -100,6 +110,14 @@ private TypeSpec type() { private MethodSpec ctor() { MethodSpec.Builder builder = MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); + if (hasWarnings) { + builder.addParameter(TypeName.INT, "warningsLineNumber"); + builder.addParameter(TypeName.INT, "warningsColumnNumber"); + builder.addParameter(STRING, "warningsSourceText"); + builder.addStatement("this.warningsLineNumber = warningsLineNumber"); + builder.addStatement("this.warningsColumnNumber = warningsColumnNumber"); + builder.addStatement("this.warningsSourceText = warningsSourceText"); + } createParameters.stream().forEach(p -> p.buildCtor(builder)); return builder.build(); } @@ -114,30 +132,48 @@ private MethodSpec unsupportedNonGroupingAggregator() { } private MethodSpec aggregator() { - MethodSpec.Builder builder = MethodSpec.methodBuilder("aggregator") - .addParameter(DRIVER_CONTEXT, "driverContext") - .returns(aggregatorImplementer.implementation()); + MethodSpec.Builder builder = MethodSpec.methodBuilder("aggregator"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); + builder.addParameter(DRIVER_CONTEXT, "driverContext"); + builder.returns(aggregatorImplementer.implementation()); + + if (hasWarnings) { + builder.addStatement( + "var warnings = Warnings.createWarnings(driverContext.warningsMode(), " + + "warningsLineNumber, warningsColumnNumber, warningsSourceText)" + ); + } + builder.addStatement( "return $T.create($L)", aggregatorImplementer.implementation(), - Stream.concat(Stream.of("driverContext, 
channels"), aggregatorImplementer.createParameters().stream().map(Parameter::name)) - .collect(Collectors.joining(", ")) + Stream.concat( + Stream.concat(hasWarnings ? Stream.of("warnings") : Stream.of(), Stream.of("driverContext, channels")), + aggregatorImplementer.createParameters().stream().map(Parameter::name) + ).collect(Collectors.joining(", ")) ); return builder.build(); } private MethodSpec groupingAggregator() { - MethodSpec.Builder builder = MethodSpec.methodBuilder("groupingAggregator") - .addParameter(DRIVER_CONTEXT, "driverContext") - .returns(groupingAggregatorImplementer.implementation()); + MethodSpec.Builder builder = MethodSpec.methodBuilder("groupingAggregator"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); + builder.addParameter(DRIVER_CONTEXT, "driverContext"); + builder.returns(groupingAggregatorImplementer.implementation()); + + if (hasWarnings) { + builder.addStatement( + "var warnings = Warnings.createWarnings(driverContext.warningsMode(), " + + "warningsLineNumber, warningsColumnNumber, warningsSourceText)" + ); + } + builder.addStatement( "return $T.create($L)", groupingAggregatorImplementer.implementation(), Stream.concat( - Stream.of("channels, driverContext"), + Stream.concat(hasWarnings ? Stream.of("warnings") : Stream.of(), Stream.of("channels, driverContext")), groupingAggregatorImplementer.createParameters().stream().map(Parameter::name) ).collect(Collectors.joining(", ")) ); diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index b3d32a82cc7a9..67ce0cf709704 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -21,10 +21,15 @@ import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.Objects; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; +import javax.lang.model.type.TypeMirror; import javax.lang.model.util.Elements; import static java.util.stream.Collectors.joining; @@ -40,6 +45,7 @@ import static org.elasticsearch.compute.gen.Types.BYTES_REF; import static org.elasticsearch.compute.gen.Types.BYTES_REF_BLOCK; import static org.elasticsearch.compute.gen.Types.BYTES_REF_VECTOR; +import static org.elasticsearch.compute.gen.Types.COMPUTE_WARNINGS; import static org.elasticsearch.compute.gen.Types.DOUBLE_BLOCK; import static org.elasticsearch.compute.gen.Types.DOUBLE_VECTOR; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; @@ -68,6 +74,7 @@ */ public class AggregatorImplementer { private final TypeElement declarationType; + private final List warnExceptions; private final ExecutableElement init; private final ExecutableElement combine; private final ExecutableElement combineValueCount; @@ -76,18 +83,28 @@ public class AggregatorImplementer { private final ClassName implementation; private final TypeName stateType; private final boolean stateTypeHasSeen; + private final boolean stateTypeHasFailed; private final boolean valuesIsBytesRef; private final List intermediateState; private final List createParameters; - public AggregatorImplementer(Elements elements, 
TypeElement declarationType, IntermediateState[] interStateAnno) { + public AggregatorImplementer( + Elements elements, + TypeElement declarationType, + IntermediateState[] interStateAnno, + List warnExceptions + ) { this.declarationType = declarationType; + this.warnExceptions = warnExceptions; this.init = findRequiredMethod(declarationType, new String[] { "init", "initSingle" }, e -> true); this.stateType = choseStateType(); - stateTypeHasSeen = elements.getAllMembers(elements.getTypeElement(stateType.toString())) + this.stateTypeHasSeen = elements.getAllMembers(elements.getTypeElement(stateType.toString())) .stream() .anyMatch(e -> e.toString().equals("seen()")); + this.stateTypeHasFailed = elements.getAllMembers(elements.getTypeElement(stateType.toString())) + .stream() + .anyMatch(e -> e.toString().equals("failed()")); this.combine = findRequiredMethod(declarationType, new String[] { "combine" }, e -> { if (e.getParameters().size() == 0) { @@ -102,7 +119,7 @@ public AggregatorImplementer(Elements elements, TypeElement declarationType, Int this.createParameters = init.getParameters() .stream() .map(Parameter::from) - .filter(f -> false == f.type().equals(BIG_ARRAYS)) + .filter(f -> false == f.type().equals(BIG_ARRAYS) && false == f.type().equals(DRIVER_CONTEXT)) .toList(); this.implementation = ClassName.get( @@ -126,7 +143,10 @@ private TypeName choseStateType() { if (false == initReturn.isPrimitive()) { return initReturn; } - return ClassName.get("org.elasticsearch.compute.aggregation", firstUpper(initReturn.toString()) + "State"); + if (warnExceptions.isEmpty()) { + return ClassName.get("org.elasticsearch.compute.aggregation", firstUpper(initReturn.toString()) + "State"); + } + return ClassName.get("org.elasticsearch.compute.aggregation", firstUpper(initReturn.toString()) + "FallibleState"); } static String valueType(ExecutableElement init, ExecutableElement combine) { @@ -202,6 +222,11 @@ private TypeSpec type() { .initializer(initInterState()) .build() ); + + if (warnExceptions.isEmpty() == false) { + builder.addField(COMPUTE_WARNINGS, "warnings", Modifier.PRIVATE, Modifier.FINAL); + } + builder.addField(DRIVER_CONTEXT, "driverContext", Modifier.PRIVATE, Modifier.FINAL); builder.addField(stateType, "state", Modifier.PRIVATE, Modifier.FINAL); builder.addField(LIST_INTEGER, "channels", Modifier.PRIVATE, Modifier.FINAL); @@ -228,17 +253,26 @@ private TypeSpec type() { private MethodSpec create() { MethodSpec.Builder builder = MethodSpec.methodBuilder("create"); builder.addModifiers(Modifier.PUBLIC, Modifier.STATIC).returns(implementation); + if (warnExceptions.isEmpty() == false) { + builder.addParameter(COMPUTE_WARNINGS, "warnings"); + } builder.addParameter(DRIVER_CONTEXT, "driverContext"); builder.addParameter(LIST_INTEGER, "channels"); for (Parameter p : createParameters) { builder.addParameter(p.type(), p.name()); } if (createParameters.isEmpty()) { - builder.addStatement("return new $T(driverContext, channels, $L)", implementation, callInit()); + builder.addStatement( + "return new $T($LdriverContext, channels, $L)", + implementation, + warnExceptions.isEmpty() ? "" : "warnings, ", + callInit() + ); } else { builder.addStatement( - "return new $T(driverContext, channels, $L, $L)", + "return new $T($LdriverContext, channels, $L, $L)", implementation, + warnExceptions.isEmpty() ? 
"" : "warnings, ", callInit(), createParameters.stream().map(p -> p.name()).collect(joining(", ")) ); @@ -275,16 +309,22 @@ private CodeBlock initInterState() { private MethodSpec ctor() { MethodSpec.Builder builder = MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); + if (warnExceptions.isEmpty() == false) { + builder.addParameter(COMPUTE_WARNINGS, "warnings"); + } builder.addParameter(DRIVER_CONTEXT, "driverContext"); builder.addParameter(LIST_INTEGER, "channels"); builder.addParameter(stateType, "state"); + + if (warnExceptions.isEmpty() == false) { + builder.addStatement("this.warnings = warnings"); + } builder.addStatement("this.driverContext = driverContext"); builder.addStatement("this.channels = channels"); builder.addStatement("this.state = state"); for (Parameter p : createParameters()) { - builder.addParameter(p.type(), p.name()); - builder.addStatement("this.$N = $N", p.name(), p.name()); + p.buildCtor(builder); } return builder.build(); } @@ -306,6 +346,11 @@ private MethodSpec intermediateBlockCount() { private MethodSpec addRawInput() { MethodSpec.Builder builder = MethodSpec.methodBuilder("addRawInput"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC).addParameter(PAGE, "page"); + if (stateTypeHasFailed) { + builder.beginControlFlow("if (state.failed())"); + builder.addStatement("return"); + builder.endControlFlow(); + } builder.addStatement("$T block = page.getBlock(channels.get(0))", valueBlockType(init, combine)); builder.addStatement("$T vector = block.asVector()", valueVectorType(init, combine)); builder.beginControlFlow("if (vector != null)").addStatement("addRawVector(vector)"); @@ -366,20 +411,27 @@ private MethodSpec addRawBlock() { } private void combineRawInput(MethodSpec.Builder builder, String blockVariable) { + TypeName returnType = TypeName.get(combine.getReturnType()); + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); + } if (valuesIsBytesRef) { combineRawInputForBytesRef(builder, blockVariable); - return; - } - TypeName returnType = TypeName.get(combine.getReturnType()); - if (returnType.isPrimitive()) { + } else if (returnType.isPrimitive()) { combineRawInputForPrimitive(returnType, builder, blockVariable); - return; - } - if (returnType == TypeName.VOID) { + } else if (returnType == TypeName.VOID) { combineRawInputForVoid(builder, blockVariable); - return; + } else { + throw new IllegalArgumentException("combine must return void or a primitive"); + } + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.failed(true)"); + builder.addStatement("return"); + builder.endControlFlow(); } - throw new IllegalArgumentException("combine must return void or a primitive"); } private void combineRawInputForPrimitive(TypeName returnType, MethodSpec.Builder builder, String blockVariable) { @@ -423,16 +475,37 @@ private MethodSpec addIntermediateInput() { } builder.addStatement("$T.combineIntermediate(state, " + intermediateStateRowAccess() + ")", declarationType); } else if (hasPrimitiveState()) { - assert intermediateState.size() == 2; - assert intermediateState.get(1).name().equals("seen"); - builder.beginControlFlow("if (seen.getBoolean(0))"); - { - var state = intermediateState.get(0); - var s = 
"state.$L($T.combine(state.$L(), " + state.name() + "." + vectorAccessorName(state.elementType()) + "(0)))"; - builder.addStatement(s, primitiveStateMethod(), declarationType, primitiveStateMethod()); - builder.addStatement("state.seen(true)"); + if (warnExceptions.isEmpty()) { + assert intermediateState.size() == 2; + assert intermediateState.get(1).name().equals("seen"); + builder.beginControlFlow("if (seen.getBoolean(0))"); + } else { + assert intermediateState.size() == 3; + assert intermediateState.get(1).name().equals("seen"); + assert intermediateState.get(2).name().equals("failed"); + builder.beginControlFlow("if (failed.getBoolean(0))"); + { + builder.addStatement("state.failed(true)"); + builder.addStatement("state.seen(true)"); + } + builder.nextControlFlow("else if (seen.getBoolean(0))"); + } + + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); + } + var state = intermediateState.get(0); + var s = "state.$L($T.combine(state.$L(), " + state.name() + "." + vectorAccessorName(state.elementType()) + "(0)))"; + builder.addStatement(s, primitiveStateMethod(), declarationType, primitiveStateMethod()); + builder.addStatement("state.seen(true)"); + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.failed(true)"); builder.endControlFlow(); } + builder.endControlFlow(); } else { throw new IllegalArgumentException("Don't know how to combine intermediate input. Define combineIntermediate"); } @@ -445,15 +518,15 @@ String intermediateStateRowAccess() { private String primitiveStateMethod() { switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.BooleanState": + case "org.elasticsearch.compute.aggregation.BooleanState", "org.elasticsearch.compute.aggregation.BooleanFallibleState": return "booleanValue"; - case "org.elasticsearch.compute.aggregation.IntState": + case "org.elasticsearch.compute.aggregation.IntState", "org.elasticsearch.compute.aggregation.IntFallibleState": return "intValue"; - case "org.elasticsearch.compute.aggregation.LongState": + case "org.elasticsearch.compute.aggregation.LongState", "org.elasticsearch.compute.aggregation.LongFallibleState": return "longValue"; - case "org.elasticsearch.compute.aggregation.DoubleState": + case "org.elasticsearch.compute.aggregation.DoubleState", "org.elasticsearch.compute.aggregation.DoubleFallibleState": return "doubleValue"; - case "org.elasticsearch.compute.aggregation.FloatState": + case "org.elasticsearch.compute.aggregation.FloatState", "org.elasticsearch.compute.aggregation.FloatFallibleState": return "floatValue"; default: throw new IllegalArgumentException( @@ -480,8 +553,11 @@ private MethodSpec evaluateFinal() { .addParameter(BLOCK_ARRAY, "blocks") .addParameter(TypeName.INT, "offset") .addParameter(DRIVER_CONTEXT, "driverContext"); - if (stateTypeHasSeen) { - builder.beginControlFlow("if (state.seen() == false)"); + if (stateTypeHasSeen || stateTypeHasFailed) { + var condition = Stream.of(stateTypeHasSeen ? "state.seen() == false" : null, stateTypeHasFailed ? 
"state.failed()" : null) + .filter(Objects::nonNull) + .collect(joining(" || ")); + builder.beginControlFlow("if ($L)", condition); builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1)", BLOCK); builder.addStatement("return"); builder.endControlFlow(); @@ -496,19 +572,19 @@ private MethodSpec evaluateFinal() { private void primitiveStateToResult(MethodSpec.Builder builder) { switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.BooleanState": + case "org.elasticsearch.compute.aggregation.BooleanState", "org.elasticsearch.compute.aggregation.BooleanFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantBooleanBlockWith(state.booleanValue(), 1)"); return; - case "org.elasticsearch.compute.aggregation.IntState": + case "org.elasticsearch.compute.aggregation.IntState", "org.elasticsearch.compute.aggregation.IntFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1)"); return; - case "org.elasticsearch.compute.aggregation.LongState": + case "org.elasticsearch.compute.aggregation.LongState", "org.elasticsearch.compute.aggregation.LongFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1)"); return; - case "org.elasticsearch.compute.aggregation.DoubleState": + case "org.elasticsearch.compute.aggregation.DoubleState", "org.elasticsearch.compute.aggregation.DoubleFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1)"); return; - case "org.elasticsearch.compute.aggregation.FloatState": + case "org.elasticsearch.compute.aggregation.FloatState", "org.elasticsearch.compute.aggregation.FloatFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantFloatBlockWith(state.floatValue(), 1)"); return; default: @@ -534,13 +610,12 @@ private MethodSpec close() { return builder.build(); } + private static final Pattern PRIMITIVE_STATE_PATTERN = Pattern.compile( + "org.elasticsearch.compute.aggregation.(Boolean|Int|Long|Double|Float)(Fallible)?State" + ); + private boolean hasPrimitiveState() { - return switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.BooleanState", "org.elasticsearch.compute.aggregation.IntState", - "org.elasticsearch.compute.aggregation.LongState", "org.elasticsearch.compute.aggregation.DoubleState", - "org.elasticsearch.compute.aggregation.FloatState" -> true; - default -> false; - }; + return PRIMITIVE_STATE_PATTERN.matcher(stateType.toString()).matches(); } record IntermediateStateDesc(String name, String elementType, boolean block) { diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java index d07b24047b7e2..4b1f946a1d176 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java @@ -80,9 +80,14 @@ public boolean process(Set set, RoundEnvironment roundEnv } for (TypeElement aggClass : annotatedClasses) { AggregatorImplementer implementer = null; + var warnExceptionsTypes = Annotations.listAttributeValues( + aggClass, + Set.of(Aggregator.class, GroupingAggregator.class), + 
"warnExceptions" + ); if (aggClass.getAnnotation(Aggregator.class) != null) { IntermediateState[] intermediateState = aggClass.getAnnotation(Aggregator.class).value(); - implementer = new AggregatorImplementer(env.getElementUtils(), aggClass, intermediateState); + implementer = new AggregatorImplementer(env.getElementUtils(), aggClass, intermediateState, warnExceptionsTypes); write(aggClass, "aggregator", implementer.sourceFile(), env); } GroupingAggregatorImplementer groupingAggregatorImplementer = null; @@ -96,6 +101,7 @@ public boolean process(Set set, RoundEnvironment roundEnv env.getElementUtils(), aggClass, intermediateState, + warnExceptionsTypes, includeTimestamps ); write(aggClass, "grouping aggregator", groupingAggregatorImplementer.sourceFile(), env); @@ -104,8 +110,13 @@ public boolean process(Set set, RoundEnvironment roundEnv write( aggClass, "aggregator function supplier", - new AggregatorFunctionSupplierImplementer(env.getElementUtils(), aggClass, implementer, groupingAggregatorImplementer) - .sourceFile(), + new AggregatorFunctionSupplierImplementer( + env.getElementUtils(), + aggClass, + implementer, + groupingAggregatorImplementer, + warnExceptionsTypes.isEmpty() == false + ).sourceFile(), env ); } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Annotations.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Annotations.java new file mode 100644 index 0000000000000..d3892f7d2a40b --- /dev/null +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Annotations.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.gen; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import javax.lang.model.element.AnnotationValue; +import javax.lang.model.element.Element; +import javax.lang.model.type.TypeMirror; + +public class Annotations { + private Annotations() {} + + /** + * Returns the values of the requested attribute, from all the matching annotations on the given element. 
+ * + * @param element the element to inspect + * @param annotations the annotations to look for + * @param attributeName the attribute to extract + */ + public static List listAttributeValues(Element element, Set> annotations, String attributeName) { + List result = new ArrayList<>(); + for (var mirror : element.getAnnotationMirrors()) { + String annotationType = mirror.getAnnotationType().toString(); + if (annotations.stream().anyMatch(a -> a.getName().equals(annotationType))) { + for (var e : mirror.getElementValues().entrySet()) { + if (false == e.getKey().getSimpleName().toString().equals(attributeName)) { + continue; + } + for (var v : (List) e.getValue().getValue()) { + result.add((TypeMirror) ((AnnotationValue) v).getValue()); + } + } + } + } + return result; + } +} diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java index ea3ee938298de..09012c7b3a48a 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java @@ -11,7 +11,6 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.MvEvaluator; -import java.util.ArrayList; import java.util.List; import java.util.Set; @@ -21,11 +20,9 @@ import javax.annotation.processing.RoundEnvironment; import javax.lang.model.SourceVersion; import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; import javax.lang.model.element.Element; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.TypeElement; -import javax.lang.model.type.TypeMirror; import javax.tools.Diagnostic; /** @@ -69,6 +66,11 @@ public Iterable getCompletions( public boolean process(Set set, RoundEnvironment roundEnvironment) { for (TypeElement ann : set) { for (Element evaluatorMethod : roundEnvironment.getElementsAnnotatedWith(ann)) { + var warnExceptionsTypes = Annotations.listAttributeValues( + evaluatorMethod, + Set.of(Evaluator.class, MvEvaluator.class, ConvertEvaluator.class), + "warnExceptions" + ); Evaluator evaluatorAnn = evaluatorMethod.getAnnotation(Evaluator.class); if (evaluatorAnn != null) { try { @@ -80,7 +82,7 @@ public boolean process(Set set, RoundEnvironment roundEnv env.getTypeUtils(), (ExecutableElement) evaluatorMethod, evaluatorAnn.extraName(), - warnExceptions(evaluatorMethod) + warnExceptionsTypes ).sourceFile(), env ); @@ -102,7 +104,7 @@ public boolean process(Set set, RoundEnvironment roundEnv mvEvaluatorAnn.finish(), mvEvaluatorAnn.single(), mvEvaluatorAnn.ascending(), - warnExceptions(evaluatorMethod) + warnExceptionsTypes ).sourceFile(), env ); @@ -121,7 +123,7 @@ public boolean process(Set set, RoundEnvironment roundEnv env.getElementUtils(), (ExecutableElement) evaluatorMethod, convertEvaluatorAnn.extraName(), - warnExceptions(evaluatorMethod) + warnExceptionsTypes ).sourceFile(), env ); @@ -134,25 +136,4 @@ public boolean process(Set set, RoundEnvironment roundEnv } return true; } - - private static List warnExceptions(Element evaluatorMethod) { - List result = new ArrayList<>(); - for (var mirror : evaluatorMethod.getAnnotationMirrors()) { - String annotationType = mirror.getAnnotationType().toString(); - if (annotationType.equals(Evaluator.class.getName()) - || annotationType.equals(MvEvaluator.class.getName()) - || 
annotationType.equals(ConvertEvaluator.class.getName())) { - - for (var e : mirror.getElementValues().entrySet()) { - if (false == e.getKey().getSimpleName().toString().equals("warnExceptions")) { - continue; - } - for (var v : (List) e.getValue().getValue()) { - result.add((TypeMirror) ((AnnotationValue) v).getValue()); - } - } - } - } - return result; - } } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index 79df41f304c06..3dffbcf84eb78 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -22,11 +22,13 @@ import java.util.List; import java.util.Locale; import java.util.function.Consumer; +import java.util.regex.Pattern; import java.util.stream.Collectors; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; +import javax.lang.model.type.TypeMirror; import javax.lang.model.util.Elements; import static java.util.stream.Collectors.joining; @@ -38,6 +40,7 @@ import static org.elasticsearch.compute.gen.Types.BIG_ARRAYS; import static org.elasticsearch.compute.gen.Types.BLOCK_ARRAY; import static org.elasticsearch.compute.gen.Types.BYTES_REF; +import static org.elasticsearch.compute.gen.Types.COMPUTE_WARNINGS; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.ELEMENT_TYPE; import static org.elasticsearch.compute.gen.Types.GROUPING_AGGREGATOR_FUNCTION; @@ -63,6 +66,7 @@ */ public class GroupingAggregatorImplementer { private final TypeElement declarationType; + private final List warnExceptions; private final ExecutableElement init; private final ExecutableElement combine; private final ExecutableElement combineStates; @@ -79,9 +83,11 @@ public GroupingAggregatorImplementer( Elements elements, TypeElement declarationType, IntermediateState[] interStateAnno, + List warnExceptions, boolean includeTimestampVector ) { this.declarationType = declarationType; + this.warnExceptions = warnExceptions; this.init = findRequiredMethod(declarationType, new String[] { "init", "initGrouping" }, e -> true); this.stateType = choseStateType(); @@ -129,7 +135,10 @@ private TypeName choseStateType() { } String head = initReturn.toString().substring(0, 1).toUpperCase(Locale.ROOT); String tail = initReturn.toString().substring(1); - return ClassName.get("org.elasticsearch.compute.aggregation", head + tail + "ArrayState"); + if (warnExceptions.isEmpty()) { + return ClassName.get("org.elasticsearch.compute.aggregation", head + tail + "ArrayState"); + } + return ClassName.get("org.elasticsearch.compute.aggregation", head + tail + "FallibleArrayState"); } public JavaFile sourceFile() { @@ -154,6 +163,9 @@ private TypeSpec type() { .build() ); builder.addField(stateType, "state", Modifier.PRIVATE, Modifier.FINAL); + if (warnExceptions.isEmpty() == false) { + builder.addField(COMPUTE_WARNINGS, "warnings", Modifier.PRIVATE, Modifier.FINAL); + } builder.addField(LIST_INTEGER, "channels", Modifier.PRIVATE, Modifier.FINAL); builder.addField(DRIVER_CONTEXT, "driverContext", Modifier.PRIVATE, Modifier.FINAL); @@ -182,17 +194,26 @@ private TypeSpec type() { private MethodSpec create() { MethodSpec.Builder 
builder = MethodSpec.methodBuilder("create"); builder.addModifiers(Modifier.PUBLIC, Modifier.STATIC).returns(implementation); + if (warnExceptions.isEmpty() == false) { + builder.addParameter(COMPUTE_WARNINGS, "warnings"); + } builder.addParameter(LIST_INTEGER, "channels"); builder.addParameter(DRIVER_CONTEXT, "driverContext"); for (Parameter p : createParameters) { builder.addParameter(p.type(), p.name()); } if (createParameters.isEmpty()) { - builder.addStatement("return new $T(channels, $L, driverContext)", implementation, callInit()); + builder.addStatement( + "return new $T($Lchannels, $L, driverContext)", + implementation, + warnExceptions.isEmpty() ? "" : "warnings, ", + callInit() + ); } else { builder.addStatement( - "return new $T(channels, $L, driverContext, $L)", + "return new $T($Lchannels, $L, driverContext, $L)", implementation, + warnExceptions.isEmpty() ? "" : "warnings, ", callInit(), createParameters.stream().map(p -> p.name()).collect(joining(", ")) ); @@ -235,9 +256,15 @@ private CodeBlock initInterState() { private MethodSpec ctor() { MethodSpec.Builder builder = MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); + if (warnExceptions.isEmpty() == false) { + builder.addParameter(COMPUTE_WARNINGS, "warnings"); + } builder.addParameter(LIST_INTEGER, "channels"); builder.addParameter(stateType, "state"); builder.addParameter(DRIVER_CONTEXT, "driverContext"); + if (warnExceptions.isEmpty() == false) { + builder.addStatement("this.warnings = warnings"); + } builder.addStatement("this.channels = channels"); builder.addStatement("this.state = state"); builder.addStatement("this.driverContext = driverContext"); @@ -337,16 +364,21 @@ private MethodSpec addRawInputLoop(TypeName groupsType, TypeName valuesType) { builder.beginControlFlow("for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++)"); { if (groupsIsBlock) { - // TODO we can drop this once we stop sending null group keys builder.beginControlFlow("if (groups.isNull(groupPosition))"); builder.addStatement("continue"); builder.endControlFlow(); builder.addStatement("int groupStart = groups.getFirstValueIndex(groupPosition)"); builder.addStatement("int groupEnd = groupStart + groups.getValueCount(groupPosition)"); builder.beginControlFlow("for (int g = groupStart; g < groupEnd; g++)"); - builder.addStatement("int groupId = Math.toIntExact(groups.getInt(g))"); + builder.addStatement("int groupId = groups.getInt(g)"); } else { - builder.addStatement("int groupId = Math.toIntExact(groups.getInt(groupPosition))"); + builder.addStatement("int groupId = groups.getInt(groupPosition)"); + } + + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("if (state.hasFailed(groupId))"); + builder.addStatement("continue"); + builder.endControlFlow(); } if (valuesIsBlock) { @@ -371,31 +403,35 @@ private MethodSpec addRawInputLoop(TypeName groupsType, TypeName valuesType) { } private void combineRawInput(MethodSpec.Builder builder, String blockVariable, String offsetVariable) { - if (valuesIsBytesRef) { - combineRawInputForBytesRef(builder, blockVariable, offsetVariable); - return; - } - if (includeTimestampVector) { - combineRawInputWithTimestamp(builder, offsetVariable); - return; - } TypeName valueType = TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType()); - if (valueType.isPrimitive() == false) { - throw new IllegalArgumentException("second parameter to combine must be a primitive"); - } String secondParameterGetter = "get" + 
valueType.toString().substring(0, 1).toUpperCase(Locale.ROOT) + valueType.toString().substring(1); TypeName returnType = TypeName.get(combine.getReturnType()); - if (returnType.isPrimitive()) { - combineRawInputForPrimitive(builder, secondParameterGetter, blockVariable, offsetVariable); - return; + + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); } - if (returnType == TypeName.VOID) { + if (valuesIsBytesRef) { + combineRawInputForBytesRef(builder, blockVariable, offsetVariable); + } else if (includeTimestampVector) { + combineRawInputWithTimestamp(builder, offsetVariable); + } else if (valueType.isPrimitive() == false) { + throw new IllegalArgumentException("second parameter to combine must be a primitive"); + } else if (returnType.isPrimitive()) { + combineRawInputForPrimitive(builder, secondParameterGetter, blockVariable, offsetVariable); + } else if (returnType == TypeName.VOID) { combineRawInputForVoid(builder, secondParameterGetter, blockVariable, offsetVariable); - return; + } else { + throw new IllegalArgumentException("combine must return void or a primitive"); + } + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.setFailed(groupId)"); + builder.endControlFlow(); } - throw new IllegalArgumentException("combine must return void or a primitive"); } private void combineRawInputForPrimitive( @@ -479,22 +515,42 @@ private MethodSpec addIntermediateInput() { } builder.beginControlFlow("for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++)"); { - builder.addStatement("int groupId = Math.toIntExact(groups.getInt(groupPosition))"); + builder.addStatement("int groupId = groups.getInt(groupPosition)"); if (hasPrimitiveState()) { - assert intermediateState.size() == 2; - assert intermediateState.get(1).name().equals("seen"); - builder.beginControlFlow("if (seen.getBoolean(groupPosition + positionOffset))"); - { - var name = intermediateState.get(0).name(); - var m = vectorAccessorName(intermediateState.get(0).elementType()); - builder.addStatement( - "state.set(groupId, $T.combine(state.getOrDefault(groupId), $L.$L(groupPosition + positionOffset)))", - declarationType, - name, - m - ); + if (warnExceptions.isEmpty()) { + assert intermediateState.size() == 2; + assert intermediateState.get(1).name().equals("seen"); + builder.beginControlFlow("if (seen.getBoolean(groupPosition + positionOffset))"); + } else { + assert intermediateState.size() == 3; + assert intermediateState.get(1).name().equals("seen"); + assert intermediateState.get(2).name().equals("failed"); + builder.beginControlFlow("if (failed.getBoolean(groupPosition + positionOffset))"); + { + builder.addStatement("state.setFailed(groupId)"); + } + builder.nextControlFlow("else if (seen.getBoolean(groupPosition + positionOffset))"); + } + + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); + } + var name = intermediateState.get(0).name(); + var vectorAccessor = vectorAccessorName(intermediateState.get(0).elementType()); + builder.addStatement( + "state.set(groupId, $T.combine(state.getOrDefault(groupId), $L.$L(groupPosition + positionOffset)))", + declarationType, + name, + vectorAccessor + ); + if (warnExceptions.isEmpty() == false) { + String catchPattern = 
"catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.setFailed(groupId)"); builder.endControlFlow(); } + builder.endControlFlow(); } else { builder.addStatement("$T.combineIntermediate(state, groupId, " + intermediateStateRowAccess() + ")", declarationType); } @@ -582,12 +638,11 @@ private MethodSpec close() { return builder.build(); } + private static final Pattern PRIMITIVE_STATE_PATTERN = Pattern.compile( + "org.elasticsearch.compute.aggregation.(Boolean|Int|Long|Double|Float)(Fallible)?ArrayState" + ); + private boolean hasPrimitiveState() { - return switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.BooleanArrayState", "org.elasticsearch.compute.aggregation.IntArrayState", - "org.elasticsearch.compute.aggregation.LongArrayState", "org.elasticsearch.compute.aggregation.DoubleArrayState", - "org.elasticsearch.compute.aggregation.FloatArrayState" -> true; - default -> false; - }; + return PRIMITIVE_STATE_PATTERN.matcher(stateType.toString()).matches(); } } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java index 3150741ddcb05..096d0b86e6cff 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java @@ -27,6 +27,8 @@ public class Types { private static final String OPERATOR_PACKAGE = PACKAGE + ".operator"; private static final String DATA_PACKAGE = PACKAGE + ".data"; + static final TypeName STRING = ClassName.get("java.lang", "String"); + static final TypeName LIST_INTEGER = ParameterizedTypeName.get(ClassName.get(List.class), TypeName.INT.box()); static final ClassName PAGE = ClassName.get(DATA_PACKAGE, "Page"); @@ -34,6 +36,7 @@ public class Types { static final TypeName BLOCK_ARRAY = ArrayTypeName.of(BLOCK); static final ClassName VECTOR = ClassName.get(DATA_PACKAGE, "Vector"); + static final ClassName CIRCUIT_BREAKER = ClassName.get("org.elasticsearch.common.breaker", "CircuitBreaker"); static final ClassName BIG_ARRAYS = ClassName.get("org.elasticsearch.common.util", "BigArrays"); static final ClassName BOOLEAN_BLOCK = ClassName.get(DATA_PACKAGE, "BooleanBlock"); @@ -127,6 +130,11 @@ public class Types { ); static final ClassName WARNINGS = ClassName.get("org.elasticsearch.xpack.esql.expression.function", "Warnings"); + /** + * Warnings class used in compute module. + * It uses no external dependencies (Like Warnings and Source). + */ + static final ClassName COMPUTE_WARNINGS = ClassName.get("org.elasticsearch.compute.aggregation", "Warnings"); static final ClassName SOURCE = ClassName.get("org.elasticsearch.xpack.esql.core.tree", "Source"); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java new file mode 100644 index 0000000000000..6367fdfb6617e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of booleans, that also tracks failures. + * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
<p>
+ * This class is generated. Do not edit it. + *
</p>
+ */ +final class BooleanFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final boolean init; + + private BitArray values; + private int size; + + BooleanFallibleArrayState(BigArrays bigArrays, boolean init) { + super(bigArrays); + this.values = new BitArray(1, bigArrays); + this.size = 1; + this.values.set(0, init); + this.init = init; + } + + boolean get(int groupId) { + return values.get(groupId); + } + + boolean getOrDefault(int groupId) { + return groupId < size ? values.get(groupId) : init; + } + + void set(int groupId, boolean value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendBoolean(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendBoolean(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= size) { + values.fill(size, groupId + 1, init); + size = groupId + 1; + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < size) { + valuesBuilder.appendBoolean(values.get(group)); + } else { + valuesBuilder.appendBoolean(false); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleState.java new file mode 100644 index 0000000000000..073f31c390a6f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single boolean. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class BooleanFallibleState implements AggregatorState { + private boolean value; + private boolean seen; + private boolean failed; + + BooleanFallibleState(boolean init) { + this.value = init; + } + + boolean booleanValue() { + return value; + } + + void booleanValue(boolean value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantBooleanBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java index 7d225c7c06a72..ba4d133dee553 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java @@ -18,10 +18,6 @@ final class BooleanState implements AggregatorState { private boolean value; private boolean seen; - BooleanState() { - this(false); - } - BooleanState(boolean init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java new file mode 100644 index 0000000000000..dd1d60f7bd246 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of doubles, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
<p>
+ * This class is generated. Do not edit it. + *
</p>
+ */ +final class DoubleFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final double init; + + private DoubleArray values; + + DoubleFallibleArrayState(BigArrays bigArrays, double init) { + super(bigArrays); + this.values = bigArrays.newDoubleArray(1, false); + this.values.set(0, init); + this.init = init; + } + + double get(int groupId) { + return values.get(groupId); + } + + double getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, double value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendDouble(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendDouble(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendDouble(values.get(group)); + } else { + valuesBuilder.appendDouble(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleState.java new file mode 100644 index 0000000000000..4cdeddec724bf --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single double. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class DoubleFallibleState implements AggregatorState { + private double value; + private boolean seen; + private boolean failed; + + DoubleFallibleState(double init) { + this.value = init; + } + + double doubleValue() { + return value; + } + + void doubleValue(double value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantDoubleBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java index f1c92c685bcab..90ecc2c1d3c03 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java @@ -18,10 +18,6 @@ final class DoubleState implements AggregatorState { private double value; private boolean seen; - DoubleState() { - this(0); - } - DoubleState(double init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java new file mode 100644 index 0000000000000..055cf345033c5 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.FloatArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of floats, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
<p> + * This class is generated. Do not edit it. + * </p>
+ */ +final class FloatFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final float init; + + private FloatArray values; + + FloatFallibleArrayState(BigArrays bigArrays, float init) { + super(bigArrays); + this.values = bigArrays.newFloatArray(1, false); + this.values.set(0, init); + this.init = init; + } + + float get(int groupId) { + return values.get(groupId); + } + + float getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, float value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newFloatVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendFloat(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (FloatBlock.Builder builder = driverContext.blockFactory().newFloatBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendFloat(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newFloatBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendFloat(values.get(group)); + } else { + valuesBuilder.appendFloat(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleState.java new file mode 100644 index 0000000000000..b050c86258dcd --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
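Alongside the per-group array states, the diff adds single-value fallible states (DoubleFallibleState above, FloatFallibleState next, and int/long variants). Each keeps a value, a seen flag, and a new failed flag, and toIntermediate always emits three single-position blocks. The following is a small sketch of that value/seen/failed triple under assumed names, with an Object[] standing in for the Block array; it is illustrative only, not the generated code.

```java
import java.util.Arrays;

// Sketch of a single-value "fallible" aggregator state: value + seen + failed,
// serialized as three positions of intermediate output (the real code builds
// three constant Blocks via the BlockFactory).
final class FallibleScalarSketch {
    private double value;
    private boolean seen;
    private boolean failed;

    void add(double v) {
        if (failed) {
            return;                            // a failed aggregation stays failed
        }
        value = seen ? Math.max(value, v) : v; // e.g. a MAX-style combine
        seen = true;
    }

    void fail() {
        failed = true;
    }

    /** Mirrors toIntermediate: [offset] = value, [offset + 1] = seen, [offset + 2] = failed. */
    void toIntermediate(Object[] blocks, int offset) {
        assert blocks.length >= offset + 3;
        blocks[offset] = value;
        blocks[offset + 1] = seen;
        blocks[offset + 2] = failed;
    }

    public static void main(String[] args) {
        FallibleScalarSketch state = new FallibleScalarSketch();
        state.add(3.0);
        state.add(7.0);
        Object[] blocks = new Object[3];
        state.toIntermediate(blocks, 0);
        System.out.println(Arrays.toString(blocks)); // [7.0, true, false]
    }
}
```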
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single float. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class FloatFallibleState implements AggregatorState { + private float value; + private boolean seen; + private boolean failed; + + FloatFallibleState(float init) { + this.value = init; + } + + float floatValue() { + return value; + } + + void floatValue(float value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantFloatBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java index 81bdd39e51b6e..6f608271b6e42 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java @@ -18,10 +18,6 @@ final class FloatState implements AggregatorState { private float value; private boolean seen; - FloatState() { - this(0); - } - FloatState(float init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleArrayState.java new file mode 100644 index 0000000000000..e45d84720ca1a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleArrayState.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of ints, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
<p> + * This class is generated. Do not edit it. + * </p>
+ */ +final class IntFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final int init; + + private IntArray values; + + IntFallibleArrayState(BigArrays bigArrays, int init) { + super(bigArrays); + this.values = bigArrays.newIntArray(1, false); + this.values.set(0, init); + this.init = init; + } + + int get(int groupId) { + return values.get(groupId); + } + + int getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, int value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newIntVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendInt(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendInt(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendInt(values.get(group)); + } else { + valuesBuilder.appendInt(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleState.java new file mode 100644 index 0000000000000..360f3fdb009e4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
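A detail shared by all of the new *FallibleArrayState classes is the split inside toValuesBlock: when group-id tracking is off and no failure was recorded, results can be emitted as a dense fixed-size vector, otherwise a nullable block is built with null for unseen or failed groups. Below is a rough sketch of that branching with plain arrays in place of Vectors and Blocks; all names here are invented for illustration.

```java
import java.util.Arrays;

final class ToValuesSketch {
    static Object buildResults(long[] values, int[] selected, boolean trackingGroupIds,
                               boolean anyFailure, boolean[] hasValue, boolean[] hasFailed) {
        if (!trackingGroupIds && !anyFailure) {
            // Fast path: every selected group is known to hold a valid value, emit a dense "vector".
            long[] out = new long[selected.length];
            for (int i = 0; i < selected.length; i++) {
                out[i] = values[selected[i]];
            }
            return out;
        }
        // Null-aware path: some groups may be missing or failed, emit a nullable "block".
        Long[] out = new Long[selected.length];
        for (int i = 0; i < selected.length; i++) {
            int group = selected[i];
            out[i] = hasValue[group] && !hasFailed[group] ? values[group] : null;
        }
        return out;
    }

    public static void main(String[] args) {
        long[] values = { 10, 20, 30 };
        int[] selected = { 2, 0 };
        System.out.println(Arrays.toString((long[]) buildResults(values, selected, false, false, null, null)));
        // [30, 10]
        boolean[] hasValue = { true, false, true };
        boolean[] hasFailed = { false, false, true };
        System.out.println(Arrays.toString((Long[]) buildResults(values, selected, true, true, hasValue, hasFailed)));
        // [null, 10] : group 2 failed, group 0 is valid
    }
}
```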
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single int. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class IntFallibleState implements AggregatorState { + private int value; + private boolean seen; + private boolean failed; + + IntFallibleState(int init) { + this.value = init; + } + + int intValue() { + return value; + } + + void intValue(int value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantIntBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java index e7db40eccf9c8..c539c576ef36d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java @@ -18,10 +18,6 @@ final class IntState implements AggregatorState { private int value; private boolean seen; - IntState() { - this(0); - } - IntState(int init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleArrayState.java new file mode 100644 index 0000000000000..cb69579906871 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleArrayState.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of longs, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
<p> + * This class is generated. Do not edit it. + * </p>
+ */ +final class LongFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final long init; + + private LongArray values; + + LongFallibleArrayState(BigArrays bigArrays, long init) { + super(bigArrays); + this.values = bigArrays.newLongArray(1, false); + this.values.set(0, init); + this.init = init; + } + + long get(int groupId) { + return values.get(groupId); + } + + long getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, long value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + void increment(int groupId, long value) { + ensureCapacity(groupId); + values.increment(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newLongVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendLong(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendLong(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendLong(values.get(group)); + } else { + valuesBuilder.appendLong(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleState.java new file mode 100644 index 0000000000000..98669ef627d04 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single long. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class LongFallibleState implements AggregatorState { + private long value; + private boolean seen; + private boolean failed; + + LongFallibleState(long init) { + this.value = init; + } + + long longValue() { + return value; + } + + void longValue(long value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantLongBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java index da78b649782d5..e9d97dcfe7fc1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java @@ -18,10 +18,6 @@ final class LongState implements AggregatorState { private long value; private boolean seen; - LongState() { - this(0); - } - LongState(long init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java index a12677e70e8a9..98e57b71db416 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int 
groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector tbit = ((BooleanBlock) tbitUncast).asVector(); assert fbit.getPositionCount() == tbit.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctBooleanAggregator.combineIntermediate(state, groupId, fbit.getBoolean(groupPosition + positionOffset), tbit.getBoolean(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java index 4879df5cf1c2c..35fd83598b9d6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java @@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = 
groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctBytesRefAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java index 1e0ce58377f9e..894b81b311363 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctDoubleAggregator.combine(state, groupId, 
values.getDouble(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctDoubleAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java index 60c1755b88c6a..5f6b4211e6c5e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctFloatAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java index 99e6ace52b256..83300393e560d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java @@ -93,7 +93,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -107,7 +107,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -141,7 +141,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -158,7 +158,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctIntAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java index 85f823296c886..44e9fefb3161c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, 
IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctLongAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java index f404fccd45d51..084e346a7b093 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = 
Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), max.getBoolean(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunction.java new file mode 100644 index 0000000000000..62897c61ea80e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunction.java @@ -0,0 +1,133 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MaxBytesRefAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MaxBytesRefAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.BYTES_REF), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final MaxBytesRefAggregator.SingleState state; + + private final List channels; + + public MaxBytesRefAggregatorFunction(DriverContext driverContext, List channels, + MaxBytesRefAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MaxBytesRefAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MaxBytesRefAggregatorFunction(driverContext, channels, MaxBytesRefAggregator.initSingle(driverContext)); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + BytesRefBlock block = page.getBlock(channels.get(0)); + BytesRefVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(BytesRefVector vector) { + BytesRef scratch = new BytesRef(); + for (int i = 0; i < vector.getPositionCount(); i++) { + MaxBytesRefAggregator.combine(state, vector.getBytesRef(i, scratch)); + } + } + + private void addRawBlock(BytesRefBlock block) { + BytesRef scratch = new BytesRef(); + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + MaxBytesRefAggregator.combine(state, block.getBytesRef(i, scratch)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + BytesRefVector max = ((BytesRefBlock) maxUncast).asVector(); + assert max.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + BytesRef scratch = new BytesRef(); + MaxBytesRefAggregator.combineIntermediate(state, max.getBytesRef(0, scratch), seen.getBoolean(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = MaxBytesRefAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionSupplier.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..7c8af2e0c7e6d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MaxBytesRefAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxBytesRefAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public MaxBytesRefAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public MaxBytesRefAggregatorFunction aggregator(DriverContext driverContext) { + return MaxBytesRefAggregatorFunction.create(driverContext, channels); + } + + @Override + public MaxBytesRefGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MaxBytesRefGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "max of bytes"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..a50cf8593a6e1 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MaxBytesRefAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MaxBytesRefGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.BYTES_REF), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final MaxBytesRefAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public MaxBytesRefGroupingAggregatorFunction(List channels, + MaxBytesRefAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MaxBytesRefGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new MaxBytesRefGroupingAggregatorFunction(channels, MaxBytesRefAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + BytesRefBlock valuesBlock = page.getBlock(channels.get(0)); + BytesRefVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < 
groupEnd; g++) { + int groupId = groups.getInt(g); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + BytesRefVector max = ((BytesRefBlock) maxUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert max.getPositionCount() == seen.getPositionCount(); + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + MaxBytesRefAggregator.combineIntermediate(state, groupId, max.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + MaxBytesRefAggregator.GroupingState inState = ((MaxBytesRefGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + MaxBytesRefAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = MaxBytesRefAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java index da93320eaf96e..b874bc43dc238 100644 --- 
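The two new generated files above give ESQL a max() implementation for BytesRef-valued columns: the supplier binds a list of input channel numbers and hands out either the single-state or the grouping aggregator on demand. A minimal usage sketch follows, grounded only in what this diff shows (the constructor taking a channel list, the "max of bytes" description, and the two factory methods); how a real DriverContext is obtained from the compute engine is not shown here and is assumed to come from the surrounding operator.

```java
import java.util.List;

import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier;
import org.elasticsearch.compute.aggregation.MaxBytesRefAggregatorFunctionSupplier;

class MaxBytesRefSupplierSketch {
    static AggregatorFunctionSupplier maxOfBytes() {
        // Channel 0 is assumed to be the input block carrying the BytesRef values to aggregate.
        AggregatorFunctionSupplier supplier = new MaxBytesRefAggregatorFunctionSupplier(List.of(0));
        assert "max of bytes".equals(supplier.describe());
        // supplier.aggregator(driverContext) returns MaxBytesRefAggregatorFunction;
        // supplier.groupingAggregator(driverContext) returns the grouping variant shown above.
        return supplier;
    }
}
```

The supplier pattern keeps channel wiring (which input block to read) separate from per-driver state allocation, which the static create(...) factories above perform through the DriverContext.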
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxDoubleAggregator.combine(state.getOrDefault(groupId), values.getDouble(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxDoubleAggregator.combine(state.getOrDefault(groupId), values.getDouble(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxDoubleAggregator.combine(state.getOrDefault(groupId), max.getDouble(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java index 85708792732a7..f3ebd468ebc72 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = 
Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), max.getFloat(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java index c8b1b6910c0aa..8b364e7a02e96 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); 
state.set(groupId, MaxIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset))); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset))); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxIntAggregator.combine(state.getOrDefault(groupId), max.getInt(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java index c556b23215e6b..a722d95f3b108 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java @@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MaxIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition 
+ positionOffset)) { continue; } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MaxIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -168,7 +168,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page assert max.getPositionCount() == seen.getPositionCount(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MaxIpAggregator.combineIntermediate(state, groupId, max.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java index 41d893f9bbf0c..fee2f5a9c2e7c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page 
BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxLongAggregator.combine(state.getOrDefault(groupId), max.getLong(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java index e08488685d2cb..836248428f231 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MedianAbsoluteDeviationDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -157,7 +157,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationDoubleAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git 
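The hunks above for the Max* and MedianAbsoluteDeviation* grouping functions, and the analogous hunks below for the Min* and Percentile* variants, are one mechanical cleanup: group ids are read from an IntVector or IntBlock, whose getInt already returns a primitive int, so wrapping the read in Math.toIntExact only added a redundant long-to-int narrowing check. A small sketch of the before/after shape of these hunks; the helper method names are hypothetical and used only for illustration.

```java
import org.elasticsearch.compute.data.IntVector;

class GroupIdReadSketch {
    // Before: the generated code narrowed defensively, even though getInt already yields an int.
    static int groupIdBefore(IntVector groups, int groupPosition) {
        return Math.toIntExact(groups.getInt(groupPosition)); // widen to long, check, narrow back
    }

    // After: the generator emits the read directly.
    static int groupIdAfter(IntVector groups, int groupPosition) {
        return groups.getInt(groupPosition);
    }
}
```

The change is behavior-preserving: Math.toIntExact only throws for values outside the int range, which cannot happen for a value that is already an int.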
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java index 84646476fcee0..7a67f0d3449f0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MedianAbsoluteDeviationFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -157,7 +157,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationFloatAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java index 02866ee15b961..315034a28ff8f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MedianAbsoluteDeviationIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -155,7 +155,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationIntAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java index 36c40e10e54d5..af0374012be52 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ 
-106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MedianAbsoluteDeviationLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -157,7 +157,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationLongAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java index 6175cad3924e2..45e677ee25b56 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value int 
groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert min.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), min.getBoolean(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunction.java new file mode 100644 index 0000000000000..3346dd762f17f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunction.java @@ -0,0 +1,133 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MinBytesRefAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MinBytesRefAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.BYTES_REF), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final MinBytesRefAggregator.SingleState state; + + private final List channels; + + public MinBytesRefAggregatorFunction(DriverContext driverContext, List channels, + MinBytesRefAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MinBytesRefAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MinBytesRefAggregatorFunction(driverContext, channels, MinBytesRefAggregator.initSingle(driverContext)); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + BytesRefBlock block = page.getBlock(channels.get(0)); + BytesRefVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(BytesRefVector vector) { + BytesRef scratch = new BytesRef(); + for (int i = 0; i < vector.getPositionCount(); i++) { + MinBytesRefAggregator.combine(state, vector.getBytesRef(i, scratch)); + } + } + + private void addRawBlock(BytesRefBlock block) { + BytesRef scratch = new BytesRef(); + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + MinBytesRefAggregator.combine(state, block.getBytesRef(i, scratch)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + BytesRefVector min = ((BytesRefBlock) minUncast).asVector(); + assert min.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + BytesRef scratch = new BytesRef(); + MinBytesRefAggregator.combineIntermediate(state, min.getBytesRef(0, scratch), seen.getBoolean(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = MinBytesRefAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionSupplier.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..cb6ab0d06d401 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MinBytesRefAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinBytesRefAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List<Integer> channels; + + public MinBytesRefAggregatorFunctionSupplier(List<Integer> channels) { + this.channels = channels; + } + + @Override + public MinBytesRefAggregatorFunction aggregator(DriverContext driverContext) { + return MinBytesRefAggregatorFunction.create(driverContext, channels); + } + + @Override + public MinBytesRefGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MinBytesRefGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "min of bytes"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..e092dd93210f6 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MinBytesRefAggregator}. + * This class is generated. Do not edit it.
+ */ +public final class MinBytesRefGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.BYTES_REF), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final MinBytesRefAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public MinBytesRefGroupingAggregatorFunction(List channels, + MinBytesRefAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MinBytesRefGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new MinBytesRefGroupingAggregatorFunction(channels, MinBytesRefAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + BytesRefBlock valuesBlock = page.getBlock(channels.get(0)); + BytesRefVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < 
groupEnd; g++) { + int groupId = groups.getInt(g); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + BytesRefVector min = ((BytesRefBlock) minUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert min.getPositionCount() == seen.getPositionCount(); + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + MinBytesRefAggregator.combineIntermediate(state, groupId, min.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + MinBytesRefAggregator.GroupingState inState = ((MinBytesRefGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + MinBytesRefAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = MinBytesRefAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java index 7d0374b3d21f7..970a8a7597514 100644 --- 
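Like the MaxBytesRef pair earlier in this change, the new MinBytesRef function, supplier, and grouping function ship their partial result as two blocks, "min" (BYTES_REF) and "seen" (BOOLEAN); the non-grouping variant additionally asserts that each intermediate block holds exactly one position. A small sketch of inspecting that contract through the public static helpers declared above; this is illustrative and not part of the PR.

```java
import org.elasticsearch.compute.aggregation.MinBytesRefAggregatorFunction;
import org.elasticsearch.compute.aggregation.MinBytesRefGroupingAggregatorFunction;

class MinBytesRefIntermediateSketch {
    static void checkIntermediateShape() {
        // Both variants describe the same two-block intermediate state: "min" then "seen".
        var single = MinBytesRefAggregatorFunction.intermediateStateDesc();
        var grouping = MinBytesRefGroupingAggregatorFunction.intermediateStateDesc();
        assert single.size() == 2 && grouping.size() == 2; // matches intermediateBlockCount()
    }
}
```

The grouping variant relaxes the single-position assertion because it receives one intermediate position per group rather than one per aggregator.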
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MinDoubleAggregator.combine(state.getOrDefault(groupId), values.getDouble(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MinDoubleAggregator.combine(state.getOrDefault(groupId), values.getDouble(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert min.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MinDoubleAggregator.combine(state.getOrDefault(groupId), min.getDouble(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java index 2f00bbf1335ed..4e8b4cc9417c8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = 
Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert min.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), min.getFloat(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java index 6625fd327237b..6e976a582a892 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); 
state.set(groupId, MinIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset))); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MinIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset))); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert min.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MinIntAggregator.combine(state.getOrDefault(groupId), min.getInt(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java index 5b51f041bd966..146515d363af7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java @@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MinIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition 
+ positionOffset)) { continue; } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MinIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -168,7 +168,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page assert max.getPositionCount() == seen.getPositionCount(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MinIpAggregator.combineIntermediate(state, groupId, max.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java index f0c3727d7db0b..a3db9a2704660 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MinLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MinLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page 
BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert min.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MinLongAggregator.combine(state.getOrDefault(groupId), min.getLong(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java index 9d486b9614dab..871e93a72d900 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); PercentileDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); PercentileDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); PercentileDoubleAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java index 564e0e90018c2..8b0f28b2632d1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); PercentileFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); PercentileFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); PercentileFloatAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java index 8c2bd7091143f..fc1031dcbe0d0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java @@ -93,7 +93,7 @@ public void add(int positionOffset, IntVector 
groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -107,7 +107,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); PercentileIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -141,7 +141,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); PercentileIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -158,7 +158,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); PercentileIntAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java index c1c332ba0094d..1b14f02356b8f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = 
Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); PercentileLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); PercentileLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); PercentileLongAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java index 8d9e011891e95..c85cf78a39c45 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java @@ -103,7 +103,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values, LongVector timestamps) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -118,7 +118,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values, LongVector timestamps) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); var valuePosition = groupPosition + positionOffset; RateDoubleAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getDouble(valuePosition)); } @@ -133,7 +133,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); 
if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -155,7 +155,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); var valuePosition = groupPosition + positionOffset; RateDoubleAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getDouble(valuePosition)); } @@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); RateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java index 40f53741bf3da..a5d2131a2445a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java @@ -105,7 +105,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values, LongVector timestamps) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values, LongVector timestamps) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); var valuePosition = groupPosition + positionOffset; RateFloatAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getFloat(valuePosition)); } @@ -135,7 +135,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values, int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -157,7 +157,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = 
Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); var valuePosition = groupPosition + positionOffset; RateFloatAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getFloat(valuePosition)); } @@ -185,7 +185,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); RateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java index 6bd4b833dc9e6..0fb0b05c11164 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java @@ -103,7 +103,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values, LongVector timestamps) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -118,7 +118,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values, private void addRawInput(int positionOffset, IntVector groups, IntVector values, LongVector timestamps) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); var valuePosition = groupPosition + positionOffset; RateIntAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getInt(valuePosition)); } @@ -133,7 +133,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values, int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -155,7 +155,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values, int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); var valuePosition = groupPosition + positionOffset; RateIntAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getInt(valuePosition)); } @@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page DoubleVector resets = ((DoubleBlock) 
resetsUncast).asVector(); assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); RateIntAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java index 27318d6496737..82297b618b03e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java @@ -103,7 +103,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values, LongVector timestamps) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -118,7 +118,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values, private void addRawInput(int positionOffset, IntVector groups, LongVector values, LongVector timestamps) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); var valuePosition = groupPosition + positionOffset; RateLongAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getLong(valuePosition)); } @@ -133,7 +133,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values, int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -155,7 +155,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values, int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); var valuePosition = groupPosition + positionOffset; RateLongAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getLong(valuePosition)); } @@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); 
RateLongAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java index 5085cfc3bebcf..4f0bcae66ee4a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java @@ -93,7 +93,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -107,7 +107,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SumDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -141,7 +141,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SumDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -168,7 +168,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert value.getPositionCount() == delta.getPositionCount() && value.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SumDoubleAggregator.combineIntermediate(state, groupId, value.getDouble(groupPosition + positionOffset), delta.getDouble(groupPosition + positionOffset), seen.getBoolean(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java index c69ce16f0bccb..2f4165dfeadfa 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SumFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SumFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -170,7 +170,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert value.getPositionCount() == delta.getPositionCount() && value.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SumFloatAggregator.combineIntermediate(state, groupId, value.getDouble(groupPosition + positionOffset), delta.getDouble(groupPosition + positionOffset), seen.getBoolean(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java index 6891fe548908f..95d380c455bf4 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = 
Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, SumIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, SumIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert sum.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, SumIntAggregator.combine(state.getOrDefault(groupId), sum.getLong(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java index 507aa343aa74e..324d8f53e65cb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, 
SumLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, SumLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert sum.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, SumLongAggregator.combine(state.getOrDefault(groupId), sum.getLong(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java index 53b5149e4da7e..d169c456329b7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java @@ -96,7 +96,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -110,7 +110,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -144,7 +144,7 @@ private void 
addRawInput(int positionOffset, IntBlock groups, BooleanVector valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } BooleanBlock top = (BooleanBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopBooleanAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java index c54dce5715846..07da387f88ce6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java @@ -96,7 +96,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -110,7 +110,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -144,7 +144,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } DoubleBlock top = (DoubleBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = 
groups.getInt(groupPosition); TopDoubleAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java index 4c00f4d2c237d..369fa7401e508 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java @@ -96,7 +96,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -110,7 +110,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -144,7 +144,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } FloatBlock top = (FloatBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopFloatAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java index 37384238b7297..04b53fe6aab69 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java @@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) { private 
void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -108,7 +108,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -121,7 +121,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -142,7 +142,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -158,7 +158,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } IntBlock top = (IntBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopIntAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java index d9e480c324676..272b4827b5817 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java @@ -98,7 +98,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -113,7 +113,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); 
TopIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -127,7 +127,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -149,7 +149,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -166,7 +166,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefBlock top = (BytesRefBlock) topUncast; BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopIpAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java index 7b199b2a81389..9d1ed395c5964 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java @@ -96,7 +96,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -110,7 +110,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -144,7 +144,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + 
groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } LongBlock top = (LongBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopLongAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java index 16e92a7c69ca8..062a49dbf4f7c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java @@ -89,7 +89,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -116,7 +116,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -137,7 +137,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -153,7 +153,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } BooleanBlock values = (BooleanBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesBooleanAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java index f9a51fcc52221..0a929913e9fde 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java @@ -91,7 +91,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -142,7 +142,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -159,7 +159,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefBlock values = (BytesRefBlock) valuesUncast; BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesBytesRefAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java index 11a0eb96c6a8e..b8ca2d2b9665b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java @@ -89,7 
+89,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -116,7 +116,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -137,7 +137,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -153,7 +153,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } DoubleBlock values = (DoubleBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesDoubleAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java index 54cc06072cd24..0c4e9c32328c7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java @@ -89,7 +89,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = 
Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -116,7 +116,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -137,7 +137,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -153,7 +153,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } FloatBlock values = (FloatBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesFloatAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java index 67722cd1318c0..95e527c018cd1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java @@ -87,7 +87,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -101,7 +101,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -114,7 +114,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -135,7 +135,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = 
groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -151,7 +151,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } IntBlock values = (IntBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesIntAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java index 06508ce360ba4..a7963447037a8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java @@ -89,7 +89,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -116,7 +116,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -137,7 +137,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -153,7 +153,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } LongBlock values = (LongBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesLongAggregator.combineIntermediate(state, groupId, values, 
groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java index 795207b245023..dc3c1cf2917ec 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java @@ -98,7 +98,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -112,7 +112,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -125,7 +125,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -146,7 +146,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page LongVector count = ((LongBlock) countUncast).asVector(); assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidCartesianPointDocValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(groupPosition + positionOffset), xDel.getDouble(groupPosition + positionOffset), yVal.getDouble(groupPosition + positionOffset), yDel.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); } } diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java index 12c0f24ef43e3..0d1378ce988f3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java @@ -102,7 +102,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidCartesianPointSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -131,7 +131,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -153,7 +153,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SpatialCentroidCartesianPointSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -190,7 +190,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page LongVector count = ((LongBlock) countUncast).asVector(); assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidCartesianPointSourceValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(groupPosition + positionOffset), xDel.getDouble(groupPosition + positionOffset), yVal.getDouble(groupPosition + positionOffset), yDel.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + 
positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java index 2447939d56db9..f5604e9e23200 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java @@ -98,7 +98,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -112,7 +112,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidGeoPointDocValuesAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -125,7 +125,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -146,7 +146,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SpatialCentroidGeoPointDocValuesAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page LongVector count = ((LongBlock) countUncast).asVector(); assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidGeoPointDocValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(groupPosition + positionOffset), xDel.getDouble(groupPosition + positionOffset), yVal.getDouble(groupPosition + positionOffset), yDel.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); } } diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java index 075f8749503b8..b3caeef925a73 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java @@ -102,7 +102,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidGeoPointSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -131,7 +131,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -153,7 +153,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SpatialCentroidGeoPointSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -190,7 +190,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page LongVector count = ((LongBlock) countUncast).asVector(); assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidGeoPointSourceValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(groupPosition + positionOffset), xDel.getDouble(groupPosition + positionOffset), yVal.getDouble(groupPosition + positionOffset), yDel.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); } } diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java index 0dc008cb22396..f9962922cc4a7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java @@ -12,6 +12,13 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +/** + * Base class for array states that track which group ids have been set. + * Most of this class subclasses are autogenerated. + *
+ */ public class AbstractArrayState implements Releasable { protected final BigArrays bigArrays; @@ -21,7 +28,7 @@ public AbstractArrayState(BigArrays bigArrays) { this.bigArrays = bigArrays; } - final boolean hasValue(int groupId) { + boolean hasValue(int groupId) { return seen == null || seen.get(groupId); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java new file mode 100644 index 0000000000000..d5ad3189e2f9e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.core.Releasables; + +/** + * Base class that extends {@link AbstractArrayState} to add failure tracking. + * That is, when a group id fails, it is marked as failed in the state. + *
+ * Most of this class subclasses are autogenerated. + *
+ */ +public class AbstractFallibleArrayState extends AbstractArrayState { + private BitArray failed; + + public AbstractFallibleArrayState(BigArrays bigArrays) { + super(bigArrays); + } + + final boolean hasFailed(int groupId) { + return failed != null && failed.get(groupId); + } + + protected final boolean anyFailure() { + return failed != null; + } + + protected final void setFailed(int groupId) { + if (failed == null) { + failed = new BitArray(groupId + 1, bigArrays); + } + failed.set(groupId); + } + + @Override + public void close() { + super.close(); + Releasables.close(failed); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/BytesRefArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/BytesRefArrayState.java new file mode 100644 index 0000000000000..eb0a992c8610f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/BytesRefArrayState.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of BytesRefs. It is created in a mode where it + * won't track the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
+ * This class is a specialized version of the {@code X-ArrayState.java.st} template. + *
+ */ +public final class BytesRefArrayState implements GroupingAggregatorState, Releasable { + private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private final String breakerLabel; + private ObjectArray values; + /** + * If false, no group id is expected to have nulls. + * If true, they may have nulls. + */ + private boolean groupIdTrackingEnabled; + + BytesRefArrayState(BigArrays bigArrays, CircuitBreaker breaker, String breakerLabel) { + this.bigArrays = bigArrays; + this.breaker = breaker; + this.breakerLabel = breakerLabel; + this.values = bigArrays.newObjectArray(0); + } + + BytesRef get(int groupId) { + return values.get(groupId).bytesRefView(); + } + + void set(int groupId, BytesRef value) { + ensureCapacity(groupId); + + var currentBuilder = values.get(groupId); + if (currentBuilder == null) { + currentBuilder = new BreakingBytesRefBuilder(breaker, breakerLabel, value.length); + values.set(groupId, currentBuilder); + } + + currentBuilder.copyBytes(value); + } + + Block toValuesBlock(IntVector selected, DriverContext driverContext) { + if (false == groupIdTrackingEnabled) { + try (var builder = driverContext.blockFactory().newBytesRefVectorBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + var value = get(group); + builder.appendBytesRef(value); + } + return builder.build().asBlock(); + } + } + try (var builder = driverContext.blockFactory().newBytesRefBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group)) { + var value = get(group); + builder.appendBytesRef(value); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + var minSize = groupId + 1; + if (minSize > values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, minSize); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + assert blocks.length >= offset + 2; + try ( + var valuesBuilder = driverContext.blockFactory().newBytesRefVectorBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + var emptyBytesRef = new BytesRef(); + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group)) { + var value = get(group); + valuesBuilder.appendBytesRef(value); + } else { + valuesBuilder.appendBytesRef(emptyBytesRef); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + } + blocks[offset] = valuesBuilder.build().asBlock(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + } + } + + boolean hasValue(int groupId) { + return groupId < values.size() && values.get(groupId) != null; + } + + /** + * Switches this array state into tracking which group ids are set. This is + * idempotent and fast if already tracking so it's safe to, say, call it once + * for every block of values that arrives containing {@code null}. + * + *
+ * This class tracks seen group IDs differently from {@code AbstractArrayState}, as it just + * stores a flag to know if optimizations can be made. + *
+ */ + void enableGroupIdTracking(SeenGroupIds seenGroupIds) { + this.groupIdTrackingEnabled = true; + } + + @Override + public void close() { + for (int i = 0; i < values.size(); i++) { + Releasables.closeWhileHandlingException(values.get(i)); + } + + Releasables.close(values); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java index 13a4204edfd8f..c32f6f4703a79 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java @@ -52,7 +52,7 @@ public static List intermediateStateDesc() { private final boolean countAll; public static CountAggregatorFunction create(List inputChannels) { - return new CountAggregatorFunction(inputChannels, new LongState()); + return new CountAggregatorFunction(inputChannels, new LongState(0)); } private CountAggregatorFunction(List channels, LongState state) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregator.java new file mode 100644 index 0000000000000..144214f93571e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregator.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator for `Max`, that works with BytesRef values. + * Gets the biggest BytesRef value, based on its bytes natural order (Delegated to {@link BytesRef#compareTo}). 
+ */ +@Aggregator({ @IntermediateState(name = "max", type = "BYTES_REF"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MaxBytesRefAggregator { + private static boolean isBetter(BytesRef value, BytesRef otherValue) { + return value.compareTo(otherValue) > 0; + } + + public static SingleState initSingle(DriverContext driverContext) { + return new SingleState(driverContext.breaker()); + } + + public static void combine(SingleState state, BytesRef value) { + state.add(value); + } + + public static void combineIntermediate(SingleState state, BytesRef value, boolean seen) { + if (seen) { + combine(state, value); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext.bigArrays(), driverContext.breaker()); + } + + public static void combine(GroupingState state, int groupId, BytesRef value) { + state.add(groupId, value); + } + + public static void combineIntermediate(GroupingState state, int groupId, BytesRef value, boolean seen) { + if (seen) { + state.add(groupId, value); + } + } + + public static void combineStates(GroupingState state, int groupId, GroupingState otherState, int otherGroupId) { + state.combine(groupId, otherState, otherGroupId); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(selected, driverContext); + } + + public static class GroupingState implements Releasable { + private final BytesRefArrayState internalState; + + private GroupingState(BigArrays bigArrays, CircuitBreaker breaker) { + this.internalState = new BytesRefArrayState(bigArrays, breaker, "max_bytes_ref_grouping_aggregator"); + } + + public void add(int groupId, BytesRef value) { + if (internalState.hasValue(groupId) == false || isBetter(value, internalState.get(groupId))) { + internalState.set(groupId, value); + } + } + + public void combine(int groupId, GroupingState otherState, int otherGroupId) { + if (otherState.internalState.hasValue(otherGroupId)) { + add(groupId, otherState.internalState.get(otherGroupId)); + } + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + internalState.toIntermediate(blocks, offset, selected, driverContext); + } + + Block toBlock(IntVector selected, DriverContext driverContext) { + return internalState.toValuesBlock(selected, driverContext); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + internalState.enableGroupIdTracking(seen); + } + + @Override + public void close() { + Releasables.close(internalState); + } + } + + public static class SingleState implements Releasable { + private final BreakingBytesRefBuilder internalState; + private boolean seen; + + private SingleState(CircuitBreaker breaker) { + this.internalState = new BreakingBytesRefBuilder(breaker, "max_bytes_ref_aggregator"); + this.seen = false; + } + + public void add(BytesRef value) { + if (seen == false || isBetter(value, internalState.bytesRefView())) { + seen = true; + + internalState.grow(value.length); + internalState.setLength(value.length); + + System.arraycopy(value.bytes, value.offset, internalState.bytes(), 0, value.length); + } + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = driverContext.blockFactory().newConstantBytesRefBlockWith(internalState.bytesRefView(), 1); 
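For orientation, the single-valued max above keeps whichever value sorts last under BytesRef's natural unsigned byte-wise order; a minimal standalone sketch of that rule, assuming only plain Lucene BytesRef (the helper name is invented for the illustration):

    import org.apache.lucene.util.BytesRef;

    // Mirrors MaxBytesRefAggregator.isBetter(): the candidate wins when it compares greater.
    static BytesRef maxOf(BytesRef current, boolean seen, BytesRef candidate) {
        return (seen == false || candidate.compareTo(current) > 0) ? candidate : current;
    }

    // e.g. maxOf(new BytesRef("abc"), true, new BytesRef("abd")) returns "abd".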
+ blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + } + + Block toBlock(DriverContext driverContext) { + if (seen == false) { + return driverContext.blockFactory().newConstantNullBlock(1); + } + + return driverContext.blockFactory().newConstantBytesRefBlockWith(internalState.bytesRefView(), 1); + } + + @Override + public void close() { + Releasables.close(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregator.java new file mode 100644 index 0000000000000..830900702a371 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregator.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator for `Min`, that works with BytesRef values. + * Gets the smallest BytesRef value, based on its bytes natural order (Delegated to {@link BytesRef#compareTo}). 
+ */ +@Aggregator({ @IntermediateState(name = "min", type = "BYTES_REF"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MinBytesRefAggregator { + private static boolean isBetter(BytesRef value, BytesRef otherValue) { + return value.compareTo(otherValue) < 0; + } + + public static SingleState initSingle(DriverContext driverContext) { + return new SingleState(driverContext.breaker()); + } + + public static void combine(SingleState state, BytesRef value) { + state.add(value); + } + + public static void combineIntermediate(SingleState state, BytesRef value, boolean seen) { + if (seen) { + combine(state, value); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext.bigArrays(), driverContext.breaker()); + } + + public static void combine(GroupingState state, int groupId, BytesRef value) { + state.add(groupId, value); + } + + public static void combineIntermediate(GroupingState state, int groupId, BytesRef value, boolean seen) { + if (seen) { + state.add(groupId, value); + } + } + + public static void combineStates(GroupingState state, int groupId, GroupingState otherState, int otherGroupId) { + state.combine(groupId, otherState, otherGroupId); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(selected, driverContext); + } + + public static class GroupingState implements Releasable { + private final BytesRefArrayState internalState; + + private GroupingState(BigArrays bigArrays, CircuitBreaker breaker) { + this.internalState = new BytesRefArrayState(bigArrays, breaker, "min_bytes_ref_grouping_aggregator"); + } + + public void add(int groupId, BytesRef value) { + if (internalState.hasValue(groupId) == false || isBetter(value, internalState.get(groupId))) { + internalState.set(groupId, value); + } + } + + public void combine(int groupId, GroupingState otherState, int otherGroupId) { + if (otherState.internalState.hasValue(otherGroupId)) { + add(groupId, otherState.internalState.get(otherGroupId)); + } + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + internalState.toIntermediate(blocks, offset, selected, driverContext); + } + + Block toBlock(IntVector selected, DriverContext driverContext) { + return internalState.toValuesBlock(selected, driverContext); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + internalState.enableGroupIdTracking(seen); + } + + @Override + public void close() { + Releasables.close(internalState); + } + } + + public static class SingleState implements Releasable { + private final BreakingBytesRefBuilder internalState; + private boolean seen; + + private SingleState(CircuitBreaker breaker) { + this.internalState = new BreakingBytesRefBuilder(breaker, "min_bytes_ref_aggregator"); + this.seen = false; + } + + public void add(BytesRef value) { + if (seen == false || isBetter(value, internalState.bytesRefView())) { + seen = true; + + internalState.grow(value.length); + internalState.setLength(value.length); + + System.arraycopy(value.bytes, value.offset, internalState.bytes(), 0, value.length); + } + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = driverContext.blockFactory().newConstantBytesRefBlockWith(internalState.bytesRefView(), 1); 
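The grouped variant keeps one running minimum per group id and must copy incoming bytes, since the BytesRef passed to combine is typically a reused scratch buffer; conceptually (ignoring the big-array storage and circuit breaker behind BytesRefArrayState) it behaves like this map-based sketch, whose names are assumptions for the illustration:

    import org.apache.lucene.util.BytesRef;
    import java.util.Map;

    // Conceptual model of MinBytesRefAggregator.GroupingState.add(): keep the smallest value per group.
    static void addMin(Map<Integer, BytesRef> perGroupMin, int groupId, BytesRef value) {
        BytesRef current = perGroupMin.get(groupId);
        if (current == null || value.compareTo(current) < 0) {
            perGroupMin.put(groupId, BytesRef.deepCopyOf(value)); // deep copy: the incoming ref may point at shared scratch bytes
        }
    }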
+ blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + } + + Block toBlock(DriverContext driverContext) { + if (seen == false) { + return driverContext.blockFactory().newConstantNullBlock(1); + } + + return driverContext.blockFactory().newConstantBytesRefBlockWith(internalState.bytesRefView(), 1); + } + + @Override + public void close() { + Releasables.close(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Warnings.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Warnings.java new file mode 100644 index 0000000000000..eb2255a4e349b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Warnings.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.operator.DriverContext; + +import static org.elasticsearch.common.logging.HeaderWarning.addWarning; +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; + +/** + * Utilities to collect warnings for running an executor. + */ +public class Warnings { + static final int MAX_ADDED_WARNINGS = 20; + + private final String location; + private final String first; + + private int addedWarnings; + + public static final Warnings NOOP_WARNINGS = new Warnings(-1, -2, "") { + @Override + public void registerException(Exception exception) { + // this space intentionally left blank + } + }; + + /** + * Create a new warnings object based on the given mode + * @param warningsMode The warnings collection strategy to use + * @param lineNumber The line number of the source text. Same as `source.getLineNumber()` + * @param columnNumber The column number of the source text. Same as `source.getColumnNumber()` + * @param sourceText The source text that caused the warning. Same as `source.text()` + * @return A warnings collector object + */ + public static Warnings createWarnings(DriverContext.WarningsMode warningsMode, int lineNumber, int columnNumber, String sourceText) { + switch (warningsMode) { + case COLLECT -> { + return new Warnings(lineNumber, columnNumber, sourceText); + } + case IGNORE -> { + return NOOP_WARNINGS; + } + } + throw new IllegalStateException("Unreachable"); + } + + public Warnings(int lineNumber, int columnNumber, String sourceText) { + location = format("Line {}:{}: ", lineNumber, columnNumber); + first = format( + null, + "{}evaluation of [{}] failed, treating result as null. 
Only first {} failures recorded.", + location, + sourceText, + MAX_ADDED_WARNINGS + ); + } + + public void registerException(Exception exception) { + if (addedWarnings < MAX_ADDED_WARNINGS) { + if (addedWarnings == 0) { + addWarning(first); + } + // location needs to be added to the exception too, since the headers are deduplicated + addWarning(location + exception.getClass().getName() + ": " + exception.getMessage()); + addedWarnings++; + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st new file mode 100644 index 0000000000000..3c57ab948a79f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +$if(boolean)$ +import org.elasticsearch.common.util.BitArray; +$else$ +import org.elasticsearch.common.util.$Type$Array; +$endif$ +import org.elasticsearch.compute.data.Block; +$if(long)$ +import org.elasticsearch.compute.data.IntVector; +$endif$ +import org.elasticsearch.compute.data.$Type$Block; +$if(int)$ +import org.elasticsearch.compute.data.$Type$Vector; +$endif$ +$if(boolean||double||float)$ +import org.elasticsearch.compute.data.IntVector; +$endif$ +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of $type$s, that also tracks failures. + * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
+ * This class is generated. Do not edit it. + *
+ */ +final class $Type$FallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final $type$ init; + +$if(boolean)$ + private BitArray values; + private int size; + +$else$ + private $Type$Array values; +$endif$ + + $Type$FallibleArrayState(BigArrays bigArrays, $type$ init) { + super(bigArrays); +$if(boolean)$ + this.values = new BitArray(1, bigArrays); + this.size = 1; +$else$ + this.values = bigArrays.new$Type$Array(1, false); +$endif$ + this.values.set(0, init); + this.init = init; + } + + $type$ get(int groupId) { + return values.get(groupId); + } + + $type$ getOrDefault(int groupId) { +$if(boolean)$ + return groupId < size ? values.get(groupId) : init; +$else$ + return groupId < values.size() ? values.get(groupId) : init; +$endif$ + } + + void set(int groupId, $type$ value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + +$if(long)$ + void increment(int groupId, long value) { + ensureCapacity(groupId); + values.increment(groupId, value); + trackGroupId(groupId); + } +$endif$ + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().new$Type$VectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.append$Type$(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try ($Type$Block.Builder builder = driverContext.blockFactory().new$Type$BlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.append$Type$(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { +$if(boolean)$ + if (groupId >= size) { + values.fill(size, groupId + 1, init); + size = groupId + 1; + } +$else$ + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } +$endif$ + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().new$Type$BlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < $if(boolean)$size$else$values.size()$endif$) { + valuesBuilder.append$Type$(values.get(group)); + } else { + valuesBuilder.append$Type$($if(boolean)$false$else$0$endif$); // TODO can we just use null? 
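    // The values block is positional, so an unseen group still gets a filler here
    // ($if(boolean)$false$else$0$endif$); whether the slot is meaningful travels separately
    // in the hasValue block appended just below, and hasFailed marks groups whose evaluation
    // threw. A consumer of this intermediate state is expected to check both flags before
    // trusting the value.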
+ } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st new file mode 100644 index 0000000000000..27609383e4f61 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single $type$. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class $Type$FallibleState implements AggregatorState { + private $type$ value; + private boolean seen; + private boolean failed; + + $Type$FallibleState($type$ init) { + this.value = init; + } + + $type$ $type$Value() { + return value; + } + + void $type$Value($type$ value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. 
*/ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstant$Type$BlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st index 2d2d706c9454f..7e0949c86faaa 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st @@ -18,14 +18,6 @@ final class $Type$State implements AggregatorState { private $type$ value; private boolean seen; - $Type$State() { -$if(boolean)$ - this(false); -$else$ - this(0); -$endif$ - } - $Type$State($type$ init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java index a697a3f6c15fa..3df389135e9d3 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java @@ -87,6 +87,8 @@ public static Block[] fromListRow(BlockFactory blockFactory, List row, i } else { wrapper.builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_UNORDERD); } + } else if (isAscending(listVal) && random.nextBoolean()) { + wrapper.builder.mvOrdering(Block.MvOrdering.SORTED_ASCENDING); } blocks[i] = wrapper.builder.build(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 79359737b1b35..92213eca7b477 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -199,7 +199,7 @@ public Page getOutput() { } @Override - public SubscribableListener isBlocked() { + public IsBlockedResult isBlocked() { // TODO: Add an exchange service between async operation instead? if (finished) { return Operator.NOT_BLOCKED; @@ -216,7 +216,7 @@ public SubscribableListener isBlocked() { if (blockedFuture == null) { blockedFuture = new SubscribableListener<>(); } - return blockedFuture; + return new IsBlockedResult(blockedFuture, getClass().getSimpleName()); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilder.java index 17e67335919b1..2578452ad9062 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilder.java @@ -131,6 +131,14 @@ public void append(BytesRef bytes) { append(bytes.bytes, bytes.offset, bytes.length); } + /** + * Set the content of the builder to the given bytes. 
+ */ + public void copyBytes(BytesRef newBytes) { + clear(); + append(newBytes); + } + /** * Reset the builder to an empty bytes array. Doesn't deallocate any memory. */ @@ -141,7 +149,7 @@ public void clear() { /** * Returns a view of the data added as a {@link BytesRef}. Importantly, this does not * copy the bytes and any further modification to the {@link BreakingBytesRefBuilder} - * will modify the returned {@link BytesRef}. The called must copy the bytes + * will modify the returned {@link BytesRef}. The caller must copy the bytes * if they wish to keep them. */ public BytesRef bytesRefView() { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java index 785db826aadd6..acbf8a17b31fd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java @@ -127,7 +127,17 @@ public Driver( this.statusNanos = statusInterval.nanos(); this.releasable = releasable; this.status = new AtomicReference<>( - new DriverStatus(sessionId, startTime, System.currentTimeMillis(), 0, 0, DriverStatus.Status.QUEUED, List.of(), List.of()) + new DriverStatus( + sessionId, + startTime, + System.currentTimeMillis(), + 0, + 0, + DriverStatus.Status.QUEUED, + List.of(), + List.of(), + DriverSleeps.empty() + ) ); } @@ -170,35 +180,36 @@ public DriverContext driverContext() { * thread to do other work instead of blocking or busy-spinning on the blocked operator. */ SubscribableListener run(TimeValue maxTime, int maxIterations, LongSupplier nowSupplier) { + updateStatus(0, 0, DriverStatus.Status.RUNNING, "driver running"); long maxTimeNanos = maxTime.nanos(); long startTime = nowSupplier.getAsLong(); long nextStatus = startTime + statusNanos; int iter = 0; while (true) { - SubscribableListener fut = runSingleLoopIteration(); + IsBlockedResult isBlocked = runSingleLoopIteration(); iter++; - if (fut.isDone() == false) { - updateStatus(nowSupplier.getAsLong() - startTime, iter, DriverStatus.Status.ASYNC); - return fut; + if (isBlocked.listener().isDone() == false) { + updateStatus(nowSupplier.getAsLong() - startTime, iter, DriverStatus.Status.ASYNC, isBlocked.reason()); + return isBlocked.listener(); } if (isFinished()) { finishNanos = nowSupplier.getAsLong(); - updateStatus(finishNanos - startTime, iter, DriverStatus.Status.DONE); + updateStatus(finishNanos - startTime, iter, DriverStatus.Status.DONE, "driver done"); driverContext.finish(); Releasables.close(releasable, driverContext.getSnapshot()); - return Operator.NOT_BLOCKED; + return Operator.NOT_BLOCKED.listener(); } long now = nowSupplier.getAsLong(); if (iter >= maxIterations) { - updateStatus(now - startTime, iter, DriverStatus.Status.WAITING); - return Operator.NOT_BLOCKED; + updateStatus(now - startTime, iter, DriverStatus.Status.WAITING, "driver iterations"); + return Operator.NOT_BLOCKED.listener(); } if (now - startTime >= maxTimeNanos) { - updateStatus(now - startTime, iter, DriverStatus.Status.WAITING); - return Operator.NOT_BLOCKED; + updateStatus(now - startTime, iter, DriverStatus.Status.WAITING, "driver time"); + return Operator.NOT_BLOCKED.listener(); } if (now > nextStatus) { - updateStatus(now - startTime, iter, DriverStatus.Status.RUNNING); + updateStatus(now - startTime, iter, DriverStatus.Status.RUNNING, "driver running"); nextStatus = now + statusNanos; } } @@ -230,7 +241,7 @@ public 
void abort(Exception reason, ActionListener listener) { } } - private SubscribableListener runSingleLoopIteration() { + private IsBlockedResult runSingleLoopIteration() { ensureNotCancelled(); boolean movedPage = false; @@ -239,7 +250,7 @@ private SubscribableListener runSingleLoopIteration() { Operator nextOp = activeOperators.get(i + 1); // skip blocked operator - if (op.isBlocked().isDone() == false) { + if (op.isBlocked().listener().isDone() == false) { continue; } @@ -290,7 +301,10 @@ private SubscribableListener runSingleLoopIteration() { if (movedPage == false) { return oneOf( - activeOperators.stream().map(Operator::isBlocked).filter(laf -> laf.isDone() == false).collect(Collectors.toList()) + activeOperators.stream() + .map(Operator::isBlocked) + .filter(laf -> laf.listener().isDone() == false) + .collect(Collectors.toList()) ); } return Operator.NOT_BLOCKED; @@ -327,7 +341,7 @@ public static void start( ) { driver.completionListener.addListener(listener); if (driver.started.compareAndSet(false, true)) { - driver.updateStatus(0, 0, DriverStatus.Status.STARTING); + driver.updateStatus(0, 0, DriverStatus.Status.STARTING, "driver starting"); schedule(DEFAULT_TIME_BEFORE_YIELDING, maxIterations, threadContext, executor, driver, driver.completionListener); } } @@ -394,18 +408,23 @@ void onComplete(ActionListener listener) { }); } - private static SubscribableListener oneOf(List> futures) { - if (futures.isEmpty()) { + private static IsBlockedResult oneOf(List results) { + if (results.isEmpty()) { return Operator.NOT_BLOCKED; } - if (futures.size() == 1) { - return futures.get(0); + if (results.size() == 1) { + return results.get(0); } SubscribableListener oneOf = new SubscribableListener<>(); - for (SubscribableListener fut : futures) { - fut.addListener(oneOf); + StringBuilder reason = new StringBuilder(); + for (IsBlockedResult r : results) { + r.listener().addListener(oneOf); + if (reason.isEmpty() == false) { + reason.append(" OR "); + } + reason.append(r.reason()); } - return oneOf; + return new IsBlockedResult(oneOf, reason.toString()); } @Override @@ -440,7 +459,15 @@ public DriverProfile profile() { if (status.status() != DriverStatus.Status.DONE) { throw new IllegalStateException("can only get profile from finished driver"); } - return new DriverProfile(finishNanos - startNanos, status.cpuNanos(), status.iterations(), status.completedOperators()); + return new DriverProfile( + status.started(), + status.lastUpdated(), + finishNanos - startNanos, + status.cpuNanos(), + status.iterations(), + status.completedOperators(), + status.sleeps() + ); } /** @@ -449,17 +476,44 @@ public DriverProfile profile() { * @param extraIterations how many iterations to add to the previous status * @param status the status of the overall driver request */ - private void updateStatus(long extraCpuNanos, int extraIterations, DriverStatus.Status status) { + private void updateStatus(long extraCpuNanos, int extraIterations, DriverStatus.Status status, String reason) { this.status.getAndUpdate(prev -> { + long now = System.currentTimeMillis(); + DriverSleeps sleeps = prev.sleeps(); + + // Rebuild the sleeps or bail entirely based on the updated status. + // Sorry for the complexity here. If anyone has a nice way to refactor this, be my guest. 
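The switch that follows is the heart of the new sleep accounting. A compact, self-contained restatement of its transition rules (hypothetical names; SleepTracker stands in for DriverSleeps): moving to ASYNC or WAITING records a sleep with its reason, moving back to RUNNING from ASYNC or WAITING records the wake, and every other transition leaves the sleep record untouched.

    // Hypothetical sketch of the status transitions, not the real updateStatus implementation.
    class StatusTransitionSketch {
        enum Status { QUEUED, STARTING, RUNNING, ASYNC, WAITING, DONE }

        interface SleepTracker {
            SleepTracker sleep(String reason, long nowMillis);
            SleepTracker wake(long nowMillis);
        }

        static SleepTracker onStatusChange(Status prev, Status next, String reason, long now, SleepTracker sleeps) {
            return switch (next) {
                case ASYNC, WAITING -> sleeps.sleep(reason, now);   // parking: remember why and when
                case RUNNING -> (prev == Status.ASYNC || prev == Status.WAITING)
                    ? sleeps.wake(now)                              // resuming: close the open sleep
                    : sleeps;                                       // e.g. still STARTING: nothing to record
                default -> sleeps;
            };
        }
    }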
+ switch (status) { + case ASYNC, WAITING -> sleeps = sleeps.sleep(reason, now); + case RUNNING -> { + switch (prev.status()) { + case ASYNC, WAITING -> sleeps = sleeps.wake(now); + case STARTING -> { + if (extraIterations == 0) { + /* + * 0 extraIterations means we haven't started the loop - we're just + * signaling that we've woken up. We don't need to signal that when + * the state is already STARTING because we don't have anything + * interesting to report. And some tests rely on the status staying + * in the STARTING state until the first status report. + */ + return prev; + } + } + } + } + } + return new DriverStatus( sessionId, startTime, - System.currentTimeMillis(), + now, prev.cpuNanos() + extraCpuNanos, prev.iterations() + extraIterations, status, statusOfCompletedOperators, - activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList() + activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList(), + sleeps ); }); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java index 414fbbbca8294..e7b16072f4b66 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java @@ -27,6 +27,16 @@ * Profile results from a single {@link Driver}. */ public class DriverProfile implements Writeable, ChunkedToXContentObject { + /** + * Millis since epoch when the driver started. + */ + private final long startMillis; + + /** + * Millis since epoch when the driver stopped. + */ + private final long stopMillis; + /** * Nanos between creation and completion of the {@link Driver}. */ @@ -45,18 +55,38 @@ public class DriverProfile implements Writeable, ChunkedToXContentObject { private final long iterations; /** - * Status of each {@link Operator} in the driver when it finishes. + * Status of each {@link Operator} in the driver when it finished. 
*/ private final List operators; - public DriverProfile(long tookNanos, long cpuNanos, long iterations, List operators) { + private final DriverSleeps sleeps; + + public DriverProfile( + long startMillis, + long stopMillis, + long tookNanos, + long cpuNanos, + long iterations, + List operators, + DriverSleeps sleeps + ) { + this.startMillis = startMillis; + this.stopMillis = stopMillis; this.tookNanos = tookNanos; this.cpuNanos = cpuNanos; this.iterations = iterations; this.operators = operators; + this.sleeps = sleeps; } public DriverProfile(StreamInput in) throws IOException { + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) { + this.startMillis = in.readVLong(); + this.stopMillis = in.readVLong(); + } else { + this.startMillis = 0; + this.stopMillis = 0; + } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { this.tookNanos = in.readVLong(); this.cpuNanos = in.readVLong(); @@ -67,16 +97,36 @@ public DriverProfile(StreamInput in) throws IOException { this.iterations = 0; } this.operators = in.readCollectionAsImmutableList(DriverStatus.OperatorStatus::new); + this.sleeps = DriverSleeps.read(in); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) { + out.writeVLong(startMillis); + out.writeVLong(stopMillis); + } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeVLong(tookNanos); out.writeVLong(cpuNanos); out.writeVLong(iterations); } out.writeCollection(operators); + sleeps.writeTo(out); + } + + /** + * Millis since epoch when the driver started. + */ + public long startMillis() { + return startMillis; + } + + /** + * Millis since epoch when the driver stopped. + */ + public long stopMillis() { + return stopMillis; } /** @@ -102,13 +152,25 @@ public long iterations() { return iterations; } + /** + * Status of each {@link Operator} in the driver when it finished. + */ public List operators() { return operators; } + /** + * Records of the times the driver has slept. 
+ */ + public DriverSleeps sleeps() { + return sleeps; + } + @Override public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat(ChunkedToXContentHelper.startObject(), Iterators.single((b, p) -> { + b.timeField("start_millis", "start", startMillis); + b.timeField("stop_millis", "stop", stopMillis); b.field("took_nanos", tookNanos); if (b.humanReadable()) { b.field("took_time", TimeValue.timeValueNanos(tookNanos)); @@ -119,7 +181,11 @@ public Iterator toXContentChunked(ToXContent.Params params } b.field("iterations", iterations); return b; - }), ChunkedToXContentHelper.array("operators", operators.iterator()), ChunkedToXContentHelper.endObject()); + }), + ChunkedToXContentHelper.array("operators", operators.iterator()), + Iterators.single((b, p) -> b.field("sleeps", sleeps)), + ChunkedToXContentHelper.endObject() + ); } @Override @@ -131,15 +197,18 @@ public boolean equals(Object o) { return false; } DriverProfile that = (DriverProfile) o; - return tookNanos == that.tookNanos + return startMillis == that.startMillis + && stopMillis == that.stopMillis + && tookNanos == that.tookNanos && cpuNanos == that.cpuNanos && iterations == that.iterations - && Objects.equals(operators, that.operators); + && Objects.equals(operators, that.operators) + && sleeps.equals(that.sleeps); } @Override public int hashCode() { - return Objects.hash(tookNanos, cpuNanos, iterations, operators); + return Objects.hash(startMillis, stopMillis, tookNanos, cpuNanos, iterations, operators, sleeps); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java new file mode 100644 index 0000000000000..217a0b033bed4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +/** + * Records of the times the driver has slept. + * @param counts map from the reason the driver has slept to the number of times it slept for that reason + * @param first the first few times the driver slept + * @param last the last few times the driver slept + */ +public record DriverSleeps(Map counts, List first, List last) implements Writeable, ToXContentObject { + /** + * A record of a time the driver slept. 
+ * @param reason The reason the driver slept + * @param sleep Millis since epoch when the driver slept + * @param wake Millis since epoch when the driver woke, or 0 if it is currently sleeping + */ + public record Sleep(String reason, long sleep, long wake) implements Writeable, ToXContentObject { + Sleep(StreamInput in) throws IOException { + this(in.readString(), in.readLong(), in.readLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(reason); + out.writeLong(sleep); + out.writeLong(wake); + } + + Sleep wake(long now) { + if (isStillSleeping() == false) { + throw new IllegalStateException("Already awake."); + } + return new Sleep(reason, sleep, now); + } + + public boolean isStillSleeping() { + return wake == 0; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("reason", reason); + builder.timeField("sleep_millis", "sleep", sleep); + if (wake > 0) { + builder.timeField("wake_millis", "wake", wake); + } + return builder.endObject(); + } + } + + /** + * How many sleeps of the first and last sleeps and wakes to keep. + */ + static final int RECORDS = 10; + + public static DriverSleeps read(StreamInput in) throws IOException { + if (in.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) { + return empty(); + } + return new DriverSleeps( + in.readImmutableMap(StreamInput::readVLong), + in.readCollectionAsList(Sleep::new), + in.readCollectionAsList(Sleep::new) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) { + return; + } + out.writeMap(counts, StreamOutput::writeVLong); + out.writeCollection(first); + out.writeCollection(last); + } + + public static DriverSleeps empty() { + return new DriverSleeps(Map.of(), List.of(), List.of()); + } + + /** + * Record a sleep. + * @param reason the reason for the sleep + * @param now the current time + */ + public DriverSleeps sleep(String reason, long now) { + if (last.isEmpty() == false) { + Sleep lastLast = last.get(last.size() - 1); + if (lastLast.isStillSleeping()) { + throw new IllegalStateException("Still sleeping."); + } + } + Map newCounts = new TreeMap<>(counts); + newCounts.compute(reason, (k, v) -> v == null ? 1 : v + 1); + List newFirst = first.size() < RECORDS ? append(first, reason, now) : first; + List newLast = last.size() < RECORDS ? append(last, reason, now) : rollOnto(last, reason, now); + return new DriverSleeps(newCounts, newFirst, newLast); + } + + /** + * Record a wake. + * @param now the current time + */ + public DriverSleeps wake(long now) { + if (now == 0) { + throw new IllegalStateException("Can't wake at epoch. That's used to signal sleeping."); + } + if (last.isEmpty()) { + throw new IllegalStateException("Never slept."); + } + Sleep lastFirst = first.get(first.size() - 1); + List newFirst = lastFirst.wake == 0 ? 
wake(first, now) : first; + return new DriverSleeps(counts, newFirst, wake(last, now)); + } + + private List append(List old, String reason, long now) { + List sleeps = new ArrayList<>(old.size() + 1); + sleeps.addAll(old); + sleeps.add(new Sleep(reason, now, 0)); + return Collections.unmodifiableList(sleeps); + } + + private List rollOnto(List old, String reason, long now) { + List sleeps = new ArrayList<>(old.size()); + for (int i = 1; i < old.size(); i++) { + sleeps.add(old.get(i)); + } + sleeps.add(new Sleep(reason, now, 0)); + return Collections.unmodifiableList(sleeps); + } + + private List wake(List old, long now) { + List sleeps = new ArrayList<>(old); + sleeps.set(sleeps.size() - 1, old.get(old.size() - 1).wake(now)); + return Collections.unmodifiableList(sleeps); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("counts"); + for (Map.Entry count : counts.entrySet()) { + builder.field(count.getKey(), count.getValue()); + } + builder.endObject(); + toXContent(builder, params, "first", first); + toXContent(builder, params, "last", last); + return builder.endObject(); + } + + private static void toXContent(XContentBuilder builder, ToXContent.Params params, String name, List sleeps) throws IOException { + builder.startArray(name); + for (Sleep sleep : sleeps) { + sleep.toXContent(builder, params); + } + builder.endArray(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java index c7a0c7d4bacb9..42e3908231206 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java @@ -79,6 +79,8 @@ public class DriverStatus implements Task.Status { */ private final List activeOperators; + private final DriverSleeps sleeps; + DriverStatus( String sessionId, long started, @@ -87,7 +89,8 @@ public class DriverStatus implements Task.Status { long iterations, Status status, List completedOperators, - List activeOperators + List activeOperators, + DriverSleeps sleeps ) { this.sessionId = sessionId; this.started = started; @@ -97,6 +100,7 @@ public class DriverStatus implements Task.Status { this.status = status; this.completedOperators = completedOperators; this.activeOperators = activeOperators; + this.sleeps = sleeps; } public DriverStatus(StreamInput in) throws IOException { @@ -105,13 +109,14 @@ public DriverStatus(StreamInput in) throws IOException { this.lastUpdated = in.readLong(); this.cpuNanos = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0; this.iterations = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? 
in.readVLong() : 0; - this.status = Status.valueOf(in.readString()); + this.status = Status.read(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.completedOperators = in.readCollectionAsImmutableList(OperatorStatus::new); } else { this.completedOperators = List.of(); } this.activeOperators = in.readCollectionAsImmutableList(OperatorStatus::new); + this.sleeps = DriverSleeps.read(in); } @Override @@ -125,11 +130,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(cpuNanos); out.writeVLong(iterations); } - out.writeString(status.toString()); + status.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeCollection(completedOperators); } out.writeCollection(activeOperators); + sleeps.writeTo(out); } @Override @@ -188,6 +194,13 @@ public List completedOperators() { return completedOperators; } + /** + * Records of the times the driver has slept. + */ + public DriverSleeps sleeps() { + return sleeps; + } + /** * Status of each active {@link Operator} in the driver. */ @@ -206,7 +219,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("cpu_time", TimeValue.timeValueNanos(cpuNanos)); } builder.field("iterations", iterations); - builder.field("status", status.toString().toLowerCase(Locale.ROOT)); + builder.field("status", status, params); builder.startArray("completed_operators"); for (OperatorStatus completed : completedOperators) { builder.value(completed); @@ -217,6 +230,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(active); } builder.endArray(); + builder.field("sleeps", sleeps, params); return builder.endObject(); } @@ -232,12 +246,13 @@ public boolean equals(Object o) { && iterations == that.iterations && status == that.status && completedOperators.equals(that.completedOperators) - && activeOperators.equals(that.activeOperators); + && activeOperators.equals(that.activeOperators) + && sleeps.equals(that.sleeps); } @Override public int hashCode() { - return Objects.hash(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators); + return Objects.hash(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators, sleeps); } @Override @@ -313,7 +328,7 @@ public String toString() { } } - public enum Status implements ToXContentFragment { + public enum Status implements Writeable, ToXContentFragment { QUEUED, STARTING, RUNNING, @@ -321,6 +336,15 @@ public enum Status implements ToXContentFragment { WAITING, DONE; + public static Status read(StreamInput in) throws IOException { + return Status.valueOf(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(toString()); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.value(toString().toLowerCase(Locale.ROOT)); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java index 99edab038af31..943ba4dc1f4fa 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java @@ -46,10 +46,19 @@ public FailureCollector(int maxExceptions) { 
this.maxExceptions = maxExceptions; } - public void unwrapAndCollect(Exception originEx) { - final Exception e = originEx instanceof TransportException - ? (originEx.getCause() instanceof Exception cause ? cause : new ElasticsearchException(originEx.getCause())) - : originEx; + private static Exception unwrapTransportException(TransportException te) { + final Throwable cause = te.getCause(); + if (cause == null) { + return te; + } else if (cause instanceof Exception ex) { + return ex; + } else { + return new ElasticsearchException(cause); + } + } + + public void unwrapAndCollect(Exception e) { + e = e instanceof TransportException te ? unwrapTransportException(te) : e; if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { if (cancelledExceptionsCount.incrementAndGet() <= maxExceptions) { cancelledExceptions.add(e); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java new file mode 100644 index 0000000000000..9e9c64dfbfed4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.action.support.SubscribableListener; + +import java.util.Map; + +/** + * Is this {@link Operator} blocked? + *

+ * If the {@link #listener}'s {@link SubscribableListener#isDone()} method + * returns {@code true} then the {@linkplain Operator} is not blocked. + *
+ * If the {@linkplain Operator} is blocked then you can + * {@link SubscribableListener#addListener} to the {@link #listener} to be + * notified when the {@linkplain Operator} is unblocked. + *
+ * @param listener a listener to check for blocked-ness + * @param reason the reason that the {@linkplain Operator} is blocked. + * This is used as a {@link Map} key so this shouldn't + * vary wildly, but it should be descriptive of the reason + * the operator went async. + */ +public record IsBlockedResult(SubscribableListener listener, String reason) {} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java index 1038277c39fe1..663e06756551b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java @@ -88,11 +88,11 @@ default Status status() { * If the operator is not blocked, this method returns {@link #NOT_BLOCKED} which is an already * completed future. */ - default SubscribableListener isBlocked() { + default IsBlockedResult isBlocked() { return NOT_BLOCKED; } - SubscribableListener NOT_BLOCKED = SubscribableListener.newSucceeded(null); + IsBlockedResult NOT_BLOCKED = new IsBlockedResult(SubscribableListener.newSucceeded(null), "not blocked"); /** * A factory for creating intermediate operators. diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java index df6c09ea1ff97..ce400ddbdd6f9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import java.util.Queue; @@ -83,7 +84,7 @@ private void notifyNotFull() { } } - SubscribableListener waitForWriting() { + IsBlockedResult waitForWriting() { // maxBufferSize check is not water-tight as more than one sink can pass this check at the same time. 
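The waitForWriting and waitForReading changes in this file keep the buffer's existing backpressure shape and only wrap each listener with a reason ("exchange full" or "exchange empty"). A simplified, JDK-only sketch of the write side (hypothetical names; CompletableFuture stands in for SubscribableListener), including the deliberately loose size check called out in the comment above:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical stand-in for the exchange buffer's write-side backpressure; not the real implementation.
    class WriteBackpressureSketch {
        private final AtomicInteger queueSize = new AtomicInteger();
        private final int maxSize;
        private CompletableFuture<Void> notFull; // lazily created, guarded by "this"

        WriteBackpressureSketch(int maxSize) {
            this.maxSize = maxSize;
        }

        CompletableFuture<Void> waitForWriting() {
            // Loose by design: two sinks can both see room and both enqueue, briefly overshooting maxSize.
            if (queueSize.get() < maxSize) {
                return CompletableFuture.completedFuture(null);
            }
            synchronized (this) {
                if (queueSize.get() < maxSize) {
                    return CompletableFuture.completedFuture(null);
                }
                if (notFull == null) {
                    notFull = new CompletableFuture<>();
                }
                return notFull;
            }
        }

        void onPageAdded() {
            queueSize.incrementAndGet();
        }

        void onPageDrained() {
            queueSize.decrementAndGet();
            CompletableFuture<Void> toComplete;
            synchronized (this) {
                toComplete = notFull;
                notFull = null;
            }
            if (toComplete != null) {
                toComplete.complete(null); // wake any writer parked on the previous future
            }
        }
    }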
if (queueSize.get() < maxSize || noMoreInputs) { return Operator.NOT_BLOCKED; @@ -95,11 +96,11 @@ SubscribableListener waitForWriting() { if (notFullFuture == null) { notFullFuture = new SubscribableListener<>(); } - return notFullFuture; + return new IsBlockedResult(notFullFuture, "exchange full"); } } - SubscribableListener waitForReading() { + IsBlockedResult waitForReading() { if (size() > 0 || noMoreInputs) { return Operator.NOT_BLOCKED; } @@ -110,7 +111,7 @@ SubscribableListener waitForReading() { if (notEmptyFuture == null) { notEmptyFuture = new SubscribableListener<>(); } - return notEmptyFuture; + return new IsBlockedResult(notEmptyFuture, "exchange empty"); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java index 8f0208740b689..e96ca9e39b7e5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java @@ -7,8 +7,8 @@ package org.elasticsearch.compute.operator.exchange; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; /** * Sink for exchanging data @@ -33,5 +33,5 @@ public interface ExchangeSink { /** * Whether the sink is blocked on adding more pages */ - SubscribableListener waitForWriting(); + IsBlockedResult waitForWriting(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index ab155d6ee8479..757a3262433c8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -81,7 +82,7 @@ public boolean isFinished() { } @Override - public SubscribableListener waitForWriting() { + public IsBlockedResult waitForWriting() { return buffer.waitForWriting(); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java index 01354d681017a..dd89dfe480c36 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java @@ -9,13 +9,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.SinkOperator; import org.elasticsearch.xcontent.XContentBuilder; @@ -65,13 +65,13 @@ public void finish() { } @Override - public SubscribableListener isBlocked() { + public IsBlockedResult isBlocked() { return sink.waitForWriting(); } @Override public boolean needsInput() { - return isFinished() == false && isBlocked().isDone(); + return isFinished() == false && isBlocked().listener().isDone(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java index 01ed5e3fb6388..aa3374aa26d3f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java @@ -7,8 +7,8 @@ package org.elasticsearch.compute.operator.exchange; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; /** * Source for exchanging data @@ -38,5 +38,5 @@ public interface ExchangeSource { /** * Allows callers to stop reading from the source when it's blocked */ - SubscribableListener waitForReading(); + IsBlockedResult waitForReading(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index 77b535949eb9d..406dc4494208c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.FailureCollector; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.core.Releasable; import java.util.List; @@ -70,7 +71,7 @@ public boolean isFinished() { } @Override - public SubscribableListener waitForReading() { + public IsBlockedResult waitForReading() { return buffer.waitForReading(); } @@ -178,13 +179,13 @@ void fetchPage() { if (resp.finished()) { onSinkComplete(); } else { - SubscribableListener future = buffer.waitForWriting(); - if (future.isDone()) { + IsBlockedResult future = buffer.waitForWriting(); + if (future.listener().isDone()) { if (loopControl.tryResume() == false) { fetchPage(); } } else { - future.addListener(ActionListener.wrap(unused -> { + future.listener().addListener(ActionListener.wrap(unused -> { if (loopControl.tryResume() == false) { fetchPage(); } @@ -198,7 +199,7 @@ void fetchPage() { void onSinkFailed(Exception e) { failure.unwrapAndCollect(e); - buffer.waitForReading().onResponse(null); // resume the Driver if it is being blocked on reading + buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading onSinkComplete(); } diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java index 1efba31bd831b..2d0ce228e81df 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java @@ -9,13 +9,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,7 +30,7 @@ public class ExchangeSourceOperator extends SourceOperator { private final ExchangeSource source; - private SubscribableListener isBlocked = NOT_BLOCKED; + private IsBlockedResult isBlocked = NOT_BLOCKED; private int pagesEmitted; public record ExchangeSourceOperatorFactory(Supplier exchangeSources) implements SourceOperatorFactory { @@ -70,10 +70,10 @@ public void finish() { } @Override - public SubscribableListener isBlocked() { - if (isBlocked.isDone()) { + public IsBlockedResult isBlocked() { + if (isBlocked.listener().isDone()) { isBlocked = source.waitForReading(); - if (isBlocked.isDone()) { + if (isBlocked.listener().isDone()) { isBlocked = NOT_BLOCKED; } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionTests.java new file mode 100644 index 0000000000000..adc891a6a977d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.operator.SequenceBytesRefBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxBytesRefAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceBytesRefBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> new BytesRef(randomAlphaOfLengthBetween(0, 100))) + ); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "max of bytes"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Optional max = input.stream().flatMap(b -> allBytesRefs(b)).max(Comparator.naturalOrder()); + if (max.isEmpty()) { + assertThat(result.isNull(0), equalTo(true)); + return; + } + assertThat(result.isNull(0), equalTo(false)); + assertThat(BlockUtils.toJavaObject(result, 0), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..75a6a839ea62d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunctionTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.LongBytesRefTupleBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxBytesRefGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new LongBytesRefTupleBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), new BytesRef(randomAlphaOfLengthBetween(0, 100)))) + ); + } + + @Override + protected DataType acceptedDataType() { + return DataType.IP; + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "max of bytes"; + } + + @Override + protected void assertSimpleGroup(List input, Block result, int position, Long group) { + Optional max = input.stream().flatMap(p -> allBytesRefs(p, group)).max(Comparator.naturalOrder()); + if (max.isEmpty()) { + assertThat(result.isNull(position), equalTo(true)); + return; + } + assertThat(result.isNull(position), equalTo(false)); + assertThat(BlockUtils.toJavaObject(result, position), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionTests.java new file mode 100644 index 0000000000000..b4383d6b0f56e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.operator.SequenceBytesRefBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MinBytesRefAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceBytesRefBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> new BytesRef(randomAlphaOfLengthBetween(0, 100))) + ); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MinBytesRefAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "min of bytes"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Optional max = input.stream().flatMap(b -> allBytesRefs(b)).min(Comparator.naturalOrder()); + if (max.isEmpty()) { + assertThat(result.isNull(0), equalTo(true)); + return; + } + assertThat(result.isNull(0), equalTo(false)); + assertThat(BlockUtils.toJavaObject(result, 0), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..d4cfca819f3b7 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunctionTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.LongBytesRefTupleBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MinBytesRefGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new LongBytesRefTupleBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), new BytesRef(randomAlphaOfLengthBetween(0, 100)))) + ); + } + + @Override + protected DataType acceptedDataType() { + return DataType.IP; + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MinBytesRefAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "min of bytes"; + } + + @Override + protected void assertSimpleGroup(List input, Block result, int position, Long group) { + Optional max = input.stream().flatMap(p -> allBytesRefs(p, group)).min(Comparator.naturalOrder()); + if (max.isEmpty()) { + assertThat(result.isNull(position), equalTo(true)); + return; + } + assertThat(result.isNull(position), equalTo(false)); + assertThat(BlockUtils.toJavaObject(result, position), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index ae4558d5f8f71..fbcf11cd948c0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -159,49 +159,56 @@ public void doClose() { Releasables.close(localBreaker); } - public void testStatus() { - BlockFactory blockFactory = blockFactory(); - DriverContext driverContext = new DriverContext(blockFactory.bigArrays(), blockFactory); + class TestOp extends AsyncOperator { Map> handlers = new HashMap<>(); - AsyncOperator operator = new AsyncOperator(driverContext, 2) { - @Override - protected void performAsync(Page inputPage, ActionListener listener) { - handlers.put(inputPage, listener); - } - @Override - protected void doClose() { + TestOp(DriverContext driverContext, int maxOutstandingRequests) { + super(driverContext, maxOutstandingRequests); + } - } - }; - assertTrue(operator.isBlocked().isDone()); + @Override + protected void performAsync(Page inputPage, ActionListener listener) { + handlers.put(inputPage, listener); + } + + @Override + protected void doClose() { + + } + } + + public void testStatus() { + BlockFactory blockFactory = blockFactory(); + DriverContext driverContext = new DriverContext(blockFactory.bigArrays(), blockFactory); + TestOp operator = new TestOp(driverContext, 2); + assertTrue(operator.isBlocked().listener().isDone()); 
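The test assertion just above shows the call-site shape after the IsBlockedResult change: blockedness is now a pair of listener and reason, so callers ask isBlocked().listener().isDone() rather than isBlocked().isDone(), and the reason string can be surfaced in status output. A tiny self-contained sketch of that shape (hypothetical names; CompletableFuture stands in for SubscribableListener):

    import java.util.concurrent.CompletableFuture;

    // Hypothetical stand-ins illustrating the (listener, reason) pair; not the real API.
    record BlockedSketch(CompletableFuture<Void> listener, String reason) {
        static final BlockedSketch NOT_BLOCKED = new BlockedSketch(CompletableFuture.completedFuture(null), "not blocked");
    }

    class DriverLoopSketch {
        static void step(BlockedSketch blocked, Runnable reschedule) {
            if (blocked.listener().isDone()) {
                return; // not blocked: keep iterating this operator
            }
            // Blocked: the reason ends up in the driver's status and profile; the listener wakes the driver later.
            System.out.println("driver sleeping because: " + blocked.reason());
            blocked.listener().thenRun(reschedule);
        }
    }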
assertTrue(operator.needsInput()); Page page1 = new Page(driverContext.blockFactory().newConstantNullBlock(1)); operator.addInput(page1); - assertFalse(operator.isBlocked().isDone()); - SubscribableListener blocked1 = operator.isBlocked(); + assertFalse(operator.isBlocked().listener().isDone()); + SubscribableListener blocked1 = operator.isBlocked().listener(); assertTrue(operator.needsInput()); Page page2 = new Page(driverContext.blockFactory().newConstantNullBlock(2)); operator.addInput(page2); assertFalse(operator.needsInput()); // reached the max outstanding requests - assertFalse(operator.isBlocked().isDone()); - assertThat(operator.isBlocked(), equalTo(blocked1)); + assertFalse(operator.isBlocked().listener().isDone()); + assertThat(operator.isBlocked(), equalTo(new IsBlockedResult(blocked1, "TestOp"))); Page page3 = new Page(driverContext.blockFactory().newConstantNullBlock(3)); - handlers.remove(page1).onResponse(page3); + operator.handlers.remove(page1).onResponse(page3); page1.releaseBlocks(); assertFalse(operator.needsInput()); // still have 2 outstanding requests - assertTrue(operator.isBlocked().isDone()); + assertTrue(operator.isBlocked().listener().isDone()); assertTrue(blocked1.isDone()); assertThat(operator.getOutput(), equalTo(page3)); page3.releaseBlocks(); assertTrue(operator.needsInput()); - assertFalse(operator.isBlocked().isDone()); + assertFalse(operator.isBlocked().listener().isDone()); Page page4 = new Page(driverContext.blockFactory().newConstantNullBlock(3)); - handlers.remove(page2).onResponse(page4); + operator.handlers.remove(page2).onResponse(page4); page2.releaseBlocks(); assertThat(operator.getOutput(), equalTo(page4)); page4.releaseBlocks(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java index 24f5297a0d6fe..266c17febc5b3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java @@ -32,7 +32,7 @@ public void testBreakOnBuild() { public void testAddByte() { testAgainstOracle(() -> new TestIteration() { - byte b = randomByte(); + final byte b = randomByte(); @Override public int size() { @@ -53,7 +53,7 @@ public void applyToOracle(BytesRefBuilder oracle) { public void testAddBytesRef() { testAgainstOracle(() -> new TestIteration() { - BytesRef ref = new BytesRef(randomAlphaOfLengthBetween(1, 100)); + final BytesRef ref = new BytesRef(randomAlphaOfLengthBetween(1, 100)); @Override public int size() { @@ -72,10 +72,23 @@ public void applyToOracle(BytesRefBuilder oracle) { }); } + public void testCopyBytes() { + CircuitBreaker breaker = new MockBigArrays.LimitedBreaker(CircuitBreaker.REQUEST, ByteSizeValue.ofBytes(300)); + try (BreakingBytesRefBuilder builder = new BreakingBytesRefBuilder(breaker, "test")) { + String initialValue = randomAlphaOfLengthBetween(1, 50); + builder.copyBytes(new BytesRef(initialValue)); + assertThat(builder.bytesRefView().utf8ToString(), equalTo(initialValue)); + + String newValue = randomAlphaOfLengthBetween(350, 500); + Exception e = expectThrows(CircuitBreakingException.class, () -> builder.copyBytes(new BytesRef(newValue))); + assertThat(e.getMessage(), equalTo("over test limit")); + } + } + public void testGrow() { testAgainstOracle(() -> new TestIteration() { - int 
length = between(1, 100); - byte b = randomByte(); + final int length = between(1, 100); + final byte b = randomByte(); @Override public int size() { @@ -118,10 +131,11 @@ private void testAgainstOracle(Supplier iterations) { assertThat(builder.bytesRefView(), equalTo(oracle.get())); while (true) { TestIteration iteration = iterations.get(); - boolean willResize = builder.length() + iteration.size() >= builder.bytes().length; + int targetSize = builder.length() + iteration.size(); + boolean willResize = targetSize >= builder.bytes().length; if (willResize) { long resizeMemoryUsage = BreakingBytesRefBuilder.SHALLOW_SIZE + ramForArray(builder.bytes().length); - resizeMemoryUsage += ramForArray(ArrayUtil.oversize(builder.length() + iteration.size(), Byte.BYTES)); + resizeMemoryUsage += ramForArray(ArrayUtil.oversize(targetSize, Byte.BYTES)); if (resizeMemoryUsage > limit) { Exception e = expectThrows(CircuitBreakingException.class, () -> iteration.applyToBuilder(builder)); assertThat(e.getMessage(), equalTo("over test limit")); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java index 86655bd3b7f73..27083ea0fcd13 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java @@ -20,22 +20,34 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; public class DriverProfileTests extends AbstractWireSerializingTestCase { public void testToXContent() { DriverProfile status = new DriverProfile( + 123413220000L, + 123413243214L, 10012, 10000, 12, List.of( new DriverStatus.OperatorStatus("LuceneSource", LuceneSourceOperatorStatusTests.simple()), new DriverStatus.OperatorStatus("ValuesSourceReader", ValuesSourceReaderOperatorStatusTests.simple()) + ), + new DriverSleeps( + Map.of("driver time", 1L), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)) ) ); assertThat(Strings.toString(status, true, true), equalTo(""" { + "start" : "1973-11-29T09:27:00.000Z", + "start_millis" : 123413220000, + "stop" : "1973-11-29T09:27:23.214Z", + "stop_millis" : 123413243214, "took_nanos" : 10012, "took_time" : "10micros", "cpu_nanos" : 10000, @@ -54,7 +66,30 @@ public void testToXContent() { """.stripTrailing() + " " + ValuesSourceReaderOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ } - ] + ], + "sleeps" : { + "counts" : { + "driver time" : 1 + }, + "first" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ], + "last" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ] + } }""")); } @@ -69,24 +104,33 @@ protected DriverProfile createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - DriverStatusTests.randomOperatorStatuses() + randomNonNegativeLong(), + randomNonNegativeLong(), + DriverStatusTests.randomOperatorStatuses(), + DriverSleepsTests.randomDriverSleeps() ); } @Override protected DriverProfile mutateInstance(DriverProfile instance) throws IOException { + long startMillis = 
instance.startMillis(); + long stopMillis = instance.stopMillis(); long tookNanos = instance.tookNanos(); long cpuNanos = instance.cpuNanos(); long iterations = instance.iterations(); var operators = instance.operators(); - switch (between(0, 3)) { - case 0 -> tookNanos = randomValueOtherThan(tookNanos, ESTestCase::randomNonNegativeLong); - case 1 -> cpuNanos = randomValueOtherThan(cpuNanos, ESTestCase::randomNonNegativeLong); - case 2 -> iterations = randomValueOtherThan(iterations, ESTestCase::randomNonNegativeLong); - case 3 -> operators = randomValueOtherThan(operators, DriverStatusTests::randomOperatorStatuses); + var sleeps = instance.sleeps(); + switch (between(0, 6)) { + case 0 -> startMillis = randomValueOtherThan(startMillis, ESTestCase::randomNonNegativeLong); + case 1 -> stopMillis = randomValueOtherThan(startMillis, ESTestCase::randomNonNegativeLong); + case 2 -> tookNanos = randomValueOtherThan(tookNanos, ESTestCase::randomNonNegativeLong); + case 3 -> cpuNanos = randomValueOtherThan(cpuNanos, ESTestCase::randomNonNegativeLong); + case 4 -> iterations = randomValueOtherThan(iterations, ESTestCase::randomNonNegativeLong); + case 5 -> operators = randomValueOtherThan(operators, DriverStatusTests::randomOperatorStatuses); + case 6 -> sleeps = randomValueOtherThan(sleeps, DriverSleepsTests::randomDriverSleeps); default -> throw new UnsupportedOperationException(); } - return new DriverProfile(tookNanos, cpuNanos, iterations, operators); + return new DriverProfile(startMillis, stopMillis, tookNanos, cpuNanos, iterations, operators, sleeps); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java new file mode 100644 index 0000000000000..a0d956fcd6f6f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java @@ -0,0 +1,240 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static org.hamcrest.core.IsEqual.equalTo; + +public class DriverSleepsTests extends AbstractWireSerializingTestCase { + public static DriverSleeps randomDriverSleeps() { + return randomDriverSleeps(between(0, DriverSleeps.RECORDS * 3)); + } + + private static DriverSleeps randomDriverSleeps(int cycles) { + DriverSleeps sleeps = DriverSleeps.empty(); + long now = 0; + for (int i = 0; i < cycles; i++) { + now += between(1, 100000); + sleeps = sleeps.sleep(randomSleepReason(), now); + if (i != cycles - 1 || randomBoolean()) { + // Randomly don't wake on the last sleep + now += between(1, 100000); + sleeps = sleeps.wake(now); + } + } + return sleeps; + } + + private static String randomSleepReason() { + return randomFrom("driver time", "driver iteration", "exchange empty", "exchange full"); + } + + public void testEmptyToXContent() { + assertThat(Strings.toString(DriverSleeps.empty(), true, true), equalTo(""" + { + "counts" : { }, + "first" : [ ], + "last" : [ ] + }""")); + } + + public void testSleepingToXContent() { + assertThat(Strings.toString(DriverSleeps.empty().sleep("driver iterations", 1723555763000L), true, true), equalTo(""" + { + "counts" : { + "driver iterations" : 1 + }, + "first" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000 + } + ], + "last" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000 + } + ] + }""")); + } + + public void testWakingToXContent() { + assertThat( + Strings.toString(DriverSleeps.empty().sleep("driver iterations", 1723555763000L).wake(1723555863000L), true, true), + equalTo(""" + { + "counts" : { + "driver iterations" : 1 + }, + "first" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000, + "wake" : "2024-08-13T13:31:03.000Z", + "wake_millis" : 1723555863000 + } + ], + "last" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000, + "wake" : "2024-08-13T13:31:03.000Z", + "wake_millis" : 1723555863000 + } + ] + }""") + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return DriverSleeps::read; + } + + @Override + protected DriverSleeps createTestInstance() { + return randomDriverSleeps(); + } + + @Override + protected DriverSleeps mutateInstance(DriverSleeps instance) throws IOException { + if (instance.last().isEmpty()) { + return instance.sleep(randomSleepReason(), between(1, 10000)); + } + DriverSleeps.Sleep last = instance.last().get(instance.last().size() - 1); + if (last.isStillSleeping()) { + return instance.wake(last.sleep() + between(1, 10000)); + } + return instance.sleep(randomSleepReason(), last.wake() + between(1, 10000)); + } + + public void testTracking() throws IOException { + long now = 0; + DriverSleeps sleeps = DriverSleeps.empty(); + + Map expectedCounts = new TreeMap<>(); + List expectedFirst = new 
ArrayList<>(); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + + /* + * Simulate sleeping and waking when the records aren't full. + * New sleeps and wakes should show up in both the "first" and "last" fields. + */ + for (int i = 0; i < DriverSleeps.RECORDS; i++) { + now++; + String reason = randomSleepReason(); + expectedCounts.compute(reason, (k, v) -> v == null ? 1 : v + 1); + + sleeps = sleeps.sleep(reason, now); + expectedFirst.add(new DriverSleeps.Sleep(reason, now, 0)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedFirst); + + now++; + sleeps = sleeps.wake(now); + expectedFirst.set(expectedFirst.size() - 1, new DriverSleeps.Sleep(reason, now - 1, now)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedFirst); + } + + /* + * Simulate sleeping and waking when the records are full. + * New sleeps and wakes should show up in only the "last" field. + */ + List expectedLast = new ArrayList<>(expectedFirst); + for (int i = 0; i < 1000; i++) { + now++; + String reason = randomSleepReason(); + expectedCounts.compute(reason, (k, v) -> v == null ? 1 : v + 1); + + sleeps = sleeps.sleep(reason, now); + expectedLast.remove(0); + expectedLast.add(new DriverSleeps.Sleep(reason, now, 0)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedLast))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedLast); + + now++; + sleeps = sleeps.wake(now); + expectedLast.set(expectedLast.size() - 1, new DriverSleeps.Sleep(reason, now - 1, now)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedLast))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedLast); + } + } + + public void assertXContent( + DriverSleeps sleeps, + Map expectedCounts, + List expectedFirst, + List expectedLast + ) throws IOException { + try (BytesStreamOutput expected = new BytesStreamOutput()) { + try (XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), expected).prettyPrint().humanReadable(true)) { + b.startObject(); + b.startObject("counts"); + { + for (Map.Entry e : expectedCounts.entrySet()) { + b.field(e.getKey(), e.getValue()); + } + } + b.endObject(); + { + b.startArray("first"); + for (DriverSleeps.Sleep sleep : expectedFirst) { + sleep.toXContent(b, ToXContent.EMPTY_PARAMS); + } + b.endArray(); + } + { + b.startArray("last"); + for (DriverSleeps.Sleep sleep : expectedLast) { + sleep.toXContent(b, ToXContent.EMPTY_PARAMS); + } + b.endArray(); + } + b.endObject(); + } + assertThat(Strings.toString(sleeps, true, true), equalTo(expected.bytes().utf8ToString())); + } + } + + public void testWakeNeverSlept() { + Exception e = expectThrows(IllegalStateException.class, () -> DriverSleeps.empty().wake(1)); + assertThat(e.getMessage(), equalTo("Never slept.")); + } + + public void testWakeWhileAwake() { + Exception e = expectThrows(IllegalStateException.class, () -> DriverSleeps.empty().sleep(randomSleepReason(), 1).wake(2).wake(3)); + assertThat(e.getMessage(), equalTo("Already awake.")); + } + + public void testSleepWhileSleeping() { + Exception e = expectThrows( + IllegalStateException.class, + () -> DriverSleeps.empty().sleep(randomSleepReason(), 1).sleep(randomSleepReason(), 2) + ); + assertThat(e.getMessage(), equalTo("Still sleeping.")); + } +} diff 
--git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java index e82cbb831cff2..b46d9f3f4add7 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +41,12 @@ public void testToXContent() { new DriverStatus.OperatorStatus("LuceneSource", LuceneSourceOperatorStatusTests.simple()), new DriverStatus.OperatorStatus("ValuesSourceReader", ValuesSourceReaderOperatorStatusTests.simple()) ), - List.of(new DriverStatus.OperatorStatus("ExchangeSink", ExchangeSinkOperatorStatusTests.simple())) + List.of(new DriverStatus.OperatorStatus("ExchangeSink", ExchangeSinkOperatorStatusTests.simple())), + new DriverSleeps( + Map.of("driver time", 1L), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)) + ) ); assertThat(Strings.toString(status, true, true), equalTo(""" { @@ -72,7 +78,30 @@ public void testToXContent() { """.stripTrailing() + " " + ExchangeSinkOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ } - ] + ], + "sleeps" : { + "counts" : { + "driver time" : 1 + }, + "first" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ], + "last" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ] + } }""")); } @@ -91,7 +120,8 @@ protected DriverStatus createTestInstance() { randomNonNegativeLong(), randomStatus(), randomOperatorStatuses(), - randomOperatorStatuses() + randomOperatorStatuses(), + DriverSleepsTests.randomDriverSleeps() ); } @@ -127,7 +157,8 @@ protected DriverStatus mutateInstance(DriverStatus instance) throws IOException var status = instance.status(); var completedOperators = instance.completedOperators(); var activeOperators = instance.activeOperators(); - switch (between(0, 7)) { + var sleeps = instance.sleeps(); + switch (between(0, 8)) { case 0 -> sessionId = randomValueOtherThan(sessionId, this::randomSessionId); case 1 -> started = randomValueOtherThan(started, ESTestCase::randomNonNegativeLong); case 2 -> lastUpdated = randomValueOtherThan(lastUpdated, ESTestCase::randomNonNegativeLong); @@ -136,9 +167,10 @@ protected DriverStatus mutateInstance(DriverStatus instance) throws IOException case 5 -> status = randomValueOtherThan(status, this::randomStatus); case 6 -> completedOperators = randomValueOtherThan(completedOperators, DriverStatusTests::randomOperatorStatuses); case 7 -> activeOperators = randomValueOtherThan(activeOperators, DriverStatusTests::randomOperatorStatuses); + case 8 -> sleeps = randomValueOtherThan(sleeps, DriverSleepsTests::randomDriverSleeps); default -> throw new UnsupportedOperationException(); } - return new DriverStatus(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators); + return new DriverStatus(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators, sleeps); } @Override diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java index d5fa0a1eaecc9..637cbe8892b3e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java @@ -7,12 +7,15 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.NodeDisconnectedException; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.TransportException; import org.hamcrest.Matchers; import java.io.IOException; @@ -25,6 +28,9 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; public class FailureCollectorTests extends ESTestCase { @@ -87,4 +93,17 @@ public void testEmpty() { assertFalse(collector.hasFailure()); assertNull(collector.getFailure()); } + + public void testTransportExceptions() { + FailureCollector collector = new FailureCollector(5); + collector.unwrapAndCollect(new NodeDisconnectedException(DiscoveryNodeUtils.builder("node-1").build(), "/field_caps")); + collector.unwrapAndCollect(new TransportException(new CircuitBreakingException("too large", CircuitBreaker.Durability.TRANSIENT))); + Exception failure = collector.getFailure(); + assertNotNull(failure); + assertThat(failure, instanceOf(NodeDisconnectedException.class)); + assertThat(failure.getMessage(), equalTo("[][0.0.0.0:1][/field_caps] disconnected")); + Throwable[] suppressed = failure.getSuppressed(); + assertThat(suppressed, arrayWithSize(1)); + assertThat(suppressed[0], instanceOf(CircuitBreakingException.class)); + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java index 3f958464656e0..ab785e739d080 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -99,40 +99,40 @@ public void testBasic() throws Exception { sourceExchanger.addCompletionListener(sourceCompletion); ExchangeSource source = sourceExchanger.createExchangeSource(); sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1); - SubscribableListener waitForReading = source.waitForReading(); + SubscribableListener waitForReading = source.waitForReading().listener(); assertFalse(waitForReading.isDone()); assertNull(source.pollPage()); - assertTrue(sink1.waitForWriting().isDone()); + assertTrue(sink1.waitForWriting().listener().isDone()); randomFrom(sink1, sink2).addPage(pages[0]); randomFrom(sink1, sink2).addPage(pages[1]); // source and sink buffers can store 5 pages for (Page p : List.of(pages[2], 
pages[3], pages[4])) { ExchangeSink sink = randomFrom(sink1, sink2); - assertBusy(() -> assertTrue(sink.waitForWriting().isDone())); + assertBusy(() -> assertTrue(sink.waitForWriting().listener().isDone())); sink.addPage(p); } // sink buffer is full - assertFalse(randomFrom(sink1, sink2).waitForWriting().isDone()); - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertFalse(randomFrom(sink1, sink2).waitForWriting().listener().isDone()); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[0], source.pollPage()); - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[1], source.pollPage()); // sink can write again - assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().isDone())); + assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().listener().isDone())); randomFrom(sink1, sink2).addPage(pages[5]); - assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().isDone())); + assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().listener().isDone())); randomFrom(sink1, sink2).addPage(pages[6]); // sink buffer is full - assertFalse(randomFrom(sink1, sink2).waitForWriting().isDone()); + assertFalse(randomFrom(sink1, sink2).waitForWriting().listener().isDone()); sink1.finish(); assertTrue(sink1.isFinished()); for (int i = 0; i < 5; i++) { - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[2 + i], source.pollPage()); } // source buffer is empty - assertFalse(source.waitForReading().isDone()); - assertBusy(() -> assertTrue(sink2.waitForWriting().isDone())); + assertFalse(source.waitForReading().listener().isDone()); + assertBusy(() -> assertTrue(sink2.waitForWriting().listener().isDone())); sink2.finish(); assertTrue(sink2.isFinished()); assertTrue(source.isFinished()); @@ -356,13 +356,13 @@ public void testEarlyTerminate() { ExchangeSink sink = sinkExchanger.createExchangeSink(); sink.addPage(p1); sink.addPage(p2); - assertFalse(sink.waitForWriting().isDone()); + assertFalse(sink.waitForWriting().listener().isDone()); PlainActionFuture future = new PlainActionFuture<>(); sinkExchanger.fetchPageAsync(true, future); ExchangeResponse resp = future.actionGet(); assertTrue(resp.finished()); assertNull(resp.takePage()); - assertTrue(sink.waitForWriting().isDone()); + assertTrue(sink.waitForWriting().listener().isDone()); assertTrue(sink.isFinished()); } diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index e661ad1e742c9..2b162b4f18ead 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.MapMatcher; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -160,7 +161,7 @@ public void 
testAllowedIndices() throws Exception { .entry("values", List.of(List.of(72.0d))); assertMap(entityAsMap(resp), matcher); } - for (var index : List.of("index-user2", "index-user1,index-user2", "index-user*", "index*")) { + for (var index : List.of("index-user2", "index-user*", "index*")) { Response resp = runESQLCommand("metadata1_read2", "from " + index + " | stats sum=sum(value)"); assertOK(resp); MapMatcher matcher = responseMatcher().entry("columns", List.of(Map.of("name", "sum", "type", "double"))) @@ -170,7 +171,7 @@ public void testAllowedIndices() throws Exception { } public void testAliases() throws Exception { - for (var index : List.of("second-alias", "second-alias,index-user2", "second-*", "second-*,index*")) { + for (var index : List.of("second-alias", "second-*", "second-*,index*")) { Response resp = runESQLCommand( "alias_user2", "from " + index + " METADATA _index" + "| stats sum=sum(value), index=VALUES(_index)" @@ -185,7 +186,7 @@ public void testAliases() throws Exception { } public void testAliasFilter() throws Exception { - for (var index : List.of("first-alias", "first-alias,index-user1", "first-alias,index-*", "first-*,index-*")) { + for (var index : List.of("first-alias", "first-alias,index-*", "first-*,index-*")) { Response resp = runESQLCommand("alias_user1", "from " + index + " METADATA _index" + "| KEEP _index, org, value | LIMIT 10"); assertOK(resp); MapMatcher matcher = responseMatcher().entry( @@ -221,19 +222,97 @@ public void testInsufficientPrivilege() { assertThat(error.getMessage(), containsString("Unknown index [index-user1]")); } + public void testIndexPatternErrorMessageComparison_ESQL_SearchDSL() throws Exception { + // _search match_all query on the index-user1,index-user2 index pattern + XContentBuilder json = JsonXContent.contentBuilder(); + json.startObject(); + json.field("query", QueryBuilders.matchAllQuery()); + json.endObject(); + Request searchRequest = new Request("GET", "/index-user1,index-user2/_search"); + searchRequest.setJsonEntity(Strings.toString(json)); + searchRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", "metadata1_read2")); + + // ES|QL query on the same index pattern + var esqlResp = expectThrows(ResponseException.class, () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2")); + var srchResp = expectThrows(ResponseException.class, () -> client().performRequest(searchRequest)); + + for (ResponseException r : List.of(esqlResp, srchResp)) { + assertThat( + EntityUtils.toString(r.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + } + assertThat(esqlResp.getResponse().getStatusLine().getStatusCode(), equalTo(srchResp.getResponse().getStatusLine().getStatusCode())); + } + public void testLimitedPrivilege() throws Exception { - Response resp = runESQLCommand("metadata1_read2", """ - FROM index-user1,index-user2 METADATA _index - | STATS sum=sum(value), index=VALUES(_index) - """); - assertOK(resp); - Map respMap = entityAsMap(resp); + ResponseException resp = expectThrows( + ResponseException.class, + () -> runESQLCommand( + "metadata1_read2", + "FROM index-user1,index-user2 METADATA _index | STATS sum=sum(value), index=VALUES(_index)" + ) + ); assertThat( - respMap.get("columns"), - equalTo(List.of(Map.of("name", "sum", "type", "double"), Map.of("name", "index", "type", "keyword"))) + EntityUtils.toString(resp.getResponse().getEntity()), + 
containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2 METADATA _index | STATS index=VALUES(_index)") ); - assertThat(respMap.get("values"), equalTo(List.of(List.of(72.0, "index-user2")))); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2 | STATS sum=sum(value)") + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("alias_user1", "FROM first-alias,index-user1 METADATA _index | KEEP _index, org, value | LIMIT 10") + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [alias_user1] with effective roles [alias_user1] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand( + "alias_user2", + "from second-alias,index-user2 METADATA _index | stats sum=sum(value), index=VALUES(_index)" + ) + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [alias_user2] with effective roles [alias_user2] on indices [index-user2]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); } public void testDocumentLevelSecurity() throws Exception { diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java index f20d758132cbb..fa8cb49c59aed 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java @@ -12,9 +12,13 @@ import org.elasticsearch.test.cluster.util.Version; public class Clusters { + + static final String REMOTE_CLUSTER_NAME = "remote_cluster"; + static final String LOCAL_CLUSTER_NAME = "local_cluster"; + public static ElasticsearchCluster remoteCluster() { return ElasticsearchCluster.local() - .name("remote_cluster") + .name(REMOTE_CLUSTER_NAME) .distribution(DistributionType.DEFAULT) .version(Version.fromString(System.getProperty("tests.old_cluster_version"))) .nodes(2) @@ -28,7 +32,7 @@ public static ElasticsearchCluster remoteCluster() { public static ElasticsearchCluster localCluster(ElasticsearchCluster remoteCluster) 
{ return ElasticsearchCluster.local() - .name("local_cluster") + .name(LOCAL_CLUSTER_NAME) .distribution(DistributionType.DEFAULT) .version(Version.CURRENT) .nodes(2) diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..21307c5362417 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.ccq; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.AfterClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.StringJoiner; + +import static org.elasticsearch.xpack.esql.ccq.Clusters.REMOTE_CLUSTER_NAME; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + static ElasticsearchCluster remoteCluster = Clusters.remoteCluster(); + static ElasticsearchCluster localCluster = Clusters.localCluster(remoteCluster); + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster); + private static RestClient remoteClient; + + @Override + protected String getTestRestCluster() { + return localCluster.getHttpAddresses(); + } + + @AfterClass + public static void closeRemoteClients() throws IOException { + try { + IOUtils.close(remoteClient); + } finally { + remoteClient = null; + } + } + + @Override + protected String clusterSpecificIndexName(String pattern) { + StringJoiner sj = new StringJoiner(","); + for (String index : pattern.split(",")) { + sj.add(remoteClusterIndex(index)); + } + return sj.toString(); + } + + private static String remoteClusterIndex(String indexName) { + return REMOTE_CLUSTER_NAME + ":" + indexName; + } + + @Override + protected RestClient provisioningClient() throws IOException { + return remoteClusterClient(); + } + + @Override + protected RestClient provisioningAdminClient() throws IOException { + return remoteClusterClient(); + } + + private RestClient remoteClusterClient() throws IOException { + if (remoteClient == null) { + var clusterHosts = parseClusterHosts(remoteCluster.getHttpAddresses()); + remoteClient = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[0])); + } + return remoteClient; + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index d6ab99f0b21ac..3e799730f7269 100644 --- 
a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -111,6 +111,7 @@ protected void shouldSkipTest(String testName) throws IOException { isEnabled(testName, instructions, Clusters.oldVersion()) ); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats")); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats_v2")); } private TestFeatureService remoteFeaturesService() throws IOException { diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..0187bafe19fce --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(spec -> {}); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..5a31fc722eec1 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index d679ee18d0a73..b0fa233965da6 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.elasticsearch.Build; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; @@ -21,15 +22,19 @@ import org.elasticsearch.test.TestClustersThreadFilter; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; import org.hamcrest.Matchers; +import org.junit.Assert; import org.junit.ClassRule; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.test.ListMatcher.matchesList; @@ -285,15 +290,11 @@ public void testProfile() throws IOException { .entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) ); - MapMatcher commonProfile = matchesMap().entry("iterations", greaterThan(0)) - .entry("cpu_nanos", greaterThan(0)) - .entry("took_nanos", greaterThan(0)) - .entry("operators", instanceOf(List.class)); List> signatures = new ArrayList<>(); @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { - assertThat(p, commonProfile); + assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") List> operators = (List>) p.get("operators"); @@ -348,15 +349,11 @@ public void testInlineStatsProfile() throws IOException { ).entry("values", values).entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) ); - MapMatcher commonProfile = matchesMap().entry("iterations", greaterThan(0)) - .entry("cpu_nanos", greaterThan(0)) - .entry("took_nanos", greaterThan(0)) - .entry("operators", instanceOf(List.class)); List> signatures = new ArrayList<>(); @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { - assertThat(p, commonProfile); + assertThat(p, commonProfile()); List sig = new 
ArrayList<>(); @SuppressWarnings("unchecked") List> operators = (List>) p.get("operators"); @@ -398,6 +395,115 @@ public void testInlineStatsProfile() throws IOException { ); } + public void testForceSleepsProfile() throws IOException { + assumeTrue("requires pragmas", Build.current().isSnapshot()); + + Request createIndex = new Request("PUT", testIndexName()); + createIndex.setJsonEntity(""" + { + "settings": { + "index": { + "number_of_shards": 1 + } + } + }"""); + Response response = client().performRequest(createIndex); + assertThat( + entityToMap(response.getEntity(), XContentType.JSON), + matchesMap().entry("shards_acknowledged", true).entry("index", testIndexName()).entry("acknowledged", true) + ); + + int groupCount = 300; + for (int group1 = 0; group1 < groupCount; group1++) { + StringBuilder b = new StringBuilder(); + for (int group2 = 0; group2 < groupCount; group2++) { + b.append(String.format(Locale.ROOT, """ + {"create":{"_index":"%s"}} + {"@timestamp":"2020-12-12","value":1,"group1":%d,"group2":%d} + """, testIndexName(), group1, group2)); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.addParameter("filter_path", "errors"); + bulk.setJsonEntity(b.toString()); + response = client().performRequest(bulk); + Assert.assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8)); + } + + RequestObjectBuilder builder = requestObjectBuilder().query( + fromIndex() + " | STATS AVG(value), MAX(value), MIN(value) BY group1, group2 | SORT group1, group2 ASC | LIMIT 10" + ); + // Lock to shard level partitioning, so we get consistent profile output + builder.pragmas(Settings.builder().put("data_partitioning", "shard").put("page_size", 10).build()); + builder.profile(true); + Map result = runEsql(builder); + List> expectedValues = new ArrayList<>(); + for (int group2 = 0; group2 < 10; group2++) { + expectedValues.add(List.of(1.0, 1, 1, 0, group2)); + } + assertMap( + result, + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "AVG(value)").entry("type", "double")) + .item(matchesMap().entry("name", "MAX(value)").entry("type", "long")) + .item(matchesMap().entry("name", "MIN(value)").entry("type", "long")) + .item(matchesMap().entry("name", "group1").entry("type", "long")) + .item(matchesMap().entry("name", "group2").entry("type", "long")) + ).entry("values", expectedValues).entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) + ); + + @SuppressWarnings("unchecked") + List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); + + for (Map p : profiles) { + assertMap(p, commonProfile()); + @SuppressWarnings("unchecked") + Map sleeps = (Map) p.get("sleeps"); + String operators = p.get("operators").toString(); + MapMatcher sleepMatcher = matchesMap().entry("reason", "exchange empty") + .entry("sleep_millis", greaterThan(0L)) + .entry("wake_millis", greaterThan(0L)); + if (operators.contains("LuceneSourceOperator")) { + assertMap(sleeps, matchesMap().entry("counts", Map.of()).entry("first", List.of()).entry("last", List.of())); + } else if (operators.contains("ExchangeSourceOperator")) { + if (operators.contains("ExchangeSinkOperator")) { + assertMap(sleeps, matchesMap().entry("counts", matchesMap().entry("exchange empty", greaterThan(0))).extraOk()); + @SuppressWarnings("unchecked") + List> first = (List>) sleeps.get("first"); + for (Map s : first) { + assertMap(s, sleepMatcher); + } + @SuppressWarnings("unchecked") + List> last = (List>) 
sleeps.get("last"); + for (Map s : last) { + assertMap(s, sleepMatcher); + } + + } else { + assertMap( + sleeps, + matchesMap().entry("counts", matchesMap().entry("exchange empty", 1)) + .entry("first", List.of(sleepMatcher)) + .entry("last", List.of(sleepMatcher)) + ); + } + } else { + fail("unknown signature: " + operators); + } + } + } + + private MapMatcher commonProfile() { + return matchesMap().entry("start_millis", greaterThan(0L)) + .entry("stop_millis", greaterThan(0L)) + .entry("iterations", greaterThan(0)) + .entry("cpu_nanos", greaterThan(0)) + .entry("took_nanos", greaterThan(0)) + .entry("operators", instanceOf(List.class)) + .entry("sleeps", matchesMap().extraOk()); + } + private String checkOperatorProfile(Map o) { String name = (String) o.get("operator"); name = name.replaceAll("\\[.+", ""); diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java new file mode 100644 index 0000000000000..9ec4f60f4c843 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.rest; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public abstract class EsqlRestValidationTestCase extends ESRestTestCase { + + private static final String indexName = "test_esql"; + private static final String aliasName = "alias-test_esql"; + protected static final String[] existentIndexWithWildcard = new String[] { + indexName + ",inexistent*", + indexName + "*,inexistent*", + "inexistent*," + indexName }; + private static final String[] existentIndexWithoutWildcard = new String[] { indexName + ",inexistent", "inexistent," + indexName }; + protected static final String[] existentAliasWithWildcard = new String[] { + aliasName + ",inexistent*", + aliasName + "*,inexistent*", + "inexistent*," + aliasName }; + private static final String[] existentAliasWithoutWildcard = new String[] { aliasName + ",inexistent", "inexistent," + aliasName }; + private static final String[] inexistentIndexNameWithWildcard = new String[] { "inexistent*", "inexistent1*,inexistent2*" }; + private static final String[] inexistentIndexNameWithoutWildcard = new String[] { "inexistent", "inexistent1,inexistent2" }; + private static final String createAlias = "{\"actions\":[{\"add\":{\"index\":\"" + indexName + "\",\"alias\":\"" + aliasName + "\"}}]}"; + private static final String removeAlias = "{\"actions\":[{\"remove\":{\"index\":\"" + + indexName + + "\",\"alias\":\"" + + aliasName + + "\"}}]}"; + + @Before + @After 
+ public void assertRequestBreakerEmpty() throws Exception { + EsqlSpecTestCase.assertRequestBreakerEmpty(); + } + + @Before + public void prepareIndices() throws IOException { + if (provisioningClient().performRequest(new Request("HEAD", "/" + indexName)).getStatusLine().getStatusCode() == 404) { + var request = new Request("PUT", "/" + indexName); + request.setJsonEntity("{\"mappings\": {\"properties\": {\"foo\":{\"type\":\"keyword\"}}}}"); + provisioningClient().performRequest(request); + } + assertOK(provisioningAdminClient().performRequest(new Request("POST", "/" + indexName + "/_refresh"))); + } + + @After + public void wipeTestData() throws IOException { + try { + var response = provisioningAdminClient().performRequest(new Request("DELETE", "/" + indexName)); + assertEquals(200, response.getStatusLine().getStatusCode()); + } catch (ResponseException re) { + assertEquals(404, re.getResponse().getStatusLine().getStatusCode()); + } + } + + private String getInexistentIndexErrorMessage() { + return "\"reason\" : \"Found 1 problem\\nline 1:1: Unknown index "; + } + + public void testInexistentIndexNameWithWildcard() throws IOException { + assertErrorMessages(inexistentIndexNameWithWildcard, getInexistentIndexErrorMessage(), 400); + } + + public void testInexistentIndexNameWithoutWildcard() throws IOException { + assertErrorMessages(inexistentIndexNameWithoutWildcard, getInexistentIndexErrorMessage(), 400); + } + + public void testExistentIndexWithoutWildcard() throws IOException { + for (String indexName : existentIndexWithoutWildcard) { + assertErrorMessage(indexName, "\"reason\" : \"no such index [inexistent]\"", 404); + } + } + + public void testExistentIndexWithWildcard() throws IOException { + assertValidRequestOnIndices(existentIndexWithWildcard); + } + + public void testAlias() throws IOException { + createAlias(); + + for (String indexName : existentAliasWithoutWildcard) { + assertErrorMessage(indexName, "\"reason\" : \"no such index [inexistent]\"", 404); + } + assertValidRequestOnIndices(existentAliasWithWildcard); + + deleteAlias(); + } + + private void assertErrorMessages(String[] indices, String errorMessage, int statusCode) throws IOException { + for (String indexName : indices) { + assertErrorMessage(indexName, errorMessage + "[" + clusterSpecificIndexName(indexName) + "]", statusCode); + } + } + + protected String clusterSpecificIndexName(String indexName) { + return indexName; + } + + private void assertErrorMessage(String indexName, String errorMessage, int statusCode) throws IOException { + var specificName = clusterSpecificIndexName(indexName); + final var request = createRequest(specificName); + ResponseException exc = expectThrows(ResponseException.class, () -> client().performRequest(request)); + + assertThat(exc.getResponse().getStatusLine().getStatusCode(), equalTo(statusCode)); + assertThat(exc.getMessage(), containsString(errorMessage)); + } + + private Request createRequest(String indexName) throws IOException { + final var request = new Request("POST", "/_query"); + request.addParameter("error_trace", "true"); + request.addParameter("pretty", "true"); + request.setJsonEntity( + Strings.toString(JsonXContent.contentBuilder().startObject().field("query", "from " + indexName).endObject()) + ); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + request.setOptions(options); + return request; + } + + private void assertValidRequestOnIndices(String[] indices) throws IOException { + for 
(String indexName : indices) { + final var request = createRequest(clusterSpecificIndexName(indexName)); + Response response = client().performRequest(request); + assertOK(response); + } + } + + // Returned client is used to load the test data, either in the local cluster or a remote one (for + // multi-clusters). The client()/adminClient() will always connect to the local cluster + protected RestClient provisioningClient() throws IOException { + return client(); + } + + protected RestClient provisioningAdminClient() throws IOException { + return adminClient(); + } + + private void createAlias() throws IOException { + var r = new Request("POST", "_aliases"); + r.setJsonEntity(createAlias); + assertOK(provisioningClient().performRequest(r)); + } + + private void deleteAlias() throws IOException { + var r = new Request("POST", "/_aliases/"); + r.setJsonEntity(removeAlias); + assertOK(provisioningAdminClient().performRequest(r)); + } +} diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 5a99e8006d6a7..8b6511875e86c 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -903,17 +903,24 @@ public static Map runEsqlAsync( checkKeepOnCompletion(requestObject, json); String id = (String) json.get("id"); + var supportsAsyncHeaders = clusterHasCapability("POST", "/_query", List.of(), List.of("async_query_status_headers")).orElse(false); + if (id == null) { // no id returned from an async call, must have completed immediately and without keep_on_completion assertThat(requestObject.keepOnCompletion(), either(nullValue()).or(is(false))); assertThat((boolean) json.get("is_running"), is(false)); + if (supportsAsyncHeaders) { + assertThat(response.getHeader("X-Elasticsearch-Async-Id"), nullValue()); + assertThat(response.getHeader("X-Elasticsearch-Async-Is-Running"), is("?0")); + } assertWarnings(response, expectedWarnings, expectedWarningsRegex); json.remove("is_running"); // remove this to not mess up later map assertions return Collections.unmodifiableMap(json); } else { // async may not return results immediately, so may need an async get assertThat(id, is(not(emptyOrNullString()))); - if ((boolean) json.get("is_running") == false) { + boolean isRunning = (boolean) json.get("is_running"); + if (isRunning == false) { // must have completed immediately so keep_on_completion must be true assertThat(requestObject.keepOnCompletion(), is(true)); assertWarnings(response, expectedWarnings, expectedWarningsRegex); @@ -925,6 +932,12 @@ public static Map runEsqlAsync( assertThat(json.get("columns"), is(equalTo(List.>of()))); // no partial results assertThat(json.get("pages"), nullValue()); } + + if (supportsAsyncHeaders) { + assertThat(response.getHeader("X-Elasticsearch-Async-Id"), is(id)); + assertThat(response.getHeader("X-Elasticsearch-Async-Is-Running"), is(isRunning ? 
"?1" : "?0")); + } + // issue a second request to "async get" the results Request getRequest = prepareAsyncGetRequest(id); getRequest.setOptions(options); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec index 7e2afb9267e5b..b8569ead94509 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec @@ -314,6 +314,21 @@ FROM sample_data 3 |2025-10-01T00:00:00.000Z ; +bucketByYearLowBucketCount#[skip:-8.13.99, reason:BUCKET extended in 8.14] +FROM employees +| WHERE hire_date >= "1985-02-18T00:00:00.000Z" AND hire_date <= "1988-10-18T00:00:00.000Z" +| STATS c = COUNT(*) BY b = BUCKET(hire_date, 3, "1985-02-18T00:00:00.000Z", "1988-10-18T00:00:00.000Z") +| SORT b +; + +// Note: we don't bucket to anything longer than 1 year (like 2 years), so even if requesting 3 buckets, we still get 4 + c:long | b:date +11 |1985-01-01T00:00:00.000Z +11 |1986-01-01T00:00:00.000Z +15 |1987-01-01T00:00:00.000Z +9 |1988-01-01T00:00:00.000Z +; + // // Numeric bucketing // @@ -393,6 +408,17 @@ ROW long = TO_LONG(100), double = 99., int = 100 99.0 |0.0 |99.0 ; +// identical results as above +bucketNumericMixedTypesIntegerSpans +required_capability: bucket_whole_number_as_span +ROW long = TO_LONG(100), double = 99., int = 100 +| STATS BY b1 = BUCKET(long, double::int), b2 = BUCKET(double, long), b3 = BUCKET(int, 49.5) +; + + b1:double| b2:double| b3:double +99.0 |0.0 |99.0 +; + bucketWithFloats#[skip:-8.13.99, reason:BUCKET renamed in 8.14] FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec index ef07f1dae3c1a..e0b921947e16d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec @@ -181,6 +181,7 @@ emp_no:integer |first_name:keyword ; rangeVersion +required_capability: string_literal_auto_casting_extended from apps | where version > "2" and version < "4" | keep id, version diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec index e52f1e45cead8..3f2e14f74174b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec @@ -67,11 +67,70 @@ emp_no:integer | avg_worked_seconds:long | gender:keyword | max_avg_worked_secon 10030 | 394597613 | M | 394597613 ; -// TODO allow inline calculation like BY l = SUBSTRING( maxOfLongByCalculatedKeyword -required_capability: inlinestats +required_capability: inlinestats_v2 // tag::longest-tenured-by-first[] +FROM employees +| KEEP emp_no, avg_worked_seconds, last_name +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +// end::longest-tenured-by-first[] +; + +// tag::longest-tenured-by-first-result[] +emp_no:integer | avg_worked_seconds:long | last_name:keyword | SUBSTRING(last_name, 0, 1):keyword | max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | A | 372660279 + 10074 | 382397583 | Bernatsky | B | 382397583 + 
10044 | 387408356 | Casley | C | 387408356 + 10030 | 394597613 | Demeyer | D | 394597613 + 10087 | 305782871 | Eugenio | E | 305782871 +// end::longest-tenured-by-first-result[] +; + +maxOfLongByCalculatedNamedKeyword +required_capability: inlinestats_v2 + +FROM employees +| KEEP emp_no, avg_worked_seconds, last_name +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY l = SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +; + +emp_no:integer | avg_worked_seconds:long | last_name:keyword | l:keyword | max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | A | 372660279 + 10074 | 382397583 | Bernatsky | B | 382397583 + 10044 | 387408356 | Casley | C | 387408356 + 10030 | 394597613 | Demeyer | D | 394597613 + 10087 | 305782871 | Eugenio | E | 305782871 +; + +maxOfLongByCalculatedDroppedKeyword +required_capability: inlinestats_v2 + +FROM employees +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY l = SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| KEEP emp_no, avg_worked_seconds, last_name, max_avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +; + +emp_no:integer | avg_worked_seconds:long | last_name:keyword | max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | 372660279 + 10074 | 382397583 | Bernatsky | 382397583 + 10044 | 387408356 | Casley | 387408356 + 10030 | 394597613 | Demeyer | 394597613 + 10087 | 305782871 | Eugenio | 305782871 +; + +maxOfLongByEvaledKeyword +required_capability: inlinestats + FROM employees | EVAL l = SUBSTRING(last_name, 0, 1) | KEEP emp_no, avg_worked_seconds, l @@ -79,17 +138,14 @@ FROM employees | WHERE max_avg_worked_seconds == avg_worked_seconds | SORT l ASC | LIMIT 5 -// end::longest-tenured-by-first[] ; -// tag::longest-tenured-by-first-result[] emp_no:integer | avg_worked_seconds:long | l:keyword | max_avg_worked_seconds:long 10065 | 372660279 | A | 372660279 10074 | 382397583 | B | 382397583 10044 | 387408356 | C | 387408356 10030 | 394597613 | D | 394597613 10087 | 305782871 | E | 305782871 -// end::longest-tenured-by-first-result[] ; maxOfLongByInt @@ -499,3 +555,101 @@ emp_no:integer | salary:integer | ninety_fifth_salary:double 10029 | 74999 | 73584.95 10045 | 74970 | 73584.95 ; + +byTwoCalculated +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + BY lat_10 = ROUND(ST_Y(location), -1) + , lon_10 = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | lat_10:double | lon_10:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 50 | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | -10 | 40 | 4 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | 20 | -100 | 2 +; + +byTwoCalculatedSecondOverwrites +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + BY x = ROUND(ST_Y(location), -1) + , x = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | x:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | 40 | 2 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | -100 | 2 +; + +byTwoCalculatedSecondOverwritesReferencingFirst +required_capability: 
inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| EVAL x = ST_X(location) +| INLINESTATS min_sl=MIN(scalerank) + BY x = ROUND(x, -1) + , x = ROUND(x, -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | x:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | 40 | 2 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | -100 | 2 +; + + +groupShadowsAgg +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + , lat_10 = ROUND(ST_Y(location), -1) + BY lat_10 = ROUND(ST_Y(location), -1) + , lon_10 = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | lat_10:double | lon_10:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 50 | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | -10 | 40 | 4 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | 20 | -100 | 2 +; + +groupShadowsField +required_capability: inlinestats_v2 + + FROM employees +| KEEP emp_no, salary, hire_date +| INLINESTATS avg_salary = AVG(salary) + BY hire_date = DATE_TRUNC(1 year, hire_date) +| WHERE salary > avg_salary +| SORT emp_no ASC +| LIMIT 4 +; + +emp_no:integer | salary:integer | hire_date:datetime | avg_salary:double + 10001 | 57305 | 1986-01-01T00:00:00Z | 43869.63636363636 + 10002 | 56371 | 1985-01-01T00:00:00Z | 51831.818181818184 + 10003 | 61805 | 1986-01-01T00:00:00Z | 43869.63636363636 + 10005 | 63528 | 1989-01-01T00:00:00Z | 53487.07692307692 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 35c852d6ba2fe..f1f66a9cb990c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -9,8 +9,8 @@ synopsis:keyword "double atan(number:double|integer|long|unsigned_long)" "double atan2(y_coordinate:double|integer|long|unsigned_long, x_coordinate:double|integer|long|unsigned_long)" "double avg(number:double|integer|long)" -"double|date bin(field:integer|long|double|date, buckets:integer|double|date_period|time_duration, ?from:integer|long|double|date, ?to:integer|long|double|date)" -"double|date bucket(field:integer|long|double|date, buckets:integer|double|date_period|time_duration, ?from:integer|long|double|date, ?to:integer|long|double|date)" +"double|date bin(field:integer|long|double|date, buckets:integer|long|double|date_period|time_duration, ?from:integer|long|double|date|keyword|text, ?to:integer|long|double|date|keyword|text)" +"double|date bucket(field:integer|long|double|date, buckets:integer|long|double|date_period|time_duration, ?from:integer|long|double|date|keyword|text, ?to:integer|long|double|date|keyword|text)" "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version case(condition:boolean, trueValue...:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" "double cbrt(number:double|integer|long|unsigned_long)" "double|integer|long|unsigned_long ceil(number:double|integer|long|unsigned_long)" @@ -40,10 +40,10 @@ double e() "double log(?base:integer|unsigned_long|long|double, number:integer|unsigned_long|long|double)" "double 
log10(number:double|integer|long|unsigned_long)" "keyword|text ltrim(string:keyword|text)" -"boolean|double|integer|long|date|ip max(field:boolean|double|integer|long|date|ip)" +"boolean|double|integer|long|date|ip|keyword|text|long|version max(field:boolean|double|integer|long|date|ip|keyword|text|long|version)" "double median(number:double|integer|long)" "double median_absolute_deviation(number:double|integer|long)" -"boolean|double|integer|long|date|ip min(field:boolean|double|integer|long|date|ip)" +"boolean|double|integer|long|date|ip|keyword|text|long|version min(field:boolean|double|integer|long|date|ip|keyword|text|long|version)" "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_append(field1:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, field2:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version)" "double mv_avg(number:double|integer|long|unsigned_long)" "keyword mv_concat(string:text|keyword, delim:text|keyword)" @@ -54,6 +54,7 @@ double e() "boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(field:boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version)" "double|integer|long|unsigned_long mv_median(number:double|integer|long|unsigned_long)" "boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(field:boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version)" +"double|integer|long mv_percentile(number:double|integer|long, percentile:double|integer|long)" "double mv_pseries_weighted_sum(number:double, p:double)" "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(field:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" "boolean|date|double|integer|ip|keyword|long|text|version mv_sort(field:boolean|date|double|integer|ip|keyword|long|text|version, ?order:keyword)" @@ -132,8 +133,8 @@ asin |number |"double|integer|long|unsigne atan |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. atan2 |[y_coordinate, x_coordinate] |["double|integer|long|unsigned_long", "double|integer|long|unsigned_long"] |[y coordinate. If `null`\, the function returns `null`., x coordinate. If `null`\, the function returns `null`.] avg |number |"double|integer|long" |[""] -bin |[field, buckets, from, to] |["integer|long|double|date", "integer|double|date_period|time_duration", "integer|long|double|date", "integer|long|double|date"] |[Numeric or date expression from which to derive buckets., Target number of buckets., Start of the range. Can be a number or a date expressed as a string., End of the range. Can be a number or a date expressed as a string.] -bucket |[field, buckets, from, to] |["integer|long|double|date", "integer|double|date_period|time_duration", "integer|long|double|date", "integer|long|double|date"] |[Numeric or date expression from which to derive buckets., Target number of buckets., Start of the range. Can be a number or a date expressed as a string., End of the range. Can be a number or a date expressed as a string.] 
+bin |[field, buckets, from, to] |["integer|long|double|date", "integer|long|double|date_period|time_duration", "integer|long|double|date|keyword|text", "integer|long|double|date|keyword|text"] |[Numeric or date expression from which to derive buckets., Target number of buckets\, or desired bucket size if `from` and `to` parameters are omitted., Start of the range. Can be a number\, a date or a date expressed as a string., End of the range. Can be a number\, a date or a date expressed as a string.] +bucket |[field, buckets, from, to] |["integer|long|double|date", "integer|long|double|date_period|time_duration", "integer|long|double|date|keyword|text", "integer|long|double|date|keyword|text"] |[Numeric or date expression from which to derive buckets., Target number of buckets\, or desired bucket size if `from` and `to` parameters are omitted., Start of the range. Can be a number\, a date or a date expressed as a string., End of the range. Can be a number\, a date or a date expressed as a string.] case |[condition, trueValue] |[boolean, "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version"] |[A condition., The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches.] cbrt |number |"double|integer|long|unsigned_long" |"Numeric expression. If `null`, the function returns `null`." ceil |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. @@ -163,10 +164,10 @@ locate |[string, substring, start] |["keyword|text", "keyword|te log |[base, number] |["integer|unsigned_long|long|double", "integer|unsigned_long|long|double"] |["Base of logarithm. If `null`\, the function returns `null`. If not provided\, this function returns the natural logarithm (base e) of a value.", "Numeric expression. If `null`\, the function returns `null`."] log10 |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. ltrim |string |"keyword|text" |String expression. If `null`, the function returns `null`. -max |field |"boolean|double|integer|long|date|ip" |[""] +max |field |"boolean|double|integer|long|date|ip|keyword|text|long|version" |[""] median |number |"double|integer|long" |[""] median_absolut|number |"double|integer|long" |[""] -min |field |"boolean|double|integer|long|date|ip" |[""] +min |field |"boolean|double|integer|long|date|ip|keyword|text|long|version" |[""] mv_append |[field1, field2] |["boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version", "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version"] | ["", ""] mv_avg |number |"double|integer|long|unsigned_long" |Multivalue expression. mv_concat |[string, delim] |["text|keyword", "text|keyword"] |[Multivalue expression., Delimiter.] @@ -177,6 +178,7 @@ mv_last |field |"boolean|cartesian_point|car mv_max |field |"boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version" |Multivalue expression. mv_median |number |"double|integer|long|unsigned_long" |Multivalue expression. mv_min |field |"boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version" |Multivalue expression. +mv_percentile |[number, percentile] |["double|integer|long", "double|integer|long"] |[Multivalue expression., The percentile to calculate. Must be a number between 0 and 100. 
Numbers out of range will return a null instead.] mv_pseries_wei|[number, p] |[double, double] |[Multivalue expression., It is a constant number that represents the 'p' parameter in the P-Series. It impacts every element's contribution to the weighted sum.] mv_slice |[field, start, end] |["boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version", integer, integer]|[Multivalue expression. If `null`\, the function returns `null`., Start position. If `null`\, the function returns `null`. The start argument can be negative. An index of -1 is used to specify the last value in the list., End position(included). Optional; if omitted\, the position at `start` is returned. The end argument can be negative. An index of -1 is used to specify the last value in the list.] mv_sort |[field, order] |["boolean|date|double|integer|ip|keyword|long|text|version", keyword] |[Multivalue expression. If `null`\, the function returns `null`., Sort order. The valid options are ASC and DESC\, the default is ASC.] @@ -300,6 +302,7 @@ mv_last |Converts a multivalue expression into a single valued column cont mv_max |Converts a multivalued expression into a single valued column containing the maximum value. mv_median |Converts a multivalued field into a single valued field containing the median value. mv_min |Converts a multivalued expression into a single valued column containing the minimum value. +mv_percentile |Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. mv_pseries_wei|Converts a multivalued expression into a single-valued column by multiplying every element on the input list by its corresponding term in P-Series and computing the sum. mv_slice |Returns a subset of the multivalued field using the start and end index values. mv_sort |Sorts a multivalued field in lexicographical order. 
@@ -411,10 +414,10 @@ locate |integer log |double |[true, false] |false |false log10 |double |false |false |false ltrim |"keyword|text" |false |false |false -max |"boolean|double|integer|long|date|ip" |false |false |true +max |"boolean|double|integer|long|date|ip|keyword|text|long|version" |false |false |true median |double |false |false |true median_absolut|double |false |false |true -min |"boolean|double|integer|long|date|ip" |false |false |true +min |"boolean|double|integer|long|date|ip|keyword|text|long|version" |false |false |true mv_append |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" |[false, false] |false |false mv_avg |double |false |false |false mv_concat |keyword |[false, false] |false |false @@ -425,6 +428,7 @@ mv_last |"boolean|cartesian_point|cartesian_shape|date|date_nanos|double|g mv_max |"boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version" |false |false |false mv_median |"double|integer|long|unsigned_long" |false |false |false mv_min |"boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version" |false |false |false +mv_percentile |"double|integer|long" |[false, false] |false |false mv_pseries_wei|"double" |[false, false] |false |false mv_slice |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" |[false, false, true] |false |false mv_sort |"boolean|date|double|integer|ip|keyword|long|text|version" |[false, true] |false |false @@ -504,5 +508,5 @@ countFunctions#[skip:-8.15.99] meta functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -114 | 114 | 114 +115 | 115 | 115 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec new file mode 100644 index 0000000000000..c51e62e865ea2 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec @@ -0,0 +1,163 @@ +default +required_capability: fn_mv_percentile + +// tag::example[] +ROW values = [5, 5, 10, 12, 5000] +| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values) +// end::example[] +; + +// tag::example-result[] +values:integer | p50:integer | median:integer +[5, 5, 10, 12, 5000] | 10 | 10 +// end::example-result[] +; + +p0 +required_capability: fn_mv_percentile + +ROW a = [5, 5, 10, 12, 5000] +| EVAL pInt = MV_PERCENTILE(a, 0), pLong = MV_PERCENTILE(a, 0::long), pDouble = MV_PERCENTILE(a, 0.0) +| KEEP pInt, pLong, pDouble +; + +pInt:integer | pLong:integer | pDouble:integer +5 | 5 | 5 +; + +p100 +required_capability: fn_mv_percentile + +ROW a = [5, 5, 10, 12, 5000] +| EVAL pInt = MV_PERCENTILE(a, 100), pLong = MV_PERCENTILE(a, 100::long), pDouble = MV_PERCENTILE(a, 100.0) +| KEEP pInt, pLong, pDouble +; + +pInt:integer | pLong:integer | pDouble:integer +5000 | 5000 | 5000 +; + +fractionInt +required_capability: fn_mv_percentile + +ROW a = [0, 10] +| EVAL pInt = MV_PERCENTILE(a, 75), pLong = MV_PERCENTILE(a, 75::long), pDouble = MV_PERCENTILE(a, 75.0) +| KEEP pInt, pLong, pDouble +; + +pInt:integer | pLong:integer | pDouble:integer +7 | 7 | 7 +; + +fractionLong +required_capability: fn_mv_percentile + +ROW a = to_long([0, 10]) +| EVAL pInt = MV_PERCENTILE(a, 75), pLong = MV_PERCENTILE(a, 75::long), pDouble = MV_PERCENTILE(a, 75.0) +| KEEP pInt, pLong, pDouble +; + +pInt:long | pLong:long | pDouble:long +7 | 7 | 7 +; + +fractionDouble 
+required_capability: fn_mv_percentile + +ROW a = [0., 10.] +| EVAL pInt = MV_PERCENTILE(a, 75), pLong = MV_PERCENTILE(a, 75::long), pDouble = MV_PERCENTILE(a, 75.0) +| KEEP pInt, pLong, pDouble +; + +pInt:double | pLong:double | pDouble:double +7.5 | 7.5 | 7.5 +; + +singleValue +required_capability: fn_mv_percentile + +ROW integer = 5, long = 5::long, double = 5.0 +| EVAL + integer = MV_PERCENTILE(integer, 75), + long = MV_PERCENTILE(long, 75), + double = MV_PERCENTILE(double, 75) +; + +integer:integer | long:long | double:double +5 | 5 | 5 +; + +fromIndex +required_capability: fn_mv_percentile + +FROM employees +| EVAL + integer = MV_PERCENTILE(salary_change.int, 75), + long = MV_PERCENTILE(salary_change.long, 75), + double = MV_PERCENTILE(salary_change, 75) +| KEEP emp_no, integer, long, double +| SORT double +| LIMIT 3 +; + +emp_no:integer | integer:integer | long:long | double:double +10034 | -8 | -8 | -8.46 +10037 | -7 | -7 | -7.08 +10039 | -6 | -6 | -6.9 +; + +fromIndexPercentile +required_capability: fn_mv_percentile + +FROM employees +| SORT emp_no +| LIMIT 1 +| EVAL + integer = MV_PERCENTILE(salary_change.int, languages), + long = MV_PERCENTILE(salary_change.long, languages.long), + double = MV_PERCENTILE(salary_change, height), + null_value = MV_PERCENTILE(salary_change, emp_no) +| KEEP integer, long, double, null_value +; +warning:Line 8:14: evaluation of [MV_PERCENTILE(salary_change, emp_no)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 8:14: java.lang.IllegalArgumentException: Percentile parameter must be a number between 0 and 100, found [10001.0] + +integer:integer | long:long | double:double | null_value:double +1 | 1 | 1.19 | null +; + +multipleExpressions +required_capability: fn_mv_percentile + +ROW x = [0, 5, 10] +| EVAL + MV_PERCENTILE(x, 75), + a = MV_PERCENTILE(x, 75), + b = MV_PERCENTILE(TO_DOUBLE([0, 5, 10]), 75), + c = MV_PERCENTILE(CASE(true, x, [0, 1]), 75) +; + +x:integer | MV_PERCENTILE(x, 75):integer | a:integer | b:double | c:integer +[0, 5, 10] | 7 | 7 | 7.5 | 7 +; + +nullsAndFolds +required_capability: fn_mv_percentile + +ROW x = [5, 5, 10, 12, 5000], n = null, y = 50 +| EVAL evalNull = null / 2, evalValue = 31 + 1 +| LIMIT 1 +| EVAL + a = mv_percentile(y, 90), + b = mv_percentile(x, y), + c = mv_percentile(null, null), + d = mv_percentile(null, y), + e = mv_percentile(evalNull, y), + f = mv_percentile(evalValue, y), + g = mv_percentile(n, y) +| KEEP a, b, c, d, e, f, g +; + +a:integer | b:integer | c:null | d:null | e:integer | f:integer | g:null +50 | 10 | null | null | null | 32 | null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 02067e9dbe490..35416c7945128 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -487,6 +487,17 @@ POINT (42.97109629958868 14.7552534006536) | 1 ############################################### # Tests for ST_INTERSECTS on GEO_POINT type +literalGeoPointIntersectsLiteralPolygon +required_capability: st_intersects + +ROW pt = TO_GEOPOINT("POINT(0 85)"), polygon = TO_GEOSHAPE("POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70))") +| EVAL intersects = ST_INTERSECTS(pt, polygon) +; + +pt:geo_point | polygon:geo_shape | intersects:boolean +POINT(0 85) | POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70)) | true +; + pointIntersectsLiteralPolygon required_capability: 
st_intersects @@ -889,6 +900,34 @@ wkt:keyword | pt:geo_point | distance:double "POINT(1 -1)" | POINT(1 -1) | 157249.59498573805 ; +literalGeoPointDistanceOneDegree +required_capability: st_distance + +ROW wkt = ["POINT(1 0)", "POINT(-1 0)", "POINT(0 1)", "POINT(0 -1)"] +| MV_EXPAND wkt +| EVAL pt = TO_GEOPOINT(wkt) +| EVAL distance = ST_DISTANCE(pt, TO_GEOPOINT("POINT(0 0)")) +; + +wkt:keyword | pt:geo_point | distance:double +"POINT(1 0)" | POINT(1 0) | 111195.07310665186 +"POINT(-1 0)" | POINT(-1 0) | 111195.08242688453 +"POINT(0 1)" | POINT(0 1) | 111195.07776676829 +"POINT(0 -1)" | POINT(0 -1) | 111195.08242688453 +; + +twoCitiesPointDistanceGeo +required_capability: st_distance +required_capability: spatial_functions_fix_crstype_folding + +ROW p1 = TO_GEOPOINT("POINT(-90.82814 29.79511)"), p2 = TO_GEOPOINT("POINT(-90.79731509999999 29.8835389)") +| EVAL d = ST_DISTANCE(p1, p2) +; + +p1:geo_point | p2:geo_point | d:double +POINT (-90.82814 29.79511) | POINT (-90.79731509999999 29.8835389) | 10272.529272836206 +; + airportCityLocationPointDistance required_capability: st_distance @@ -1433,6 +1472,17 @@ POINT (726480.0130685265 3359566.331716279) | 849 ############################################### # Tests for ST_INTERSECTS on CARTESIAN_POINT type +literalCartesianPointIntersectsLiteralPolygon +required_capability: st_intersects + +ROW pt = TO_CARTESIANPOINT("POINT(0 85)"), polygon = TO_CARTESIANSHAPE("POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70))") +| EVAL intersects = ST_INTERSECTS(pt, polygon) +; + +pt:cartesian_point | polygon:cartesian_shape | intersects:boolean +POINT(0 85) | POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70)) | true +; + cartesianCentroidFromAirportsAfterIntersectsPredicate required_capability: st_intersects @@ -1996,6 +2046,33 @@ wkt:keyword | pt:cartesian_point | distance:double "POINT(1 -1)" | POINT(1 -1) | 1.4142135623730951 ; +literalCartesianPointDistanceOneUnit +required_capability: st_distance + +ROW wkt = ["POINT(1 0)", "POINT(-1 0)", "POINT(0 1)", "POINT(0 -1)"] +| MV_EXPAND wkt +| EVAL pt = TO_CARTESIANPOINT(wkt) +| EVAL distance = ST_DISTANCE(pt, TO_CARTESIANPOINT("POINT(0 0)")) +; + +wkt:keyword | pt:cartesian_point | distance:double +"POINT(1 0)" | POINT(1 0) | 1.0 +"POINT(-1 0)" | POINT(-1 0) | 1.0 +"POINT(0 1)" | POINT(0 1) | 1.0 +"POINT(0 -1)" | POINT(0 -1) | 1.0 +; + +twoCitiesPointDistanceCartesian +required_capability: st_distance + +ROW p1 = TO_CARTESIANPOINT("POINT(-90.82814 29.79511)"), p2 = TO_CARTESIANPOINT("POINT(-90.79731509999999 29.8835389)") +| EVAL d = ST_DISTANCE(p1, p2) +; + +p1:cartesian_point | p2:cartesian_point | d:double +POINT (-90.82814 29.79511) | POINT (-90.79731509999999 29.8835389) | 0.09364744959271905 +; + airportCartesianCityLocationPointDistance required_capability: st_distance diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index eb373b6ddef6b..3be846630d5b8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -76,6 +76,166 @@ fe82::cae2:65ff:fece:fec0 | fe82::cae2:65ff:fece:fec0 | fe82::cae2:65ff:fece:fec fe80::cae2:65ff:fece:feb9 | fe80::cae2:65ff:fece:feb9 | fe80::cae2:65ff:fece:feb9 | fe81::cae2:65ff:fece:feb9 | gamma ; +maxOfVersion +required_capability: agg_max_min_string_support +from apps +| eval x = version +| where id > 2 +| stats max(version), a = max(version), b = max(x), c = max(case(name == 
"iiiii", "100.0.0"::version, version)); + +max(version):version | a:version | b:version | c:version +bad | bad | bad | 100.0.0 +; + +maxOfVersionGrouping +required_capability: agg_max_min_string_support +from apps +| eval x = version +| where id > 2 +| stats max(version), a = max(version), b = max(x), c = max(case(name == "ccccc", "100.0.0"::version, version)) by name +| sort name asc +| limit 3; + +max(version):version | a:version | b:version | c:version | name:keyword +1.2.3.4 | 1.2.3.4 | 1.2.3.4 | 1.2.3.4 | aaaaa +2.3.4 | 2.3.4 | 2.3.4 | 100.0.0 | ccccc +2.12.0 | 2.12.0 | 2.12.0 | 2.12.0 | ddddd +; + +maxOfKeyword +required_capability: agg_max_min_string_support +from airports +| eval x = abbrev +| where scalerank >= 9 +| stats max(abbrev), a = max(abbrev), b = max(x), c = max(case(mv_first(type) == "small", "___"::keyword, abbrev)); + +max(abbrev):keyword | a:keyword | b:keyword | c:keyword +ZAH | ZAH | ZAH | ___ +; + +maxOfKeywordGrouping +required_capability: agg_max_min_string_support +from airports +| eval x = abbrev +| where scalerank >= 9 +| stats max(abbrev), a = max(abbrev), b = max(x), c = max(case(mv_first(type) == "small", "___"::keyword, abbrev)) by type +| sort type asc +| limit 4; + +max(abbrev):keyword | a:keyword | b:keyword | c:keyword | type:keyword +IXC | IXC | IXC | IXC | major +ZAH | ZAH | ZAH | ZAH | mid +VIBY | VIBY | VIBY | VIBY | military +OPQS | OPQS | OPQS | ___ | small +; + +maxOfText +required_capability: agg_max_min_string_support +from airports +| eval x = name +| where scalerank >= 9 +| stats max(name), a = max(name), b = max(x); + +max(name):text | a:text | b:text +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l +; + +maxOfTextGrouping +required_capability: agg_max_min_string_support +from airports +| eval x = name +| where scalerank >= 9 +| stats max(name), a = max(name), b = max(x) by type +| sort type asc +| limit 4; + +max(name):text | a:text | b:text | type:keyword +Cheongju Int'l | Cheongju Int'l | Cheongju Int'l | major +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | mid +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | military +Sahnewal | Sahnewal | Sahnewal | small +; + +minOfVersion +required_capability: agg_max_min_string_support +from apps +| eval x = version +| where id > 2 +| stats min(version), a = min(version), b = min(x), c = min(case(name == "iiiii", "1.0"::version, version)); + +min(version):version | a:version | b:version | c:version +1.2.3.4 | 1.2.3.4 | 1.2.3.4 | 1.0 +; + +minOfVersionGrouping +required_capability: agg_max_min_string_support +from apps +| eval x = version +| where id > 2 +| stats min(version), a = min(version), b = min(x), c = min(case(name == "ccccc", "100.0.0"::version, version)) by name +| sort name asc +| limit 3; + +min(version):version | a:version | b:version | c:version | name:keyword +1.2.3.4 | 1.2.3.4 | 1.2.3.4 | 1.2.3.4 | aaaaa +2.3.4 | 2.3.4 | 2.3.4 | 100.0.0 | ccccc +2.12.0 | 2.12.0 | 2.12.0 | 2.12.0 | ddddd +; + +minOfKeyword +required_capability: agg_max_min_string_support +from airports +| eval x = abbrev +| where scalerank >= 9 +| stats min(abbrev), a = min(abbrev), b = min(x), c = max(case(mv_first(type) == "small", "___"::keyword, abbrev)); + +min(abbrev):keyword | a:keyword | b:keyword | c:keyword +AWZ | AWZ | AWZ | ___ +; + +minOfKeywordGrouping +required_capability: agg_max_min_string_support +from airports +| eval x = abbrev +| where scalerank >= 9 +| stats min(abbrev), a = min(abbrev), b = min(x), c = min(case(mv_first(type) == "small", "___"::keyword, abbrev)) by 
type +| sort type asc +| limit 4; + +min(abbrev):keyword | a:keyword | b:keyword | c:keyword | type:keyword +CJJ | CJJ | CJJ | CJJ | major +AWZ | AWZ | AWZ | AWZ | mid +GWL | GWL | GWL | GWL | military +LUH | LUH | LUH | ___ | small +; + +minOfText +required_capability: agg_max_min_string_support +from airports +| eval x = name +| where scalerank >= 9 +| stats min(name), a = min(name), b = min(x); + +min(name):text | a:text | b:text +Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh +; + +minOfTextGrouping +required_capability: agg_max_min_string_support +from airports +| eval x = name +| where scalerank >= 9 +| stats min(name), a = min(name), b = min(x) by type +| sort type asc +| limit 4; + +min(name):text | a:text | b:text | type:keyword +Chandigarh Int'l | Chandigarh Int'l | Chandigarh Int'l | major +Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | mid +Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | military +Dhamial | Dhamial | Dhamial | small +; + minOfBooleanExpression required_capability: agg_max_min_boolean_support from employees @@ -1458,6 +1618,37 @@ m:i | o:i | l:i | s:i 1 | 39729 | 1 | 39729 ; +byTwoCalculatedSecondOverwrites +FROM employees +| STATS m = MAX(salary) by l = salary + 1, l = languages + 1 +| SORT m +| LIMIT 5 +; + + m:i | l:i +66817 | 6 +73578 | 3 +73717 | 2 +74572 | 5 +74970 | 4 +; + +byTwoCalculatedSecondOverwritesReferencingFirst +FROM employees +| EVAL l = languages +| STATS m = MAX(salary) by l = l + 1, l = l + 1 +| SORT m +| LIMIT 5 +; + + m:i | l:i +66817 | 6 +73578 | 3 +73717 | 2 +74572 | 5 +74970 | 4 +; + nestedAggsOverGroupingWithAliasAndProjection#[skip:-8.13.99,reason:supported in 8.14] FROM employees | STATS e = length(f) + 1, c = count(*) by f = first_name diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec index db386e877b9c3..2ac7a0cf6217a 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec @@ -195,3 +195,39 @@ p80_max_salary_change:double 12.132 // end::docsStatsPercentileNestedExpression-result[] ; + +constantsFrom +required_capability: fn_mv_percentile +from employees +| eval single = 7, mv = [1, 7, 10] +| stats + eval_single = percentile(single, 50), + eval_mv = percentile(mv, 50), + constant_single = percentile(5, 50), + constant_mv = percentile([1, 5, 10], 50); + +eval_single:double | eval_mv:double | constant_single:double | constant_mv:double +7 | 7 | 5 | 5 +; + +constantsRow +required_capability: fn_mv_percentile +row single=7, mv=[1, 7, 10] +| stats + eval_single = percentile(single, 50), + eval_mv = percentile(mv, 50), + constant_single = percentile(5, 50), + constant_mv = percentile([1, 5, 10], 50); + +eval_single:double | eval_mv:double | constant_single:double | constant_mv:double +7 | 7 | 5 | 5 +; + +singleConstant +required_capability: fn_mv_percentile +row a=0 +| stats constant_single = percentile(5, 50); + +constant_single:double +5 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index 6d1d4c7892886..6819727be0131 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -977,7 +977,25 @@ 
event_duration:long | _index:keyword | ts:date | ts_str:k ; -inlineStatsUnionGroup +inlineStatsUnionGroup-Ignore +required_capability: union_types +required_capability: inlinestats + +FROM sample_data, sample_data_ts_long +| INLINESTATS count = COUNT(*) + BY @timestamp = SUBSTRING(TO_STRING(@timestamp), 0, 7) +| SORT client_ip ASC, @timestamp ASC +| LIMIT 4 +; + +client_ip:ip | event_duration:long | message:keyword | @timestamp:keyword | count:long + 172.21.0.5 | 1232382 | Disconnected | 1698068 | 1 + 172.21.0.5 | 1232382 | Disconnected | 2023-10 | 7 +172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 1698064 | 1 +172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 2023-10 | 7 +; + +inlineStatsUnionGroupWithEval-Ignore required_capability: union_types required_capability: inlinestats @@ -993,16 +1011,15 @@ client_ip:ip | event_duration:long | message:keyword | @timestamp:keyword 172.21.0.5 | 1232382 | Disconnected | 2023-10 | 7 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 1698064 | 1 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 2023-10 | 7 - ; -inlineStatsUnionGroupTogether +inlineStatsUnionGroupTogether-Ignore required_capability: union_types required_capability: inlinestats FROM sample_data, sample_data_ts_long -| EVAL @timestamp = TO_STRING(TO_DATETIME(@timestamp)) -| INLINESTATS count = COUNT(*) BY @timestamp +| INLINESTATS count = COUNT(*) + BY @timestamp = TO_STRING(TO_DATETIME(@timestamp)) | SORT client_ip ASC, @timestamp ASC | LIMIT 4 ; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileDoubleEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileDoubleEvaluator.java new file mode 100644 index 0000000000000..dd370e90b2c86 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileDoubleEvaluator.java @@ -0,0 +1,125 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.function.Function; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvPercentile}. + * This class is generated. Do not edit it. 
+ */ +public final class MvPercentileDoubleEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator values; + + private final EvalOperator.ExpressionEvaluator percentile; + + private final MvPercentile.DoubleSortingScratch scratch; + + private final DriverContext driverContext; + + public MvPercentileDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator values, + EvalOperator.ExpressionEvaluator percentile, MvPercentile.DoubleSortingScratch scratch, + DriverContext driverContext) { + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (DoubleBlock valuesBlock = (DoubleBlock) values.eval(page)) { + try (DoubleBlock percentileBlock = (DoubleBlock) percentile.eval(page)) { + return eval(page.getPositionCount(), valuesBlock, percentileBlock); + } + } + } + + public DoubleBlock eval(int positionCount, DoubleBlock valuesBlock, DoubleBlock percentileBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!valuesBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (percentileBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (percentileBlock.getValueCount(p) != 1) { + if (percentileBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvPercentile.process(result, p, valuesBlock, percentileBlock.getDouble(percentileBlock.getFirstValueIndex(p)), this.scratch); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvPercentileDoubleEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(values, percentile); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory values; + + private final EvalOperator.ExpressionEvaluator.Factory percentile; + + private final Function scratch; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory values, + EvalOperator.ExpressionEvaluator.Factory percentile, + Function scratch) { + this.source = source; + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + } + + @Override + public MvPercentileDoubleEvaluator get(DriverContext context) { + return new MvPercentileDoubleEvaluator(source, values.get(context), percentile.get(context), scratch.apply(context), context); + } + + @Override + public String toString() { + return "MvPercentileDoubleEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileIntegerEvaluator.java 
b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileIntegerEvaluator.java new file mode 100644 index 0000000000000..93dda414c7b33 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileIntegerEvaluator.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.function.Function; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvPercentile}. + * This class is generated. Do not edit it. + */ +public final class MvPercentileIntegerEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator values; + + private final EvalOperator.ExpressionEvaluator percentile; + + private final MvPercentile.IntSortingScratch scratch; + + private final DriverContext driverContext; + + public MvPercentileIntegerEvaluator(Source source, EvalOperator.ExpressionEvaluator values, + EvalOperator.ExpressionEvaluator percentile, MvPercentile.IntSortingScratch scratch, + DriverContext driverContext) { + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (IntBlock valuesBlock = (IntBlock) values.eval(page)) { + try (DoubleBlock percentileBlock = (DoubleBlock) percentile.eval(page)) { + return eval(page.getPositionCount(), valuesBlock, percentileBlock); + } + } + } + + public IntBlock eval(int positionCount, IntBlock valuesBlock, DoubleBlock percentileBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!valuesBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (percentileBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (percentileBlock.getValueCount(p) != 1) { + if (percentileBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvPercentile.process(result, p, valuesBlock, percentileBlock.getDouble(percentileBlock.getFirstValueIndex(p)), this.scratch); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + 
public String toString() { + return "MvPercentileIntegerEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(values, percentile); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory values; + + private final EvalOperator.ExpressionEvaluator.Factory percentile; + + private final Function scratch; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory values, + EvalOperator.ExpressionEvaluator.Factory percentile, + Function scratch) { + this.source = source; + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + } + + @Override + public MvPercentileIntegerEvaluator get(DriverContext context) { + return new MvPercentileIntegerEvaluator(source, values.get(context), percentile.get(context), scratch.apply(context), context); + } + + @Override + public String toString() { + return "MvPercentileIntegerEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileLongEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileLongEvaluator.java new file mode 100644 index 0000000000000..10d0b7c3283b2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileLongEvaluator.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.function.Function; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvPercentile}. + * This class is generated. Do not edit it. 
+ */ +public final class MvPercentileLongEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator values; + + private final EvalOperator.ExpressionEvaluator percentile; + + private final MvPercentile.LongSortingScratch scratch; + + private final DriverContext driverContext; + + public MvPercentileLongEvaluator(Source source, EvalOperator.ExpressionEvaluator values, + EvalOperator.ExpressionEvaluator percentile, MvPercentile.LongSortingScratch scratch, + DriverContext driverContext) { + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (LongBlock valuesBlock = (LongBlock) values.eval(page)) { + try (DoubleBlock percentileBlock = (DoubleBlock) percentile.eval(page)) { + return eval(page.getPositionCount(), valuesBlock, percentileBlock); + } + } + } + + public LongBlock eval(int positionCount, LongBlock valuesBlock, DoubleBlock percentileBlock) { + try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!valuesBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (percentileBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (percentileBlock.getValueCount(p) != 1) { + if (percentileBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvPercentile.process(result, p, valuesBlock, percentileBlock.getDouble(percentileBlock.getFirstValueIndex(p)), this.scratch); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvPercentileLongEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(values, percentile); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory values; + + private final EvalOperator.ExpressionEvaluator.Factory percentile; + + private final Function scratch; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory values, + EvalOperator.ExpressionEvaluator.Factory percentile, + Function scratch) { + this.source = source; + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + } + + @Override + public MvPercentileLongEvaluator get(DriverContext context) { + return new MvPercentileLongEvaluator(source, values.get(context), percentile.get(context), scratch.apply(context), context); + } + + @Override + public String toString() { + return "MvPercentileLongEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 3abbb655dadd3..afa8b6e1d06d7 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -37,6 +37,11 @@ public enum Cap { */ FN_MV_APPEND, + /** + * Support for {@code MV_PERCENTILE} function. + */ + FN_MV_PERCENTILE, + /** * Support for function {@code IP_PREFIX}. */ @@ -52,6 +57,11 @@ public enum Cap { */ INLINESTATS(EsqlPlugin.INLINESTATS_FEATURE_FLAG), + /** + * Support for the expressions in grouping in {@code INLINESTATS} syntax. + */ + INLINESTATS_V2(EsqlPlugin.INLINESTATS_FEATURE_FLAG), + /** * Support for aggregation function {@code TOP}. */ @@ -67,6 +77,11 @@ public enum Cap { */ AGG_MAX_MIN_IP_SUPPORT, + /** + * Support for strings in aggregations {@code MAX} and {@code MIN}. + */ + AGG_MAX_MIN_STRING_SUPPORT, + /** * Support for booleans in {@code TOP} aggregation. */ @@ -120,6 +135,11 @@ public enum Cap { */ ST_DISTANCE, + /** + * Fix determination of CRS types in spatial functions when folding. + */ + SPATIAL_FUNCTIONS_FIX_CRSTYPE_FOLDING, + /** * Fix to GROK and DISSECT that allows extracting attributes with the same name as the input * https://github.com/elastic/elasticsearch/issues/110184 @@ -226,10 +246,25 @@ public enum Cap { */ COMBINE_DISJUNCTIVE_CIDRMATCHES, + /** + * Support sending HTTP headers about the status of an async query. + */ + ASYNC_QUERY_STATUS_HEADERS, + /** * Consider the upper bound when computing the interval in BUCKET auto mode. */ - BUCKET_INCLUSIVE_UPPER_BOUND; + BUCKET_INCLUSIVE_UPPER_BOUND, + + /** + * Changed error messages for fields with conflicting types in different indices. + */ + SHORT_ERROR_MESSAGES_FOR_UNSUPPORTED_FIELDS, + + /** + * Support for the whole number spans in BUCKET function. + */ + BUCKET_WHOLE_NUMBER_AS_SPAN; private final boolean snapshotOnly; private final FeatureFlag featureFlag; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 4a7120a1d3d92..5b59117ad356b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -212,8 +212,11 @@ protected LogicalPlan rule(UnresolvedRelation plan, AnalyzerContext context) { * Specific flattening method, different from the default EsRelation that: * 1. takes care of data type widening (for certain types) * 2. drops the object and keyword hierarchy + *

<p>
+ * Public for testing.
+ * </p>
*/ - private static List mappingAsAttributes(Source source, Map mapping) { + public static List mappingAsAttributes(Source source, Map mapping) { var list = new ArrayList(); mappingAsAttributes(list, source, null, mapping); list.sort(Comparator.comparing(Attribute::name)); @@ -451,7 +454,7 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { } groupings = newGroupings; if (changed.get()) { - stats = stats.with(newGroupings, stats.aggregates()); + stats = stats.with(stats.child(), newGroupings, stats.aggregates()); changed.set(false); } } @@ -480,7 +483,7 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { newAggregates.add(agg); } - stats = changed.get() ? stats.with(groupings, newAggregates) : stats; + stats = changed.get() ? stats.with(stats.child(), groupings, newAggregates) : stats; } return (LogicalPlan) stats; @@ -856,6 +859,9 @@ private static List potentialCandidatesIfNoMatchesFound( Collection attrList, java.util.function.Function, String> messageProducer ) { + if (ua.customMessage()) { + return List.of(); + } // none found - add error message if (matches.isEmpty()) { Set names = new HashSet<>(attrList.size()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 6e23f4445b564..c64cbdbd2a9ed 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -96,6 +96,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPSeriesWeightedSum; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPercentile; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSort; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; @@ -362,6 +363,7 @@ private FunctionDefinition[][] functions() { def(MvMax.class, MvMax::new, "mv_max"), def(MvMedian.class, MvMedian::new, "mv_median"), def(MvMin.class, MvMin::new, "mv_min"), + def(MvPercentile.class, MvPercentile::new, "mv_percentile"), def(MvPSeriesWeightedSum.class, MvPSeriesWeightedSum::new, "mv_pseries_weighted_sum"), def(MvSort.class, MvSort::new, "mv_sort"), def(MvSlice.class, MvSlice::new, "mv_slice"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 22224628e23ad..e7f790f90803a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxBooleanAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.MaxBytesRefAggregatorFunctionSupplier; import 
org.elasticsearch.compute.aggregation.MaxDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxIpAggregatorFunctionSupplier; @@ -32,12 +33,15 @@ import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; +import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Max extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Max", Max::new); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The maximum value of a field.", isAggregation = true, examples = { @@ -50,7 +54,13 @@ public class Max extends AggregateFunction implements ToAggregator, SurrogateExp tag = "docsStatsMaxNestedExpression" ) } ) - public Max(Source source, @Param(name = "field", type = { "boolean", "double", "integer", "long", "date", "ip" }) Expression field) { + public Max( + Source source, + @Param( + name = "field", + type = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" } + ) Expression field + ) { super(source, field); } @@ -77,13 +87,10 @@ public Max replaceChildren(List newChildren) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - e -> e == DataType.BOOLEAN || e == DataType.DATETIME || e == DataType.IP || (e.isNumeric() && e != DataType.UNSIGNED_LONG), + t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, sourceText(), DEFAULT, - "boolean", - "datetime", - "ip", - "numeric except unsigned_long or counter types" + "representable except unsigned_long and spatial types" ); } @@ -110,6 +117,9 @@ public final AggregatorFunctionSupplier supplier(List inputChannels) { if (type == DataType.IP) { return new MaxIpAggregatorFunctionSupplier(inputChannels); } + if (type == DataType.VERSION || type == DataType.KEYWORD || type == DataType.TEXT) { + return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + } throw EsqlIllegalArgumentException.illegalDataType(type); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 8e7bb6bc3e799..6866811995059 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinBooleanAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.MinBytesRefAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinIpAggregatorFunctionSupplier; @@ -32,12 +33,15 @@ 
import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; +import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Min extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Min", Min::new); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The minimum value of a field.", isAggregation = true, examples = { @@ -50,7 +54,13 @@ public class Min extends AggregateFunction implements ToAggregator, SurrogateExp tag = "docsStatsMinNestedExpression" ) } ) - public Min(Source source, @Param(name = "field", type = { "boolean", "double", "integer", "long", "date", "ip" }) Expression field) { + public Min( + Source source, + @Param( + name = "field", + type = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" } + ) Expression field + ) { super(source, field); } @@ -77,13 +87,10 @@ public Min replaceChildren(List newChildren) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - e -> e == DataType.BOOLEAN || e == DataType.DATETIME || e == DataType.IP || (e.isNumeric() && e != DataType.UNSIGNED_LONG), + t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, sourceText(), DEFAULT, - "boolean", - "datetime", - "ip", - "numeric except unsigned_long or counter types" + "representable except unsigned_long and spatial types" ); } @@ -110,6 +117,9 @@ public final AggregatorFunctionSupplier supplier(List inputChannels) { if (type == DataType.IP) { return new MinIpAggregatorFunctionSupplier(inputChannels); } + if (type == DataType.VERSION || type == DataType.KEYWORD || type == DataType.TEXT) { + return new MinBytesRefAggregatorFunctionSupplier(inputChannels); + } throw EsqlIllegalArgumentException.illegalDataType(type); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java index 54cebc7daad5d..0d5dd4b66501c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java @@ -18,9 +18,12 @@ import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPercentile; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import java.io.IOException; @@ -31,7 +34,7 @@ import static 
org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isFoldable; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; -public class Percentile extends NumericAggregate { +public class Percentile extends NumericAggregate implements SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Expression.class, "Percentile", @@ -152,4 +155,15 @@ protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) private int percentileValue() { return ((Number) percentile.fold()).intValue(); } + + @Override + public Expression surrogate() { + var field = field(); + + if (field.foldable()) { + return new MvPercentile(source(), new ToDouble(source(), field), percentile()); + } + + return null; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java index 055e34ad5a633..1c10c7d2fa9ef 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java @@ -68,18 +68,18 @@ * {@code dataType}: This will return the datatype of your function. * May be based on its current parameters. * - * - * - * Finally, you may want to implement some interfaces. - * Check their JavaDocs to see if they are suitable for your function: - *
- *     <li>
- *         {@link org.elasticsearch.xpack.esql.planner.ToAggregator}: (More information about aggregators below)
- *     </li>
- *     <li>
- *         {@link org.elasticsearch.xpack.esql.expression.SurrogateExpression}
+ *     <li>
+ *         Implement {@link org.elasticsearch.xpack.esql.expression.SurrogateExpression}, and its required
+ *         {@link org.elasticsearch.xpack.esql.expression.SurrogateExpression#surrogate()} method.
+ *         <p>
+ *             It's used to be able to fold the aggregation when it receives only literals,
+ *             or when the aggregation can be simplified.
+ *         </p>
+ *     </li>
+ * </ul>
+ *
+ * Finally, implement {@link org.elasticsearch.xpack.esql.planner.ToAggregator} (More information about aggregators below).
+ * The only case when this interface is not required is when it always returns another function in its surrogate.
  *
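A minimal sketch of the pattern described above; MyAggregation is hypothetical and not part of this diff, it only shows how a surrogate can rewrite a foldable input into the existing MvMax multivalue scalar:

    // Hypothetical aggregation, shown only to illustrate SurrogateExpression#surrogate().
    // MvMax is the existing multivalue scalar; field(), source() and foldable() are the
    // same accessors the Percentile change earlier in this diff uses.
    public class MyAggregation extends AggregateFunction implements ToAggregator, SurrogateExpression {
        // ... constructor, writeTo, resolveType and supplier() omitted ...

        @Override
        public Expression surrogate() {
            Expression field = field();
            // With a foldable input (e.g. a literal), the whole aggregation can be
            // replaced by a scalar expression and folded away by the optimizer.
            if (field.foldable()) {
                return new MvMax(source(), field);
            }
            // Returning null keeps the aggregation, so the ToAggregator path is still used.
            return null;
        }
    }

Percentile, earlier in this same diff, follows the pattern by rewriting itself to MvPercentile (wrapped in ToDouble) when its field is foldable.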
  • * To introduce your aggregation to the engine: diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 712eee8672bf3..5fabfe0e03d89 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -37,6 +37,7 @@ import java.io.IOException; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -144,9 +145,7 @@ another in which the bucket size is provided directly (two parameters). ), @Example(description = """ The range can be omitted if the desired bucket size is known in advance. Simply - provide it as the second argument:""", file = "bucket", tag = "docsBucketNumericWithSpan", explanation = """ - NOTE: When providing the bucket size as the second parameter, it must be - of a floating point type."""), + provide it as the second argument:""", file = "bucket", tag = "docsBucketNumericWithSpan"), @Example( description = "Create hourly buckets for the last 24 hours, and calculate the number of events per hour:", file = "bucket", @@ -176,23 +175,23 @@ public Bucket( ) Expression field, @Param( name = "buckets", - type = { "integer", "double", "date_period", "time_duration" }, - description = "Target number of buckets." + type = { "integer", "long", "double", "date_period", "time_duration" }, + description = "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." ) Expression buckets, @Param( name = "from", - type = { "integer", "long", "double", "date" }, + type = { "integer", "long", "double", "date", "keyword", "text" }, optional = true, - description = "Start of the range. Can be a number or a date expressed as a string." + description = "Start of the range. Can be a number, a date or a date expressed as a string." ) Expression from, @Param( name = "to", - type = { "integer", "long", "double", "date" }, + type = { "integer", "long", "double", "date", "keyword", "text" }, optional = true, - description = "End of the range. Can be a number or a date expressed as a string." + description = "End of the range. Can be a number, a date or a date expressed as a string." ) Expression to ) { - super(source, from != null && to != null ? 
List.of(field, buckets, from, to) : List.of(field, buckets)); + super(source, fields(field, buckets, from, to)); this.field = field; this.buckets = buckets; this.from = from; @@ -209,6 +208,19 @@ private Bucket(StreamInput in) throws IOException { ); } + private static List fields(Expression field, Expression buckets, Expression from, Expression to) { + List list = new ArrayList<>(4); + list.add(field); + list.add(buckets); + if (from != null) { + list.add(from); + if (to != null) { + list.add(to); + } + } + return list; + } + @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); @@ -251,7 +263,6 @@ public ExpressionEvaluator.Factory toEvaluator(Function isNumeric(from, sourceText(), THIRD)).and(() -> isNumeric(to, sourceText(), FOURTH)) - : isNumeric(buckets, sourceText(), SECOND).and(checkArgsCount(2)); + return isNumeric(buckets, sourceText(), SECOND).and(() -> { + if (bucketsType.isRationalNumber()) { + return checkArgsCount(2); + } else { // second arg is a whole number: either a span, but as a whole, or count, and we must expect a range + var resolution = checkArgsCount(2); + if (resolution.resolved() == false) { + resolution = checkArgsCount(4).and(() -> isNumeric(from, sourceText(), THIRD)) + .and(() -> isNumeric(to, sourceText(), FOURTH)); + } + return resolution; + } + }); } return isType(field, e -> false, sourceText(), FIRST, "datetime", "numeric"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java index 2c86dfbac12ce..c66ba7f87a1c5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java @@ -58,7 +58,7 @@ public class ToDatetime extends AbstractConvertFunction { Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. 
To convert dates in other formats, use <>.""", - note = "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is" + note = "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is " + "truncated, not rounded.", examples = { @Example(file = "date", tag = "to_datetime-str", explanation = """ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java index 90810d282ca52..cb0f9fdd8d5db 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java @@ -44,6 +44,7 @@ public static List getNamedWriteables() { MvMax.ENTRY, MvMedian.ENTRY, MvMin.ENTRY, + MvPercentile.ENTRY, MvPSeriesWeightedSum.ENTRY, MvSlice.ENTRY, MvSort.ENTRY, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java new file mode 100644 index 0000000000000..b1e710b9b2a40 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java @@ -0,0 +1,446 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.apache.lucene.util.ArrayUtil; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; + +import java.io.IOException; +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; + +public class MvPercentile extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "MvPercentile", + MvPercentile::new + ); + + /** + * 2^52 is the smallest integer where it and all smaller integers can be represented exactly as double + */ + private static final double MAX_SAFE_LONG_DOUBLE = Double.longBitsToDouble(0x4330000000000000L); + + private final Expression field; + private final Expression percentile; + + @FunctionInfo( + returnType = { "double", "integer", "long" }, + description = "Converts a multivalued field into a single valued field containing " + + "the value at which a certain percentage of observed values occur.", + examples = @Example(file = "mv_percentile", tag = "example") + ) + public MvPercentile( + Source source, + @Param(name = "number", type = { "double", "integer", "long" }, description = "Multivalue expression.") Expression field, + @Param( + name = "percentile", + type = { "double", "integer", "long" }, + description = "The percentile to calculate. Must be a number between 0 and 100. " + + "Numbers out of range will return a null instead." 
+ ) Expression percentile + ) { + super(source, List.of(field, percentile)); + this.field = field; + this.percentile = percentile; + } + + private MvPercentile(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(field); + out.writeNamedWriteable(percentile); + } + + @Override + protected Expression.TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + return isType(field, dt -> dt.isNumeric() && dt != UNSIGNED_LONG, sourceText(), FIRST, "numeric except unsigned_long").and( + isType(percentile, dt -> dt.isNumeric() && dt != UNSIGNED_LONG, sourceText(), SECOND, "numeric except unsigned_long") + ); + } + + @Override + public boolean foldable() { + return field.foldable() && percentile.foldable(); + } + + public final Expression field() { + return field; + } + + @Override + public DataType dataType() { + return field.dataType(); + } + + @Override + public final ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { + var fieldEval = toEvaluator.apply(field); + var percentileEval = Cast.cast(source(), percentile.dataType(), DOUBLE, toEvaluator.apply(percentile)); + + return switch (PlannerUtils.toElementType(field.dataType())) { + case INT -> new MvPercentileIntegerEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new IntSortingScratch()); + case LONG -> new MvPercentileLongEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new LongSortingScratch()); + case DOUBLE -> new MvPercentileDoubleEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new DoubleSortingScratch()); + default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType()); + }; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvPercentile(source(), newChildren.get(0), newChildren.get(1)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvPercentile::new, field, percentile); + } + + static class DoubleSortingScratch { + private static final double[] EMPTY = new double[0]; + + public double[] values = EMPTY; + } + + static class IntSortingScratch { + private static final int[] EMPTY = new int[0]; + + public int[] values = EMPTY; + } + + static class LongSortingScratch { + private static final long[] EMPTY = new long[0]; + + public long[] values = EMPTY; + } + + // Evaluators + + @Evaluator(extraName = "Double", warnExceptions = IllegalArgumentException.class) + static void process( + DoubleBlock.Builder builder, + int position, + DoubleBlock values, + double percentile, + @Fixed(includeInToString = false, build = true) DoubleSortingScratch scratch + ) { + int valueCount = values.getValueCount(position); + int firstValueIndex = values.getFirstValueIndex(position); + + if (valueCount == 0) { + builder.appendNull(); + return; + } + + if (percentile < 0 || percentile > 100) { + throw new IllegalArgumentException("Percentile parameter must be a number between 0 and 100, found [" + percentile + "]"); + } + + builder.appendDouble(calculateDoublePercentile(values, firstValueIndex, valueCount, percentile, scratch)); + } + + @Evaluator(extraName = "Integer", warnExceptions = IllegalArgumentException.class) + static void process( + 
IntBlock.Builder builder, + int position, + IntBlock values, + double percentile, + @Fixed(includeInToString = false, build = true) IntSortingScratch scratch + ) { + int valueCount = values.getValueCount(position); + int firstValueIndex = values.getFirstValueIndex(position); + + if (valueCount == 0) { + builder.appendNull(); + return; + } + + if (percentile < 0 || percentile > 100) { + throw new IllegalArgumentException("Percentile parameter must be a number between 0 and 100, found [" + percentile + "]"); + } + + builder.appendInt(calculateIntPercentile(values, firstValueIndex, valueCount, percentile, scratch)); + } + + @Evaluator(extraName = "Long", warnExceptions = IllegalArgumentException.class) + static void process( + LongBlock.Builder builder, + int position, + LongBlock values, + double percentile, + @Fixed(includeInToString = false, build = true) LongSortingScratch scratch + ) { + int valueCount = values.getValueCount(position); + int firstValueIndex = values.getFirstValueIndex(position); + + if (valueCount == 0) { + builder.appendNull(); + return; + } + + if (percentile < 0 || percentile > 100) { + throw new IllegalArgumentException("Percentile parameter must be a number between 0 and 100, found [" + percentile + "]"); + } + + builder.appendLong(calculateLongPercentile(values, firstValueIndex, valueCount, percentile, scratch)); + } + + // Percentile calculators + + private static double calculateDoublePercentile( + DoubleBlock valuesBlock, + int firstValueIndex, + int valueCount, + double percentile, + DoubleSortingScratch scratch + ) { + if (valueCount == 1) { + return valuesBlock.getDouble(firstValueIndex); + } + + var p = percentile / 100.0; + var index = p * (valueCount - 1); + var lowerIndex = (int) index; + var upperIndex = lowerIndex + 1; + var fraction = index - lowerIndex; + + if (valuesBlock.mvSortedAscending()) { + if (percentile == 0) { + return valuesBlock.getDouble(0); + } else if (percentile == 100) { + return valuesBlock.getDouble(valueCount - 1); + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculateDoublePercentile(fraction, valuesBlock.getDouble(lowerIndex), valuesBlock.getDouble(upperIndex)); + } + } + + if (percentile == 0) { + double min = Double.POSITIVE_INFINITY; + for (int i = 0; i < valueCount; i++) { + min = Math.min(min, valuesBlock.getDouble(firstValueIndex + i)); + } + return min; + } else if (percentile == 100) { + double max = Double.NEGATIVE_INFINITY; + for (int i = 0; i < valueCount; i++) { + max = Math.max(max, valuesBlock.getDouble(firstValueIndex + i)); + } + return max; + } + + if (scratch.values.length < valueCount) { + scratch.values = new double[ArrayUtil.oversize(valueCount, Double.BYTES)]; + } + + for (int i = 0; i < valueCount; i++) { + scratch.values[i] = valuesBlock.getDouble(firstValueIndex + i); + } + + Arrays.sort(scratch.values, 0, valueCount); + + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculateDoublePercentile(fraction, scratch.values[lowerIndex], scratch.values[upperIndex]); + } + + private static int calculateIntPercentile( + IntBlock valuesBlock, + int firstValueIndex, + int valueCount, + double percentile, + IntSortingScratch scratch + ) { + if (valueCount == 1) { + return valuesBlock.getInt(firstValueIndex); + } + + var p = percentile / 100.0; + var index = p * (valueCount - 1); + var lowerIndex = (int) index; + var upperIndex = lowerIndex + 1; + var fraction = index - lowerIndex; + + if (valuesBlock.mvSortedAscending()) { + if (percentile == 0) { + return 
valuesBlock.getInt(0); + } else if (percentile == 100) { + return valuesBlock.getInt(valueCount - 1); + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + var lowerValue = valuesBlock.getInt(lowerIndex); + var upperValue = valuesBlock.getInt(upperIndex); + var difference = (long) upperValue - lowerValue; + return lowerValue + (int) (fraction * difference); + } + } + + if (percentile == 0) { + int min = Integer.MAX_VALUE; + for (int i = 0; i < valueCount; i++) { + min = Math.min(min, valuesBlock.getInt(firstValueIndex + i)); + } + return min; + } else if (percentile == 100) { + int max = Integer.MIN_VALUE; + for (int i = 0; i < valueCount; i++) { + max = Math.max(max, valuesBlock.getInt(firstValueIndex + i)); + } + return max; + } + + if (scratch.values.length < valueCount) { + scratch.values = new int[ArrayUtil.oversize(valueCount, Integer.BYTES)]; + } + + for (int i = 0; i < valueCount; i++) { + scratch.values[i] = valuesBlock.getInt(firstValueIndex + i); + } + + Arrays.sort(scratch.values, 0, valueCount); + + assert lowerIndex >= 0 && upperIndex < valueCount; + var lowerValue = scratch.values[lowerIndex]; + var upperValue = scratch.values[upperIndex]; + var difference = (long) upperValue - lowerValue; + return lowerValue + (int) (fraction * difference); + } + + private static long calculateLongPercentile( + LongBlock valuesBlock, + int firstValueIndex, + int valueCount, + double percentile, + LongSortingScratch scratch + ) { + if (valueCount == 1) { + return valuesBlock.getLong(firstValueIndex); + } + + var p = percentile / 100.0; + var index = p * (valueCount - 1); + var lowerIndex = (int) index; + var upperIndex = lowerIndex + 1; + var fraction = index - lowerIndex; + + if (valuesBlock.mvSortedAscending()) { + if (percentile == 0) { + return valuesBlock.getLong(0); + } else if (percentile == 100) { + return valuesBlock.getLong(valueCount - 1); + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculateLongPercentile(fraction, valuesBlock.getLong(lowerIndex), valuesBlock.getLong(upperIndex)); + } + } + + if (percentile == 0) { + long min = Long.MAX_VALUE; + for (int i = 0; i < valueCount; i++) { + min = Math.min(min, valuesBlock.getLong(firstValueIndex + i)); + } + return min; + } else if (percentile == 100) { + long max = Long.MIN_VALUE; + for (int i = 0; i < valueCount; i++) { + max = Math.max(max, valuesBlock.getLong(firstValueIndex + i)); + } + return max; + } + + if (scratch.values.length < valueCount) { + scratch.values = new long[ArrayUtil.oversize(valueCount, Long.BYTES)]; + } + + for (int i = 0; i < valueCount; i++) { + scratch.values[i] = valuesBlock.getLong(firstValueIndex + i); + } + + Arrays.sort(scratch.values, 0, valueCount); + + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculateLongPercentile(fraction, scratch.values[lowerIndex], scratch.values[upperIndex]); + } + + /** + * Calculates a percentile for a long avoiding overflows and double precision issues. + *

+     * <p>
+     *     To do that, if the values are over the limit of the representable double integers,
+     *     it uses instead BigDecimals for the calculations.
+     * </p>

    + */ + private static long calculateLongPercentile(double fraction, long lowerValue, long upperValue) { + if (upperValue < MAX_SAFE_LONG_DOUBLE && lowerValue > -MAX_SAFE_LONG_DOUBLE) { + var difference = upperValue - lowerValue; + return lowerValue + (long) (fraction * difference); + } + + var lowerValueBigDecimal = new BigDecimal(lowerValue); + var upperValueBigDecimal = new BigDecimal(upperValue); + var difference = upperValueBigDecimal.subtract(lowerValueBigDecimal); + var fractionBigDecimal = new BigDecimal(fraction); + return lowerValueBigDecimal.add(fractionBigDecimal.multiply(difference)).longValue(); + } + + /** + * Calculates a percentile for a double avoiding overflows. + *

+     * <p>
+     *     If the values are too separated (negative + positive), it uses a slightly different approach.
+     *     This approach would fail if the values are big but not separated, so it's only used in this case.
+     * </p>

    + */ + private static double calculateDoublePercentile(double fraction, double lowerValue, double upperValue) { + if (lowerValue < 0 && upperValue > 0) { + // Order is required to avoid `upper - lower` overflows + return (lowerValue + fraction * upperValue) - fraction * lowerValue; + } + + var difference = upperValue - lowerValue; + return lowerValue + fraction * difference; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java index d34ff30d9b87b..84d776888c7ae 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java @@ -42,7 +42,7 @@ public static List getNamedWriteables() { } private final SpatialTypeResolver spatialTypeResolver; - protected SpatialCrsType crsType; + private SpatialCrsType crsType; protected final boolean leftDocValues; protected final boolean rightDocValues; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java index afa2ba833dcd1..6cb3c34ba8b1f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java @@ -176,10 +176,10 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); Geometry rightGeom = makeGeometryFromLiteral(right()); - Component2D[] components = asLuceneComponent2Ds(crsType, rightGeom); - return (crsType == SpatialCrsType.GEO) + Component2D[] components = asLuceneComponent2Ds(crsType(), rightGeom); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometries(docValueReader, components) : CARTESIAN.geometryRelatesGeometries(docValueReader, components); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java index 9e37bf4c8fa51..d04dc9e1a6b07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java @@ -131,9 +131,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? 
GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java index b7aaededf76f5..48e99989c5699 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java @@ -129,9 +129,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java index 297a6b40c2175..c204468ae17d1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java @@ -131,9 +131,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java index 1fdd4241aa222..14bded51aa55f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java @@ -173,7 +173,7 @@ protected NodeInfo info() { public Object fold() { var leftGeom = makeGeometryFromLiteral(left()); var rightGeom = makeGeometryFromLiteral(right()); - return (crsType == SpatialCrsType.GEO) ? GEO.distance(leftGeom, rightGeom) : CARTESIAN.distance(leftGeom, rightGeom); + return (crsType() == SpatialCrsType.GEO) ? 
GEO.distance(leftGeom, rightGeom) : CARTESIAN.distance(leftGeom, rightGeom); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index e55b090bbb35f..282f46e0de7bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -155,6 +155,7 @@ public LogicalPlan optimize(LogicalPlan verified) { if (failures.hasFailures()) { throw new VerificationException(failures); } + optimized.setOptimized(); return optimized; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 4d3134db34a0d..733fe2e8762bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.InlineStats; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.Row; @@ -99,7 +100,8 @@ protected AttributeSet generates(LogicalPlan logicalPlan) { if (logicalPlan instanceof EsRelation || logicalPlan instanceof LocalRelation || logicalPlan instanceof Row - || logicalPlan instanceof Aggregate) { + || logicalPlan instanceof Aggregate + || logicalPlan instanceof InlineStats) { return logicalPlan.outputSet(); } if (logicalPlan instanceof GeneratingPlan generating) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java index 5592a04e2f813..0f8e0f450e585 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java @@ -11,26 +11,30 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerRules; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Stats; import java.util.ArrayList; import java.util.List; /** - * Rule that removes Aggregate overrides in grouping, aggregates and across them inside. - * The overrides appear when the same alias is used multiple times in aggregations and/or groupings: - * STATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10 + * Removes {@link Stats} overrides in grouping, aggregates and across them inside. 
+ * The overrides appear when the same alias is used multiple times in aggregations + * and/or groupings: + * {@code STATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10} * becomes - * STATS BY x = c + 10 - * That is the last declaration for a given alias, overrides all the other declarations, with - * groups having priority vs aggregates. + * {@code STATS BY x = c + 10} + * and + * {@code INLINESTATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10} + * becomes + * {@code INLINESTATS BY x = c + 10} + * This is "last one wins", with groups having priority over aggregates. * Separately, it replaces expressions used as group keys inside the aggregates with references: - * STATS max(a + b + 1) BY a + b + * {@code STATS max(a + b + 1) BY a + b} * becomes - * STATS max($x + 1) BY $x = a + b + * {@code STATS max($x + 1) BY $x = a + b} */ -public final class RemoveStatsOverride extends AnalyzerRules.AnalyzerRule { +public final class RemoveStatsOverride extends AnalyzerRules.AnalyzerRule { @Override protected boolean skipResolved() { @@ -38,19 +42,18 @@ protected boolean skipResolved() { } @Override - protected LogicalPlan rule(Aggregate agg) { - return agg.resolved() ? removeAggDuplicates(agg) : agg; - } - - private static Aggregate removeAggDuplicates(Aggregate agg) { - var groupings = agg.groupings(); - var aggregates = agg.aggregates(); - - groupings = removeDuplicateNames(groupings); - aggregates = removeDuplicateNames(aggregates); - - // replace EsqlAggregate with Aggregate - return new Aggregate(agg.source(), agg.child(), agg.aggregateType(), groupings, aggregates); + protected LogicalPlan rule(LogicalPlan p) { + if (p.resolved() == false) { + return p; + } + if (p instanceof Stats stats) { + return (LogicalPlan) stats.with( + stats.child(), + removeDuplicateNames(stats.groupings()), + removeDuplicateNames(stats.aggregates()) + ); + } + return p; } private static List removeDuplicateNames(List list) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java index 1746931f9a63e..ea0a302f7131d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java @@ -34,7 +34,7 @@ * becomes * stats a1 = sum(a), a2 = min(b) by x | eval a = a1 + a2 | keep a, x * The rule also considers expressions applied over groups: - * stats a = x + 1 by x becomes stats by x | eval a = x + 1 | keep a, x + * {@code STATS a = x + 1 BY x} becomes {@code STATS BY x | EVAL a = x + 1 | KEEP a, x} * And to combine the two: * stats a = x + count(*) by x * becomes diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java index 206bd6d3d1c76..02b39f6babef0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java @@ -15,9 +15,9 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import 
org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Stats; import java.util.ArrayList; import java.util.HashMap; @@ -25,15 +25,26 @@ import java.util.Map; /** - * Replace nested expressions inside an aggregate with synthetic eval (which end up being projected away by the aggregate). - * stats sum(a + 1) by x % 2 + * Replace nested expressions inside a {@link Stats} with synthetic eval. + * {@code STATS SUM(a + 1) BY x % 2} * becomes - * eval `a + 1` = a + 1, `x % 2` = x % 2 | stats sum(`a+1`_ref) by `x % 2`_ref + * {@code EVAL `a + 1` = a + 1, `x % 2` = x % 2 | STATS SUM(`a+1`_ref) BY `x % 2`_ref} + * and + * {@code INLINESTATS SUM(a + 1) BY x % 2} + * becomes + * {@code EVAL `a + 1` = a + 1, `x % 2` = x % 2 | INLINESTATS SUM(`a+1`_ref) BY `x % 2`_ref} */ -public final class ReplaceStatsNestedExpressionWithEval extends OptimizerRules.OptimizerRule { +public final class ReplaceStatsNestedExpressionWithEval extends OptimizerRules.OptimizerRule { @Override - protected LogicalPlan rule(Aggregate aggregate) { + protected LogicalPlan rule(LogicalPlan p) { + if (p instanceof Stats stats) { + return rule(stats); + } + return p; + } + + private LogicalPlan rule(Stats aggregate) { List evals = new ArrayList<>(); Map evalNames = new HashMap<>(); Map groupingAttributes = new HashMap<>(); @@ -134,10 +145,10 @@ protected LogicalPlan rule(Aggregate aggregate) { var aggregates = aggsChanged.get() ? newAggs : aggregate.aggregates(); var newEval = new Eval(aggregate.source(), aggregate.child(), evals); - aggregate = new Aggregate(aggregate.source(), newEval, aggregate.aggregateType(), groupings, aggregates); + aggregate = aggregate.with(newEval, groupings, aggregates); } - return aggregate; + return (LogicalPlan) aggregate; } static String syntheticName(Expression expression, AggregateFunction af, int counter) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java index 01132425df11f..5b6fe8c0112c6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java @@ -108,8 +108,8 @@ public Aggregate replaceChild(LogicalPlan newChild) { } @Override - public Aggregate with(List newGroupings, List newAggregates) { - return new Aggregate(source(), child(), aggregateType(), newGroupings, newAggregates); + public Aggregate with(LogicalPlan child, List newGroupings, List newAggregates) { + return new Aggregate(source(), child, aggregateType(), newGroupings, newAggregates); } public AggregateType aggregateType() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java index 187b3542e0607..b37976c00ad06 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java @@ -98,8 +98,8 @@ public InlineStats replaceChild(LogicalPlan newChild) { } @Override - 
public InlineStats with(List newGroupings, List newAggregates) { - return new InlineStats(source(), child(), newGroupings, newAggregates); + public InlineStats with(LogicalPlan child, List newGroupings, List newAggregates) { + return new InlineStats(source(), child, newGroupings, newAggregates); } @Override @@ -121,11 +121,13 @@ public boolean expressionsResolved() { public List output() { if (this.lazyOutput == null) { List addedFields = new ArrayList<>(); - AttributeSet childOutput = child().outputSet(); + AttributeSet set = child().outputSet(); for (NamedExpression agg : aggregates) { - if (childOutput.contains(agg) == false) { + Attribute att = agg.toAttribute(); + if (set.contains(att) == false) { addedFields.add(agg); + set.add(att); } } @@ -207,7 +209,7 @@ private LogicalPlan groupedNextPhase(List schema, List firstPha if (g instanceof Attribute a) { groupingAttributes.add(a); } else { - throw new UnsupportedOperationException("INLINESTATS doesn't support expressions in grouping position yet"); + throw new IllegalStateException("optimized plans should only have attributes in groups, but got [" + g + "]"); } } List leftFields = new ArrayList<>(groupingAttributes.size()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java index ba0f97cdfa30b..6923f9e137eab 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java @@ -91,8 +91,8 @@ public interface Phased { * Or {@code null} if there aren't any {@linkplain Phased} operations. */ static LogicalPlan extractFirstPhase(LogicalPlan plan) { - if (false == plan.analyzed()) { - throw new IllegalArgumentException("plan must be analyzed"); + if (false == plan.optimized()) { + throw new IllegalArgumentException("plan must be optimized"); } var firstPhase = new Holder(); plan.forEachUp(t -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java index 35d5229d4e52f..c46c735e7482e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.List; @@ -16,10 +17,25 @@ * STATS-like operations. Like {@link Aggregate} and {@link InlineStats}. */ public interface Stats { + /** + * The user supplied text in the query for this command. + */ + Source source(); + /** * Rebuild this plan with new groupings and new aggregates. */ - Stats with(List newGroupings, List newAggregates); + Stats with(LogicalPlan child, List newGroupings, List newAggregates); + + /** + * Have all the expressions in this plan been resolved? + */ + boolean expressionsResolved(); + + /** + * The operation directly before this one in the plan. + */ + LogicalPlan child(); /** * List containing both the aggregate expressions and grouping expressions. 
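A sketch of how a rule is expected to consume the widened Stats interface; the rule body and the rewriteGroupings/rewriteAggregates helpers are hypothetical, standing in for whatever transformation a concrete rule performs:

    // With child(), expressionsResolved() and the child-aware with(...) now declared on
    // Stats, one rule body can serve both STATS (Aggregate) and INLINESTATS (InlineStats)
    // without casting to a concrete plan node.
    protected LogicalPlan rule(LogicalPlan plan) {
        if (plan instanceof Stats stats && stats.expressionsResolved()) {
            return (LogicalPlan) stats.with(
                stats.child(),
                rewriteGroupings(stats.groupings()),    // hypothetical helper
                rewriteAggregates(stats.aggregates())   // hypothetical helper
            );
        }
        return plan;
    }

RemoveStatsOverride and ReplaceStatsNestedExpressionWithEval above follow this shape.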
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 213d7266a0b1e..60bf4be1d2b03 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -160,7 +160,7 @@ private static Stream, Tuple>> typeAndNames(Class if (NumericAggregate.class.isAssignableFrom(clazz)) { types = NUMERIC; } else if (Max.class.isAssignableFrom(clazz) || Min.class.isAssignableFrom(clazz)) { - types = List.of("Boolean", "Int", "Long", "Double", "Ip"); + types = List.of("Boolean", "Int", "Long", "Double", "Ip", "BytesRef"); } else if (clazz == Count.class) { types = List.of(""); // no extra type distinction } else if (SpatialAggregateFunction.class.isAssignableFrom(clazz)) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 29d524fc664a8..fa8a5693c59bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -11,11 +11,11 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -68,7 +68,6 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.session.Configuration; -import org.elasticsearch.xpack.esql.session.IndexResolver; import org.elasticsearch.xpack.esql.session.Result; import java.util.ArrayList; @@ -98,8 +97,6 @@ public class ComputeService { private final EnrichLookupService enrichLookupService; private final ClusterService clusterService; - private static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndexResolver.FIELD_CAPS_INDICES_OPTIONS; - public ComputeService( SearchService searchService, TransportService transportService, @@ -152,7 +149,7 @@ public void execute( return; } Map clusterToConcreteIndices = transportService.getRemoteClusterService() - .groupIndices(DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); if (dataNodePlan == null) { if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0) == false) { @@ -188,7 +185,7 @@ public void execute( } } Map clusterToOriginalIndices = transportService.getRemoteClusterService() - .groupIndices(DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); + 
.groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); final var exchangeSource = new ExchangeSourceHandler( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index cab6161cb3eea..561baa76a01a9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -55,6 +55,7 @@ public class TransportEsqlQueryAction extends HandledTransportAction { + private final ThreadPool threadPool; private final PlanExecutor planExecutor; private final ComputeService computeService; private final ExchangeService exchangeService; @@ -82,6 +83,7 @@ public TransportEsqlQueryAction( ) { // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 super(EsqlQueryAction.NAME, transportService, actionFilters, EsqlQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.threadPool = threadPool; this.planExecutor = planExecutor; this.clusterService = clusterService; this.requestExecutor = threadPool.executor(ThreadPool.Names.SEARCH); @@ -181,9 +183,11 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener columns = result.schema().stream().map(c -> new ColumnInfoImpl(c.name(), c.dataType().outputType())).toList(); EsqlQueryResponse.Profile profile = configuration.profile() ? 
new EsqlQueryResponse.Profile(result.profiles()) : null; + threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_IS_RUNNING_HEADER, "?0"); if (task instanceof EsqlQueryTask asyncTask && request.keepOnCompletion()) { - String id = asyncTask.getExecutionId().getEncoded(); - return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), id, false, request.async()); + String asyncExecutionId = asyncTask.getExecutionId().getEncoded(); + threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, asyncExecutionId); + return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), asyncExecutionId, false, request.async()); } return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), request.async()); } @@ -231,12 +235,15 @@ public EsqlQueryTask createTask( @Override public EsqlQueryResponse initialResponse(EsqlQueryTask task) { + var asyncExecutionId = task.getExecutionId().getEncoded(); + threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, asyncExecutionId); + threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_IS_RUNNING_HEADER, "?1"); return new EsqlQueryResponse( List.of(), List.of(), null, false, - task.getExecutionId().getEncoded(), + asyncExecutionId, true, // is_running true // isAsync ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java index 7a47b1d38f053..d1e4e12f73868 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.GeoShapeQueryable; import org.elasticsearch.index.mapper.MappedFieldType; @@ -228,10 +227,10 @@ private static org.apache.lucene.search.Query pointShapeQuery( if (geometry == null || geometry.isEmpty()) { throw new QueryShardException(context, "Invalid/empty geometry"); } - if (geometry.type() != ShapeType.POINT && relation == ShapeField.QueryRelation.CONTAINS) { + final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + if (isPointGeometry(luceneGeometries) == false && relation == ShapeField.QueryRelation.CONTAINS) { return new MatchNoDocsQuery("A point field can never contain a non-point geometry"); } - final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); org.apache.lucene.search.Query intersects = XYPointField.newGeometryQuery(fieldName, luceneGeometries); if (relation == ShapeField.QueryRelation.DISJOINT) { // XYPointField does not support DISJOINT queries, so we build one as EXISTS && !INTERSECTS @@ -250,6 +249,10 @@ private static org.apache.lucene.search.Query pointShapeQuery( return intersects; } + private static boolean isPointGeometry(XYGeometry[] geometries) { + return geometries.length == 1 && geometries[0] instanceof org.apache.lucene.geo.XYPoint; + } + /** * This code is based on the ShapeQueryProcessor.shapeQuery() method */ diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index a6bc7befccc80..25d155ccfde07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -125,7 +125,9 @@ public void execute( LOGGER.debug("ESQL query:\n{}", request.query()); analyzedPlan( parse(request.query(), request.params()), - listener.delegateFailureAndWrap((next, analyzedPlan) -> executeAnalyzedPlan(request, runPhase, analyzedPlan, next)) + listener.delegateFailureAndWrap( + (next, analyzedPlan) -> executeOptimizedPlan(request, runPhase, optimizedPlan(analyzedPlan), next) + ) ); } @@ -133,17 +135,17 @@ public void execute( * Execute an analyzed plan. Most code should prefer calling {@link #execute} but * this is public for testing. See {@link Phased} for the sequence of operations. */ - public void executeAnalyzedPlan( + public void executeOptimizedPlan( EsqlQueryRequest request, BiConsumer> runPhase, - LogicalPlan analyzedPlan, + LogicalPlan optimizedPlan, ActionListener listener ) { - LogicalPlan firstPhase = Phased.extractFirstPhase(analyzedPlan); + LogicalPlan firstPhase = Phased.extractFirstPhase(optimizedPlan); if (firstPhase == null) { - runPhase.accept(logicalPlanToPhysicalPlan(analyzedPlan, request), listener); + runPhase.accept(logicalPlanToPhysicalPlan(optimizedPlan, request), listener); } else { - executePhased(new ArrayList<>(), analyzedPlan, request, firstPhase, runPhase, listener); + executePhased(new ArrayList<>(), optimizedPlan, request, firstPhase, runPhase, listener); } } @@ -155,11 +157,11 @@ private void executePhased( BiConsumer> runPhase, ActionListener listener ) { - PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(firstPhase, request); + PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(optimizedPlan(firstPhase), request); runPhase.accept(physicalPlan, listener.delegateFailureAndWrap((next, result) -> { try { profileAccumulator.addAll(result.profiles()); - LogicalPlan newMainPlan = Phased.applyResultsFromFirstPhase(mainPlan, physicalPlan.output(), result.pages()); + LogicalPlan newMainPlan = optimizedPlan(Phased.applyResultsFromFirstPhase(mainPlan, physicalPlan.output(), result.pages())); LogicalPlan newFirstPhase = Phased.extractFirstPhase(newMainPlan); if (newFirstPhase == null) { PhysicalPlan finalPhysicalPlan = logicalPlanToPhysicalPlan(newMainPlan, request); @@ -235,7 +237,7 @@ private void preAnalyze(LogicalPlan parsed, BiFunction void preAnalyzeIndices(LogicalPlan parsed, ActionListener listener, Set enrichPolicyMatchFields) { + private void preAnalyzeIndices(LogicalPlan parsed, ActionListener listener, Set enrichPolicyMatchFields) { PreAnalyzer.PreAnalysis preAnalysis = new PreAnalyzer().preAnalyze(parsed); // TODO we plan to support joins in the future when possible, but for now we'll just fail early if we see one if (preAnalysis.indices.size() > 1) { @@ -352,8 +354,8 @@ private static Set subfields(Set names) { return names.stream().filter(name -> name.endsWith(WILDCARD) == false).map(name -> name + ".*").collect(Collectors.toSet()); } - private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan logicalPlan, EsqlQueryRequest request) { - PhysicalPlan physicalPlan = optimizedPhysicalPlan(logicalPlan); + private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan optimizedPlan, EsqlQueryRequest request) { + PhysicalPlan 
physicalPlan = optimizedPhysicalPlan(optimizedPlan); physicalPlan = physicalPlan.transformUp(FragmentExec.class, f -> { QueryBuilder filter = request.filter(); if (filter != null) { @@ -371,20 +373,25 @@ private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan logicalPlan, EsqlQuer } public LogicalPlan optimizedPlan(LogicalPlan logicalPlan) { - assert logicalPlan.analyzed(); + if (logicalPlan.analyzed() == false) { + throw new IllegalStateException("Expected analyzed plan"); + } var plan = logicalPlanOptimizer.optimize(logicalPlan); LOGGER.debug("Optimized logicalPlan plan:\n{}", plan); return plan; } - public PhysicalPlan physicalPlan(LogicalPlan logicalPlan) { - var plan = mapper.map(optimizedPlan(logicalPlan)); + public PhysicalPlan physicalPlan(LogicalPlan optimizedPlan) { + if (optimizedPlan.optimized() == false) { + throw new IllegalStateException("Expected optimized plan"); + } + var plan = mapper.map(optimizedPlan); LOGGER.debug("Physical plan:\n{}", plan); return plan; } - public PhysicalPlan optimizedPhysicalPlan(LogicalPlan logicalPlan) { - var plan = physicalPlanOptimizer.optimize(physicalPlan(logicalPlan)); + public PhysicalPlan optimizedPhysicalPlan(LogicalPlan optimizedPlan) { + var plan = physicalPlanOptimizer.optimize(physicalPlan(optimizedPlan)); LOGGER.debug("Optimized physical plan:\n{}", plan); return plan; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 76e0466af4da0..f30db1bf9bba2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -415,10 +415,10 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { PlainActionFuture listener = new PlainActionFuture<>(); - session.executeAnalyzedPlan( + session.executeOptimizedPlan( new EsqlQueryRequest(), runPhase(bigArrays, physicalOperationProviders), - analyzed, + session.optimizedPlan(analyzed), listener.delegateFailureAndWrap( // Wrap so we can capture the warnings in the calling thread (next, result) -> next.onResponse( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java index 782e1fb4333d8..2f3aa09868637 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverSleeps; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -51,7 +52,10 @@ private DriverProfile randomDriverProfile() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomList(10, this::randomOperatorStatus) + randomNonNegativeLong(), + randomNonNegativeLong(), + randomList(10, this::randomOperatorStatus), + DriverSleeps.empty() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index e7f539026498b..9d4a1c21c5995 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverSleeps; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasables; @@ -479,10 +480,13 @@ public void testProfileXContent() { new EsqlQueryResponse.Profile( List.of( new DriverProfile( + 1723489812649L, + 1723489819929L, 20021, 20000, 12, - List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10))) + List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10))), + DriverSleeps.empty() ) ) ), @@ -509,6 +513,8 @@ public void testProfileXContent() { "profile" : { "drivers" : [ { + "start_millis" : 1723489812649, + "stop_millis" : 1723489819929, "took_nanos" : 20021, "cpu_nanos" : 20000, "iterations" : 12, @@ -520,7 +526,12 @@ public void testProfileXContent() { "pages_processed" : 10 } } - ] + ], + "sleeps" : { + "counts" : { }, + "first" : [ ], + "last" : [ ] + } } ] } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index f663002a51d68..3fb4b80d3974e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1809,13 +1809,13 @@ public void testUnsupportedTypesInStats() { found value [x] type [unsigned_long] line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long, _source, or counter types],\ found value [x] type [unsigned_long] - line 2:39: argument of [max(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ + line 2:39: argument of [max(x)] must be [representable except unsigned_long and spatial types],\ found value [x] type [unsigned_long] line 2:47: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] line 2:58: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] - line 2:88: argument of [min(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ + line 2:88: argument of [min(x)] must be [representable except unsigned_long and spatial types],\ found value [x] type [unsigned_long] line 2:96: first argument of [percentile(x, 10)] must be [numeric except unsigned_long],\ found value [x] type [unsigned_long] @@ -1824,21 +1824,17 @@ public void testUnsupportedTypesInStats() { verifyUnsupported(""" row x = to_version("1.2") - | stats avg(x), max(x), median(x), median_absolute_deviation(x), min(x), percentile(x, 10), sum(x) + | stats avg(x), median(x), median_absolute_deviation(x), percentile(x, 10), sum(x) """, """ - Found 7 problems + Found 5 problems line 2:10: argument of [avg(x)] must be [numeric except unsigned_long or counter 
types],\ found value [x] type [version] - line 2:18: argument of [max(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ + line 2:18: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] - line 2:26: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ + line 2:29: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] - line 2:37: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ - found value [x] type [version] - line 2:67: argument of [min(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ - found value [x] type [version] - line 2:75: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], found value [x] type [version] - line 2:94: argument of [sum(x)] must be [numeric except unsigned_long or counter types], found value [x] type [version]"""); + line 2:59: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], found value [x] type [version] + line 2:78: argument of [sum(x)] must be [numeric except unsigned_long or counter types], found value [x] type [version]"""); } public void testInOnText() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 904308ef64d58..e2403505921a9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -64,9 +64,12 @@ public void testUnsupportedAndMultiTypedFields() { LinkedHashSet ipIndices = new LinkedHashSet<>(); ipIndices.add("test1"); ipIndices.add("test2"); + ipIndices.add("test3"); + ipIndices.add("test4"); + ipIndices.add("test5"); LinkedHashMap> typesToIndices = new LinkedHashMap<>(); typesToIndices.put("ip", ipIndices); - typesToIndices.put("keyword", Set.of("test3")); + typesToIndices.put("keyword", Set.of("test6")); EsField multiTypedField = new InvalidMappedField(multiTyped, typesToIndices); // Also add an unsupported/multityped field under the names `int` and `double` so we can use `LOOKUP int_number_names ...` and @@ -85,7 +88,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:22: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | dissect multi_typed \"%{foo}\"", analyzer) ); @@ -95,7 +98,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:19: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | grok multi_typed \"%{WORD:foo}\"", analyzer) ); @@ -115,7 +118,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:23: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | eval x = 
multi_typed", analyzer) ); @@ -125,7 +128,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:32: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | eval x = to_lower(multi_typed)", analyzer) ); @@ -135,7 +138,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:32: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats count(1) by multi_typed", analyzer) ); if (EsqlCapabilities.Cap.INLINESTATS.isEnabled()) { @@ -145,7 +148,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:38: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | inlinestats count(1) by multi_typed", analyzer) ); } @@ -156,7 +159,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:27: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats values(multi_typed)", analyzer) ); if (EsqlCapabilities.Cap.INLINESTATS.isEnabled()) { @@ -166,7 +169,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:33: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | inlinestats values(multi_typed)", analyzer) ); } @@ -177,7 +180,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:27: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats values(multi_typed)", analyzer) ); @@ -200,7 +203,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:24: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | mv_expand multi_typed", analyzer) ); @@ -210,7 +213,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:21: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | rename multi_typed as x", analyzer) ); @@ -220,7 +223,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:19: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | 
sort multi_typed desc", analyzer) ); @@ -230,7 +233,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:20: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | where multi_typed is not null", analyzer) ); } @@ -252,10 +255,30 @@ public void testRoundFunctionInvalidInputs() { "1:31: second argument of [round(a, 3.5)] must be [integer], found value [3.5] type [double]", error("row a = 1, b = \"c\" | eval x = round(a, 3.5)") ); + } + + public void testImplicitCastingErrorMessages() { assertEquals( "1:23: Cannot convert string [c] to [INTEGER], error [Cannot parse number [c]]", error("row a = round(123.45, \"c\")") ); + assertEquals( + "1:27: Cannot convert string [c] to [DOUBLE], error [Cannot parse number [c]]", + error("row a = 1 | eval x = acos(\"c\")") + ); + assertEquals( + "1:33: Cannot convert string [c] to [DOUBLE], error [Cannot parse number [c]]\n" + + "line 1:38: Cannot convert string [a] to [INTEGER], error [Cannot parse number [a]]", + error("row a = 1 | eval x = round(acos(\"c\"),\"a\")") + ); + assertEquals( + "1:63: Cannot convert string [x] to [INTEGER], error [Cannot parse number [x]]", + error("row ip4 = to_ip(\"1.2.3.4\") | eval ip4_prefix = ip_prefix(ip4, \"x\", 0)") + ); + assertEquals( + "1:42: Cannot convert string [a] to [DOUBLE], error [Cannot parse number [a]]", + error("ROW a=[3, 5, 1, 6] | EVAL avg_a = MV_AVG(\"a\")") + ); } public void testAggsExpressionsInStatsAggs() { @@ -371,6 +394,66 @@ public void testGroupingInsideGrouping() { ); } + public void testInvalidBucketCalls() { + assertThat( + error("from test | stats max(emp_no) by bucket(emp_no, 5, \"2000-01-01\")"), + containsString( + "function expects exactly four arguments when the first one is of type [INTEGER] and the second of type [INTEGER]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(emp_no, 1 week, \"2000-01-01\")"), + containsString( + "second argument of [bucket(emp_no, 1 week, \"2000-01-01\")] must be [numeric], found value [1 week] type [date_period]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, 5.5, \"2000-01-01\")"), + containsString( + "second argument of [bucket(hire_date, 5.5, \"2000-01-01\")] must be [integral, date_period or time_duration], " + + "found value [5.5] type [double]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, 5, 1 day, 1 month)"), + containsString( + "third argument of [bucket(hire_date, 5, 1 day, 1 month)] must be [datetime or string], " + + "found value [1 day] type [date_period]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, 5, \"2000-01-01\", 1 month)"), + containsString( + "fourth argument of [bucket(hire_date, 5, \"2000-01-01\", 1 month)] must be [datetime or string], " + + "found value [1 month] type [date_period]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, 5, \"2000-01-01\")"), + containsString( + "function expects exactly four arguments when the first one is of type [DATETIME] and the second of type [INTEGER]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(emp_no, \"5\")"), + containsString("second argument of [bucket(emp_no, \"5\")] must be [numeric], found value [\"5\"] type [keyword]") + ); + + assertThat( + error("from test | stats 
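The VerifierTests expectations above now assume that long index lists in the "incompatible types" messages are truncated to the first three names plus a summary of the remainder ("[ip] in [test1, test2, test3] and [2] other indices"). The formatting code itself is not part of this diff, so the sketch below only illustrates the rule those updated expectations imply; names and the helper are made up:

```java
// Hypothetical illustration of the truncation rule implied by the updated test expectations.
import java.util.List;

public class IndexListFormatSketch {

    static String format(List<String> indices) {
        int shown = Math.min(3, indices.size());
        String head = String.join(", ", indices.subList(0, shown));
        int rest = indices.size() - shown;
        return rest > 0 ? "[" + head + "] and [" + rest + "] other indices" : "[" + head + "]";
    }

    public static void main(String[] args) {
        // "[test1, test2, test3] and [2] other indices" -- matches the ip side of the message
        System.out.println(format(List.of("test1", "test2", "test3", "test4", "test5")));
        // "[test6]" -- short lists are printed in full, as for the keyword side
        System.out.println(format(List.of("test6")));
    }
}
```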
max(emp_no) by bucket(hire_date, \"5\")"), + containsString( + "second argument of [bucket(hire_date, \"5\")] must be [integral, date_period or time_duration], " + + "found value [\"5\"] type [keyword]" + ) + ); + } + public void testAggsWithInvalidGrouping() { assertEquals( "1:35: column [languages] cannot be used as an aggregate once declared in the STATS BY grouping key [l = languages % 3]", @@ -683,7 +766,7 @@ public void testAggregateOnCounter() { error("FROM tests | STATS min(network.bytes_in)", tsdb), equalTo( "1:20: argument of [min(network.bytes_in)] must be" - + " [boolean, datetime, ip or numeric except unsigned_long or counter types]," + + " [representable except unsigned_long and spatial types]," + " found value [network.bytes_in] type [counter_long]" ) ); @@ -692,7 +775,7 @@ public void testAggregateOnCounter() { error("FROM tests | STATS max(network.bytes_in)", tsdb), equalTo( "1:20: argument of [max(network.bytes_in)] must be" - + " [boolean, datetime, ip or numeric except unsigned_long or counter types]," + + " [representable except unsigned_long and spatial types]," + " found value [network.bytes_in] type [counter_long]" ) ); @@ -725,9 +808,9 @@ public void testAggsResolutionWithUnresolvedGroupings() { ); assertThat(error("FROM tests | STATS " + agg_func + "(foobar) by foobar"), matchesRegex("1:\\d+: Unknown column \\[foobar]")); assertThat( - error("FROM tests | STATS " + agg_func + "(foobar) by BUCKET(languages, 10)"), + error("FROM tests | STATS " + agg_func + "(foobar) by BUCKET(hire_date, 10)"), matchesRegex( - "1:\\d+: function expects exactly four arguments when the first one is of type \\[INTEGER]" + "1:\\d+: function expects exactly four arguments when the first one is of type \\[DATETIME]" + " and the second of type \\[INTEGER]\n" + "line 1:\\d+: Unknown column \\[foobar]" ) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index 65425486ea4e0..f3c87e0e9d1d7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -301,14 +301,15 @@ private void resolveExpression(Expression expression, Consumer onAgg } expression = resolveSurrogates(expression); + // As expressions may be composed of multiple functions, we need to fold nulls bottom-up + expression = expression.transformUp(e -> new FoldNull().rule(e)); + assertThat(expression.dataType(), equalTo(testCase.expectedType())); + Expression.TypeResolution resolution = expression.typeResolved(); if (resolution.unresolved()) { throw new AssertionError("expected resolved " + resolution.message()); } - expression = new FoldNull().rule(expression); - assertThat(expression.dataType(), equalTo(testCase.expectedType())); - assumeTrue( "Surrogate expression with non-trivial children cannot be evaluated", expression.children() diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java index bc29e33c4a17f..c625ae5dfb61b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java +++ 
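The AbstractAggregationTestCase hunk above switches from applying FoldNull once at the root to applying it bottom-up with transformUp, because expressions may be composed of multiple functions. A toy, self-contained illustration of why that matters for nested calls; Expr, rule and transformUp below are made-up stand-ins for the ES|QL expression classes and the FoldNull rule:

```java
// Toy model: a call with a null argument folds to a null literal. A single top-level pass
// misses nulls buried in nested calls; a bottom-up transform folds the whole expression.
import java.util.List;

public class FoldNullSketch {

    sealed interface Expr permits Call, Lit {}
    record Lit(Object value) implements Expr {}
    record Call(String name, List<Expr> args) implements Expr {}

    // One application of the rule at a single node.
    static Expr rule(Expr e) {
        if (e instanceof Call c && c.args().stream().anyMatch(a -> a instanceof Lit l && l.value() == null)) {
            return new Lit(null);
        }
        return e;
    }

    // Rewrite children first, then the node itself (the shape of transformUp).
    static Expr transformUp(Expr e) {
        if (e instanceof Call c) {
            List<Expr> newArgs = c.args().stream().map(FoldNullSketch::transformUp).toList();
            return rule(new Call(c.name(), newArgs));
        }
        return rule(e);
    }

    public static void main(String[] args) {
        Expr nested = new Call("round", List.of(new Call("acos", List.of(new Lit(null)))));
        System.out.println(rule(nested));        // unchanged Call: the root pass misses the inner null
        System.out.println(transformUp(nested)); // Lit[value=null]: bottom-up folding collapses it
    }
}
```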
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.expression.function; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -20,17 +19,24 @@ import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.session.Configuration; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; -import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomConfiguration; import static org.hamcrest.Matchers.sameInstance; public abstract class AbstractAttributeTestCase extends AbstractWireSerializingTestCase< AbstractAttributeTestCase.ExtraAttribute> { + + /** + * We use a single random config for all serialization because it's pretty + * heavy to build, especially in {@link #testConcurrentSerialization()}. + */ + private Configuration config; + protected abstract T create(); protected abstract T mutate(T instance); @@ -56,7 +62,11 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { @Override protected final Writeable.Reader instanceReader() { - return ExtraAttribute::new; + return in -> { + PlanStreamInput pin = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), config); + pin.setTransportVersion(in.getTransportVersion()); + return new ExtraAttribute(pin); + }; } /** @@ -70,10 +80,8 @@ public static class ExtraAttribute implements Writeable { assertThat(a.source(), sameInstance(Source.EMPTY)); } - ExtraAttribute(StreamInput in) throws IOException { - PlanStreamInput ps = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), randomConfiguration()); - ps.setTransportVersion(in.getTransportVersion()); - a = ps.readNamedWriteable(Attribute.class); + ExtraAttribute(PlanStreamInput in) throws IOException { + a = in.readNamedWriteable(Attribute.class); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index cece2badb2955..efb078cbe80e0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -88,7 +88,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeSet; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -708,13 +707,12 @@ public static void testFunctionInfo() { for (int i = 0; i < args.size(); i++) { typesFromSignature.add(new HashSet<>()); } - Function typeName = dt -> dt.esType() != null ? 
dt.esType() : dt.typeName(); for (Map.Entry, DataType> entry : signatures().entrySet()) { List types = entry.getKey(); for (int i = 0; i < args.size() && i < types.size(); i++) { - typesFromSignature.get(i).add(typeName.apply(types.get(i))); + typesFromSignature.get(i).add(types.get(i).esNameIfPossible()); } - returnFromSignature.add(typeName.apply(entry.getValue())); + returnFromSignature.add(entry.getValue().esNameIfPossible()); } for (int i = 0; i < args.size(); i++) { @@ -871,15 +869,15 @@ private static void renderTypes(List argNames) throws IOException { } StringBuilder b = new StringBuilder(); for (DataType arg : sig.getKey()) { - b.append(arg.typeName()).append(" | "); + b.append(arg.esNameIfPossible()).append(" | "); } b.append("| ".repeat(argNames.size() - sig.getKey().size())); - b.append(sig.getValue().typeName()); + b.append(sig.getValue().esNameIfPossible()); table.add(b.toString()); } Collections.sort(table); if (table.isEmpty()) { - table.add(signatures.values().iterator().next().typeName()); + table.add(signatures.values().iterator().next().esNameIfPossible()); } String rendered = DOCS_WARNING + """ @@ -1085,7 +1083,7 @@ private static void renderKibanaFunctionDefinition( builder.startArray("params"); builder.endArray(); // There should only be one return type so just use that as the example - builder.field("returnType", signatures().values().iterator().next().typeName()); + builder.field("returnType", signatures().values().iterator().next().esNameIfPossible()); builder.endObject(); } else { int minArgCount = (int) args.stream().filter(a -> false == a.optional()).count(); @@ -1106,14 +1104,14 @@ private static void renderKibanaFunctionDefinition( EsqlFunctionRegistry.ArgSignature arg = args.get(i); builder.startObject(); builder.field("name", arg.name()); - builder.field("type", sig.getKey().get(i).typeName()); + builder.field("type", sig.getKey().get(i).esNameIfPossible()); builder.field("optional", arg.optional()); builder.field("description", arg.description()); builder.endObject(); } builder.endArray(); builder.field("variadic", variadic); - builder.field("returnType", sig.getValue().typeName()); + builder.field("returnType", sig.getValue().esNameIfPossible()); builder.endObject(); } } @@ -1149,12 +1147,12 @@ public int compare(Map.Entry, DataType> lhs, Map.Entry parameterSuppliersFromTypedDataWithDefaultCh ); } + /** + * Converts a list of test cases into a list of parameter suppliers. + * Also, adds a default set of extra test cases. + *
+ * Use if possible, as this method may get updated with new checks in the future. + *
    + * + * @param nullsExpectedType See {@link #anyNullIsNull(List, ExpectedType, ExpectedEvaluatorToString)} + * @param evaluatorToString See {@link #anyNullIsNull(List, ExpectedType, ExpectedEvaluatorToString)} + */ + protected static Iterable parameterSuppliersFromTypedDataWithDefaultChecks( + ExpectedType nullsExpectedType, + ExpectedEvaluatorToString evaluatorToString, + List suppliers, + PositionalErrorMessageSupplier positionalErrorMessageSupplier + ) { + return parameterSuppliersFromTypedData( + errorsForCasesWithoutExamples( + anyNullIsNull(randomizeBytesRefsOffset(suppliers), nullsExpectedType, evaluatorToString), + positionalErrorMessageSupplier + ) + ); + } + public final void testEvaluate() { assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); boolean readFloating = randomBoolean(); @@ -97,6 +121,7 @@ public final void testEvaluate() { Object result; try (ExpressionEvaluator evaluator = evaluator(expression).get(driverContext())) { try (Block block = evaluator.eval(row(testCase.getDataValues()))) { + assertThat(block.getPositionCount(), is(1)); result = toJavaObjectUnsignedLongAware(block, 0); } } @@ -217,6 +242,7 @@ private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext con ExpressionEvaluator eval = evaluator(expression).get(context); Block block = eval.eval(new Page(positions, manyPositionsBlocks)) ) { + assertThat(block.getPositionCount(), is(positions)); for (int p = 0; p < positions; p++) { if (nullPositions.contains(p)) { assertThat(toJavaObject(block, p), allNullsMatcher()); @@ -260,6 +286,7 @@ public final void testEvaluateInManyThreads() throws ExecutionException, Interru try (EvalOperator.ExpressionEvaluator eval = evalSupplier.get(driverContext())) { for (int c = 0; c < count; c++) { try (Block block = eval.eval(page)) { + assertThat(block.getPositionCount(), is(1)); assertThat(toJavaObjectUnsignedLongAware(block, 0), testCase.getMatcher()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultivalueTestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultivalueTestCaseSupplier.java new file mode 100644 index 0000000000000..01c73e9ef0482 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultivalueTestCaseSupplier.java @@ -0,0 +1,325 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; + +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomList; +import static org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier.TypedDataSupplier; + +/** + * Extension of {@link TestCaseSupplier} that provided multivalue test cases. 
+ */ +public final class MultivalueTestCaseSupplier { + + private static final int MIN_VALUES = 1; + private static final int MAX_VALUES = 1000; + + private MultivalueTestCaseSupplier() {} + + public static List intCases(int min, int max, boolean includeZero) { + List cases = new ArrayList<>(); + + for (Block.MvOrdering ordering : Block.MvOrdering.values()) { + if (0 <= max && 0 >= min && includeZero) { + cases.add( + new TypedDataSupplier( + "<0 mv " + ordering + " ints>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> 0), ordering), + DataType.INTEGER + ) + ); + } + + if (max != 0) { + cases.add( + new TypedDataSupplier( + "<" + max + " mv " + ordering + " ints>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> max), ordering), + DataType.INTEGER + ) + ); + } + + if (min != 0 && min != max) { + cases.add( + new TypedDataSupplier( + "<" + min + " mv " + ordering + " ints>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> min), ordering), + DataType.INTEGER + ) + ); + } + + int lower = Math.max(min, 1); + int upper = Math.min(max, Integer.MAX_VALUE); + if (lower < upper) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomIntBetween(lower, upper)), ordering), + DataType.INTEGER + ) + ); + } + + int lower1 = Math.max(min, Integer.MIN_VALUE); + int upper1 = Math.min(max, -1); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomIntBetween(lower1, upper1)), ordering), + DataType.INTEGER + ) + ); + } + + if (min < 0 && max > 0) { + cases.add( + new TypedDataSupplier("", () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> { + if (includeZero) { + return ESTestCase.randomIntBetween(min, max); + } + return randomBoolean() ? 
ESTestCase.randomIntBetween(min, -1) : ESTestCase.randomIntBetween(1, max); + }), ordering), DataType.INTEGER) + ); + } + } + + return cases; + } + + public static List longCases(long min, long max, boolean includeZero) { + List cases = new ArrayList<>(); + + for (Block.MvOrdering ordering : Block.MvOrdering.values()) { + if (0 <= max && 0 >= min && includeZero) { + cases.add( + new TypedDataSupplier( + "<0 mv " + ordering + " longs>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> 0L), ordering), + DataType.LONG + ) + ); + } + + if (max != 0) { + cases.add( + new TypedDataSupplier( + "<" + max + " mv " + ordering + " longs>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> max), ordering), + DataType.LONG + ) + ); + } + + if (min != 0 && min != max) { + cases.add( + new TypedDataSupplier( + "<" + min + " mv " + ordering + " longs>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> min), ordering), + DataType.LONG + ) + ); + } + + long lower = Math.max(min, 1); + long upper = Math.min(max, Long.MAX_VALUE); + if (lower < upper) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomLongBetween(lower, upper)), ordering), + DataType.LONG + ) + ); + } + + long lower1 = Math.max(min, Long.MIN_VALUE); + long upper1 = Math.min(max, -1); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomLongBetween(lower1, upper1)), ordering), + DataType.LONG + ) + ); + } + + if (min < 0 && max > 0) { + cases.add( + new TypedDataSupplier("", () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> { + if (includeZero) { + return ESTestCase.randomLongBetween(min, max); + } + return randomBoolean() ? 
ESTestCase.randomLongBetween(min, -1) : ESTestCase.randomLongBetween(1, max); + }), ordering), DataType.LONG) + ); + } + } + + return cases; + } + + public static List doubleCases(double min, double max, boolean includeZero) { + List cases = new ArrayList<>(); + + for (Block.MvOrdering ordering : Block.MvOrdering.values()) { + if (0d <= max && 0d >= min && includeZero) { + cases.add( + new TypedDataSupplier( + "<0 mv " + ordering + " doubles>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> 0d), ordering), + DataType.DOUBLE + ) + ); + cases.add( + new TypedDataSupplier( + "<-0 mv " + ordering + " doubles>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> -0d), ordering), + DataType.DOUBLE + ) + ); + } + + if (max != 0d) { + cases.add( + new TypedDataSupplier( + "<" + max + " mv " + ordering + " doubles>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> max), ordering), + DataType.DOUBLE + ) + ); + } + + if (min != 0d && min != max) { + cases.add( + new TypedDataSupplier( + "<" + min + " mv " + ordering + " doubles>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> min), ordering), + DataType.DOUBLE + ) + ); + } + + double lower1 = Math.max(min, 0d); + double upper1 = Math.min(max, 1d); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder( + randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomDoubleBetween(lower1, upper1, true)), + ordering + ), + DataType.DOUBLE + ) + ); + } + + double lower2 = Math.max(min, -1d); + double upper2 = Math.min(max, 0d); + if (lower2 < upper2) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder( + randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomDoubleBetween(lower2, upper2, true)), + ordering + ), + DataType.DOUBLE + ) + ); + } + + double lower3 = Math.max(min, 1d); + double upper3 = Math.min(max, Double.MAX_VALUE); + if (lower3 < upper3) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder( + randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomDoubleBetween(lower3, upper3, true)), + ordering + ), + DataType.DOUBLE + ) + ); + } + + double lower4 = Math.max(min, -Double.MAX_VALUE); + double upper4 = Math.min(max, -1d); + if (lower4 < upper4) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder( + randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomDoubleBetween(lower4, upper4, true)), + ordering + ), + DataType.DOUBLE + ) + ); + } + + if (min < 0 && max > 0) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> { + if (includeZero) { + return ESTestCase.randomDoubleBetween(min, max, true); + } + return randomBoolean() + ? 
ESTestCase.randomDoubleBetween(min, -1, true) + : ESTestCase.randomDoubleBetween(1, max, true); + }), ordering), + DataType.DOUBLE + ) + ); + } + } + + return cases; + } + + private static > List putInOrder(List mvData, Block.MvOrdering ordering) { + switch (ordering) { + case UNORDERED -> { + } + case DEDUPLICATED_UNORDERD -> { + var dedup = new LinkedHashSet<>(mvData); + mvData.clear(); + mvData.addAll(dedup); + } + case DEDUPLICATED_AND_SORTED_ASCENDING -> { + var dedup = new HashSet<>(mvData); + mvData.clear(); + mvData.addAll(dedup); + Collections.sort(mvData); + } + case SORTED_ASCENDING -> { + Collections.sort(mvData); + } + default -> throw new UnsupportedOperationException("unsupported ordering [" + ordering + "]"); + } + + return mvData; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 5ef71e7ae30fb..a1caa784c9787 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -1289,7 +1289,7 @@ private static String castToUnsignedLongEvaluator(String original, DataType curr throw new UnsupportedOperationException(); } - private static String castToDoubleEvaluator(String original, DataType current) { + public static String castToDoubleEvaluator(String original, DataType current) { if (current == DataType.DOUBLE) { return original; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java index 52e908a51dd1e..ce2bf7e262ae9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.versionfield.Version; import java.util.ArrayList; import java.util.Comparator; @@ -44,7 +45,10 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), MultiRowTestCaseSupplier.dateCases(1, 1000), MultiRowTestCaseSupplier.booleanCases(1, 1000), - MultiRowTestCaseSupplier.ipCases(1, 1000) + MultiRowTestCaseSupplier.ipCases(1, 1000), + MultiRowTestCaseSupplier.versionCases(1, 1000), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) ).flatMap(List::stream).map(MaxTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); suppliers.addAll( @@ -109,14 +113,44 @@ public static Iterable parameters() { DataType.IP, equalTo(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1")))) ) - ) + ), + new TestCaseSupplier(List.of(DataType.KEYWORD), () -> { + var value = new BytesRef(randomAlphaOfLengthBetween(0, 50)); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.KEYWORD, "field")), + 
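The putInOrder helper above normalizes each generated multivalue list to one of the Block.MvOrdering variants before it is handed to a test case. A standalone sketch of the same normalization, with the result for one concrete input noted in the comments; plain strings stand in for the real enum constants:

```java
// Standalone sketch mirroring putInOrder above. LinkedHashSet keeps first-seen order for the
// deduplicated-unordered case; HashSet plus sort covers the deduplicated-and-sorted case.
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;

public class MvOrderingSketch {

    static List<Integer> putInOrder(List<Integer> mvData, String ordering) {
        switch (ordering) {
            case "UNORDERED" -> { /* leave as generated */ }
            case "DEDUPLICATED_UNORDERD" -> {              // spelling matches the real enum constant
                var dedup = new LinkedHashSet<>(mvData);
                mvData.clear();
                mvData.addAll(dedup);
            }
            case "DEDUPLICATED_AND_SORTED_ASCENDING" -> {
                var dedup = new HashSet<>(mvData);
                mvData.clear();
                mvData.addAll(dedup);
                Collections.sort(mvData);
            }
            case "SORTED_ASCENDING" -> Collections.sort(mvData);
            default -> throw new UnsupportedOperationException("unsupported ordering [" + ordering + "]");
        }
        return mvData;
    }

    public static void main(String[] args) {
        // For input [3, 1, 3, 2]:
        //   UNORDERED                         -> [3, 1, 3, 2]
        //   DEDUPLICATED_UNORDERD             -> [3, 1, 2]
        //   DEDUPLICATED_AND_SORTED_ASCENDING -> [1, 2, 3]
        //   SORTED_ASCENDING                  -> [1, 2, 3, 3]
        for (String o : List.of("UNORDERED", "DEDUPLICATED_UNORDERD", "DEDUPLICATED_AND_SORTED_ASCENDING", "SORTED_ASCENDING")) {
            System.out.println(o + " -> " + putInOrder(new ArrayList<>(List.of(3, 1, 3, 2)), o));
        }
    }
}
```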
"Max[field=Attribute[channel=0]]", + DataType.KEYWORD, + equalTo(value) + ); + }), + new TestCaseSupplier(List.of(DataType.TEXT), () -> { + var value = new BytesRef(randomAlphaOfLengthBetween(0, 50)); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), + "Max[field=Attribute[channel=0]]", + DataType.TEXT, + equalTo(value) + ); + }), + new TestCaseSupplier(List.of(DataType.VERSION), () -> { + var value = randomBoolean() + ? new Version(randomAlphaOfLengthBetween(1, 10)).toBytesRef() + : new Version(randomIntBetween(0, 100) + "." + randomIntBetween(0, 100) + "." + randomIntBetween(0, 100)) + .toBytesRef(); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.VERSION, "field")), + "Max[field=Attribute[channel=0]]", + DataType.VERSION, + equalTo(value) + ); + }) ) ); return parameterSuppliersFromTypedDataWithDefaultChecks( suppliers, false, - (v, p) -> "boolean, datetime, ip or numeric except unsigned_long or counter types" + (v, p) -> "representable except unsigned_long and spatial types" ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java index 9514c817df497..7250072cd2003 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.versionfield.Version; import java.util.ArrayList; import java.util.Comparator; @@ -44,7 +45,10 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), MultiRowTestCaseSupplier.dateCases(1, 1000), MultiRowTestCaseSupplier.booleanCases(1, 1000), - MultiRowTestCaseSupplier.ipCases(1, 1000) + MultiRowTestCaseSupplier.ipCases(1, 1000), + MultiRowTestCaseSupplier.versionCases(1, 1000), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) ).flatMap(List::stream).map(MinTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); suppliers.addAll( @@ -109,14 +113,44 @@ public static Iterable parameters() { DataType.IP, equalTo(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1")))) ) - ) + ), + new TestCaseSupplier(List.of(DataType.KEYWORD), () -> { + var value = new BytesRef(randomAlphaOfLengthBetween(0, 50)); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.KEYWORD, "field")), + "Min[field=Attribute[channel=0]]", + DataType.KEYWORD, + equalTo(value) + ); + }), + new TestCaseSupplier(List.of(DataType.TEXT), () -> { + var value = new BytesRef(randomAlphaOfLengthBetween(0, 50)); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), + "Min[field=Attribute[channel=0]]", + DataType.TEXT, + equalTo(value) + ); + }), + new TestCaseSupplier(List.of(DataType.VERSION), () -> { + var value = randomBoolean() 
+ ? new Version(randomAlphaOfLengthBetween(1, 10)).toBytesRef() + : new Version(randomIntBetween(0, 100) + "." + randomIntBetween(0, 100) + "." + randomIntBetween(0, 100)) + .toBytesRef(); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.VERSION, "field")), + "Min[field=Attribute[channel=0]]", + DataType.VERSION, + equalTo(value) + ); + }) ) ); return parameterSuppliersFromTypedDataWithDefaultChecks( suppliers, false, - (v, p) -> "boolean, datetime, ip or numeric except unsigned_long or counter types" + (v, p) -> "representable except unsigned_long and spatial types" ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileTests.java index 5271431bd43b8..be11515876966 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileTests.java @@ -52,7 +52,7 @@ public static Iterable parameters() { } } - return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers, false, (v, p) -> "numeric except unsigned_long"); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java index 4c7b812111450..a26504b8ced9a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java @@ -73,7 +73,7 @@ public static Iterable parameters() { } // TODO once we cast above the functions we can drop these - private static final DataType[] DATE_BOUNDS_TYPE = new DataType[] { DataType.DATETIME }; + private static final DataType[] DATE_BOUNDS_TYPE = new DataType[] { DataType.DATETIME, DataType.KEYWORD, DataType.TEXT }; private static void dateCases(List suppliers, String name, LongSupplier date) { for (DataType fromType : DATE_BOUNDS_TYPE) { @@ -89,7 +89,7 @@ private static void dateCases(List suppliers, String name, Lon args, "DateTruncEvaluator[fieldVal=Attribute[channel=0], rounding=Rounding[DAY_OF_MONTH in Z][fixed to midnight]]", DataType.DATETIME, - dateResultsMatcher(args) + resultsMatcher(args) ); })); // same as above, but a low bucket count and datetime bounds that match it (at hour span) @@ -136,7 +136,7 @@ private static void dateCasesWithSpan( args, "DateTruncEvaluator[fieldVal=Attribute[channel=0], rounding=Rounding" + spanStr + "]", DataType.DATETIME, - dateResultsMatcher(args) + resultsMatcher(args) ); })); } @@ -167,7 +167,7 @@ private static void numberCases(List suppliers, String name, D + ", " + "rhs=LiteralsEvaluator[lit=50.0]]], rhs=LiteralsEvaluator[lit=50.0]]", DataType.DOUBLE, - dateResultsMatcher(args) + resultsMatcher(args) ); })); } @@ -187,26 +187,29 @@ private static TestCaseSupplier.TypedData numericBound(String name, DataType typ } private static void numberCasesWithSpan(List suppliers, String name, DataType numberType, Supplier number) { - suppliers.add(new TestCaseSupplier(name, List.of(numberType, DataType.DOUBLE), () -> { - List args = new ArrayList<>(); - 
args.add(new TestCaseSupplier.TypedData(number.get(), "field")); - args.add(new TestCaseSupplier.TypedData(50., DataType.DOUBLE, "span").forceLiteral()); - String attr = "Attribute[channel=0]"; - if (numberType == DataType.INTEGER) { - attr = "CastIntToDoubleEvaluator[v=" + attr + "]"; - } else if (numberType == DataType.LONG) { - attr = "CastLongToDoubleEvaluator[v=" + attr + "]"; - } - return new TestCaseSupplier.TestCase( - args, - "MulDoublesEvaluator[lhs=FloorDoubleEvaluator[val=DivDoublesEvaluator[lhs=" - + attr - + ", " - + "rhs=LiteralsEvaluator[lit=50.0]]], rhs=LiteralsEvaluator[lit=50.0]]", - DataType.DOUBLE, - dateResultsMatcher(args) - ); - })); + for (Number span : List.of(50, 50L, 50d)) { + DataType spanType = DataType.fromJava(span); + suppliers.add(new TestCaseSupplier(name, List.of(numberType, spanType), () -> { + List args = new ArrayList<>(); + args.add(new TestCaseSupplier.TypedData(number.get(), "field")); + args.add(new TestCaseSupplier.TypedData(span, spanType, "span").forceLiteral()); + String attr = "Attribute[channel=0]"; + if (numberType == DataType.INTEGER) { + attr = "CastIntToDoubleEvaluator[v=" + attr + "]"; + } else if (numberType == DataType.LONG) { + attr = "CastLongToDoubleEvaluator[v=" + attr + "]"; + } + return new TestCaseSupplier.TestCase( + args, + "MulDoublesEvaluator[lhs=FloorDoubleEvaluator[val=DivDoublesEvaluator[lhs=" + + attr + + ", " + + "rhs=LiteralsEvaluator[lit=50.0]]], rhs=LiteralsEvaluator[lit=50.0]]", + DataType.DOUBLE, + resultsMatcher(args) + ); + })); + } } @@ -214,7 +217,7 @@ private static TestCaseSupplier.TypedData keywordDateLiteral(String name, DataTy return new TestCaseSupplier.TypedData(date, type, name).forceLiteral(); } - private static Matcher dateResultsMatcher(List typedData) { + private static Matcher resultsMatcher(List typedData) { if (typedData.get(0).type() == DataType.DATETIME) { long millis = ((Number) typedData.get(0).data()).longValue(); return equalTo(Rounding.builder(Rounding.DateTimeUnit.DAY_OF_MONTH).build().prepareForUnknown().round(millis)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java new file mode 100644 index 0000000000000..3410b95458302 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java @@ -0,0 +1,466 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.StringUtils; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.MultivalueTestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; +import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class MvPercentileTests extends AbstractScalarFunctionTestCase { + public MvPercentileTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List cases = new ArrayList<>(); + + var fieldSuppliers = Stream.of( + MultivalueTestCaseSupplier.intCases(Integer.MIN_VALUE, Integer.MAX_VALUE, true), + MultivalueTestCaseSupplier.longCases(Long.MIN_VALUE, Long.MAX_VALUE, true), + MultivalueTestCaseSupplier.doubleCases(-Double.MAX_VALUE, Double.MAX_VALUE, true) + ).flatMap(List::stream).toList(); + + var percentileSuppliers = Stream.of( + TestCaseSupplier.intCases(0, 100, true), + TestCaseSupplier.longCases(0, 100, true), + TestCaseSupplier.doubleCases(0, 100, true) + ).flatMap(List::stream).toList(); + + for (var fieldSupplier : fieldSuppliers) { + for (var percentileSupplier : percentileSuppliers) { + cases.add(makeSupplier(fieldSupplier, percentileSupplier)); + } + } + + for (var percentileType : List.of(INTEGER, LONG, DataType.DOUBLE)) { + cases.addAll( + List.of( + // Doubles + new TestCaseSupplier( + "median double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10., 5., 10.), DOUBLE, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(5.) + ) + ), + new TestCaseSupplier( + "single value double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(55.), DOUBLE, "field"), + percentileWithType(randomIntBetween(0, 100), percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(55.) + ) + ), + new TestCaseSupplier( + "p0 double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10., 5., 10.), DOUBLE, "field"), + percentileWithType(0, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(-10.) 
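The expected values in these MvPercentile cases ("median double" is 5.0, and the "averaged" cases further down expect 7.5 for doubles and 7 for ints) follow from linear interpolation at index p/100 * (n - 1), mirroring the calculatePercentile helper defined at the end of this test class. A small sketch of that arithmetic, simplified to doubles:

```java
// Worked sketch of the interpolation behind the expected values; the real helper also has
// integer and long variants that truncate the interpolated fraction (hence 7 instead of 7.5).
import java.util.Arrays;

public class PercentileSketch {

    static double percentile(double[] values, double p) {
        double[] sorted = values.clone();
        Arrays.sort(sorted);
        double index = p / 100.0 * (sorted.length - 1);
        int lower = (int) index;
        double fraction = index - lower;
        if (lower + 1 >= sorted.length) {
            return sorted[sorted.length - 1];
        }
        return sorted[lower] + fraction * (sorted[lower + 1] - sorted[lower]);
    }

    public static void main(String[] args) {
        double[] field = { -10, 5, 10 };
        System.out.println(percentile(field, 50));  // 5.0  -> the "median double" expectation
        System.out.println(percentile(field, 75));  // 7.5  -> "averaged double"; the int variant truncates to 7
        System.out.println(percentile(field, 0));   // -10.0 -> "p0 double"
        System.out.println(percentile(field, 100)); // 10.0  -> "p100 double"
    }
}
```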
+ ) + ), + new TestCaseSupplier( + "p100 double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10., 5., 10.), DOUBLE, "field"), + percentileWithType(100, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(10.) + ) + ), + new TestCaseSupplier( + "averaged double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10., 5., 10.), DOUBLE, "field"), + percentileWithType(75, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(7.5) + ) + ), + new TestCaseSupplier( + "big double difference", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-Double.MAX_VALUE, Double.MAX_VALUE), DOUBLE, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + closeTo(0, 0.0000001) + ) + ), + + // Int + new TestCaseSupplier( + "median int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10, 5, 10), INTEGER, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(5) + ) + ), + new TestCaseSupplier( + "single value int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(55), INTEGER, "field"), + percentileWithType(randomIntBetween(0, 100), percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(55) + ) + ), + new TestCaseSupplier( + "p0 int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10, 5, 10), INTEGER, "field"), + percentileWithType(0, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(-10) + ) + ), + new TestCaseSupplier( + "p100 int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10, 5, 10), INTEGER, "field"), + percentileWithType(100, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(10) + ) + ), + new TestCaseSupplier( + "averaged int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10, 5, 10), INTEGER, "field"), + percentileWithType(75, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(7) + ) + ), + new TestCaseSupplier( + "big int difference", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(Integer.MIN_VALUE, Integer.MAX_VALUE), INTEGER, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(-1) // Negative max is 1 smaller than positive max + ) + ), + + // Long + new TestCaseSupplier( + "median long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10L, 5L, 10L), LONG, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(5L) + ) + ), + new TestCaseSupplier( + "single value long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new 
TestCaseSupplier.TypedData(List.of(55L), LONG, "field"), + percentileWithType(randomIntBetween(0, 100), percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(55L) + ) + ), + new TestCaseSupplier( + "p0 long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10L, 5L, 10L), LONG, "field"), + percentileWithType(0, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(-10L) + ) + ), + new TestCaseSupplier( + "p100 long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10L, 5L, 10L), LONG, "field"), + percentileWithType(100, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(10L) + ) + ), + new TestCaseSupplier( + "averaged long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10L, 5L, 10L), LONG, "field"), + percentileWithType(75, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(7L) + ) + ), + new TestCaseSupplier( + "big long difference", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(Long.MIN_VALUE, Long.MAX_VALUE), LONG, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(0L) + ) + ) + ) + ); + + for (var fieldType : List.of(INTEGER, LONG, DataType.DOUBLE)) { + cases.add( + new TestCaseSupplier( + "out of bounds percentile <" + fieldType + ", " + percentileType + ">", + List.of(fieldType, percentileType), + () -> { + var percentile = numberWithType( + randomBoolean() ? randomIntBetween(Integer.MIN_VALUE, -1) : randomIntBetween(101, Integer.MAX_VALUE), + percentileType + ); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(numberWithType(0, fieldType), fieldType, "field"), + new TestCaseSupplier.TypedData(percentile, percentileType, "percentile") + ), + evaluatorString(fieldType, percentileType), + fieldType, + nullValue() + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") + .withWarning( + "Line -1:-1: java.lang.IllegalArgumentException: Percentile parameter must be " + + "a number between 0 and 100, found [" + + percentile.doubleValue() + + "]" + ); + } + ) + ); + } + } + + return parameterSuppliersFromTypedDataWithDefaultChecks( + (nullPosition, nullValueDataType, original) -> nullValueDataType == DataType.NULL && nullPosition == 0 + ? 
DataType.NULL + : original.expectedType(), + (nullPosition, nullData, original) -> original, + cases, + (v, p) -> "numeric except unsigned_long" + ); + } + + @SuppressWarnings("unchecked") + private static TestCaseSupplier makeSupplier( + TestCaseSupplier.TypedDataSupplier fieldSupplier, + TestCaseSupplier.TypedDataSupplier percentileSupplier + ) { + return new TestCaseSupplier( + "field: " + fieldSupplier.name() + ", percentile: " + percentileSupplier.name(), + List.of(fieldSupplier.type(), percentileSupplier.type()), + () -> { + var fieldTypedData = fieldSupplier.get(); + var percentileTypedData = percentileSupplier.get(); + + var values = (List) fieldTypedData.data(); + var percentile = ((Number) percentileTypedData.data()).doubleValue(); + + var expected = calculatePercentile(values, percentile); + + return new TestCaseSupplier.TestCase( + List.of(fieldTypedData, percentileTypedData), + evaluatorString(fieldSupplier.type(), percentileSupplier.type()), + fieldSupplier.type(), + expected instanceof Double expectedDouble + ? closeTo(expectedDouble, Math.abs(expectedDouble * 0.0000001)) + : equalTo(expected) + ); + } + ); + } + + private static Number calculatePercentile(List rawValues, double percentile) { + if (rawValues.isEmpty() || percentile < 0 || percentile > 100) { + return null; + } + + if (rawValues.size() == 1) { + return rawValues.get(0); + } + + int valueCount = rawValues.size(); + var p = percentile / 100.0; + var index = p * (valueCount - 1); + var lowerIndex = (int) index; + var upperIndex = lowerIndex + 1; + var fraction = index - lowerIndex; + + if (rawValues.get(0) instanceof Integer) { + var values = rawValues.stream().mapToInt(Number::intValue).sorted().toArray(); + + if (percentile == 0) { + return values[0]; + } else if (percentile == 100) { + return values[valueCount - 1]; + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + var difference = (long) values[upperIndex] - values[lowerIndex]; + return values[lowerIndex] + (int) (fraction * difference); + } + } + + if (rawValues.get(0) instanceof Long) { + var values = rawValues.stream().mapToLong(Number::longValue).sorted().toArray(); + + if (percentile == 0) { + return values[0]; + } else if (percentile == 100) { + return values[valueCount - 1]; + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculatePercentile(fraction, new BigDecimal(values[lowerIndex]), new BigDecimal(values[upperIndex])).longValue(); + } + } + + if (rawValues.get(0) instanceof Double) { + var values = rawValues.stream().mapToDouble(Number::doubleValue).sorted().toArray(); + + if (percentile == 0) { + return values[0]; + } else if (percentile == 100) { + return values[valueCount - 1]; + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculatePercentile(fraction, new BigDecimal(values[lowerIndex]), new BigDecimal(values[upperIndex])).doubleValue(); + } + } + + throw new IllegalArgumentException("Unsupported type: " + rawValues.get(0).getClass()); + } + + private static BigDecimal calculatePercentile(double fraction, BigDecimal lowerValue, BigDecimal upperValue) { + return lowerValue.add(new BigDecimal(fraction).multiply(upperValue.subtract(lowerValue))); + } + + private static TestCaseSupplier.TypedData percentileWithType(Number value, DataType type) { + return new TestCaseSupplier.TypedData(numberWithType(value, type), type, "percentile"); + } + + private static Number numberWithType(Number value, DataType type) { + return switch (type) { + case INTEGER -> value.intValue(); + case LONG 
-> value.longValue(); + default -> value.doubleValue(); + }; + } + + private static String evaluatorString(DataType fieldDataType, DataType percentileDataType) { + var fieldTypeName = StringUtils.underscoreToLowerCamelCase(fieldDataType.name()); + + fieldTypeName = fieldTypeName.substring(0, 1).toUpperCase(Locale.ROOT) + fieldTypeName.substring(1); + + var percentileEvaluator = TestCaseSupplier.castToDoubleEvaluator("Attribute[channel=1]", percentileDataType); + + return "MvPercentile" + fieldTypeName + "Evaluator[values=Attribute[channel=0], percentile=" + percentileEvaluator + "]"; + } + + @Override + protected final Expression build(Source source, List args) { + return new MvPercentile(source, args.get(0), args.get(1)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java index 1ac61a2adf68e..e1b56d61a211c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java @@ -7,17 +7,26 @@ package org.elasticsearch.xpack.esql.index; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.EsFieldTests; +import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import java.io.IOException; import java.util.HashMap; import java.util.HashSet; +import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.elasticsearch.test.ByteSizeEqualsMatcher.byteSizeEquals; public class EsIndexSerializationTests extends AbstractWireSerializingTestCase { public static EsIndex randomEsIndex() { @@ -73,4 +82,97 @@ protected EsIndex mutateInstance(EsIndex instance) throws IOException { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry(EsField.getNamedWriteables()); } + + /** + * Build an {@link EsIndex} with many conflicting fields across many indices. + */ + public static EsIndex indexWithManyConflicts(boolean withParent) { + /* + * The number of fields with a mapping conflict. + */ + int conflictingCount = 250; + /* + * The number of indices that map conflicting fields are "keyword". + * One other index will map the field as "text" + */ + int keywordIndicesCount = 600; + /* + * The number of fields that don't have a mapping conflict. 
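(A brief aside on the MvPercentile test helpers above: the linear interpolation in calculatePercentile is what produces the otherwise surprising 7L expectation of the "averaged long" case. A minimal plain-Java restatement of that arithmetic follows; it is an illustrative sketch, not the production MvPercentile code, and the variable names are local to the example.)

    long[] values = { -10L, 5L, 10L };                 // already sorted, as in the "averaged long" case
    double p = 75 / 100.0;                             // percentile as a fraction
    double index = p * (values.length - 1);            // 1.5
    int lowerIndex = (int) index;                      // 1 -> values[1] = 5
    int upperIndex = lowerIndex + 1;                   // 2 -> values[2] = 10
    double fraction = index - lowerIndex;              // 0.5
    long result = values[lowerIndex] + (long) (fraction * (values[upperIndex] - values[lowerIndex]));
    // result == 7, matching equalTo(7L) in the "averaged long" test case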
+ */ + int nonConflictingCount = 7000; + + Set keywordIndices = new TreeSet<>(); + for (int i = 0; i < keywordIndicesCount; i++) { + keywordIndices.add(String.format(Locale.ROOT, ".ds-logs-apache.access-external-2024.08.09-%08d", i)); + } + + Set textIndices = Set.of("logs-endpoint.events.imported"); + + Map fields = new TreeMap<>(); + for (int i = 0; i < conflictingCount; i++) { + String name = String.format(Locale.ROOT, "blah.blah.blah.blah.blah.blah.conflict.name%04d", i); + Map> conflicts = Map.of("text", textIndices, "keyword", keywordIndices); + fields.put(name, new InvalidMappedField(name, conflicts)); + } + for (int i = 0; i < nonConflictingCount; i++) { + String name = String.format(Locale.ROOT, "blah.blah.blah.blah.blah.blah.nonconflict.name%04d", i); + fields.put(name, new EsField(name, DataType.KEYWORD, Map.of(), true)); + } + + if (withParent) { + EsField parent = new EsField("parent", DataType.OBJECT, Map.copyOf(fields), false); + fields.put("parent", parent); + } + + TreeSet concrete = new TreeSet<>(); + concrete.addAll(keywordIndices); + concrete.addAll(textIndices); + + return new EsIndex("name", fields, concrete); + } + + /** + * Test the size of serializing an index with many conflicts at the root level. + * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. + */ + public void testManyTypeConflicts() throws IOException { + testManyTypeConflicts(false, ByteSizeValue.ofBytes(976591)); + } + + /** + * Test the size of serializing an index with many conflicts inside a "parent" object. + * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. + */ + public void testManyTypeConflictsWithParent() throws IOException { + testManyTypeConflicts(true, ByteSizeValue.ofBytes(1921374)); + /* + * History: + * 16.9mb - start + * 1.8mb - shorten error messages for UnsupportedAttributes #111973 + */ + } + + /** + * Test the size of serializing an index with many conflicts. Callers of + * this method intentionally use a very precise size for the serialized + * data so a programmer making changes has to think when this size changes. + *

    + * In general, shrinking the over the wire size is great and the precise + * size should just ratchet downwards. Small upwards movement is fine so + * long as you understand why the change is happening and you think it's + * worth it for the data node request for a big index to grow. + *
    + * Large upwards movement in the size is not fine! Folks frequently make + * requests across large clusters with many fields and these requests can + * really clog up the network interface. Super large results here can make + * ESQL impossible to use at all for big mappings with many conflicts. + *
    + */ + private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + indexWithManyConflicts(withParent).writeTo(out); + assertThat(ByteSizeValue.ofBytes(out.bytes().length()), byteSizeEquals(expected)); + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index c6b12eb0dc23f..74f95e3defbd3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -127,6 +127,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.InlineStats; import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; @@ -3514,6 +3515,49 @@ public void testBucketWithAggExpression() { assertThat(agg.groupings().get(0), is(ref)); } + public void testBucketWithNonFoldingArgs() { + assertThat( + typesError("from types | stats max(integer) by bucket(date, integer, \"2000-01-01\", \"2000-01-02\")"), + containsString( + "second argument of [bucket(date, integer, \"2000-01-01\", \"2000-01-02\")] must be a constant, " + "received [integer]" + ) + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(date, 2, date, \"2000-01-02\")"), + containsString("third argument of [bucket(date, 2, date, \"2000-01-02\")] must be a constant, " + "received [date]") + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(date, 2, \"2000-01-02\", date)"), + containsString("fourth argument of [bucket(date, 2, \"2000-01-02\", date)] must be a constant, " + "received [date]") + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(integer, long, 4, 5)"), + containsString("second argument of [bucket(integer, long, 4, 5)] must be a constant, " + "received [long]") + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(integer, 3, long, 5)"), + containsString("third argument of [bucket(integer, 3, long, 5)] must be a constant, " + "received [long]") + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(integer, 3, 4, long)"), + containsString("fourth argument of [bucket(integer, 3, 4, long)] must be a constant, " + "received [long]") + ); + } + + private String typesError(String query) { + VerificationException e = expectThrows(VerificationException.class, () -> planTypes(query)); + String message = e.getMessage(); + assertTrue(message.startsWith("Found ")); + String pattern = "\nline "; + int index = message.indexOf(pattern); + return message.substring(index + pattern.length()); + } + /** * Expects * Project[[x{r}#5]] @@ -4499,6 +4543,31 @@ public void testReplaceSortByExpressionsWithStats() { as(aggregate.child(), EsRelation.class); } + /** + * Expects + * Limit[1000[INTEGER]] + * \_InlineStats[[emp_no % 2{r}#6],[COUNT(salary{f}#12) AS c, emp_no % 2{r}#6]] + * \_Eval[[emp_no{f}#7 % 2[INTEGER] AS emp_no % 2]] + * \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] 
+ */ + public void testInlinestatsNestedExpressionsInGroups() { + var plan = optimizedPlan(""" + FROM test + | INLINESTATS c = COUNT(salary) by emp_no % 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), InlineStats.class); + var groupings = agg.groupings(); + var aggs = agg.aggregates(); + var ref = as(groupings.get(0), ReferenceAttribute.class); + assertThat(aggs.get(1), is(ref)); + var eval = as(agg.child(), Eval.class); + assertThat(eval.fields(), hasSize(1)); + assertThat(eval.fields().get(0).toAttribute(), is(ref)); + assertThat(eval.fields().get(0).name(), is("emp_no % 2")); + } + /** * Expects * diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java index 9a0f1ba3efe1d..5e45de6c77c42 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java @@ -31,14 +31,14 @@ public class PhasedTests extends ESTestCase { public void testZeroLayers() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); - relation.setAnalyzed(); + relation.setOptimized(); assertThat(Phased.extractFirstPhase(relation), nullValue()); } public void testOneLayer() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); LogicalPlan orig = new Dummy(Source.synthetic("orig"), relation); - orig.setAnalyzed(); + orig.setOptimized(); assertThat(Phased.extractFirstPhase(orig), sameInstance(relation)); LogicalPlan finalPhase = Phased.applyResultsFromFirstPhase( orig, @@ -49,6 +49,7 @@ public void testOneLayer() { finalPhase, equalTo(new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD))))) ); + finalPhase.setOptimized(); assertThat(Phased.extractFirstPhase(finalPhase), nullValue()); } @@ -56,7 +57,7 @@ public void testTwoLayer() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); LogicalPlan inner = new Dummy(Source.synthetic("inner"), relation); LogicalPlan orig = new Dummy(Source.synthetic("outer"), inner); - orig.setAnalyzed(); + orig.setOptimized(); assertThat( "extractFirstPhase should call #firstPhase on the earliest child in the plan", Phased.extractFirstPhase(orig), @@ -67,6 +68,7 @@ public void testTwoLayer() { List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), List.of() ); + secondPhase.setOptimized(); assertThat( "applyResultsFromFirstPhase should call #nextPhase one th earliest child in the plan", secondPhase, @@ -84,6 +86,7 @@ public void testTwoLayer() { List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), List.of() ); + finalPhase.setOptimized(); assertThat( finalPhase, equalTo(new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD))))) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java new file mode 100644 index 0000000000000..237f8d6a9c580 --- /dev/null +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plan.physical; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.analysis.Analyzer; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.index.EsIndexSerializationTests; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.session.Configuration; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.test.ByteSizeEqualsMatcher.byteSizeEquals; +import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomConfiguration; +import static org.hamcrest.Matchers.equalTo; + +public class ExchangeSinkExecSerializationTests extends ESTestCase { + // TODO port this to AbstractPhysicalPlanSerializationTests when implementing NamedWriteable + private Configuration config; + + public static Source randomSource() { + int lineNumber = between(0, EXAMPLE_QUERY.length - 1); + String line = EXAMPLE_QUERY[lineNumber]; + int offset = between(0, line.length() - 2); + int length = between(1, line.length() - offset - 1); + String text = line.substring(offset, offset + length); + return new Source(lineNumber + 1, offset, text); + } + + /** + * Test the size of serializing a plan with many conflicts. + * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. + */ + public void testManyTypeConflicts() throws IOException { + testManyTypeConflicts(false, ByteSizeValue.ofBytes(2444252)); + } + + /** + * Test the size of serializing a plan with many conflicts. + * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. 
+ */ + public void testManyTypeConflictsWithParent() throws IOException { + testManyTypeConflicts(true, ByteSizeValue.ofBytes(5885765)); + /* + * History: + * 2 gb+ - start + * 43.3mb - Cache attribute subclasses #111447 + * 5.6mb - shorten error messages for UnsupportedAttributes #111973 + */ + } + + /** + * Test the size of serializing a plan with many conflicts. Callers of + * this method intentionally use a very precise size for the serialized + * data so a programmer making changes has to think when this size changes. + *

    + * In general, shrinking the over the wire size is great and the precise + * size should just ratchet downwards. Small upwards movement is fine so + * long as you understand why the change is happening and you think it's + * worth it for the data node request for a big index to grow. + *
    + * Large upwards movement in the size is not fine! Folks frequently make + * requests across large clusters with many fields and these requests can + * really clog up the network interface. Super large results here can make + * ESQL impossible to use at all for big mappings with many conflicts. + *
    + */ + private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) throws IOException { + EsIndex index = EsIndexSerializationTests.indexWithManyConflicts(withParent); + List attributes = Analyzer.mappingAsAttributes(randomSource(), index.mapping()); + EsRelation relation = new EsRelation(randomSource(), index, attributes, IndexMode.STANDARD); + Limit limit = new Limit(randomSource(), new Literal(randomSource(), 10, DataType.INTEGER), relation); + Project project = new Project(randomSource(), limit, limit.output()); + FragmentExec fragmentExec = new FragmentExec(project); + ExchangeSinkExec exchangeSinkExec = new ExchangeSinkExec(randomSource(), fragmentExec.output(), false, fragmentExec); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput pso = new PlanStreamOutput(out, new PlanNameRegistry(), configuration()) + ) { + pso.writePhysicalPlanNode(exchangeSinkExec); + assertThat(ByteSizeValue.ofBytes(out.bytes().length()), byteSizeEquals(expected)); + try ( + PlanStreamInput psi = new PlanStreamInput( + out.bytes().streamInput(), + new PlanNameRegistry(), + getNamedWriteableRegistry(), + configuration() + ) + ) { + assertThat(psi.readPhysicalPlanNode(), equalTo(exchangeSinkExec)); + } + } + } + + private NamedWriteableRegistry getNamedWriteableRegistry() { + List entries = new ArrayList<>(); + entries.addAll(PhysicalPlan.getNamedWriteables()); + entries.addAll(LogicalPlan.getNamedWriteables()); + entries.addAll(AggregateFunction.getNamedWriteables()); + entries.addAll(Expression.getNamedWriteables()); + entries.addAll(Attribute.getNamedWriteables()); + entries.addAll(EsField.getNamedWriteables()); + entries.addAll(Block.getNamedWriteables()); + entries.addAll(NamedExpression.getNamedWriteables()); + entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); + return new NamedWriteableRegistry(entries); + } + + private Configuration configuration() { + return config; + } + + private static final String[] EXAMPLE_QUERY = new String[] { + "I am the very model of a modern Major-Gineral,", + "I've information vegetable, animal, and mineral,", + "I know the kings of England, and I quote the fights historical", + "From Marathon to Waterloo, in order categorical;", + "I'm very well acquainted, too, with matters mathematical,", + "I understand equations, both the simple and quadratical,", + "About binomial theorem I'm teeming with a lot o' news,", + "With many cheerful facts about the square of the hypotenuse." 
}; + + @Before + public void initConfig() { + config = randomConfiguration(String.join("\n", EXAMPLE_QUERY), Map.of()); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java index c93f3b9e0e350..26529a3605d38 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverSleeps; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskCancellationService; @@ -92,7 +93,17 @@ private ComputeResponse randomResponse() { int numProfiles = randomIntBetween(0, 2); List profiles = new ArrayList<>(numProfiles); for (int i = 0; i < numProfiles; i++) { - profiles.add(new DriverProfile(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), List.of())); + profiles.add( + new DriverProfile( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + List.of(), + DriverSleeps.empty() + ) + ); } return new ComputeResponse(profiles); } diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index 3530f33704beb..30d1d6f7c914b 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -496,7 +496,7 @@ public void testPollIntervalUpdate() throws Exception { assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); }); { - TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().schedule(); assertThat(schedule.getInterval(), equalTo(pollInterval)); } @@ -504,7 +504,7 @@ public void testPollIntervalUpdate() throws Exception { TimeValue newPollInterval = TimeValue.timeValueHours(randomLongBetween(6, 1000)); updateClusterSettings(Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, newPollInterval.getStringRep())); { - TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().schedule(); assertThat(schedule.getInterval(), equalTo(newPollInterval)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java index c2e2c80998992..9c978ffc25cba 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java @@ -353,8 +353,8 @@ private void cancelJob() { @Override public void 
triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(XPackField.INDEX_LIFECYCLE)) { - logger.trace("job triggered: " + event.getJobName() + ", " + event.getScheduledTime() + ", " + event.getTriggeredTime()); + if (event.jobName().equals(XPackField.INDEX_LIFECYCLE)) { + logger.trace("job triggered: " + event.jobName() + ", " + event.scheduledTime() + ", " + event.triggeredTime()); triggerPolicies(clusterService.state(), false); } } diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java index 5157683f2dce9..d776f3963c2ca 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.services.elser.ElserInternalModel; @@ -101,6 +102,7 @@ public void testStoreModelWithUnknownFields() throws Exception { } public void testGetModel() throws Exception { + assumeTrue("Only if 'inference_adaptive_allocations' feature flag is enabled", AdaptiveAllocationsFeatureFlag.isEnabled()); String inferenceEntityId = "test-get-model"; Model model = buildElserModelConfig(inferenceEntityId, TaskType.SPARSE_EMBEDDING); AtomicReference putModelHolder = new AtomicReference<>(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 4cc7f5b502ba9..12a32ecdc6d4f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -9,6 +9,7 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import java.util.Set; @@ -20,7 +21,10 @@ public class InferenceFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED); + return Set.of( + TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED, + RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED + ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 9e067654ec4f8..dff93a63d0647 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -63,6 +63,8 @@ import 
org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; +import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder; +import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import org.elasticsearch.xpack.inference.registry.ModelRegistry; @@ -245,6 +247,7 @@ public List getInferenceServiceFactories() { public List getNamedWriteables() { var entries = new ArrayList<>(InferenceNamedWriteablesProvider.getNamedWriteables()); entries.add(new NamedWriteableRegistry.Entry(RankBuilder.class, TextSimilarityRankBuilder.NAME, TextSimilarityRankBuilder::new)); + entries.add(new NamedWriteableRegistry.Entry(RankBuilder.class, RandomRankBuilder.NAME, RandomRankBuilder::new)); return entries; } @@ -338,7 +341,8 @@ public List> getQueries() { @Override public List> getRetrievers() { return List.of( - new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent) + new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent), + new RetrieverSpec<>(new ParseField(RandomRankBuilder.NAME), RandomRankRetrieverBuilder::fromXContent) ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java index a08acab655936..0826d990a80a5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.external.amazonbedrock; -import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -45,7 +44,6 @@ public AmazonBedrockExecuteOnlyRequestSender(AmazonBedrockClientCache clientCach public void send( Logger logger, Request request, - HttpClientContext context, Supplier hasRequestTimedOutFunction, ResponseHandler responseHandler, ActionListener listener diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java index 8244e5ad29e95..8e55b0988de6a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.external.http.retry; -import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; @@ -19,7 +18,6 @@ public interface RequestSender { void send( Logger 
logger, Request request, - HttpClientContext context, Supplier hasRequestTimedOutFunction, ResponseHandler responseHandler, ActionListener listener diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java index dd45501564e4e..263bdea5ce368 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java @@ -189,12 +189,18 @@ public boolean shouldRetry(Exception e) { public void send( Logger logger, Request request, - HttpClientContext context, Supplier hasRequestTimedOutFunction, ResponseHandler responseHandler, ActionListener listener ) { - InternalRetrier retrier = new InternalRetrier(logger, request, context, hasRequestTimedOutFunction, responseHandler, listener); + var retrier = new InternalRetrier( + logger, + request, + HttpClientContext.create(), + hasRequestTimedOutFunction, + responseHandler, + listener + ); retrier.run(); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java index 8642a19b26a7d..1c6bb58717942 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -52,7 +51,7 @@ public void execute( var responseHandler = new AmazonBedrockChatCompletionResponseHandler(); try { - requestSender.send(logger, request, HttpClientContext.create(), hasRequestCompletedFunction, responseHandler, listener); + requestSender.send(logger, request, hasRequestCompletedFunction, responseHandler, listener); } catch (Exception e) { var errorMessage = Strings.format( "Failed to send [completion] request from inference entity id [%s]", diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java index 2f94cdf342938..34aacbf67af6f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -61,7 +60,7 @@ public void execute( var responseHandler = new 
AmazonBedrockEmbeddingsResponseHandler(); var request = new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, embeddingsModel, requestEntity, timeout); try { - requestSender.send(logger, request, HttpClientContext.create(), hasRequestCompletedFunction, responseHandler, listener); + requestSender.send(logger, request, hasRequestCompletedFunction, responseHandler, listener); } catch (Exception e) { var errorMessage = Strings.format( "Failed to send [text_embedding] request from inference entity id [%s]", diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java index 214eba4ee3485..241466422e47b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; @@ -33,7 +32,7 @@ public void run() { var inferenceEntityId = request.createHttpRequest().inferenceEntityId(); try { - requestSender.send(logger, request, HttpClientContext.create(), hasFinished, responseHandler, listener); + requestSender.send(logger, request, hasFinished, responseHandler, listener); } catch (Exception e) { var errorMessage = Strings.format("Failed to send request from inference entity id [%s]", inferenceEntityId); logger.warn(errorMessage, e); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index a8c3de84572a7..71906a720e969 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -13,7 +13,6 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -61,6 +60,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.function.Function; @@ -475,7 +475,7 @@ private static ObjectMapper createInferenceField( @Nullable SemanticTextField.ModelSettings modelSettings, Function bitSetProducer ) { - return new ObjectMapper.Builder(INFERENCE_FIELD, Explicit.EXPLICIT_TRUE).dynamic(ObjectMapper.Dynamic.FALSE) + return new ObjectMapper.Builder(INFERENCE_FIELD, Optional.of(ObjectMapper.Subobjects.ENABLED)).dynamic(ObjectMapper.Dynamic.FALSE) .add(createChunksField(indexVersionCreated, modelSettings, bitSetProducer)) .build(context); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java new file mode 100644 index 0000000000000..fdb5503e491eb --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.search.rank.rerank.RerankingQueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.rerank.RerankingQueryPhaseRankShardContext; +import org.elasticsearch.search.rank.rerank.RerankingRankFeaturePhaseRankShardContext; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder.FIELD_FIELD; +import static org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder.SEED_FIELD; + +/** + * A {@code RankBuilder} that performs reranking with random scores, used for testing. + */ +public class RandomRankBuilder extends RankBuilder { + + public static final String NAME = "random_reranker"; + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { + Integer rankWindowSize = args[0] == null ? 
DEFAULT_RANK_WINDOW_SIZE : (Integer) args[0]; + String field = (String) args[1]; + Integer seed = (Integer) args[2]; + + return new RandomRankBuilder(rankWindowSize, field, seed); + }); + + static { + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareInt(optionalConstructorArg(), SEED_FIELD); + } + + private final String field; + private final Integer seed; + + public RandomRankBuilder(int rankWindowSize, String field, Integer seed) { + super(rankWindowSize); + + if (field == null || field.isEmpty()) { + throw new IllegalArgumentException("field is required"); + } + + this.field = field; + this.seed = seed; + } + + public RandomRankBuilder(StreamInput in) throws IOException { + super(in); + // rankWindowSize deserialization is handled by the parent class RankBuilder + this.field = in.readString(); + this.seed = in.readOptionalInt(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.RANDOM_RERANKER_RETRIEVER; + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + // rankWindowSize serialization is handled by the parent class RankBuilder + out.writeString(field); + out.writeOptionalInt(seed); + } + + @Override + public void doXContent(XContentBuilder builder, Params params) throws IOException { + // rankWindowSize serialization is handled by the parent class RankBuilder + builder.field(FIELD_FIELD.getPreferredName(), field); + if (seed != null) { + builder.field(SEED_FIELD.getPreferredName(), seed); + } + } + + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public Explanation explainHit(Explanation baseExplanation, RankDoc scoreDoc, List queryNames) { + if (scoreDoc == null) { + return baseExplanation; + } + if (false == baseExplanation.isMatch()) { + return baseExplanation; + } + + assert scoreDoc instanceof RankFeatureDoc : "ScoreDoc is not an instance of RankFeatureDoc"; + RankFeatureDoc rankFeatureDoc = (RankFeatureDoc) scoreDoc; + + return Explanation.match( + rankFeatureDoc.score, + "rank after reranking: [" + rankFeatureDoc.rank + "] using seed [" + seed + "] with score: [" + rankFeatureDoc.score + "]", + baseExplanation + ); + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new RerankingQueryPhaseRankShardContext(queries, rankWindowSize()); + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new RerankingQueryPhaseRankCoordinatorContext(rankWindowSize()); + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RerankingRankFeaturePhaseRankShardContext(field); + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext(int size, int from, Client client) { + return new RandomRankFeaturePhaseRankCoordinatorContext(size, from, rankWindowSize(), seed); + } + + public String field() { + return field; + } + + @Override + protected boolean doEquals(RankBuilder other) { + RandomRankBuilder that = (RandomRankBuilder) other; + return Objects.equals(field, that.field) && Objects.equals(seed, that.seed); + } + + @Override + protected int doHashCode() { + return Objects.hash(field, seed); + } +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java new file mode 100644 index 0000000000000..446d8e5862dd2 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Random; + +/** + * A {@code RankFeaturePhaseRankCoordinatorContext} that performs a rerank inference call to determine relevance scores for documents within + * the provided rank window. + */ +public class RandomRankFeaturePhaseRankCoordinatorContext extends RankFeaturePhaseRankCoordinatorContext { + + private final Integer seed; + + public RandomRankFeaturePhaseRankCoordinatorContext(int size, int from, int rankWindowSize, Integer seed) { + super(size, from, rankWindowSize); + this.seed = seed; + } + + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { + // Generate random scores seeded by doc + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + RankFeatureDoc featureDoc = featureDocs[i]; + int doc = featureDoc.doc; + long docSeed = seed != null ? seed + doc : doc; + scores[i] = new Random(docSeed).nextFloat(); + } + scoreListener.onResponse(scores); + } + + /** + * Sorts documents by score descending. + * @param originalDocs documents to process + */ + @Override + protected RankFeatureDoc[] preprocess(RankFeatureDoc[] originalDocs) { + return Arrays.stream(originalDocs) + .sorted(Comparator.comparing((RankFeatureDoc doc) -> doc.score).reversed()) + .toArray(RankFeatureDoc[]::new); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java new file mode 100644 index 0000000000000..ab8c85cac00e3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
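(A brief aside on RandomRankFeaturePhaseRankCoordinatorContext.computeScores above: each document's score comes from a Random seeded with seed + doc, so the reranker produces random-looking but fully deterministic scores for a given seed, which is what makes it usable in tests. A small plain-Java sketch of that behaviour, not the production class:)

    int doc = 42;                                      // hypothetical Lucene doc id
    Integer seed = 7;                                  // the optional seed from the request
    long docSeed = seed != null ? seed + doc : doc;    // 49
    float score = new java.util.Random(docSeed).nextFloat();
    // the same (seed, doc) pair always yields the same score, so assertions are reproducible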
+ */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A {@code RetrieverBuilder} for parsing and constructing a text similarity reranker retriever. + */ +public class RandomRankRetrieverBuilder extends RetrieverBuilder { + + public static final NodeFeature RANDOM_RERANKER_RETRIEVER_SUPPORTED = new NodeFeature("random_reranker_retriever_supported"); + + public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); + public static final ParseField FIELD_FIELD = new ParseField("field"); + public static final ParseField RANK_WINDOW_SIZE_FIELD = new ParseField("rank_window_size"); + public static final ParseField SEED_FIELD = new ParseField("seed"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(RandomRankBuilder.NAME, args -> { + RetrieverBuilder retrieverBuilder = (RetrieverBuilder) args[0]; + String field = (String) args[1]; + int rankWindowSize = args[2] == null ? DEFAULT_RANK_WINDOW_SIZE : (int) args[2]; + Integer seed = (Integer) args[3]; + + return new RandomRankRetrieverBuilder(retrieverBuilder, field, rankWindowSize, seed); + }); + + static { + PARSER.declareNamedObject(constructorArg(), (p, c, n) -> p.namedObject(RetrieverBuilder.class, n, c), RETRIEVER_FIELD); + PARSER.declareString(optionalConstructorArg(), FIELD_FIELD); + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareInt(optionalConstructorArg(), SEED_FIELD); + + RetrieverBuilder.declareBaseParserFields(RandomRankBuilder.NAME, PARSER); + } + + public static RandomRankRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + if (context.clusterSupportsFeature(RANDOM_RERANKER_RETRIEVER_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + RandomRankBuilder.NAME + "]"); + } + return PARSER.apply(parser, context); + } + + private final RetrieverBuilder retrieverBuilder; + private final String field; + private final int rankWindowSize; + private final Integer seed; + + public RandomRankRetrieverBuilder(RetrieverBuilder retrieverBuilder, String field, int rankWindowSize, Integer seed) { + this.retrieverBuilder = retrieverBuilder; + this.field = field; + this.rankWindowSize = rankWindowSize; + this.seed = seed; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed); + + // Combining with other rank builder (such as RRF) is not supported + if (searchSourceBuilder.rankBuilder() != null) { + throw new IllegalArgumentException("random rank builder cannot be combined with 
other rank builders"); + } + + searchSourceBuilder.rankBuilder(new RandomRankBuilder(this.rankWindowSize, this.field, this.seed)); + } + + @Override + public String getName() { + return RandomRankBuilder.NAME; + } + + public int rankWindowSize() { + return rankWindowSize; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RETRIEVER_FIELD.getPreferredName()); + builder.startObject(); + builder.field(retrieverBuilder.getName(), retrieverBuilder); + builder.endObject(); + builder.field(FIELD_FIELD.getPreferredName(), field); + builder.field(RANK_WINDOW_SIZE_FIELD.getPreferredName(), rankWindowSize); + if (seed != null) { + builder.field(SEED_FIELD.getPreferredName(), seed); + } + } + + @Override + protected boolean doEquals(Object other) { + RandomRankRetrieverBuilder that = (RandomRankRetrieverBuilder) other; + return Objects.equals(retrieverBuilder, that.retrieverBuilder) + && Objects.equals(field, that.field) + && Objects.equals(rankWindowSize, that.rankWindowSize) + && Objects.equals(seed, that.seed); + } + + @Override + protected int doHashCode() { + return Objects.hash(retrieverBuilder, field, rankWindowSize, seed); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java index 42413c35fcbff..cad11cbdc9d5b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java @@ -62,6 +62,7 @@ protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener rankedDocs = ((RankedDocsResults) results).getRankedDocs(); + if (rankedDocs.size() != featureDocs.length) { l.onFailure( new IllegalStateException( @@ -104,12 +105,18 @@ protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); - InferenceAction.Request inferenceRequest = generateRequest(featureData); - try { - client.execute(InferenceAction.INSTANCE, inferenceRequest, inferenceListener); - } finally { - inferenceRequest.decRef(); + + // Short circuit on empty results after request validation + if (featureDocs.length == 0) { + inferenceListener.onResponse(new InferenceAction.Response(new RankedDocsResults(List.of()))); + } else { + List featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); + InferenceAction.Request inferenceRequest = generateRequest(featureData); + try { + client.execute(InferenceAction.INSTANCE, inferenceRequest, inferenceListener); + } finally { + inferenceRequest.decRef(); + } } }); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java index f4bbcbebf0340..f5c30d0a94c54 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.rest.RestRequest; import 
org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import java.io.IOException; @@ -59,6 +59,6 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient InferenceAction.Request.DEFAULT_TIMEOUT ); requestBuilder.setInferenceTimeout(inferTimeout); - return channel -> client.execute(InferenceAction.INSTANCE, requestBuilder.build(), new RestToXContentListener<>(channel)); + return channel -> client.execute(InferenceAction.INSTANCE, requestBuilder.build(), new RestChunkedToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java index 1acf19c5373b7..8de791325a6df 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java @@ -16,6 +16,7 @@ import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -87,12 +88,18 @@ protected static ElasticsearchInternalServiceSettings.Builder fromMap( String modelId = extractOptionalString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); if (numAllocations == null && adaptiveAllocationsSettings == null) { - validationException.addValidationError( - ServiceUtils.missingOneOfSettingsErrorMsg( - List.of(NUM_ALLOCATIONS, ADAPTIVE_ALLOCATIONS), - ModelConfigurations.SERVICE_SETTINGS - ) - ); + if (AdaptiveAllocationsFeatureFlag.isEnabled()) { + validationException.addValidationError( + ServiceUtils.missingOneOfSettingsErrorMsg( + List.of(NUM_ALLOCATIONS, ADAPTIVE_ALLOCATIONS), + ModelConfigurations.SERVICE_SETTINGS + ) + ); + } else { + validationException.addValidationError( + ServiceUtils.missingSettingErrorMsg(NUM_ALLOCATIONS, ModelConfigurations.SERVICE_SETTINGS) + ); + } } // if an error occurred while parsing, we'll set these to an invalid value, so we don't accidentally get a diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java index c2842a1278a49..f70ab43908827 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java @@ -10,7 +10,6 @@ import org.apache.http.ConnectionClosedException; import org.apache.http.HttpResponse; import org.apache.http.StatusLine; -import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; @@ -80,7 +79,7 @@ public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() t var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); verify(httpClient, times(2)).send(any(), any(), any()); @@ -111,7 +110,7 @@ public void testSend_CallsSenderAgain_WhenAFailureStatusCodeIsReturned() throws var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); verify(httpClient, times(2)).send(any(), any(), any()); @@ -139,7 +138,7 @@ public void testSend_CallsSenderAgain_WhenParsingFailsOnce() throws IOException var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); verify(httpClient, times(2)).send(any(), any(), any()); @@ -167,7 +166,7 @@ public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableExce var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 0); var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed")); @@ -202,7 +201,7 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce() var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); verify(httpClient, times(2)).send(any(), any(), any()); @@ -235,7 +234,7 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce_W var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); verify(httpClient, times(2)).send(any(), any(), any()); @@ -268,7 +267,7 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWi var retrier = createRetrier(httpClient); var listener = new 
PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); verify(httpClient, times(2)).send(any(), any(), any()); @@ -295,7 +294,7 @@ public void testSend_ReturnsFailure_WhenHttpResultListenerCallsOnFailureOnceWith var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 0); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("Invalid host [null], please check that the URL is correct.")); @@ -317,10 +316,7 @@ public void testSend_ReturnsElasticsearchExceptionFailure_WhenTheHttpClientThrow var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks( - () -> retrier.send(mock(Logger.class), mockRequest("id"), HttpClientContext.create(), () -> false, handler, listener), - 0 - ); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest("id"), () -> false, handler, listener), 0); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("Http client failed to send request from inference entity id [id]")); @@ -354,7 +350,7 @@ public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterO var retrier = createRetrier(sender); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed again")); @@ -391,7 +387,7 @@ public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchExc var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed again")); @@ -423,7 +419,7 @@ public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_AfterO var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1); var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed again")); @@ -449,7 +445,7 @@ public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_WithNo var retrier = createRetrier(httpClient); var 
listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 0); var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed")); @@ -484,7 +480,7 @@ public void testSend_DoesNotRetryIndefinitely() throws IOException { ); var listener = new PlainActionFuture(); - retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener); + retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener); // Assert that the retrying sender stopped after max retires even though the exception is retryable var thrownException = expectThrows(UncategorizedExecutionException.class, () -> listener.actionGet(TIMEOUT)); @@ -524,7 +520,7 @@ public void testSend_DoesNotRetryIndefinitely_WithAlwaysRetryingResponseHandler( ); var listener = new PlainActionFuture(); - retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener); + retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener); // Assert that the retrying sender stopped after max retires var thrownException = expectThrows(UncategorizedExecutionException.class, () -> listener.actionGet(TIMEOUT)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java index 762a3a74184a4..e09e4968571e5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java @@ -123,7 +123,7 @@ public void testIsTerminated_AfterStopFromSeparateThread() { waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any()); var service = createRequestExecutorService(null, requestSender); @@ -203,7 +203,7 @@ public void testTaskThrowsError_CallsOnFailure() { doAnswer(invocation -> { service.shutdown(); throw new IllegalArgumentException("failed"); - }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any()); PlainActionFuture listener = new PlainActionFuture<>(); @@ -270,13 +270,13 @@ public void testExecute_PreservesThreadContext() throws InterruptedException, Ex assertNull(serviceThreadContext.getHeader(headerKey)); @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[5]; + ActionListener listener = invocation.getArgument(4, ActionListener.class); listener.onResponse(null); waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any()); var finishedOnResponse = new CountDownLatch(1); ActionListener listener = new ActionListener<>() { @@ -422,7 +422,7 @@ public 
void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -467,7 +467,7 @@ public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull( waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -528,7 +528,7 @@ public void testChangingCapacity_ToZero_SetsQueueCapacityToUnbounded() throws IO waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -598,11 +598,11 @@ public void testDoesNotExecuteTask_WhenCannotReserveTokens_AndThenCanReserve_And doAnswer(invocation -> { service.shutdown(); return Void.TYPE; - }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any()); service.start(); - verify(requestSender, times(1)).send(any(), any(), any(), any(), any(), any()); + verify(requestSender, times(1)).send(any(), any(), any(), any(), any()); } public void testRemovesRateLimitGroup_AfterStaleDuration() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java index 8b7c01ae133cf..d8a1f2c4227e4 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; @@ -47,7 +46,6 @@ public static RequestManager createMock(RequestSender requestSender, String infe requestSender.send( mock(Logger.class), RequestTests.mockRequest(inferenceEntityId), - HttpClientContext.create(), () -> false, mock(ResponseHandler.class), listener diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java index c3c416d8fe65e..e350a539ba928 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java @@ -310,7 +310,7 @@ public void testFails_ResponseIsInvalidJson_MissingSquareBracket() { ) ); - assertThat(thrownException.getMessage(), containsString("expected close marker for Array (start marker at [Source: (byte[])")); + assertThat(thrownException.getMessage(), containsString("expected close marker for Array (start marker at")); } public void testFails_ResponseIsInvalidJson_MissingField() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java new file mode 100644 index 0000000000000..c464dbaea47cd --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; + +public class RandomRankBuilderTests extends AbstractXContentSerializingTestCase { + + @Override + protected RandomRankBuilder createTestInstance() { + return new RandomRankBuilder(randomIntBetween(1, 1000), "my-field", randomBoolean() ? randomIntBetween(1, 1000) : null); + } + + @Override + protected RandomRankBuilder mutateInstance(RandomRankBuilder instance) throws IOException { + String field = instance.field() + randomAlphaOfLength(2); + int rankWindowSize = randomValueOtherThan(instance.rankWindowSize(), this::randomRankWindowSize); + Integer seed = randomBoolean() ? 
randomIntBetween(1, 1000) : null; + return new RandomRankBuilder(rankWindowSize, field, seed); + } + + @Override + protected Writeable.Reader instanceReader() { + return RandomRankBuilder::new; + } + + @Override + protected RandomRankBuilder doParseInstance(XContentParser parser) throws IOException { + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.START_OBJECT); + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.FIELD_NAME); + assertEquals(parser.currentName(), RandomRankBuilder.NAME); + RandomRankBuilder builder = RandomRankBuilder.PARSER.parse(parser, null); + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.END_OBJECT); + parser.nextToken(); + assertNull(parser.currentToken()); + return builder; + } + + private int randomRankWindowSize() { + return randomIntBetween(0, 1000); + } + + public void testParserDefaults() throws IOException { + String json = """ + { + "field": "my-field" + }"""; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + RandomRankBuilder parsed = RandomRankBuilder.PARSER.parse(parser, null); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java new file mode 100644 index 0000000000000..c33f30d461350 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.search.retriever.TestRetrieverBuilder; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; + +public class RandomRankRetrieverBuilderTests extends AbstractXContentTestCase { + + /** + * Creates a random {@link RandomRankRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. + */ + public static RandomRankRetrieverBuilder createRandomRankRetrieverBuilder() { + return new RandomRankRetrieverBuilder( + TestRetrieverBuilder.createRandomTestRetrieverBuilder(), + randomAlphaOfLength(10), + randomIntBetween(1, 10000), + randomBoolean() ? 
randomIntBetween(1, 1000) : null + ); + } + + @Override + protected RandomRankRetrieverBuilder createTestInstance() { + return createRandomRankRetrieverBuilder(); + } + + @Override + protected RandomRankRetrieverBuilder doParseInstance(XContentParser parser) { + return RandomRankRetrieverBuilder.PARSER.apply( + parser, + new RetrieverParserContext( + new SearchUsage(), + nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED + ) + ); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + TestRetrieverBuilder.TEST_SPEC.getName(), + (p, c) -> TestRetrieverBuilder.TEST_SPEC.getParser().fromXContent(p, (RetrieverParserContext) c), + TestRetrieverBuilder.TEST_SPEC.getName().getForRestApiVersion() + ) + ); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + new ParseField(TextSimilarityRankBuilder.NAME), + (p, c) -> TextSimilarityRankRetrieverBuilder.PARSER.apply(p, (RetrieverParserContext) c) + ) + ); + return new NamedXContentRegistry(entries); + } + + public void testParserDefaults() throws IOException { + String json = """ + { + "retriever": { + "test": { + "value": "my-test-retriever" + } + }, + "field": "my-field" + }"""; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + RandomRankRetrieverBuilder parsed = RandomRankRetrieverBuilder.PARSER.parse(parser, null); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java index 2e9be42b5c5d4..d6c476cdc15d6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java @@ -61,4 +61,23 @@ public void onFailure(Exception e) { ); } + public void testComputeScoresForEmpty() { + subject.computeScores(new RankFeatureDoc[0], new ActionListener<>() { + @Override + public void onResponse(float[] floats) { + assertArrayEquals(new float[0], floats, 0.0f); + } + + @Override + public void onFailure(Exception e) { + fail(); + } + }); + verify(mockClient).execute( + eq(GetInferenceModelAction.INSTANCE), + argThat(actionRequest -> ((GetInferenceModelAction.Request) actionRequest).getTaskType().equals(TaskType.RERANK)), + any() + ); + } + } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml index 6d3c1231440fb..530be2341c9c8 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml @@ -38,8 +38,8 @@ setup: id: doc_1 body: text: "As seen 
from Earth, a solar eclipse happens when the Moon is directly between the Earth and the Sun." - topic: ["science"] - subtopic: ["technology"] + topic: [ "science" ] + subtopic: [ "technology" ] refresh: true - do: @@ -48,8 +48,8 @@ setup: id: doc_2 body: text: "The phases of the Moon come from the position of the Moon relative to the Earth and Sun." - topic: ["science"] - subtopic: ["astronomy"] + topic: [ "science" ] + subtopic: [ "astronomy" ] refresh: true - do: @@ -58,7 +58,7 @@ setup: id: doc_3 body: text: "Sun Moon Lake is a lake in Nantou County, Taiwan. It is the largest lake in Taiwan." - topic: ["geography"] + topic: [ "geography" ] refresh: true --- "Simple text similarity rank retriever": @@ -82,7 +82,7 @@ setup: field: text size: 10 - - match: { hits.total.value : 2 } + - match: { hits.total.value: 2 } - length: { hits.hits: 2 } - match: { hits.hits.0._id: "doc_2" } @@ -118,9 +118,62 @@ setup: field: text size: 10 - - match: { hits.total.value : 1 } + - match: { hits.total.value: 1 } - length: { hits.hits: 1 } - match: { hits.hits.0._id: "doc_1" } - match: { hits.hits.0._rank: 1 } - close_to: { hits.hits.0._score: { value: 0.2, error: 0.001 } } + + +--- +"Text similarity reranking fails if the inference ID does not exist": + - do: + catch: /Inference endpoint not found/ + search: + index: test-index + body: + track_total_hits: true + fields: [ "text", "topic" ] + retriever: + text_similarity_reranker: + retriever: + standard: + query: + term: + topic: "science" + filter: + term: + subtopic: "technology" + rank_window_size: 10 + inference_id: i-dont-exist + inference_text: "How often does the moon hide the sun?" + field: text + size: 10 + +--- +"Text similarity reranking fails if the inference ID does not exist and result set is empty": + - requires: + cluster_features: "gte_v8.15.1" + reason: bug fixed in 8.15.1 + + - do: + catch: /Inference endpoint not found/ + search: + index: test-index + body: + track_total_hits: true + fields: [ "text", "topic" ] + retriever: + text_similarity_reranker: + retriever: + standard: + query: + term: + topic: "asdfasdf" + rank_window_size: 10 + inference_id: i-dont-exist + inference_text: "asdfasdf" + field: text + size: 10 + diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml new file mode 100644 index 0000000000000..d33f57f763db8 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml @@ -0,0 +1,94 @@ +setup: + - requires: + cluster_features: "gte_v8.16.0" + reason: random rerank retriever introduced in 8.16.0 + test_runner_features: "close_to" + + - do: + indices.create: + index: test-index + body: + mappings: + properties: + text: + type: text + topic: + type: keyword + subtopic: + type: keyword + + - do: + bulk: + refresh: true + index: test-index + body: | + {"index": { "_id": "doc_1" } } + { "text": "Pugs are proof that even nature has a sense of humor." } + {"index": { "_id": "doc_2" } } + { "text": "A pugs snore can rival a chainsaw, but it's somehow adorable." } + {"index": { "_id": "doc_3" } } + { "text": "Pugs are like potato chips; you can't have just one wrinkle." } + {"index": { "_id": "doc_4" } } + { "text": "Pugs don't walk; pugs waddle majestically." } + {"index": { "_id": "doc_5" } } + { "text": "A pugs life goal: be the ultimate couch potato, and they're crushing it." 
} +--- +"Random rerank retriever predictably shuffles results": + + - do: + search: + index: test-index + body: + query: + query_string: + query: "pugs" + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_4" } + - close_to: { hits.hits.0._score: { value: 0.136, error: 0.001 } } + + - do: + search: + index: test-index + body: + retriever: + random_reranker: + retriever: + standard: + query: + query_string: + query: "pugs" + field: text + seed: 42 + rank_window_size: 10 + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_1" } + - close_to: { hits.hits.0._score: { value: 0.727, error: 0.001 } } + + - do: + search: + index: test-index + body: + retriever: + random_reranker: + retriever: + standard: + query: + query_string: + query: "pugs" + field: text + rank_window_size: 10 + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_3" } + - close_to: { hits.hits.0._score: { value: 0.731, error: 0.001 } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java index 0f8024dd7207a..528883439ef2f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java @@ -26,7 +26,7 @@ default void configure(Settings settings) {} boolean isNlpEnabled(); default boolean isLearningToRankEnabled() { - return false; + return true; } default boolean disableInferenceProcessCache() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java index d2179a69ebc24..46edcf1f63c01 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; @@ -303,8 +304,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - // TODO: update transport version when released! 
- return TransportVersion.current(); + return TransportVersions.LTR_SERVERLESS_RELEASE; } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index f10df86cc23ae..9232d32e40a97 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -136,7 +137,8 @@ public void setUpVariables() { Collections.singletonList(SKINNY_INGEST_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java index 3b361748abf67..7d8a474453c4c 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java @@ -53,10 +53,11 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry { // version 10: changed mapping profiling-events @timestamp to 'date_nanos' from 'date' // version 11: Added 'profiling.agent.protocol' keyword mapping to profiling-hosts // version 12: Added 'profiling.agent.env_https_proxy' keyword mapping to profiling-hosts - public static final int INDEX_TEMPLATE_VERSION = 12; + // version 13: Added 'container.id' keyword mapping to profiling-events + public static final int INDEX_TEMPLATE_VERSION = 13; // history for individual indices / index templates. Only bump these for breaking changes that require to create a new index - public static final int PROFILING_EVENTS_VERSION = 4; + public static final int PROFILING_EVENTS_VERSION = 5; public static final int PROFILING_EXECUTABLES_VERSION = 1; public static final int PROFILING_METRICS_VERSION = 2; public static final int PROFILING_HOSTS_VERSION = 2; diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java index e891e575e7de3..10aff2f4d68cd 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java @@ -46,9 +46,6 @@ public class RRFRankBuilder extends RankBuilder { static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(RRFRankPlugin.NAME, args -> { int windowSize = args[0] == null ? DEFAULT_RANK_WINDOW_SIZE : (int) args[0]; int rankConstant = args[1] == null ? 
DEFAULT_RANK_CONSTANT : (int) args[1]; - if (rankConstant < 1) { - throw new IllegalArgumentException("[rank_constant] must be greater than [0] for [rrf]"); - } return new RRFRankBuilder(windowSize, rankConstant); }); @@ -73,6 +70,11 @@ public void doXContent(XContentBuilder builder, Params params) throws IOExceptio public RRFRankBuilder(int rankWindowSize, int rankConstant) { super(rankWindowSize); + + if (rankConstant < 1) { + throw new IllegalArgumentException("[rank_constant] must be greater or equal to [1] for [rrf]"); + } + this.rankConstant = rankConstant; } diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/100_rank_rrf.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/100_rank_rrf.yml index a4972d0557dab..4f76f52409810 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/100_rank_rrf.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/100_rank_rrf.yml @@ -33,7 +33,7 @@ setup: body: text: "term term" keyword: "other" - vector: [0.0] + vector: [ 0.0 ] - do: index: @@ -42,7 +42,7 @@ setup: body: text: "other" keyword: "other" - vector: [1.0] + vector: [ 1.0 ] - do: index: @@ -51,10 +51,10 @@ setup: body: text: "term" keyword: "keyword" - vector: [2.0] + vector: [ 2.0 ] - do: - indices.refresh: {} + indices.refresh: { } --- "Simple rank with bm25 search and kNN search": @@ -67,7 +67,7 @@ setup: fields: [ "text", "keyword" ] knn: field: vector - query_vector: [0.0] + query_vector: [ 0.0 ] k: 3 num_candidates: 3 query: @@ -125,7 +125,7 @@ setup: rank_constant: 1 size: 10 - - match: { hits.total.value : 2 } + - match: { hits.total.value: 2 } - match: { hits.hits.0._id: "3" } - match: { hits.hits.0._rank: 1 } @@ -173,7 +173,7 @@ setup: rank_constant: 1 size: 10 - - match: { hits.total.value : 3 } + - match: { hits.total.value: 3 } - match: { hits.hits.0._id: "3" } - match: { hits.hits.0._rank: 1 } @@ -227,3 +227,43 @@ setup: rank_window_size: 2 rank_constant: 1 size: 10 + +--- +"RRF rank should fail if rank_constant < 1": + - requires: + cluster_features: "gte_v8.16.0" + reason: 'validation fixed in 8.16.0' + + - do: + catch: "/\\[rank_constant\\] must be greater or equal to \\[1\\] for \\[rrf\\]/" + search: + index: test + body: + track_total_hits: true + fields: [ "text", "keyword" ] + knn: + field: vector + query_vector: [ 0.0 ] + k: 3 + num_candidates: 3 + sub_searches: [ + { + "query": { + "term": { + "text": "term" + } + } + }, + { + "query": { + "match": { + "keyword": "keyword" + } + } + } + ] + rank: + rrf: + rank_window_size: 10 + rank_constant: 0.3 + size: 10 diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index f4c420db47ac3..5704d7837268b 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -463,8 +463,8 @@ public synchronized void onCancelled() { public synchronized void triggered(SchedulerEngine.Event event) { // Verify this is actually the event that we care about, then trigger the indexer. 
// Note that the status of the indexer is checked in the indexer itself - if (event.getJobName().equals(SCHEDULE_NAME + "_" + job.getConfig().getId())) { - logger.debug("Rollup indexer [" + event.getJobName() + "] schedule has triggered, state: [" + indexer.getState() + "]"); + if (event.jobName().equals(SCHEDULE_NAME + "_" + job.getConfig().getId())) { + logger.debug("Rollup indexer [" + event.jobName() + "] schedule has triggered, state: [" + indexer.getState() + "]"); indexer.maybeTriggerAsyncJob(System.currentTimeMillis()); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 56aec13cbab29..c99f2be0a6cad 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -371,12 +371,8 @@ public void testCanMountSnapshotTakenWhileConcurrentlyIndexing() throws Exceptio for (int i = between(10, 10_000); i >= 0; i--) { indexRequestBuilders.add(prepareIndex(indexName).setSource("foo", randomBoolean() ? "bar" : "baz")); } - try { - safeAwait(cyclicBarrier); - indexRandom(true, true, indexRequestBuilders); - } catch (InterruptedException e) { - throw new AssertionError(e); - } + safeAwait(cyclicBarrier); + indexRandom(true, true, indexRequestBuilders); refresh(indexName); assertThat( indicesAdmin().prepareForceMerge(indexName).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java index 56efc72f2f6f7..d7cf22a05981f 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.blobcache.BlobCacheUtils; import org.elasticsearch.blobcache.common.ByteBufferReference; import org.elasticsearch.blobcache.common.ByteRange; @@ -146,32 +147,38 @@ private void readWithoutBlobCacheSlow(ByteBuffer b, long position, int length) t final int read = SharedBytes.readCacheFile(channel, pos, relativePos, len, byteBufferReference); stats.addCachedBytesRead(read); return read; - }, (channel, channelPos, streamFactory, relativePos, len, progressUpdater) -> { - assert streamFactory == null : streamFactory; - final long startTimeNanos = stats.currentTimeNanos(); - try (InputStream input = openInputStreamFromBlobStore(rangeToWrite.start() + relativePos, len)) { - assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); - logger.trace( - "{}: writing channel {} pos {} length {} (details: {})", - fileInfo.physicalName(), - channelPos, - relativePos, - len, - cacheFile - ); - 
SharedBytes.copyToCacheFileAligned( - channel, - input, - channelPos, - relativePos, - len, - progressUpdater, - writeBuffer.get().clear() - ); - final long endTimeNanos = stats.currentTimeNanos(); - stats.addCachedBytesWritten(len, endTimeNanos - startTimeNanos); - } - }); + }, + (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> ActionListener.completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + final long startTimeNanos = stats.currentTimeNanos(); + try (InputStream input = openInputStreamFromBlobStore(rangeToWrite.start() + relativePos, len)) { + assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); + logger.trace( + "{}: writing channel {} pos {} length {} (details: {})", + fileInfo.physicalName(), + channelPos, + relativePos, + len, + cacheFile + ); + SharedBytes.copyToCacheFileAligned( + channel, + input, + channelPos, + relativePos, + len, + progressUpdater, + writeBuffer.get().clear() + ); + final long endTimeNanos = stats.currentTimeNanos(); + stats.addCachedBytesWritten(len, endTimeNanos - startTimeNanos); + return null; + } + } + ) + ); assert bytesRead == length : bytesRead + " vs " + length; byteBufferReference.finish(bytesRead); } finally { diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 262e1340fb465..f5f9410a145cc 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.junit.RunnableTestRuleAdapter; @@ -347,21 +348,6 @@ public void testCrossClusterQuery() throws Exception { | LIMIT 10""")); assertRemoteAndLocalResults(response); - // query remote cluster only - but also include employees2 which the user does not have access to - response = performRequestWithRemoteSearchUser(esqlRequest(""" - FROM my_remote_cluster:employees,my_remote_cluster:employees2 - | SORT emp_id ASC - | LIMIT 2 - | KEEP emp_id, department""")); - assertRemoteOnlyResults(response); // same as above since the user only has access to employees - - // query remote and local cluster - but also include employees2 which the user does not have access to - response = performRequestWithRemoteSearchUser(esqlRequest(""" - FROM my_remote_cluster:employees,my_remote_cluster:employees2,employees,employees2 - | SORT emp_id ASC - | LIMIT 10""")); - assertRemoteAndLocalResults(response); // same as above since the user only has access to employees - // update role to include both employees and employees2 for the remote cluster final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); putRoleRequest.setJsonEntity(""" @@ -618,6 +604,37 @@ public void testCrossClusterQueryWithOnlyRemotePrivs() throws Exception { + "this action is granted by the index privileges 
[read,read_cross_cluster,all]" ) ); + + // query remote cluster only - but also include employees2 which the user does not have access to + error = expectThrows(ResponseException.class, () -> { performRequestWithRemoteSearchUser(esqlRequest(""" + FROM my_remote_cluster:employees,my_remote_cluster:employees2 + | SORT emp_id ASC + | LIMIT 2 + | KEEP emp_id, department""")); }); + + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString( + "action [indices:data/read/esql] is unauthorized for user [remote_search_user] with effective roles " + + "[remote_search], this action is granted by the index privileges [read,read_cross_cluster,all]" + ) + ); + + // query remote and local cluster - but also include employees2 which the user does not have access to + error = expectThrows(ResponseException.class, () -> { performRequestWithRemoteSearchUser(esqlRequest(""" + FROM my_remote_cluster:employees,my_remote_cluster:employees2,employees,employees2 + | SORT emp_id ASC + | LIMIT 10""")); }); + + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString( + "action [indices:data/read/esql] is unauthorized for user [remote_search_user] with effective roles " + + "[remote_search], this action is granted by the index privileges [read,read_cross_cluster,all]" + ) + ); } @SuppressWarnings("unchecked") @@ -841,7 +858,7 @@ public void testAlias() throws Exception { }"""); assertOK(adminClient().performRequest(putRoleRequest)); // query `employees2` - for (String index : List.of("*:employees2", "*:employee*", "*:employee*,*:alias-employees,*:employees3")) { + for (String index : List.of("*:employees2", "*:employee*")) { Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); Response response = performRequestWithRemoteSearchUser(request); assertOK(response); @@ -849,15 +866,7 @@ public void testAlias() throws Exception { List ids = (List) responseAsMap.get("values"); assertThat(ids, equalTo(List.of(List.of("11"), List.of("13")))); } - // query `alias-engineering` - for (var index : List.of("*:alias*", "*:alias*", "*:alias*,my*:employees1", "*:alias*,my*:employees3")) { - Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); - Response response = performRequestWithRemoteSearchUser(request); - assertOK(response); - Map responseAsMap = entityAsMap(response); - List ids = (List) responseAsMap.get("values"); - assertThat(ids, equalTo(List.of(List.of("1"), List.of("7")))); - } + // query `employees2` and `alias-engineering` for (var index : List.of("*:employees2,*:alias-engineering", "*:emp*,*:alias-engineering", "*:emp*,my*:alias*")) { Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); @@ -874,6 +883,30 @@ public void testAlias() throws Exception { assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(400)); assertThat(error.getMessage(), containsString(" Unknown index [" + index + "]")); } + + for (var index : List.of( + Tuple.tuple("*:employee*,*:alias-employees,*:employees3", "alias-employees,employees3"), + Tuple.tuple("*:alias*,my*:employees1", "employees1"), + Tuple.tuple("*:alias*,my*:employees3", "employees3") + )) { + Request request = esqlRequest("FROM " + index.v1() + " | KEEP emp_id | SORT emp_id | LIMIT 100"); + ResponseException error = expectThrows(ResponseException.class, () -> 
performRequestWithRemoteSearchUser(request)); + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString("unauthorized for user [remote_search_user] with assigned roles [remote_search]") + ); + assertThat(error.getMessage(), containsString("user [test_user] on indices [" + index.v2() + "]")); + } + + // query `alias-engineering` + Request request = esqlRequest("FROM *:alias* | KEEP emp_id | SORT emp_id | LIMIT 100"); + Response response = performRequestWithRemoteSearchUser(request); + assertOK(response); + Map responseAsMap = entityAsMap(response); + List ids = (List) responseAsMap.get("values"); + assertThat(ids, equalTo(List.of(List.of("1"), List.of("7")))); + removeAliases(); } diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java index f81bab4866bdf..552e9f5cba578 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.SecureString; @@ -21,6 +20,8 @@ import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import java.io.IOException; @@ -50,8 +51,6 @@ public final class LicenseDLSFLSRoleIT extends ESRestTestCase { public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .nodes(1) .distribution(DistributionType.DEFAULT) - // start as "trial" - .setting("xpack.license.self_generated.type", "trial") .setting("xpack.security.enabled", "true") .setting("xpack.security.http.ssl.enabled", "false") .setting("xpack.security.transport.ssl.enabled", "false") @@ -61,6 +60,23 @@ public final class LicenseDLSFLSRoleIT extends ESRestTestCase { .user(READ_SECURITY_USER, READ_SECURITY_PASSWORD.toString(), "read_security_user_role", false) .build(); + @Before + public void setupLicense() throws IOException { + // start with trial license + Request request = new Request("POST", "/_license/start_trial?acknowledge=true"); + Response response = adminClient().performRequest(request); + assertOK(response); + assertTrue((boolean) responseAsMap(response).get("trial_was_started")); + } + + @After + public void removeLicense() throws IOException { + // start with trial license + Request request = new Request("DELETE", "/_license"); + Response response = adminClient().performRequest(request); + assertOK(response); + } + @Override protected String getTestRestCluster() { return cluster.getHttpAddresses(); @@ -78,10 +94,7 @@ protected Settings restClientSettings() { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } - @SuppressWarnings("unchecked") public void testQueryDLSFLSRolesShowAsDisabled() throws Exception { - // auto-generated "trial" - waitForLicense(adminClient(), "trial"); // neither DLS nor FLS 
role { RoleDescriptor.IndicesPrivileges[] indicesPrivileges = new RoleDescriptor.IndicesPrivileges[] { @@ -138,7 +151,6 @@ public void testQueryDLSFLSRolesShowAsDisabled() throws Exception { Map responseMap = responseAsMap(response); assertTrue(((Boolean) responseMap.get("basic_was_started"))); assertTrue(((Boolean) responseMap.get("acknowledged"))); - waitForLicense(adminClient(), "basic"); // now the same roles show up as disabled ("enabled" is "false") assertQuery(client(), "", 4, roles -> { roles.sort(Comparator.comparing(o -> ((String) o.get("name")))); @@ -175,22 +187,4 @@ private static void assertRoleEnabled(Map roleMap, boolean enabl assertThat(roleMap.get("transient_metadata"), instanceOf(Map.class)); assertThat(((Map) roleMap.get("transient_metadata")).get("enabled"), equalTo(enabled)); } - - @SuppressWarnings("unchecked") - private static void waitForLicense(RestClient adminClient, String type) throws Exception { - final Request request = new Request("GET", "_license"); - assertBusy(() -> { - Response response; - try { - response = adminClient.performRequest(request); - } catch (ResponseException e) { - throw new AssertionError("license not yet installed", e); - } - assertOK(response); - Map responseMap = responseAsMap(response); - assertTrue(responseMap.containsKey("license")); - assertThat(((Map) responseMap.get("license")).get("status"), equalTo("active")); - assertThat(((Map) responseMap.get("license")).get("type"), equalTo(type)); - }); - } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java index 963c42c55aa60..d057b7ce0be20 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java @@ -451,7 +451,7 @@ public void testSuggestProfilesWithHint() throws IOException { final List spaces = List.of("space1", "space2", "space3", "space4", "*"); final List profiles = spaces.stream().map(space -> { final PlainActionFuture future1 = new PlainActionFuture<>(); - final String lastName = randomAlphaOfLengthBetween(3, 8); + final String lastName = randomAlphaOfLengthBetween(3, 8) + space; final Authentication.RealmRef realmRef = randomBoolean() ? 
AuthenticationTestHelper.randomRealmRef(false) : new Authentication.RealmRef( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java index 466d0e3428d50..6abf6c81b673e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java @@ -401,59 +401,52 @@ private PortBlockingRunnable( public void run() { final List openedSockets = new ArrayList<>(); final List failedAddresses = new ArrayList<>(); - try { - final boolean allSocketsOpened = waitUntil(() -> { - try { - final InetAddress[] allAddresses; - if (serverAddress instanceof Inet4Address) { - allAddresses = NetworkUtils.getAllIPV4Addresses(); - } else { - allAddresses = NetworkUtils.getAllIPV6Addresses(); - } - final List inetAddressesToBind = Arrays.stream(allAddresses) - .filter(addr -> openedSockets.stream().noneMatch(s -> addr.equals(s.getLocalAddress()))) - .filter(addr -> failedAddresses.contains(addr) == false) - .collect(Collectors.toList()); - for (InetAddress localAddress : inetAddressesToBind) { - try { - final Socket socket = openMockSocket(serverAddress, serverPort, localAddress, portToBind); - openedSockets.add(socket); - logger.debug("opened socket [{}]", socket); - } catch (NoRouteToHostException | ConnectException e) { - logger.debug(() -> "marking address [" + localAddress + "] as failed due to:", e); - failedAddresses.add(localAddress); - } - } - if (openedSockets.size() == 0) { - logger.debug("Could not open any sockets from the available addresses"); - return false; + + final boolean allSocketsOpened = waitUntil(() -> { + try { + final InetAddress[] allAddresses; + if (serverAddress instanceof Inet4Address) { + allAddresses = NetworkUtils.getAllIPV4Addresses(); + } else { + allAddresses = NetworkUtils.getAllIPV6Addresses(); + } + final List inetAddressesToBind = Arrays.stream(allAddresses) + .filter(addr -> openedSockets.stream().noneMatch(s -> addr.equals(s.getLocalAddress()))) + .filter(addr -> failedAddresses.contains(addr) == false) + .collect(Collectors.toList()); + for (InetAddress localAddress : inetAddressesToBind) { + try { + final Socket socket = openMockSocket(serverAddress, serverPort, localAddress, portToBind); + openedSockets.add(socket); + logger.debug("opened socket [{}]", socket); + } catch (NoRouteToHostException | ConnectException e) { + logger.debug(() -> "marking address [" + localAddress + "] as failed due to:", e); + failedAddresses.add(localAddress); } - return true; - } catch (IOException e) { - logger.debug(() -> "caught exception while opening socket on [" + portToBind + "]", e); + } + if (openedSockets.size() == 0) { + logger.debug("Could not open any sockets from the available addresses"); return false; } - }); - - if (allSocketsOpened) { - latch.countDown(); - } else { - success.set(false); - IOUtils.closeWhileHandlingException(openedSockets); - openedSockets.clear(); - latch.countDown(); - return; + return true; + } catch (IOException e) { + logger.debug(() -> "caught exception while opening socket on [" + portToBind + "]", e); + return false; } - } catch (InterruptedException e) { - logger.debug(() -> "interrupted while trying to open sockets on [" + portToBind + "]", 
e); - Thread.currentThread().interrupt(); + }); + + if (allSocketsOpened) { + latch.countDown(); + } else { + success.set(false); + IOUtils.closeWhileHandlingException(openedSockets); + openedSockets.clear(); + latch.countDown(); + return; } try { - closeLatch.await(); - } catch (InterruptedException e) { - logger.debug("caught exception while waiting for close latch", e); - Thread.currentThread().interrupt(); + safeAwait(closeLatch); } finally { logger.debug("closing sockets on [{}]", portToBind); IOUtils.closeWhileHandlingException(openedSockets); diff --git a/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java b/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index abaf9a14aeadb..d42c8ec9655ef 100644 --- a/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java +++ b/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -38,6 +38,8 @@ import java.io.IOException; import java.io.InputStream; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -108,7 +110,8 @@ public void testFullPolicySnapshot() throws Exception { // allow arbitrarily frequent slm snapshots disableSLMMinimumIntervalValidation(); - createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoId, indexName, true); + var schedule = randomBoolean() ? "*/1 * * * * ?" : "1s"; + createSnapshotPolicy(policyName, "snap", schedule, repoId, indexName, true); // Check that the snapshot was actually taken assertBusy(() -> { @@ -176,7 +179,8 @@ public void testPolicyFailure() throws Exception { disableSLMMinimumIntervalValidation(); // Create a policy with ignore_unavailable: false and an index that doesn't exist - createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoName, indexPattern, false); + var schedule = randomBoolean() ? "*/1 * * * * ?" : "1s"; + createSnapshotPolicy(policyName, "snap", schedule, repoName, indexPattern, false); assertBusy(() -> { // Check that the failure is written to the cluster state @@ -300,10 +304,11 @@ public void testStartStopStatus() throws Exception { }); try { + var schedule = randomBoolean() ? "0 0/15 * * * ?" 
: "15m"; createSnapshotPolicy( policyName, "snap", - "0 0/15 * * * ?", + schedule, repoId, indexName, true, @@ -671,6 +676,36 @@ public void testSnapshotRetentionWithMissingRepo() throws Exception { }, 60, TimeUnit.SECONDS); } + @SuppressWarnings("unchecked") + public void testGetIntervalSchedule() throws Exception { + final String indexName = "index-1"; + final String policyName = "policy-1"; + final String repoId = "repo-1"; + + initializeRepo(repoId); + + var schedule = "30m"; + var now = Instant.now(); + createSnapshotPolicy(policyName, "snap", schedule, repoId, indexName, true); + + assertBusy(() -> { + Request getReq = new Request("GET", "/_slm/policy/" + policyName); + Response policyMetadata = client().performRequest(getReq); + Map policyResponseMap; + try (InputStream is = policyMetadata.getEntity().getContent()) { + policyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + + Map policyMetadataMap = (Map) policyResponseMap.get(policyName); + Long nextExecutionMillis = (Long) policyMetadataMap.get("next_execution_millis"); + assertNotNull(nextExecutionMillis); + + Instant nextExecution = Instant.ofEpochMilli(nextExecutionMillis); + assertTrue(nextExecution.isAfter(now.plus(Duration.ofMinutes(29)))); + assertTrue(nextExecution.isBefore(now.plus(Duration.ofMinutes(31)))); + }); + } + public Map getLocation(String path) { try { Response executeRepsonse = client().performRequest(new Request("GET", path)); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java index 0d79ecf31670c..192807d667abb 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.license.XPackLicenseState; @@ -92,6 +93,7 @@ public class SnapshotLifecycle extends Plugin implements ActionPlugin, HealthPlu private final SetOnce snapshotRetentionService = new SetOnce<>(); private final SetOnce snapshotHistoryStore = new SetOnce<>(); private final SetOnce slmHealthIndicatorService = new SetOnce<>(); + private final SetOnce featureService = new SetOnce<>(); private final Settings settings; public SnapshotLifecycle(Settings settings) { @@ -124,7 +126,7 @@ public Collection createComponents(PluginServices services) { ClusterService clusterService = services.clusterService(); ThreadPool threadPool = services.threadPool(); final List components = new ArrayList<>(); - + featureService.set(services.featureService()); SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry( settings, clusterService, @@ -236,7 +238,7 @@ public List getRestHandlers( } List> reservedClusterStateHandlers() { - return List.of(new ReservedSnapshotAction()); + return List.of(new ReservedSnapshotAction(featureService.get())); } @Override diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java index f3dfe4fb26f65..96b962f70a1b6 100644 --- 
a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java @@ -13,8 +13,14 @@ import org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry; import java.util.Map; +import java.util.Set; public class SnapshotLifecycleFeatures implements FeatureSpecification { + @Override + public Set getFeatures() { + return Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE); + } + @Override public Map getHistoricalFeatures() { return Map.of(SnapshotLifecycleTemplateRegistry.MANAGED_BY_DATA_STREAM_LIFECYCLE, Version.V_8_12_0); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java index 6d77926149334..b93f90de73f05 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java @@ -20,10 +20,11 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.OperationModeUpdateTask; -import org.elasticsearch.xpack.core.scheduler.CronSchedule; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; @@ -45,7 +46,7 @@ * task according to the policy's schedule. */ public class SnapshotLifecycleService implements Closeable, ClusterStateListener { - + public static final NodeFeature INTERVAL_SCHEDULE = new NodeFeature("slm.interval_schedule"); private static final Logger logger = LogManager.getLogger(SnapshotLifecycleService.class); private static final String JOB_PATTERN_SUFFIX = "-\\d+$"; @@ -193,15 +194,13 @@ public void maybeScheduleSnapshot(final SnapshotLifecyclePolicyMetadata snapshot // is identical to an existing job (meaning the version has not changed) then this does // not reschedule it. 
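// Editor's aside (illustrative only, not part of the diff): with this change a policy schedule string may be either a cron expression such as "0 30 1 * * ?" or a time-value interval such as "30m". A minimal sketch of how such a string could be told apart, assuming only TimeValue.parseTimeValue from org.elasticsearch.core; the helper name is hypothetical, and the real logic lives in SnapshotLifecyclePolicy.isIntervalSchedule/buildSchedulerJob introduced just below:
// static boolean looksLikeIntervalSchedule(String schedule) {
//     try {
//         TimeValue.parseTimeValue(schedule, "schedule"); // accepts "30m", "1h", "45s", ...
//         return true;
//     } catch (IllegalArgumentException e) {
//         return false; // not a time value, so treat it as a cron expression instead
//     }
// }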
scheduledTasks.computeIfAbsent(jobId, id -> { - final SchedulerEngine.Job job = new SchedulerEngine.Job( - jobId, - new CronSchedule(snapshotLifecyclePolicy.getPolicy().getSchedule()) - ); if (existingJobsFoundAndCancelled) { logger.info("rescheduling updated snapshot lifecycle job [{}]", jobId); } else { logger.info("scheduling snapshot lifecycle job [{}]", jobId); } + + final SchedulerEngine.Job job = snapshotLifecyclePolicy.buildSchedulerJob(jobId); scheduler.add(job); return job; }); @@ -249,7 +248,7 @@ public static void validateRepositoryExists(final String repository, final Clust */ public static void validateMinimumInterval(final SnapshotLifecyclePolicy lifecycle, final ClusterState state) { TimeValue minimum = LifecycleSettings.SLM_MINIMUM_INTERVAL_SETTING.get(state.metadata().settings()); - TimeValue next = lifecycle.calculateNextInterval(); + TimeValue next = lifecycle.calculateNextInterval(Clock.systemUTC()); if (next.duration() > 0 && minimum.duration() > 0 && next.millis() < minimum.millis()) { throw new IllegalArgumentException( "invalid schedule [" @@ -262,6 +261,18 @@ public static void validateMinimumInterval(final SnapshotLifecyclePolicy lifecyc } } + /** + * Validate that the interval schedule feature is supported by all nodes when an interval schedule is used + * @throws IllegalArgumentException if the schedule is an interval expression but the interval schedule feature is not supported by all nodes + */ + public static void validateIntervalScheduleSupport(String schedule, FeatureService featureService, ClusterState state) { + if (SnapshotLifecyclePolicy.isIntervalSchedule(schedule) && featureService.clusterHasFeature(state, INTERVAL_SCHEDULE) == false) { + throw new IllegalArgumentException( + "Unable to use slm interval schedules in mixed-clusters with nodes that do not support feature " + INTERVAL_SCHEDULE.id() + ); + } + } + @Override public void close() { if (this.running.compareAndSet(true, false)) { diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index adf011e0ade37..d49f32869f28a 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -68,21 +68,21 @@ public SnapshotLifecycleTask(final Client client, final ClusterService clusterSe @Override public void triggered(SchedulerEngine.Event event) { - logger.debug("snapshot lifecycle policy task triggered from job [{}]", event.getJobName()); + logger.debug("snapshot lifecycle policy task triggered from job [{}]", event.jobName()); - final Optional snapshotName = maybeTakeSnapshot(event.getJobName(), client, clusterService, historyStore); + final Optional snapshotName = maybeTakeSnapshot(event.jobName(), client, clusterService, historyStore); // Would be cleaner if we could use Optional#ifPresentOrElse snapshotName.ifPresent( name -> logger.info( "snapshot lifecycle policy job [{}] issued new snapshot creation for [{}] successfully", - event.getJobName(), + event.jobName(), name ) ); if (snapshotName.isPresent() == false) { - logger.warn("snapshot lifecycle policy for job [{}] no longer exists, snapshot not created", event.getJobName()); + logger.warn("snapshot lifecycle policy for job [{}] no longer exists, snapshot not created", event.jobName()); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java 
b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index 0cf1373e92beb..678e6941599c9 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -88,21 +88,21 @@ public SnapshotRetentionTask( @Override public void triggered(SchedulerEngine.Event event) { - assert event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_JOB_ID) - || event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) + assert event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_JOB_ID) + || event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) : "expected id to be " + SnapshotRetentionService.SLM_RETENTION_JOB_ID + " or " + SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID + " but it was " - + event.getJobName(); + + event.jobName(); final ClusterState state = clusterService.state(); // Skip running retention if SLM is disabled, however, even if it's // disabled we allow manual running. if (SnapshotLifecycleService.slmStoppedOrStopping(state) - && event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) == false) { + && event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) == false) { logger.debug("skipping SLM retention as SLM is currently stopped or stopping"); return; } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java index f14edd89b826d..192b03aa385d5 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; @@ -41,7 +42,11 @@ public class ReservedSnapshotAction implements ReservedClusterStateHandler prepare(List { private static final Logger logger = LogManager.getLogger(TransportPutSnapshotLifecycleAction.class); + private final FeatureService featureService; @Inject public TransportPutSnapshotLifecycleAction( @@ -56,7 +58,8 @@ public TransportPutSnapshotLifecycleAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + FeatureService featureService ) { super( PutSnapshotLifecycleAction.NAME, @@ -69,6 +72,7 @@ public TransportPutSnapshotLifecycleAction( AcknowledgedResponse::readFrom, EsExecutors.DIRECT_EXECUTOR_SERVICE ); + this.featureService = featureService; } @Override @@ -78,8 +82,8 @@ protected void masterOperation( final ClusterState state, final ActionListener listener ) { + SnapshotLifecycleService.validateIntervalScheduleSupport(request.getLifecycle().getSchedule(), featureService, state); SnapshotLifecycleService.validateRepositoryExists(request.getLifecycle().getRepository(), state); - SnapshotLifecycleService.validateMinimumInterval(request.getLifecycle(), state); // headers from the thread 
context stored by the AuthenticationService to be shared between the diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index fc4ee7867ed04..b7674a2d60bff 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -19,11 +19,17 @@ import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; import java.io.IOException; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; import java.util.Collections; import java.util.HashMap; import java.util.Map; import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.randomSnapshotLifecyclePolicy; +import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.randomTimeValueString; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -34,10 +40,11 @@ public class SnapshotLifecyclePolicyTests extends AbstractXContentSerializingTes private String id; public void testToRequest() { + var schedule = randomBoolean() ? "0 1 2 3 4 ? 2099" : "30m"; SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", "name", - "0 1 2 3 4 ? 2099", + schedule, "repo", Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY @@ -47,13 +54,13 @@ public void testToRequest() { Collections.singletonMap("policy", "id") ); - p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 2099", "repo", null, null); + p = new SnapshotLifecyclePolicy("id", "name", schedule, "repo", null, null); request = p.toRequest(TEST_REQUEST_TIMEOUT); expected.waitForCompletion(true).snapshot(request.snapshot()).repository("repo").uuid(request.uuid()); assertEquals(expected, request); } - public void testNextExecutionTime() { + public void testNextExecutionTimeSchedule() { SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", "name", @@ -62,10 +69,100 @@ public void testNextExecutionTime() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextExecution(), equalTo(4078864860000L)); + assertThat(p.calculateNextExecution(-1, Clock.systemUTC()), equalTo(4078864860000L)); } - public void testCalculateNextInterval() { + public void testNextExecutionTimeInterval() { + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + { + // current time is exactly modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime; + Instant expected = Instant.parse("2024-07-17T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time is half an interval past modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.plus(Duration.ofMinutes(15)); + Instant expected = Instant.parse("2024-07-17T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + 
assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time is a full day (24 intervals) ahead of modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.plus(Duration.ofDays(1)); + Instant expected = Instant.parse("2024-07-18T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time before modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.minus(Duration.ofHours(1)); + expectThrows(AssertionError.class, () -> p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime))); + } + + { + // current time is every minute of a day + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime; + Instant expectedTime = modifiedTime.plus(Duration.ofMinutes(30)); + + for (; currentTime.isBefore(modifiedTime.plus(Duration.ofDays(1))); currentTime = currentTime.plus(Duration.ofMinutes(1))) { + if (currentTime.equals(expectedTime)) { + expectedTime = expectedTime.plus(Duration.ofMinutes(30)); + } + assertThat( + p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), + equalTo(expectedTime.toEpochMilli()) + ); + } + } + } + + private static Clock fixedClock(Instant instant) { + return Clock.fixed(instant, ZoneOffset.UTC); + } + + public void testCalculateNextIntervalInterval() { + + { + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.timeValueMinutes(30))); + } + { + String schedule = randomTimeValueString(); + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + schedule, + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.parseTimeValue(schedule, "schedule"))); + } + } + + public void testCalculateNextIntervalSchedule() { { SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", @@ -75,7 +172,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.timeValueMinutes(5))); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.timeValueMinutes(5))); } { @@ -87,7 +184,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.MINUS_ONE)); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.MINUS_ONE)); } { @@ -99,7 +196,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.MINUS_ONE)); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.MINUS_ONE)); } } @@ -123,7 +220,7 @@ public void testValidation() { + " the following characters " + Strings.INVALID_FILENAME_CHARS, "invalid repository name [ ]: cannot be empty", - "invalid schedule: invalid cron 
expression [* * * * * L]" + "invalid schedule [* * * * * L]: must be a valid cron expression or time unit" ) ); } @@ -149,6 +246,34 @@ public void testValidation() { ); } + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "0d", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), containsInAnyOrder("invalid schedule [0d]: time unit must be at least 1 millisecond")); + } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "999micros", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), containsInAnyOrder("invalid schedule [999micros]: time unit must be at least 1 millisecond")); + } + { SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( "my_policy", @@ -161,6 +286,33 @@ public void testValidation() { ValidationException e = policy.validate(); assertThat(e, nullValue()); } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + ValidationException e = policy.validate(); + assertThat(e, nullValue()); + } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "1ms", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e, nullValue()); + } } public void testMetadataValidation() { diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java index 5b59ac9efc0ab..36887681f5575 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -37,6 +38,7 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleStats; import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; @@ -48,6 +50,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -108,7 +111,7 @@ public void testRepositoryExistenceForMissingRepo() { public void testNothingScheduledWhenNotRunning() throws InterruptedException { ClockMock clock = new ClockMock(); SnapshotLifecyclePolicyMetadata initialPolicy = SnapshotLifecyclePolicyMetadata.builder() - 
.setPolicy(createPolicy("initial", "*/1 * * * * ?")) + .setPolicy(createPolicy("initial", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(1) .setModifiedDate(1) @@ -133,7 +136,7 @@ public void testNothingScheduledWhenNotRunning() throws InterruptedException { sls.init(); SnapshotLifecyclePolicyMetadata newPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(2) @@ -211,7 +214,7 @@ public void testPolicyCRUD() throws Exception { Map policies = new HashMap<>(); SnapshotLifecyclePolicyMetadata policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setModifiedDate(1) .build(); @@ -240,7 +243,7 @@ public void testPolicyCRUD() throws Exception { int currentCount = triggerCount.get(); previousState = state; SnapshotLifecyclePolicyMetadata newPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(2) @@ -253,7 +256,7 @@ public void testPolicyCRUD() throws Exception { CopyOnWriteArrayList triggeredJobs = new CopyOnWriteArrayList<>(); trigger.set(e -> { - triggeredJobs.add(e.getJobName()); + triggeredJobs.add(e.jobName()); triggerCount.incrementAndGet(); }); clock.fastForwardSeconds(1); @@ -283,7 +286,7 @@ public void testPolicyCRUD() throws Exception { // When the service is no longer master, all jobs should be automatically cancelled policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(3) .setModifiedDate(1) @@ -343,7 +346,7 @@ public void testPolicyNamesEndingInNumbers() throws Exception { Map policies = new HashMap<>(); SnapshotLifecyclePolicyMetadata policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo-2", "30 * * * * ?")) + .setPolicy(createPolicy("foo-2", randomBoolean() ? "30 * * * * ?" : "30s")) .setHeaders(Collections.emptyMap()) .setVersion(1) .setModifiedDate(1) @@ -358,7 +361,7 @@ public void testPolicyNamesEndingInNumbers() throws Exception { assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("foo-2-1"))); SnapshotLifecyclePolicyMetadata secondPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo-1", "45 * * * * ?")) + .setPolicy(createPolicy("foo-1", randomBoolean() ? "45 * * * * ?" : "45s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(1) @@ -410,33 +413,70 @@ public void testValidateMinimumInterval() { ) .build(); - for (String schedule : List.of("0 0/15 * * * ?", "0 0 1 * * ?", "0 0 0 1 1 ? 2099" /* once */, "* * * 31 FEB ? 
*" /* never */)) { - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), defaultState); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationOneMinuteState); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationDisabledState); + { // using chron schedule + for (String schedule : List.of( + "0 0/15 * * * ?", + "0 0 1 * * ?", + "0 0 0 1 1 ? 2099" /* once */, + "* * * 31 FEB ? *" /* never */ + )) { + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), defaultState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationOneMinuteState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationDisabledState); + } + + IllegalArgumentException e; + + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), defaultState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [0 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [15m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), validationOneMinuteState); + + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationOneMinuteState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [0/30 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [1m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationDisabledState); } - IllegalArgumentException e; + { // using time value + for (String interval : List.of("15m", "1h", "1d")) { + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), defaultState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), validationOneMinuteState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), validationDisabledState); + } - e = expectThrows( - IllegalArgumentException.class, - () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), defaultState) - ); - assertThat( - e.getMessage(), - equalTo("invalid schedule [0 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [15m]") - ); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), validationOneMinuteState); + IllegalArgumentException e; - e = expectThrows( - IllegalArgumentException.class, - () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationOneMinuteState) - ); - assertThat( - e.getMessage(), - equalTo("invalid schedule [0/30 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [1m]") - ); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationDisabledState); + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "1m"), defaultState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [1m]: " + "schedule would be too frequent, executing more than every [15m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "1m"), validationOneMinuteState); + + e = 
expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "30s"), validationOneMinuteState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [30s]: " + "schedule would be too frequent, executing more than every [1m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "30s"), validationDisabledState); + } } public void testStoppedPriority() { @@ -485,6 +525,41 @@ public void submitUnbatchedStateUpdateTask(String source, ClusterStateUpdateTask } } + public void testValidateIntervalScheduleSupport() { + var featureService = new FeatureService(List.of(new SnapshotLifecycleFeatures())); + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateIntervalScheduleSupport("30d", featureService, state) + ); + assertThat(e.getMessage(), containsString("Unable to use slm interval schedules")); + } + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + try { + SnapshotLifecycleService.validateIntervalScheduleSupport("30d", featureService, state); + } catch (Exception e) { + fail("interval schedule is supported by version and should not fail"); + } + } + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + try { + SnapshotLifecycleService.validateIntervalScheduleSupport("*/1 * * * * ?", featureService, state); + } catch (Exception e) { + fail("cron schedule does not need feature check and should not fail"); + } + } + } + class FakeSnapshotTask extends SnapshotLifecycleTask { private final Consumer onTriggered; @@ -515,7 +590,7 @@ public ClusterState createState(SnapshotLifecycleMetadata snapMeta, boolean loca } public static SnapshotLifecyclePolicy createPolicy(String id) { - return createPolicy(id, randomSchedule()); + return createPolicy(id, SnapshotLifecyclePolicyMetadataTests.randomSchedule()); } public static SnapshotLifecyclePolicy createPolicy(String id, String schedule) { @@ -534,8 +609,4 @@ public static SnapshotLifecyclePolicy createPolicy(String id, String schedule) { SnapshotRetentionConfiguration.EMPTY ); } - - public static String randomSchedule() { - return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; - } } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java index dbb22f8dd49d8..877aa0ddb7342 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.slm.history.SnapshotHistoryStore; @@ 
-52,7 +53,7 @@ public void testJobsAreScheduled() throws InterruptedException { assertThat(service.getScheduler().jobCount(), equalTo(0)); service.onMaster(); - service.setUpdateSchedule(SnapshotLifecycleServiceTests.randomSchedule()); + service.setUpdateSchedule(SnapshotLifecyclePolicyMetadataTests.randomCronSchedule()); assertThat(service.getScheduler().scheduledJobIds(), containsInAnyOrder(SnapshotRetentionService.SLM_RETENTION_JOB_ID)); service.offMaster(); @@ -81,7 +82,7 @@ public void testManualTriggering() throws InterruptedException { try ( ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); SnapshotRetentionService service = new SnapshotRetentionService(Settings.EMPTY, () -> new FakeRetentionTask(event -> { - assertThat(event.getJobName(), equalTo(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID)); + assertThat(event.jobName(), equalTo(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID)); invoked.incrementAndGet(); }), clock) ) { diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java index 71346ebc495d4..0fcc4b8007c6d 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -79,17 +80,17 @@ private TransformState processJSON(ReservedSnapshotAction action, TransformState } public void testDependencies() { - var action = new ReservedSnapshotAction(); + var action = new ReservedSnapshotAction(mock(FeatureService.class)); assertThat(action.optionalDependencies(), contains(ReservedRepositoryAction.NAME)); } - public void testValidationFails() { + public void testValidationFailsNeitherScheduleOrInterval() { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); final ClusterName clusterName = new ClusterName("elasticsearch"); ClusterState state = ClusterState.builder(clusterName).build(); - ReservedSnapshotAction action = new ReservedSnapshotAction(); + ReservedSnapshotAction action = new ReservedSnapshotAction(mock(FeatureService.class)); TransformState prevState = new TransformState(state, Set.of()); String badPolicyJSON = """ @@ -117,6 +118,56 @@ public void testValidationFails() { ); } + public void testIntervalScheduleSupportValidation() { + Client client = mock(Client.class); + when(client.settings()).thenReturn(Settings.EMPTY); + final ClusterName clusterName = new ClusterName("elasticsearch"); + List repositoriesMetadata = List.of(new RepositoryMetadata("repo", "fs", Settings.EMPTY)); + Metadata.Builder mdBuilder = Metadata.builder(); + mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); + ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); + TransformState prevState = new TransformState(state, Set.of()); + String goodPolicyJSON = """ 
+ { + "daily-snapshots": { + "schedule": "30d", + "name": "", + "repository": "repo", + "config": { + "indices": ["foo-*", "important"], + "ignore_unavailable": true, + "include_global_state": false + }, + "retention": { + "expire_after": "30d", + "min_count": 1, + "max_count": 50 + } + } + } + """; + + { + FeatureService featureService = mock(FeatureService.class); + when(featureService.clusterHasFeature(any(), any())).thenReturn(false); + ReservedSnapshotAction action = new ReservedSnapshotAction(featureService); + assertThat( + expectThrows(IllegalArgumentException.class, () -> processJSON(action, prevState, goodPolicyJSON)).getMessage(), + is("Error on validating SLM requests") + ); + } + { + FeatureService featureService = mock(FeatureService.class); + when(featureService.clusterHasFeature(any(), any())).thenReturn(true); + ReservedSnapshotAction action = new ReservedSnapshotAction(featureService); + try { + processJSON(action, prevState, goodPolicyJSON); + } catch (Exception e) { + fail("interval schedule with interval feature should pass validation"); + } + } + } + public void testActionAddRemove() throws Exception { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); @@ -128,7 +179,7 @@ public void testActionAddRemove() throws Exception { mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); - ReservedSnapshotAction action = new ReservedSnapshotAction(); + ReservedSnapshotAction action = new ReservedSnapshotAction(mock(FeatureService.class)); String emptyJSON = ""; @@ -362,7 +413,7 @@ public void testOperatorControllerFromJSONContent() throws IOException { null, List.of( new ReservedClusterSettingsAction(clusterSettings), - new ReservedSnapshotAction(), + new ReservedSnapshotAction(mock(FeatureService.class)), new ReservedRepositoryAction(repositoriesService) ) ); @@ -396,7 +447,8 @@ public void testPutSLMReservedStateHandler() throws Exception { mock(ClusterService.class), threadPool, mock(ActionFilters.class), - mock(IndexNameExpressionResolver.class) + mock(IndexNameExpressionResolver.class), + mock(FeatureService.class) ); assertThat(putAction.reservedStateHandlerName().get(), equalTo(ReservedSnapshotAction.NAME)); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java index 750fdd40c12d6..211afe8e55a15 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.junit.After; import org.junit.Before; @@ -194,10 +195,14 @@ public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String id) { config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); } } - return new SnapshotLifecyclePolicy(id, randomAlphaOfLength(4), randomSchedule(), randomAlphaOfLength(4), config, null); - } - private static String randomSchedule() { - return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; + 
return new SnapshotLifecyclePolicy( + id, + randomAlphaOfLength(4), + SnapshotLifecyclePolicyMetadataTests.randomSchedule(), + randomAlphaOfLength(4), + config, + null + ); } } diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java index 0563c8f281cb8..f4ee7f264d4f7 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java @@ -6,141 +6,23 @@ */ package org.elasticsearch.xpack.spatial.search; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.geometry.GeometryCollection; -import org.elasticsearch.geometry.Line; -import org.elasticsearch.geometry.LinearRing; -import org.elasticsearch.geometry.MultiLine; -import org.elasticsearch.geometry.MultiPoint; -import org.elasticsearch.geometry.Point; -import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; -import org.hamcrest.CoreMatchers; - -import java.util.List; public class ShapeQueryOverPointTests extends ShapeQueryTestCase { @Override protected XContentBuilder createDefaultMapping() throws Exception { - XContentBuilder xcb = XContentFactory.jsonBuilder() + final boolean isIndexed = randomBoolean(); + final boolean hasDocValues = isIndexed == false || randomBoolean(); + return XContentFactory.jsonBuilder() .startObject() .startObject("properties") .startObject(defaultFieldName) .field("type", "point") + .field("index", isIndexed) + .field("doc_values", hasDocValues) .endObject() .endObject() .endObject(); - - return xcb; - } - - public void testProcessRelationSupport() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Rectangle rectangle = new Rectangle(-35, -25, -25, -35); - - for (ShapeRelation shapeRelation : ShapeRelation.values()) { - if (shapeRelation.equals(ShapeRelation.INTERSECTS) == false) { - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("test") - .setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle).relation(shapeRelation)) - .get() - ); - assertThat( - e.getCause().getMessage(), - CoreMatchers.containsString(shapeRelation + " query relation not supported for Field [" + defaultFieldName + "]") - ); - } - } - } - - public void testQueryLine() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Line line = new Line(new double[] { -25, -25 }, new double[] { -35, -35 }); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, line)).get(); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.LINESTRING + " queries")); - } - } - - public void testQueryLinearRing() throws 
Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - LinearRing linearRing = new LinearRing(new double[] { -25, -35, -25 }, new double[] { -25, -35, -25 }); - - IllegalArgumentException ex = expectThrows( - IllegalArgumentException.class, - () -> new ShapeQueryBuilder(defaultFieldName, linearRing) - ); - assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); - - ex = expectThrows( - IllegalArgumentException.class, - () -> new ShapeQueryBuilder(defaultFieldName, new GeometryCollection<>(List.of(linearRing))) - ); - assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); - } - - public void testQueryMultiLine() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Line lsb1 = new Line(new double[] { -35, -25 }, new double[] { -35, -25 }); - Line lsb2 = new Line(new double[] { -15, -5 }, new double[] { -15, -5 }); - - MultiLine multiline = new MultiLine(List.of(lsb1, lsb2)); - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, multiline)).get(); - } catch (Exception e) { - assertThat( - e.getCause().getMessage(), - CoreMatchers.containsString("does not support " + ShapeType.MULTILINESTRING + " queries") - ); - } - } - - public void testQueryMultiPoint() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - MultiPoint multiPoint = new MultiPoint(List.of(new Point(-35, -25), new Point(-15, -5))); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint)).get(); - } catch (Exception e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.MULTIPOINT + " queries")); - } } - - public void testQueryPoint() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Point point = new Point(-35, -2); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, point)).get(); - } catch (Exception e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.POINT + " queries")); - } - } - } diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java index 38d0a30b593b6..1ac6bf3b6fd31 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java @@ -7,16 +7,18 @@ package org.elasticsearch.xpack.spatial.search; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.GeometryCollection; +import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.LinearRing; +import 
org.elasticsearch.geometry.MultiLine; +import org.elasticsearch.geometry.MultiPoint; import org.elasticsearch.geometry.MultiPolygon; import org.elasticsearch.geometry.Point; import org.elasticsearch.geometry.Polygon; import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -26,6 +28,7 @@ import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; import org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; +import org.hamcrest.CoreMatchers; import java.util.Collection; import java.util.List; @@ -35,6 +38,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -46,29 +50,18 @@ protected Collection> getPlugins() { return pluginList(LocalStateSpatialPlugin.class, LocalStateCompositeXPackPlugin.class); } - protected abstract XContentBuilder createDefaultMapping() throws Exception; - - static String defaultFieldName = "xy"; - static String defaultIndexName = "test-points"; + @Override + public void setUp() throws Exception { + super.setUp(); - public void testNullShape() throws Exception { String mapping = Strings.toString(createDefaultMapping()); indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); prepareIndex(defaultIndexName).setId("aNullshape") - .setSource("{\"geo\": null}", XContentType.JSON) + .setSource("{\"" + defaultFieldName + "\": null}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE) .get(); - GetResponse result = client().prepareGet(defaultIndexName, "aNullshape").get(); - assertThat(result.getField("location"), nullValue()); - }; - - public void testIndexPointsFilterRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) @@ -78,74 +71,82 @@ public void testIndexPointsFilterRectangle() throws Exception { .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + prepareIndex(defaultIndexName).setId("3") + .setSource(jsonBuilder().startObject().field("name", "Document 3").field(defaultFieldName, "POINT(50 50)").endObject()) + .setRefreshPolicy(IMMEDIATE) + .get(); + prepareIndex(defaultIndexName).setId("4") + .setSource( + jsonBuilder().startObject() + .field("name", "Document 4") + .field(defaultFieldName, new String[] { "POINT(-30 -30)", "POINT(50 50)" }) + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); + prepareIndex(defaultIndexName).setId("5") + .setSource( + jsonBuilder().startObject() + .field("name", "Document 5") + .field(defaultFieldName, new String[] { "POINT(60 60)", "POINT(50 50)" }) + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); + } + + 
protected abstract XContentBuilder createDefaultMapping() throws Exception; + + static String defaultFieldName = "xy"; + static String defaultIndexName = "test-points"; + public void testNullShape() { + GetResponse result = client().prepareGet(defaultIndexName, "aNullshape").get(); + assertThat(result.getField(defaultFieldName), nullValue()); + }; + + public void testIndexPointsFilterRectangle() { Rectangle rectangle = new Rectangle(-45, 45, 45, -45); assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); // default query, without specifying relation (expect intersects) - assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName).setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsCircle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsCircle() { Circle circle = new Circle(-30, -30, 1); assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsPolygon() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - 
.setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsPolygon() { Polygon polygon = new Polygon(new LinearRing(new double[] { -35, -35, -25, -25, -35 }, new double[] { -35, -25, -25, -35, -35 })); assertNoFailuresAndResponse( @@ -153,32 +154,14 @@ public void testIndexPointsPolygon() throws Exception { .setQuery(new ShapeQueryBuilder(defaultFieldName, polygon).relation(ShapeRelation.INTERSECTS)), response -> { SearchHits searchHits = response.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(1L)); - assertThat(searchHits.getAt(0).getId(), equalTo("1")); + assertThat(searchHits.getTotalHits().value, equalTo(2L)); + assertThat(searchHits.getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(searchHits.getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsMultiPolygon() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-40 -40)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("3") - .setSource(jsonBuilder().startObject().field("name", "Document 3").field(defaultFieldName, "POINT(-50 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsMultiPolygon() { Polygon encloseDocument1Shape = new Polygon( new LinearRing(new double[] { -35, -35, -25, -25, -35 }, new double[] { -35, -25, -25, -35, -35 }) ); @@ -192,29 +175,16 @@ public void testIndexPointsMultiPolygon() throws Exception { client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, mp).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getHits().length, equalTo(2)); - assertThat(response.getHits().getAt(0).getId(), not(equalTo("2"))); - assertThat(response.getHits().getAt(1).getId(), not(equalTo("2"))); + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getAt(0).getId(), not(equalTo("3"))); + assertThat(response.getHits().getAt(1).getId(), not(equalTo("3"))); + assertThat(response.getHits().getAt(2).getId(), not(equalTo("3"))); } ); } - public void testIndexPointsRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, 
"POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsRectangle() { Rectangle rectangle = new Rectangle(-50, -40, -45, -55); assertNoFailuresAndResponse( @@ -229,20 +199,6 @@ public void testIndexPointsRectangle() throws Exception { } public void testIndexPointsIndexedRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("point1") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("point2") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - String indexedShapeIndex = "indexed_query_shapes"; String indexedShapePath = "shape"; String queryShapesMapping = Strings.toString( @@ -278,7 +234,7 @@ public void testIndexPointsIndexedRectangle() throws Exception { response -> { assertThat(response.getHits().getTotalHits().value, equalTo(1L)); assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("point2")); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); } ); @@ -291,53 +247,122 @@ public void testIndexPointsIndexedRectangle() throws Exception { ), 0L ); + } - public void testDistanceQuery() throws Exception { - indicesAdmin().prepareCreate("test_distance").setMapping("location", "type=shape").get(); - ensureGreen(); + public void testDistanceQuery() { + Circle circle = new Circle(-25, -25, 10); - Circle circle = new Circle(1, 0, 10); - - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(2, 2))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(3, 1))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(-20, -30))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(20, 30))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.INTERSECTS)), + 2L + ); + + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.WITHIN)), + 1L + ); + + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.DISJOINT)), + 3L + ); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.WITHIN)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.CONTAINS)), + 0L + ); + } + + public void testIndexPointsQueryLinearRing() { + LinearRing linearRing = new LinearRing(new double[] { -50, -50 }, new double[] { 50, 50 }); + + 
IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> new ShapeQueryBuilder(defaultFieldName, linearRing) + ); + assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); + + ex = expectThrows( + IllegalArgumentException.class, + () -> new ShapeQueryBuilder(defaultFieldName, new GeometryCollection<>(List.of(linearRing))) + ); + assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); + } + + public void testIndexPointsQueryLine() { + Line line = new Line(new double[] { 100, -30 }, new double[] { -100, -30 }); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, line).relation(ShapeRelation.INTERSECTS)), 2L ); + } + + public void testIndexPointsQueryMultiLine() { + MultiLine multiLine = new MultiLine( + List.of( + new Line(new double[] { 100, -30 }, new double[] { -100, -30 }), + new Line(new double[] { 100, -20 }, new double[] { -100, -20 }) + ) + ); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.INTERSECTS)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiLine).relation(ShapeRelation.INTERSECTS)), 2L ); + } + public void testIndexPointsQueryPoint() { + Point point = new Point(-30, -30); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.DISJOINT)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.INTERSECTS)), 2L ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.WITHIN)), + 1L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.CONTAINS)), + 2L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.DISJOINT)), + 3L + ); + } + public void testIndexPointsQueryMultiPoint() { + MultiPoint multiPoint = new MultiPoint(List.of(new Point(-30, -30), new Point(50, 50))); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.CONTAINS)), - 0L + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.INTERSECTS)), + 4L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.WITHIN)), + 3L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.CONTAINS)), + 1L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.DISJOINT)), + 1L ); } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index d98fe7fdfc6ec..9412dc3c5eb53 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ 
b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -215,7 +215,8 @@ public String typeName() { @Override public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation, SearchExecutionContext context) { - return queryProcessor.shapeQuery(shape, fieldName, relation, context); + failIfNotIndexedNorDocValuesFallback(context); + return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); } @Override diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java index a8c084e7e0f01..22616eabf8211 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java @@ -8,50 +8,272 @@ import org.apache.lucene.document.XYDocValuesField; import org.apache.lucene.document.XYPointField; +import org.apache.lucene.geo.Component2D; import org.apache.lucene.geo.XYGeometry; +import org.apache.lucene.geo.XYPoint; +import org.apache.lucene.geo.XYRectangle; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.geo.LuceneGeometriesUtils; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.ShapeType; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.xpack.spatial.index.mapper.PointFieldMapper; -import java.util.function.Consumer; +import java.util.Arrays; +/** Generates a Lucene query for a spatial query over a point field. + * + * Note that Lucene only supports the intersects spatial relation, so we build the other relations + * using just that one. + * */ public class ShapeQueryPointProcessor { - public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, SearchExecutionContext context) { - final boolean hasDocValues = validateIsPointFieldType(fieldName, context); - // only the intersects relation is supported for indexed cartesian point types - if (relation != ShapeRelation.INTERSECTS) { - throw new QueryShardException(context, relation + " query relation not supported for Field [" + fieldName + "]."); - } - final Consumer<ShapeType> checker = t -> { - if (t == ShapeType.POINT || t == ShapeType.MULTIPOINT || t == ShapeType.LINESTRING || t == ShapeType.MULTILINESTRING) { - throw new QueryShardException(context, "Field [" + fieldName + "] does not support " + t + " queries"); - } + public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean isIndexed, boolean hasDocValues) { + assert isIndexed || hasDocValues; + final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + // XYPointField only supports the intersects query, so we build all the relationships using that logic. + // It is not very efficient, but it works.
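+ // In outline (see the helpers below): DISJOINT matches documents with a value that do not intersect, CONTAINS is only + // satisfiable by (multi)point query geometries and requires every query point to intersect, and WITHIN excludes any + // document that also intersects the inverse of the query geometry.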
+ return switch (relation) { + case INTERSECTS -> buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + case DISJOINT -> buildDisjointQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + case CONTAINS -> buildContainsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + case WITHIN -> buildWithinQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); }; - final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, checker); - Query query = XYPointField.newGeometryQuery(fieldName, luceneGeometries); - if (hasDocValues) { - final Query queryDocValues = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries); - query = new IndexOrDocValuesQuery(query, queryDocValues); + } + + private static Query buildIntersectsQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) { + // This is supported natively in Lucene + Query query; + if (isIndexed) { + query = XYPointField.newGeometryQuery(fieldName, luceneGeometries); + if (hasDocValues) { + final Query queryDocValues = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries); + query = new IndexOrDocValuesQuery(query, queryDocValues); + } + } else { + query = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries); } return query; } - private boolean validateIsPointFieldType(String fieldName, SearchExecutionContext context) { - MappedFieldType fieldType = context.getFieldType(fieldName); - if (fieldType instanceof PointFieldMapper.PointFieldType == false) { - throw new QueryShardException( - context, - "Expected " + PointFieldMapper.CONTENT_TYPE + " field type for Field [" + fieldName + "] but found " + fieldType.typeName() + private static Query buildDisjointQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) { + // first collect all the documents that contain a shape + final BooleanQuery.Builder builder = new BooleanQuery.Builder(); + if (hasDocValues) { + builder.add(new FieldExistsQuery(fieldName), BooleanClause.Occur.FILTER); + } else { + builder.add( + buildIntersectsQuery( + fieldName, + isIndexed, + hasDocValues, + new XYRectangle(-Float.MAX_VALUE, Float.MAX_VALUE, -Float.MAX_VALUE, Float.MAX_VALUE) + ), + BooleanClause.Occur.FILTER ); } - return fieldType.hasDocValues(); + // then remove all intersecting documents + builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries), BooleanClause.Occur.MUST_NOT); + return builder.build(); + } + + private static Query buildContainsQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) { + // for non-point data the result is always false + if (allPoints(luceneGeometries) == false) { + return new MatchNoDocsQuery(); + } + // for a single point, it behaves like intersects + if (luceneGeometries.length == 1) { + return buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + } + // for a multi-point, all points need to be in the document + final BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (XYGeometry geometry : luceneGeometries) { + builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, geometry), BooleanClause.Occur.FILTER); + } + return builder.build(); + } + + private static Query buildWithinQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry...
luceneGeometries) { + final BooleanQuery.Builder builder = new BooleanQuery.Builder(); + // collect all the intersecting documents + builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries), BooleanClause.Occur.FILTER); + // This is the tricky part, as we need to remove all documents that have at least one disjoint point. + // In order to do that, we introduce an InverseXYGeometry which returns all documents that have at least one disjoint point + // with the original geometry. + builder.add( + buildIntersectsQuery(fieldName, isIndexed, hasDocValues, new InverseXYGeometry(luceneGeometries)), + BooleanClause.Occur.MUST_NOT + ); + return builder.build(); + } + + private static boolean allPoints(XYGeometry[] geometries) { + return Arrays.stream(geometries).allMatch(g -> g instanceof XYPoint); + } + + private static class InverseXYGeometry extends XYGeometry { + private final XYGeometry[] geometries; + + InverseXYGeometry(XYGeometry... geometries) { + this.geometries = geometries; + } + + @Override + protected Component2D toComponent2D() { + final Component2D component2D = XYGeometry.create(geometries); + return new Component2D() { + @Override + public double getMinX() { + return -Float.MAX_VALUE; + } + + @Override + public double getMaxX() { + return Float.MAX_VALUE; + } + + @Override + public double getMinY() { + return -Float.MAX_VALUE; + } + + @Override + public double getMaxY() { + return Float.MAX_VALUE; + } + + @Override + public boolean contains(double x, double y) { + return component2D.contains(x, y) == false; + } + + @Override + public PointValues.Relation relate(double minX, double maxX, double minY, double maxY) { + PointValues.Relation relation = component2D.relate(minX, maxX, minY, maxY); + return switch (relation) { + case CELL_INSIDE_QUERY -> PointValues.Relation.CELL_OUTSIDE_QUERY; + case CELL_OUTSIDE_QUERY -> PointValues.Relation.CELL_INSIDE_QUERY; + case CELL_CROSSES_QUERY -> PointValues.Relation.CELL_CROSSES_QUERY; + }; + } + + @Override + public boolean intersectsLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean intersectsTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY, + double cX, + double cY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY, + double cX, + double cY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinPoint(double x, double y) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + boolean ab, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + boolean ab, + double bX, + double bY, + boolean bc, + double cX, + double cY, + boolean ca + ) { + throw new UnsupportedOperationException(); + }
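+ // Only contains() and relate() above are expected to be exercised for point queries; the remaining callbacks should + // never be invoked here, hence they are left unsupported.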
+ }; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + InverseXYGeometry that = (InverseXYGeometry) o; + return Arrays.equals(geometries, that.geometries); + } + + @Override + public int hashCode() { + return Arrays.hashCode(geometries); + } } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java index db67b1f1e998b..05756168991c9 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java @@ -30,22 +30,11 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws @Override protected ShapeRelation getShapeRelation(ShapeType type) { - return ShapeRelation.INTERSECTS; + return randomFrom(ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS, ShapeRelation.DISJOINT, ShapeRelation.WITHIN); } @Override protected Geometry getGeometry() { - if (randomBoolean()) { - if (randomBoolean()) { - return ShapeTestUtils.randomMultiPolygon(false); - } else { - return ShapeTestUtils.randomPolygon(false); - } - } else if (randomBoolean()) { - // it should be a circle - return ShapeTestUtils.randomPolygon(false); - } else { - return ShapeTestUtils.randomRectangle(); - } + return ShapeTestUtils.randomGeometry(false); } } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml new file mode 100644 index 0000000000000..eb589cb810cc3 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml @@ -0,0 +1,126 @@ +setup: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [short_error_messages_for_unsupported_fields] + reason: "We changed error messages for unsupported fields in v 8.16" + test_runner_features: [capabilities, allowed_warnings_regex] + + - do: + indices.create: + index: ambiguous_1 + body: + mappings: + properties: + "name": + type: keyword + + - do: + indices.create: + index: ambiguous_2 + body: + mappings: + properties: + "name": + type: keyword + + - do: + indices.create: + index: ambiguous_3 + body: + mappings: + properties: + "name": + type: keyword + + - do: + indices.create: + index: ambiguous_4 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_5 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_6 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_7 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_8 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_9 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_10 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_11 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_12 + body: + 
mappings: + properties: + "name": + type: ip + +--- +load many indices with ambiguities: + - do: + catch: '/Cannot use field \[name\] due to ambiguities being mapped as \[3\] incompatible types: \[integer\] in \[ambiguous_4, ambiguous_5, ambiguous_6\] and \[1\] other index, \[ip\] in \[ambiguous_10, ambiguous_11, ambiguous_12\] and \[2\] other indices, \[keyword\] in \[ambiguous_1, ambiguous_2, ambiguous_3\]/' + esql.query: + body: + query: 'FROM ambiguous* | SORT name' + diff --git a/x-pack/rest-resources-zip/build.gradle b/x-pack/rest-resources-zip/build.gradle index 3d0533b4ec57e..cc5bddf12d801 100644 --- a/x-pack/rest-resources-zip/build.gradle +++ b/x-pack/rest-resources-zip/build.gradle @@ -20,6 +20,7 @@ dependencies { apis project(path: ':rest-api-spec', configuration: 'restSpecs') freeTests project(path: ':rest-api-spec', configuration: 'restTests') freeTests project(path: ':modules:aggregations', configuration: 'restTests') + freeTests project(path: ':modules:analysis-common', configuration: 'restTests') compatApis project(path: ':rest-api-spec', configuration: 'restCompatSpecs') compatApis project(path: ':x-pack:plugin', configuration: 'restCompatSpecs') freeCompatTests project(path: ':rest-api-spec', configuration: 'restCompatTests')