diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index 732f2f9b96ae3..1c80f5048a611 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -239,5 +239,38 @@ "data_instance_config": "4vCPU, 32G Mem, 16G Heap" }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" + }, + "id_15": { + "description": "Search only test-procedure for big5, uses lucene-10 index snapshot to restore the data for OS-3.0.0", + "supported_major_versions": ["3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "big5", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", + "CAPTURE_NODE_STAT": "true", + "TEST_PROCEDURE": "restore-from-snapshot" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" + }, + "id_16": { + "description": "Benchmarking config for NESTED workload, benchmarks nested queries with inner-hits", + "supported_major_versions": ["2", "3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "nested", + "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" + } } -} diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index c494df6e27ce3..e6ccc31160bf9 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -4,7 +4,10 @@ on: types: [created] jobs: run-performance-benchmark-on-pull-request: - if: ${{ (github.event.issue.pull_request) && (contains(github.event.comment.body, '"run-benchmark-test"')) }} + if: | + github.repository == 'opensearch-project/OpenSearch' && + github.event.issue.pull_request && + contains(github.event.comment.body, '"run-benchmark-test"') runs-on: ubuntu-latest permissions: id-token: write @@ -111,7 +114,7 @@ jobs: uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} - result-encoding: string + result-encoding: json script: | // Get the collaborators - filtered to maintainer permissions const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', { @@ -121,12 +124,12 @@ jobs: affiliation: 'all', per_page: 100 }); - return maintainersResponse.data.map(item => item.login).join(', '); + return maintainersResponse.data.map(item => item.login); - uses: trstringer/manual-approval@v1 - if: (!contains(steps.get_approvers.outputs.result, github.event.comment.user.login)) + if: ${{ !contains(fromJSON(steps.get_approvers.outputs.result), github.event.comment.user.login) }} with: secret: ${{ github.TOKEN }} - approvers: ${{ steps.get_approvers.outputs.result }} + approvers: ${{ join(fromJSON(steps.get_approvers.outputs.result), ', ') }} minimum-approvals: 1 issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}' issue-body: "Please approve or deny the benchmark run for PR #${{ 
env.PR_NUMBER }}" diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml deleted file mode 100644 index ef842bb405d60..0000000000000 --- a/.github/workflows/dco.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Developer Certificate of Origin Check - -on: [pull_request] - -jobs: - dco-check: - runs-on: ubuntu-latest - - steps: - - name: Get PR Commits - id: 'get-pr-commits' - uses: tim-actions/get-pr-commits@v1.3.1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - - name: DCO Check - uses: tim-actions/dco@v1.1.0 - with: - commits: ${{ steps.get-pr-commits.outputs.commits }} - diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 3697750dab97a..923c82028cd1b 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v2.1.0 + uses: lycheeverse/lychee-action@v2.2.0 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 13f952c66f66c..eb5314a769f76 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,32 +18,53 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support for keyword fields in star-tree index ([#16233](https://github.com/opensearch-project/OpenSearch/pull/16233)) - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600)) - Add vertical scaling and SoftReference for snapshot repository data cache ([#16489](https://github.com/opensearch-project/OpenSearch/pull/16489)) +- [Workload Management] Add Workload Management IT ([#16359](https://github.com/opensearch-project/OpenSearch/pull/16359)) - Support prefix list for remote repository attributes([#16271](https://github.com/opensearch-project/OpenSearch/pull/16271)) - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)). - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/)) +- Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods ([#16923](https://github.com/opensearch-project/OpenSearch/pull/16923)) +- Added a precaution to handle extreme date values during sorting to prevent `arithmetic_exception: long overflow` ([#16812](https://github.com/opensearch-project/OpenSearch/pull/16812)). 
+- Add search replica stats to segment replication stats API ([#16678](https://github.com/opensearch-project/OpenSearch/pull/16678)) +- Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) +- Added ability to retrieve value from DocValues in a flat_object field([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) +- Introduce framework for auxiliary transports and an experimental gRPC transport plugin ([#16534](https://github.com/opensearch-project/OpenSearch/pull/16534)) +- Changes to support IP field in star tree indexing([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/)) +- Support object fields in star-tree index([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/)) ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) - Bump `google-auth-library-oauth2-http` from 1.7.0 to 1.29.0 in /plugins/repository-gcs ([#16520](https://github.com/opensearch-project/OpenSearch/pull/16520)) -- Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521)) +- Bump `com.azure:azure-storage-common` from 12.25.1 to 12.28.0 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521), [#16808](https://github.com/opensearch-project/OpenSearch/pull/16808)) - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241105-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548), [#16613](https://github.com/opensearch-project/OpenSearch/pull/16613)) - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) -- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.2.2 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612)) -- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.46 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611)) -- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.1.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.3.0 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612), [#16854](https://github.com/opensearch-project/OpenSearch/pull/16854)) +- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.47 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611), [#16807](https://github.com/opensearch-project/OpenSearch/pull/16807)) +- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.2.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610), [#16897](https://github.com/opensearch-project/OpenSearch/pull/16897)) - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614)) - Bump `mockito` from 5.14.1 to 5.14.2, `objenesis` from 3.2 to 3.3 and `bytebuddy` from 1.15.4 to 1.15.10 ([#16655](https://github.com/opensearch-project/OpenSearch/pull/16655)) - Bump `Netty` from 4.1.114.Final to 4.1.115.Final ([#16661](https://github.com/opensearch-project/OpenSearch/pull/16661)) - Bump
`org.xerial.snappy:snappy-java` from 1.1.10.6 to 1.1.10.7 ([#16665](https://github.com/opensearch-project/OpenSearch/pull/16665)) - Bump `codecov/codecov-action` from 4 to 5 ([#16667](https://github.com/opensearch-project/OpenSearch/pull/16667)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.2 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.3 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718), [#16858](https://github.com/opensearch-project/OpenSearch/pull/16858)) - Bump `jackson` from 2.17.2 to 2.18.2 ([#16733](https://github.com/opensearch-project/OpenSearch/pull/16733)) -- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.12 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716)) +- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.15 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716), [#16898](https://github.com/opensearch-project/OpenSearch/pull/16898)) - Bump `com.azure:azure-identity` from 1.13.2 to 1.14.2 ([#16778](https://github.com/opensearch-project/OpenSearch/pull/16778)) +- Bump Apache Lucene from 9.12.0 to 9.12.1 ([#16846](https://github.com/opensearch-project/OpenSearch/pull/16846)) +- Bump `com.gradle.develocity` from 3.18.2 to 3.19 ([#16855](https://github.com/opensearch-project/OpenSearch/pull/16855)) +- Bump `org.jline:jline` from 3.27.1 to 3.28.0 ([#16857](https://github.com/opensearch-project/OpenSearch/pull/16857)) +- Bump `com.azure:azure-core` from 1.51.0 to 1.54.1 ([#16856](https://github.com/opensearch-project/OpenSearch/pull/16856)) +- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.20.1 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895)) +- Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896)) +- Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918)) +- Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919)) +- Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951)) +- Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952)) ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) +- Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707)) +- Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909)) - Cancellation support for cat/nodes and optimize it ([#14853](https://github.com/opensearch-project/OpenSearch/pull/14853)) ### Deprecated @@ -64,6 +85,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Tiered Caching] Fix bug in cache stats API ([#16560](https://github.com/opensearch-project/OpenSearch/pull/16560)) - Bound the size of cache in deprecation logger ([#16702](https://github.com/opensearch-project/OpenSearch/issues/16702)) - Ensure consistency of system flag on IndexMetadata after diff is applied ([#16644](https://github.com/opensearch-project/OpenSearch/pull/16644)) +- Skip remote-repositories
validations for node-joins when RepositoriesService is not in sync with cluster-state ([#16763](https://github.com/opensearch-project/OpenSearch/pull/16763)) +- Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) +- Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335)) +- Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) ### Security diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy index 2bd8835535881..9b687e1037a08 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy @@ -79,7 +79,7 @@ class OptionalDependenciesPlugin implements Plugin<Project> { if (foundDep) { if (foundDep.optional) { - foundDep.optional.value = 'true' + foundDep.optional*.value = 'true' } else { foundDep.appendNode(OPTIONAL_IDENTIFIER, 'true') } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java index 77d7997d6d48d..b75bdcffb257b 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java @@ -105,8 +105,7 @@ public DockerAvailability getDockerAvailability() { Result lastResult = null; Version version = null; boolean isVersionHighEnough = false; - boolean isComposeAvailable = false; - boolean isComposeV2Available = false; + DockerComposeAvailability dockerComposeAvailability = null; // Check if the Docker binary exists final Optional<String> dockerBinary = getDockerPath(); @@ -114,7 +113,7 @@ public DockerAvailability getDockerAvailability() { dockerPath = dockerBinary.get(); // Since we use a multi-stage Docker build, check the Docker version meets minimum requirement - lastResult = runCommand(dockerPath, "version", "--format", "{{.Server.Version}}"); + lastResult = runCommand(execOperations, dockerPath, "version", "--format", "{{.Server.Version}}"); if (lastResult.isSuccess()) { version = Version.fromString(lastResult.stdout.trim(), Version.Mode.RELAXED); @@ -123,15 +122,11 @@ if (isVersionHighEnough) { // Check that we can execute a privileged command - lastResult = runCommand(dockerPath, "images"); - + lastResult = runCommand(execOperations, dockerPath, "images"); // If docker all checks out, see if docker-compose is available and working - Optional<String> composePath = getDockerComposePath(); - if (lastResult.isSuccess() && composePath.isPresent()) { - isComposeAvailable = runCommand(composePath.get(), "version").isSuccess(); + if (lastResult.isSuccess()) { + dockerComposeAvailability = DockerComposeAvailability.detect(execOperations, dockerPath).orElse(null); } - - isComposeV2Available = runCommand(dockerPath, "compose", "version").isSuccess(); } } } @@ -140,8 +135,7 @@ this.dockerAvailability = new DockerAvailability( isAvailable, - isComposeAvailable, - isComposeV2Available, + dockerComposeAvailability, isVersionHighEnough, dockerPath, version, @@ -291,17 +285,6 @@ private Optional<String> getDockerPath() { return
Arrays.asList(DOCKER_BINARIES).stream().filter(path -> new File(path).exists()).findFirst(); } - /** - * Searches the entries in {@link #DOCKER_COMPOSE_BINARIES} for the Docker Compose CLI. This method does - * not check whether the installation appears usable, see {@link #getDockerAvailability()} instead. - * - * @return the path to a CLI, if available. - */ - private Optional<String> getDockerComposePath() { - // Check if the Docker binary exists - return Arrays.asList(DOCKER_COMPOSE_BINARIES).stream().filter(path -> new File(path).exists()).findFirst(); - } - private void throwDockerRequiredException(final String message) { throwDockerRequiredException(message, null); } @@ -321,7 +304,7 @@ private void throwDockerRequiredException(final String message, Exception e) { * while running the command, or the process was killed after reaching the 10s timeout, * then the exit code will be -1. */ - private Result runCommand(String... args) { + private static Result runCommand(ExecOperations execOperations, String... args) { if (args.length == 0) { throw new IllegalArgumentException("Cannot execute with no command"); } @@ -356,14 +339,9 @@ public static class DockerAvailability { public final boolean isAvailable; /** - * True if docker-compose is available. + * Non-null if docker-compose v1 or v2 is available. */ - public final boolean isComposeAvailable; - - /** - * True if docker compose is available. - */ - public final boolean isComposeV2Available; + public final DockerComposeAvailability dockerComposeAvailability; /** * True if the installed Docker version is >= 17.05 @@ -387,23 +365,70 @@ public static class DockerAvailability { DockerAvailability( boolean isAvailable, - boolean isComposeAvailable, - boolean isComposeV2Available, + DockerComposeAvailability dockerComposeAvailability, boolean isVersionHighEnough, String path, Version version, Result lastCommand ) { this.isAvailable = isAvailable; - this.isComposeAvailable = isComposeAvailable; - this.isComposeV2Available = isComposeV2Available; + this.dockerComposeAvailability = dockerComposeAvailability; this.isVersionHighEnough = isVersionHighEnough; this.path = path; this.version = version; this.lastCommand = lastCommand; } + + public boolean isDockerComposeAvailable() { + return dockerComposeAvailability != null; + } + } + + /** + * Marker interface for Docker Compose availability + */ + private interface DockerComposeAvailability { + /** + * Detects Docker Compose V1/V2 availability + */ + private static Optional<DockerComposeAvailability> detect(ExecOperations execOperations, String dockerPath) { + Optional<String> composePath = getDockerComposePath(); + if (composePath.isPresent()) { + if (runCommand(execOperations, composePath.get(), "version").isSuccess()) { + return Optional.of(new DockerComposeV1Availability()); + } + } + + if (runCommand(execOperations, dockerPath, "compose", "version").isSuccess()) { + return Optional.of(new DockerComposeV2Availability()); + } + + return Optional.empty(); + } + + /** + * Searches the entries in {@link #DOCKER_COMPOSE_BINARIES} for the Docker Compose CLI. This method does + * not check whether the installation appears usable, see {@link #getDockerAvailability()} instead. + * + * @return the path to a CLI, if available.
+ */ + private static Optional<String> getDockerComposePath() { + // Check if the Docker binary exists + return Arrays.asList(DOCKER_COMPOSE_BINARIES).stream().filter(path -> new File(path).exists()).findFirst(); + } + } + /** + * Docker Compose V1 availability + */ + public static class DockerComposeV1Availability implements DockerComposeAvailability {} + + /** + * Docker Compose V2 availability + */ + public static class DockerComposeV2Availability implements DockerComposeAvailability {} + /** * This class models the result of running a command. It captures the exit code, standard output and standard error. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java index a74781ac44720..6842f0e541abe 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java @@ -229,8 +229,7 @@ public Set<File> getJarsToScan() { @TaskAction public void runThirdPartyAudit() throws IOException { Set<File> jars = getJarsToScan(); - - extractJars(jars); + Set<File> extractedJars = extractJars(jars); final String forbiddenApisOutput = runForbiddenAPIsCli(); @@ -248,7 +247,7 @@ public void runThirdPartyAudit() throws IOException { Set<String> jdkJarHellClasses = null; if (this.jarHellEnabled) { - jdkJarHellClasses = runJdkJarHellCheck(); + jdkJarHellClasses = runJdkJarHellCheck(extractedJars); } if (missingClassExcludes != null) { @@ -301,16 +300,26 @@ private void logForbiddenAPIsOutput(String forbiddenApisOutput) { getLogger().error("Forbidden APIs output:\n{}==end of forbidden APIs==", forbiddenApisOutput); } - private void extractJars(Set<File> jars) { + /** + * Extract project jars to build directory as specified by getJarExpandDir. + * Handle multi release jars by keeping versions closest to `targetCompatibility` version. + * @param jars to extract to build dir + * @return File set of extracted jars + */ + private Set<File> extractJars(Set<File> jars) { + Set<File> extractedJars = new TreeSet<>(); File jarExpandDir = getJarExpandDir(); // We need to clean up to make sure old dependencies don't linger getProject().delete(jarExpandDir); jars.forEach(jar -> { + String jarPrefix = jar.getName().replace(".jar", ""); + File jarSubDir = new File(jarExpandDir, jarPrefix); + extractedJars.add(jarSubDir); FileTree jarFiles = getProject().zipTree(jar); getProject().copy(spec -> { spec.from(jarFiles); - spec.into(jarExpandDir); + spec.into(jarSubDir); // exclude classes from multi release jars spec.exclude("META-INF/versions/**"); }); @@ -329,7 +338,7 @@ Integer.parseInt(targetCompatibility.get().getMajorVersion()) ).forEach(majorVersion -> getProject().copy(spec -> { spec.from(getProject().zipTree(jar)); - spec.into(jarExpandDir); + spec.into(jarSubDir); String metaInfPrefix = "META-INF/versions/" + majorVersion; spec.include(metaInfPrefix + "/**"); // Drop the version specific prefix @@ -337,6 +346,8 @@ spec.setIncludeEmptyDirs(false); })); }); + + return extractedJars; } private void assertNoJarHell(Set<String> jdkJarHellClasses) { @@ -398,7 +409,12 @@ private String runForbiddenAPIsCli() throws IOException { return forbiddenApisOutput; } - private Set<String> runJdkJarHellCheck() throws IOException { + /** + * Execute java with JDK_JAR_HELL_MAIN_CLASS against provided jars with OpenSearch core in the classpath. + * @param jars to scan for jarHell violations.
+ * @return standard out of jarHell process. + */ + private Set<String> runJdkJarHellCheck(Set<File> jars) throws IOException { ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class); ExecResult execResult = execOps.getExecOps().javaexec(spec -> { @@ -407,9 +423,8 @@ private Set<String> runJdkJarHellCheck() throws IOException { spec.classpath( getRuntimeConfiguration(), getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME) ); - spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS); - spec.args(getJarExpandDir()); + spec.args(jars); spec.setIgnoreExitValue(true); if (javaHome != null) { spec.setExecutable(javaHome + "/bin/java"); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java index f65e231cd2e50..79b5f837c75ce 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java @@ -43,6 +43,7 @@ import org.opensearch.gradle.SystemPropertyCommandLineArgumentProvider; import org.opensearch.gradle.docker.DockerSupportPlugin; import org.opensearch.gradle.docker.DockerSupportService; +import org.opensearch.gradle.docker.DockerSupportService.DockerComposeV2Availability; import org.opensearch.gradle.info.BuildParams; import org.opensearch.gradle.precommit.TestingConventionsTasks; import org.opensearch.gradle.util.GradleUtils; @@ -171,11 +172,8 @@ public void execute(Task task) { .findFirst(); composeExtension.getExecutable().set(dockerCompose.isPresent() ? dockerCompose.get() : "/usr/bin/docker"); - if (dockerSupport.get().getDockerAvailability().isComposeV2Available) { - composeExtension.getUseDockerComposeV2().set(true); - } else if (dockerSupport.get().getDockerAvailability().isComposeAvailable) { - composeExtension.getUseDockerComposeV2().set(false); - } + composeExtension.getUseDockerComposeV2() + .set(dockerSupport.get().getDockerAvailability().dockerComposeAvailability instanceof DockerComposeV2Availability); tasks.named("composeUp").configure(t -> { // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions @@ -232,8 +230,7 @@ private void maybeSkipTask(Provider<DockerSupportService> dockerSupport, TaskPro private void maybeSkipTask(Provider<DockerSupportService> dockerSupport, Task task) { task.onlyIf(spec -> { - boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isComposeV2Available - || dockerSupport.get().getDockerAvailability().isComposeAvailable; + boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isDockerComposeAvailable(); if (isComposeAvailable == false) { LOGGER.info("Task {} requires docker-compose but it is unavailable.
Task will be skipped.", task.getPath()); } diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 3db2a6e7c2733..83bec727b1502 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.24.2" + implementation "org.apache.logging.log4j:log4j-core:2.24.3" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 25af649bb4aed..e1fa4de5a0caa 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.10.0" + id "com.netflix.nebula.ospackage-base" version "11.10.1" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 2658d2aa8b561..f357fb248520c 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -1,6 +1,6 @@ [versions] opensearch = "3.0.0" -lucene = "9.12.0" +lucene = "9.12.1" bundled_jdk_vendor = "adoptium" bundled_jdk = "23.0.1+11" @@ -27,13 +27,14 @@ google_http_client = "1.44.1" google_auth = "1.29.0" tdigest = "3.3" hdrhistogram = "2.2.2" -grpc = "1.68.0" +grpc = "1.68.2" # when updating the JNA version, also update the version in buildSrc/build.gradle jna = "5.13.0" netty = "4.1.115.Final" joda = "2.12.7" +roaringbitmap = "1.3.0" # project reactor reactor_netty = "1.1.23" @@ -83,3 +84,52 @@ opentelemetrysemconv = "1.27.0-alpha" # arrow dependencies arrow = "17.0.0" flatbuffers = "2.0.0" + +[libraries] +hdrhistogram = { group = "org.hdrhistogram", name = "HdrHistogram", version.ref = "hdrhistogram" } +jakartaannotation = { group = "jakarta.annotation", name = "jakarta.annotation-api", version.ref = "jakarta_annotation" } +jodatime = { group = "joda-time", name = "joda-time", version.ref = "joda" } +jna = { group = "net.java.dev.jna", name = "jna", version.ref = "jna" } +jtscore = { group = "org.locationtech.jts", name = "jts-core", version.ref = "jts" } +jzlib = { group = "com.jcraft", name = "jzlib", version.ref = "jzlib" } +log4japi = { group = "org.apache.logging.log4j", name = "log4j-api", version.ref = "log4j" } +log4jjul = { group = "org.apache.logging.log4j", name = "log4j-jul", version.ref = "log4j" } +log4jcore = { group = "org.apache.logging.log4j", name = "log4j-core", version.ref = "log4j" } +lucene-core = { group = "org.apache.lucene", name = "lucene-core", version.ref = "lucene" } +lucene-analysis-common = { group = "org.apache.lucene", name = "lucene-analysis-common", version.ref = "lucene" } +lucene-backward-codecs = { group = "org.apache.lucene", name = "lucene-backward-codecs", version.ref = "lucene" } +lucene-grouping = { group = "org.apache.lucene", name = "lucene-grouping", version.ref = "lucene" } +lucene-highlighter = { group = "org.apache.lucene", name = "lucene-highlighter", version.ref = "lucene" } +lucene-join = { group = "org.apache.lucene", name = "lucene-join", version.ref = "lucene" } +lucene-memory = { group = "org.apache.lucene", name = "lucene-memory", version.ref = "lucene" } +lucene-misc = { group = "org.apache.lucene", name = "lucene-misc", version.ref = "lucene" } +lucene-queries = { group = "org.apache.lucene", name = "lucene-queries", version.ref = "lucene" } 
+lucene-queryparser = { group = "org.apache.lucene", name = "lucene-queryparser", version.ref = "lucene" } +lucene-sandbox = { group = "org.apache.lucene", name = "lucene-sandbox", version.ref = "lucene" } +lucene-spatial-extras = { group = "org.apache.lucene", name = "lucene-spatial-extras", version.ref = "lucene" } +lucene-spatial3d = { group = "org.apache.lucene", name = "lucene-spatial3d", version.ref = "lucene" } +lucene-suggest = { group = "org.apache.lucene", name = "lucene-suggest", version.ref = "lucene" } +protobuf = { group = "com.google.protobuf", name = "protobuf-java", version.ref = "protobuf" } +reactivestreams = { group = "org.reactivestreams", name = "reactive-streams", version.ref = "reactivestreams" } +reactorcore = { group = "io.projectreactor", name = "reactor-core", version.ref = "reactor" } +roaringbitmap = { group = "org.roaringbitmap", name = "RoaringBitmap", version.ref = "roaringbitmap" } +spatial4j = { group = "org.locationtech.spatial4j", name = "spatial4j", version.ref = "spatial4j" } +tdigest = { group = "com.tdunning", name = "t-digest", version.ref = "tdigest" } + +[bundles] +lucene = [ + "lucene-core", + "lucene-analysis-common", + "lucene-backward-codecs", + "lucene-grouping", + "lucene-highlighter", + "lucene-join", + "lucene-memory", + "lucene-misc", + "lucene-queries", + "lucene-queryparser", + "lucene-sandbox", + "lucene-spatial-extras", + "lucene-spatial3d", + "lucene-suggest" +] diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 751da941d25dd..5a98a60e806ea 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -170,7 +170,6 @@ configure([ project(":libs:opensearch-common"), project(":libs:opensearch-core"), project(":libs:opensearch-compress"), - project(":plugins:events-correlation-engine"), project(":server") ]) { project.tasks.withType(MissingJavadocTask) { diff --git a/libs/core/licenses/lucene-core-9.12.0.jar.sha1 b/libs/core/licenses/lucene-core-9.12.0.jar.sha1 deleted file mode 100644 index e55f896dedb63..0000000000000 --- a/libs/core/licenses/lucene-core-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fdb055d569bb20bfce9618fe2b01c29bab7f290c \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.12.1.jar.sha1 b/libs/core/licenses/lucene-core-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2521c91a81d64 --- /dev/null +++ b/libs/core/licenses/lucene-core-9.12.1.jar.sha1 @@ -0,0 +1 @@ +91447c90c1180122142773b5baddaf8547124794 \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index ec0a18dbbf882..dd804fcc6db70 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -112,9 +112,9 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_17_1 = new Version(2170199, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_17_2 = new Version(2170299, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_18_0 = new Version(2180099, org.apache.lucene.util.Version.LUCENE_9_12_0); - public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_0); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0); - public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_0); + public static
final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_1); + public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_1); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version CURRENT = V_3_0_0; public static Version fromId(int id) { diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 deleted file mode 100644 index 476049a66cc08..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ba843374a0aab3dfe0b11cb28b251844d85bf5b \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..9e0a5c2d7df21 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 @@ -0,0 +1 @@ +667ee99f31c8e42eac70b0adcf8deb4232935430 \ No newline at end of file diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java index 55dc23f665d2e..b3f6f7d0730fd 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java @@ -66,6 +66,7 @@ import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptEngine; import org.opensearch.script.ScriptService; +import org.opensearch.script.UpdateScript; import org.opensearch.search.aggregations.pipeline.MovingFunctionScript; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -109,6 +110,11 @@ public final class PainlessModulePlugin extends Plugin implements ScriptPlugin, ingest.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.ingest.txt")); map.put(IngestScript.CONTEXT, ingest); + // Functions available to update scripts + List<Allowlist> update = new ArrayList<>(Allowlist.BASE_ALLOWLISTS); + update.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.update.txt")); + map.put(UpdateScript.CONTEXT, update); + // Functions available to derived fields List<Allowlist> derived = new ArrayList<>(Allowlist.BASE_ALLOWLISTS); derived.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.derived.txt")); diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt new file mode 100644 index 0000000000000..144614b3862b0 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt @@ -0,0 +1,14 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license.
+# + +# This file contains an allowlist for the update scripts + +class java.lang.String { + String org.opensearch.painless.api.Augmentation sha1() + String org.opensearch.painless.api.Augmentation sha256() +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml index cb118ed9d562f..e0f3068810ed8 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml @@ -123,3 +123,39 @@ - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.type: "illegal_argument_exception" } - match: { error.reason: "Iterable object is self-referencing itself" } + +# update script supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Update script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: test_1 + id: 1 + body: + foo: bar + + - do: + update: + index: test_1 + id: 1 + body: + script: + lang: painless + source: "ctx._source.foo_sha1 = ctx._source.foo.sha1();ctx._source.foo_sha256 = ctx._source.foo.sha256();" + + - match: { _index: test_1 } + - match: { _id: "1" } + - match: { _version: 2 } + + - do: + get: + index: test_1 + id: 1 + + - match: { _source.foo: bar } + - match: { _source.foo_sha1: "62cdb7020ff920e5aa642c3d4066950dd1f01f4d" } + - match: { _source.foo_sha256: "fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9" } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml index 9c38b13bb1ff0..5c218aa00ca4f 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml @@ -440,3 +440,41 @@ lang: painless source: syntax errors are fun! 
- match: {error.reason: 'compile error'} + +# script in reindex supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: twitter + id: 1 + body: { "user": "foobar" } + - do: + indices.refresh: {} + + - do: + reindex: + refresh: true + body: + source: + index: twitter + dest: + index: new_twitter + script: + lang: painless + source: ctx._source.user_sha1 = ctx._source.user.sha1();ctx._source.user_sha256 = ctx._source.user.sha256() + - match: {created: 1} + - match: {noops: 0} + + - do: + get: + index: new_twitter + id: 1 + + - match: { _source.user: foobar } + - match: { _source.user_sha1: "8843d7f92416211de9ebb963ff4ce28125932878" } + - match: { _source.user_sha256: "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml index a8de49d812677..b52b1428e08bb 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml @@ -432,3 +432,38 @@ lang: painless source: syntax errors are fun! - match: {error.reason: 'compile error'} + +# script in update_by_query supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: twitter + id: 1 + body: { "user": "foobar" } + - do: + indices.refresh: {} + + - do: + update_by_query: + index: twitter + refresh: true + body: + script: + lang: painless + source: ctx._source.user_sha1 = ctx._source.user.sha1();ctx._source.user_sha256 = ctx._source.user.sha256() + - match: {updated: 1} + - match: {noops: 0} + + - do: + get: + index: twitter + id: 1 + + - match: { _source.user: foobar } + - match: { _source.user_sha1: "8843d7f92416211de9ebb963ff4ce28125932878" } + - match: { _source.user_sha256: "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" } diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 deleted file mode 100644 index 31398b27708a3..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a9232b6a4882979118d3281b98dfdb6e0e1cb5ca \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..acb73de8b5dc9 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 @@ -0,0 +1 @@ +abaef4767ad64289e62abdd4606bf6ed2ddea0fd \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 deleted file mode 100644 index fa4c9d2d09d6e..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 +++ /dev/null @@ 
-1 +0,0 @@ -a3a6950ffc22e76a082e1b3cefb022b9f7870d29 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..916778086a6bd --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 @@ -0,0 +1 @@ +635c41143b896f402589d29e33695dcfabae9cc5 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 deleted file mode 100644 index 576b924286d2d..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e71f85b72ed3939039ba8897b28b065dd11918b9 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..9c057370df5d1 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e265410a6a4d9cd23b2e9c73321e6bd307bc1422 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 deleted file mode 100644 index c8c146bbd0d25..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6baa3ae7ab20d6e644cf0bedb271c50a44c0e259 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..30db9fc8d69e2 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 @@ -0,0 +1 @@ +3787b8edc0cfad21998abc6aeb9d2cbf152b4b26 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 deleted file mode 100644 index 54ea0b19f2a7b..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f183e1e8b1eaaa4dec444774a285bb8b66518522 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..96f8d70e6ee53 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e935f600bf153c46f5725198ca9352c32025f274 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 deleted file mode 100644 index 5442a40f5bba2..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b81a609934e65d12ab9d2d84bc2ea6f56a360e57 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..d6d5f1c2609ff --- /dev/null +++ 
b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 @@ -0,0 +1 @@ +c4e1c94b1adbd1cb9dbdc0d3c2d2c33beabfc777 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 deleted file mode 100644 index 60fd4015cfde0..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bec069f286b45f20b743c81e84202369cd0467e7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..661f3062458e2 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d8e4716dab6d829e7b37a8b185cbd242650aeb9e \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 deleted file mode 100644 index bf45716c5b8ce..0000000000000 --- a/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/events-correlation-engine/build.gradle b/plugins/events-correlation-engine/build.gradle deleted file mode 100644 index c3eff30012b1d..0000000000000 --- a/plugins/events-correlation-engine/build.gradle +++ /dev/null @@ -1,21 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -apply plugin: 'opensearch.java-rest-test' -apply plugin: 'opensearch.internal-cluster-test' - -opensearchplugin { - description 'OpenSearch Events Correlation Engine.' - classname 'org.opensearch.plugin.correlation.EventsCorrelationPlugin' -} - -dependencies { -} diff --git a/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java b/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java deleted file mode 100644 index 028848a91213e..0000000000000 --- a/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation; - -import org.apache.lucene.search.join.ScoreMode; -import org.opensearch.action.admin.cluster.node.info.NodeInfo; -import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; -import org.opensearch.action.search.SearchRequest; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.query.NestedQueryBuilder; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse; -import org.opensearch.plugin.correlation.rules.model.CorrelationQuery; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.plugins.Plugin; -import org.opensearch.plugins.PluginInfo; -import org.opensearch.rest.RestRequest; -import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Assert; - -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * Transport Action tests for events-correlation-plugin - */ -public class EventsCorrelationPluginTransportIT extends OpenSearchIntegTestCase { - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(EventsCorrelationPlugin.class); - } - - /** - * test events-correlation-plugin is installed - */ - public void testPluginsAreInstalled() { - NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); - nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); - NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); - List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes() - .stream() - .flatMap( - (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() - ) - .collect(Collectors.toList()); - Assert.assertTrue( - pluginInfos.stream() - .anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.correlation.EventsCorrelationPlugin")) - ); - } - - /** - * test creating a correlation rule - * @throws Exception Exception - */ - public void testCreatingACorrelationRule() throws Exception { - List<CorrelationQuery> correlationQueries = Arrays.asList( - new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", "@timestamp", List.of("s3")), - new CorrelationQuery("app_logs", "keywords:PermissionDenied", "@timestamp", List.of("others_application")) - ); - CorrelationRule correlationRule = new CorrelationRule("s3 to app logs", correlationQueries); - IndexCorrelationRuleRequest request = new IndexCorrelationRuleRequest(correlationRule, RestRequest.Method.POST); - - IndexCorrelationRuleResponse response = client().execute(IndexCorrelationRuleAction.INSTANCE, request).get(); - Assert.assertEquals(RestStatus.CREATED, response.getStatus()); - - NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery( - "correlate", - QueryBuilders.matchQuery("correlate.index", "s3_access_logs"), - ScoreMode.None - ); - SearchSourceBuilder searchSourceBuilder = new
SearchSourceBuilder(); - searchSourceBuilder.query(queryBuilder); - searchSourceBuilder.fetchSource(true); - - SearchRequest searchRequest = new SearchRequest(); - searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX); - searchRequest.source(searchSourceBuilder); - - SearchResponse searchResponse = client().search(searchRequest).get(); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - } - - /** - * test filtering correlation rules - * @throws Exception Exception - */ - public void testFilteringCorrelationRules() throws Exception { - List<CorrelationQuery> correlationQueries1 = Arrays.asList( - new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", "@timestamp", List.of("s3")), - new CorrelationQuery("app_logs", "keywords:PermissionDenied", "@timestamp", List.of("others_application")) - ); - CorrelationRule correlationRule1 = new CorrelationRule("s3 to app logs", correlationQueries1); - IndexCorrelationRuleRequest request1 = new IndexCorrelationRuleRequest(correlationRule1, RestRequest.Method.POST); - client().execute(IndexCorrelationRuleAction.INSTANCE, request1).get(); - - List<CorrelationQuery> correlationQueries2 = Arrays.asList( - new CorrelationQuery("windows", "host.hostname:EC2AMAZ*", "@timestamp", List.of("windows")), - new CorrelationQuery("app_logs", "endpoint:/customer_records.txt", "@timestamp", List.of("others_application")) - ); - CorrelationRule correlationRule2 = new CorrelationRule("windows to app logs", correlationQueries2); - IndexCorrelationRuleRequest request2 = new IndexCorrelationRuleRequest(correlationRule2, RestRequest.Method.POST); - client().execute(IndexCorrelationRuleAction.INSTANCE, request2).get(); - - NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery( - "correlate", - QueryBuilders.matchQuery("correlate.index", "s3_access_logs"), - ScoreMode.None - ); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(queryBuilder); - searchSourceBuilder.fetchSource(true); - - SearchRequest searchRequest = new SearchRequest(); - searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX); - searchRequest.source(searchSourceBuilder); - - SearchResponse searchResponse = client().search(searchRequest).get(); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - } - - /** - * test creating a correlation rule with no timestamp field - * @throws Exception Exception - */ - @SuppressWarnings("unchecked") - public void testCreatingACorrelationRuleWithNoTimestampField() throws Exception { - List<CorrelationQuery> correlationQueries = Arrays.asList( - new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", null, List.of("s3")), - new CorrelationQuery("app_logs", "keywords:PermissionDenied", null, List.of("others_application")) - ); - CorrelationRule correlationRule = new CorrelationRule("s3 to app logs", correlationQueries); - IndexCorrelationRuleRequest request = new IndexCorrelationRuleRequest(correlationRule, RestRequest.Method.POST); - - IndexCorrelationRuleResponse response = client().execute(IndexCorrelationRuleAction.INSTANCE, request).get(); - Assert.assertEquals(RestStatus.CREATED, response.getStatus()); - - NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery( - "correlate", - QueryBuilders.matchQuery("correlate.index", "s3_access_logs"), - ScoreMode.None - ); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(queryBuilder); - searchSourceBuilder.fetchSource(true); - - SearchRequest searchRequest = new
SearchRequest(); - searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX); - searchRequest.source(searchSourceBuilder); - - SearchResponse searchResponse = client().search(searchRequest).get(); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - Assert.assertEquals( - "_timestamp", - ((List<Map<String, Object>>) (searchResponse.getHits().getHits()[0].getSourceAsMap().get("correlate"))).get(0) - .get("timestampField") - ); - } -} diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java deleted file mode 100644 index 414fe1948f053..0000000000000 --- a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation; - -import org.apache.hc.core5.http.Header; -import org.apache.hc.core5.http.HttpEntity; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.opensearch.client.Request; -import org.opensearch.client.RequestOptions; -import org.opensearch.client.Response; -import org.opensearch.client.ResponseException; -import org.opensearch.client.RestClient; -import org.opensearch.client.WarningsHandler; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.common.Strings; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.IndexSettings; -import org.opensearch.test.rest.OpenSearchRestTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * Correlation Vectors Engine e2e tests - */ -public class CorrelationVectorsEngineIT extends OpenSearchRestTestCase { - - private static final int DIMENSION = 4; - private static final String PROPERTIES_FIELD_NAME = "properties"; - private static final String TYPE_FIELD_NAME = "type"; - private static final String CORRELATION_VECTOR_TYPE = "correlation_vector"; - private static final String DIMENSION_FIELD_NAME = "dimension"; - private static final int M = 16; - private static final int EF_CONSTRUCTION = 128; - private static final String INDEX_NAME = "test-index-1"; - private static final Float[][] TEST_VECTORS = new Float[][] { - { 1.0f, 1.0f, 1.0f, 1.0f }, - { 2.0f, 2.0f, 2.0f, 2.0f }, - { 3.0f, 3.0f, 3.0f, 3.0f } }; - private static final float[][] TEST_QUERY_VECTORS = new float[][] { - { 1.0f, 1.0f, 1.0f, 1.0f }, - { 2.0f, 2.0f, 2.0f, 2.0f }, - { 3.0f, 3.0f, 3.0f, 3.0f } }; - private static final Map<VectorSimilarityFunction, Function<Float, Float>> VECTOR_SIMILARITY_TO_SCORE = Map.of( - VectorSimilarityFunction.EUCLIDEAN, - (similarity) -> 1 / (1 + similarity), - VectorSimilarityFunction.DOT_PRODUCT, - (similarity) -> (1 + similarity) / 2, - VectorSimilarityFunction.COSINE, - (similarity) -> (1 + similarity) / 2 - ); - - /** - * test the e2e storage and query layer of events-correlation-engine - * @throws
IOException IOException - */ - @SuppressWarnings("unchecked") - public void testQuery() throws IOException { - String textField = "text-field"; - String luceneField = "lucene-field"; - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject(PROPERTIES_FIELD_NAME) - .startObject(textField) - .field(TYPE_FIELD_NAME, "text") - .endObject() - .startObject(luceneField) - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, DIMENSION) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .startObject("parameters") - .field("m", M) - .field("ef_construction", EF_CONSTRUCTION) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject(); - - String mapping = builder.toString(); - createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings()); - - for (int idx = 0; idx < TEST_VECTORS.length; ++idx) { - addCorrelationDoc( - INDEX_NAME, - String.valueOf(idx + 1), - List.of(textField, luceneField), - List.of(java.util.UUID.randomUUID().toString(), TEST_VECTORS[idx]) - ); - } - refreshAllIndices(); - Assert.assertEquals(TEST_VECTORS.length, getDocCount(INDEX_NAME)); - - int k = 2; - for (float[] query : TEST_QUERY_VECTORS) { - - String correlationQuery = "{\n" - + " \"query\": {\n" - + " \"correlation\": {\n" - + " \"lucene-field\": {\n" - + " \"vector\": \n" - + Arrays.toString(query) - + " ,\n" - + " \"k\": 2,\n" - + " \"boost\": 1\n" - + " }\n" - + " }\n" - + " }\n" - + "}"; - - Response response = searchCorrelationIndex(INDEX_NAME, correlationQuery, k); - Map responseBody = entityAsMap(response); - Assert.assertEquals(2, ((List) ((Map) responseBody.get("hits")).get("hits")).size()); - @SuppressWarnings("unchecked") - double actualScore1 = Double.parseDouble( - ((List>) ((Map) responseBody.get("hits")).get("hits")).get(0).get("_score").toString() - ); - @SuppressWarnings("unchecked") - double actualScore2 = Double.parseDouble( - ((List>) ((Map) responseBody.get("hits")).get("hits")).get(1).get("_score").toString() - ); - @SuppressWarnings("unchecked") - List hit1 = ((Map>) ((List>) ((Map) responseBody.get("hits")) - .get("hits")).get(0).get("_source")).get(luceneField).stream().map(Double::floatValue).collect(Collectors.toList()); - float[] resultVector1 = new float[hit1.size()]; - for (int i = 0; i < hit1.size(); ++i) { - resultVector1[i] = hit1.get(i); - } - - @SuppressWarnings("unchecked") - List hit2 = ((Map>) ((List>) ((Map) responseBody.get("hits")) - .get("hits")).get(1).get("_source")).get(luceneField).stream().map(Double::floatValue).collect(Collectors.toList()); - float[] resultVector2 = new float[hit2.size()]; - for (int i = 0; i < hit2.size(); ++i) { - resultVector2[i] = hit2.get(i); - } - - double rawScore1 = VectorSimilarityFunction.EUCLIDEAN.compare(resultVector1, query); - Assert.assertEquals(rawScore1, actualScore1, 0.0001); - double rawScore2 = VectorSimilarityFunction.EUCLIDEAN.compare(resultVector2, query); - Assert.assertEquals(rawScore2, actualScore2, 0.0001); - } - } - - /** - * unhappy test for the e2e storage and query layer of events-correlation-engine with no index exist - */ - public void testQueryWithNoIndexExist() { - float[] query = new float[] { 1.0f, 1.0f, 1.0f, 1.0f }; - String correlationQuery = "{\n" - + " \"query\": {\n" - + " \"correlation\": {\n" - + " \"lucene-field\": {\n" - + " \"vector\": \n" - + Arrays.toString(query) - + " ,\n" - + " \"k\": 2,\n" - + " \"boost\": 1\n" - + " }\n" - + " }\n" - 
+ " }\n" - + "}"; - Exception ex = assertThrows(ResponseException.class, () -> { searchCorrelationIndex(INDEX_NAME, correlationQuery, 2); }); - String expectedMessage = String.format(Locale.ROOT, "no such index [%s]", INDEX_NAME); - String actualMessage = ex.getMessage(); - Assert.assertTrue(actualMessage.contains(expectedMessage)); - } - - /** - * unhappy test for the e2e storage and query layer of events-correlation-engine with wrong mapping - */ - public void testQueryWithWrongMapping() throws IOException { - String textField = "text-field"; - String luceneField = "lucene-field"; - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject(PROPERTIES_FIELD_NAME) - .startObject(textField) - .field(TYPE_FIELD_NAME, "text") - .endObject() - .startObject(luceneField) - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field("test", DIMENSION) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .startObject("parameters") - .field("m", M) - .field("ef_construction", EF_CONSTRUCTION) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject(); - - String mapping = builder.toString(); - Exception ex = assertThrows(ResponseException.class, () -> { - createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings()); - }); - - String expectedMessage = String.format( - Locale.ROOT, - "unknown parameter [test] on mapper [%s] of type [correlation_vector]", - luceneField - ); - String actualMessage = ex.getMessage(); - Assert.assertTrue(actualMessage.contains(expectedMessage)); - } - - private String createTestIndexWithMappingJson(RestClient client, String index, String mapping, Settings settings) throws IOException { - Request request = new Request("PUT", "/" + index); - String entity = "{\"settings\": " + Strings.toString(MediaTypeRegistry.JSON, settings); - if (mapping != null) { - entity = entity + ",\"mappings\" : " + mapping; - } - - entity = entity + "}"; - if (!settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) { - expectSoftDeletesWarning(request, index); - } - - request.setJsonEntity(entity); - client.performRequest(request); - return index; - } - - private Settings getCorrelationDefaultIndexSettings() { - return Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).put("index.correlation", true).build(); - } - - private void addCorrelationDoc(String index, String docId, List fieldNames, List vectors) throws IOException { - Request request = new Request("POST", "/" + index + "/_doc/" + docId + "?refresh=true"); - - XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); - for (int i = 0; i < fieldNames.size(); i++) { - builder.field(fieldNames.get(i), vectors.get(i)); - } - builder.endObject(); - - request.setJsonEntity(builder.toString()); - Response response = client().performRequest(request); - assertEquals(request.getEndpoint() + ": failed", RestStatus.CREATED, RestStatus.fromCode(response.getStatusLine().getStatusCode())); - } - - private Response searchCorrelationIndex(String index, String correlationQuery, int resultSize) throws IOException { - Request request = new Request("POST", "/" + index + "/_search"); - - request.addParameter("size", Integer.toString(resultSize)); - request.addParameter("explain", Boolean.toString(true)); - request.addParameter("search_type", "query_then_fetch"); - request.setJsonEntity(correlationQuery); - - Response response = client().performRequest(request); 
- Assert.assertEquals("Search failed", RestStatus.OK, restStatus(response)); - return response; - } - - private int getDocCount(String index) throws IOException { - Response response = makeRequest( - client(), - "GET", - String.format(Locale.getDefault(), "/%s/_count", index), - Collections.emptyMap(), - null - ); - Assert.assertEquals(RestStatus.OK, restStatus(response)); - return Integer.parseInt(entityAsMap(response).get("count").toString()); - } - - private Response makeRequest( - RestClient client, - String method, - String endpoint, - Map params, - HttpEntity entity, - Header... headers - ) throws IOException { - Request request = new Request(method, endpoint); - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - - for (Header header : headers) { - options.addHeader(header.getName(), header.getValue()); - } - request.setOptions(options.build()); - request.addParameters(params); - if (entity != null) { - request.setEntity(entity); - } - return client.performRequest(request); - } - - private RestStatus restStatus(Response response) { - return RestStatus.fromCode(response.getStatusLine().getStatusCode()); - } -} diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java deleted file mode 100644 index 3791a5cdf5db0..0000000000000 --- a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation; - -import org.opensearch.action.search.SearchResponse; -import org.opensearch.client.Request; -import org.opensearch.client.Response; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.test.rest.OpenSearchRestTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -/** - * Rest Action tests for events-correlation-plugin - */ -public class EventsCorrelationPluginRestIT extends OpenSearchRestTestCase { - - /** - * test events-correlation-plugin is installed - * @throws IOException IOException - */ - @SuppressWarnings("unchecked") - public void testPluginsAreInstalled() throws IOException { - Request request = new Request("GET", "/_cat/plugins?s=component&h=name,component,version,description&format=json"); - Response response = client().performRequest(request); - List pluginsList = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.getEntity().getContent() - ).list(); - Assert.assertTrue( - pluginsList.stream() - .map(o -> (Map) o) - .anyMatch(plugin -> plugin.get("component").equals("events-correlation-engine")) - ); - } - - /** - * test creating a correlation rule - * @throws IOException IOException - */ - public void testCreatingACorrelationRule() throws IOException { - Request request = new Request("POST", "/_correlation/rules"); - request.setJsonEntity(sampleCorrelationRule()); - Response response = client().performRequest(request); - - Assert.assertEquals(201, response.getStatusLine().getStatusCode()); - - Map responseMap = entityAsMap(response); - String id = responseMap.get("_id").toString(); - - request = new Request("POST", "/.opensearch-correlation-rules-config/_search"); - request.setJsonEntity(matchIdQuery(id)); - response = client().performRequest(request); - - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - SearchResponse searchResponse = SearchResponse.fromXContent( - createParser(JsonXContent.jsonXContent, response.getEntity().getContent()) - ); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - } - - /** - * test creating a correlation rule with no timestamp field - * @throws IOException IOException - */ - @SuppressWarnings("unchecked") - public void testCreatingACorrelationRuleWithNoTimestampField() throws IOException { - Request request = new Request("POST", "/_correlation/rules"); - request.setJsonEntity(sampleCorrelationRuleWithNoTimestamp()); - Response response = client().performRequest(request); - - Assert.assertEquals(201, response.getStatusLine().getStatusCode()); - - Map responseMap = entityAsMap(response); - String id = responseMap.get("_id").toString(); - - request = new Request("POST", "/.opensearch-correlation-rules-config/_search"); - request.setJsonEntity(matchIdQuery(id)); - response = client().performRequest(request); - - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - SearchResponse searchResponse = SearchResponse.fromXContent( - createParser(JsonXContent.jsonXContent, response.getEntity().getContent()) - ); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - Assert.assertEquals( - "_timestamp", - ((List>) (searchResponse.getHits().getHits()[0].getSourceAsMap().get("correlate"))).get(0) - .get("timestampField") - ); - } - - private 
String sampleCorrelationRule() { - return "{\n" - + " \"name\": \"s3 to app logs\",\n" - + " \"correlate\": [\n" - + " {\n" - + " \"index\": \"s3_access_logs\",\n" - + " \"query\": \"aws.cloudtrail.eventName:ReplicateObject\",\n" - + " \"timestampField\": \"@timestamp\",\n" - + " \"tags\": [\n" - + " \"s3\"\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"index\": \"app_logs\",\n" - + " \"query\": \"keywords:PermissionDenied\",\n" - + " \"timestampField\": \"@timestamp\",\n" - + " \"tags\": [\n" - + " \"others_application\"\n" - + " ]\n" - + " }\n" - + " ]\n" - + "}"; - } - - private String sampleCorrelationRuleWithNoTimestamp() { - return "{\n" - + " \"name\": \"s3 to app logs\",\n" - + " \"correlate\": [\n" - + " {\n" - + " \"index\": \"s3_access_logs\",\n" - + " \"query\": \"aws.cloudtrail.eventName:ReplicateObject\",\n" - + " \"tags\": [\n" - + " \"s3\"\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"index\": \"app_logs\",\n" - + " \"query\": \"keywords:PermissionDenied\",\n" - + " \"tags\": [\n" - + " \"others_application\"\n" - + " ]\n" - + " }\n" - + " ]\n" - + "}"; - } - - private String matchIdQuery(String id) { - return "{\n" + " \"query\" : {\n" + " \"match\":{\n" + " \"_id\": \"" + id + "\"\n" + " }\n" + " }\n" + "}"; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java deleted file mode 100644 index 9637042974d03..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation; - -import org.opensearch.action.ActionRequest; -import org.opensearch.client.Client; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.IndexScopedSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsFilter; -import org.opensearch.core.action.ActionResponse; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.env.Environment; -import org.opensearch.env.NodeEnvironment; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.codec.CodecServiceFactory; -import org.opensearch.index.mapper.Mapper; -import org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecService; -import org.opensearch.plugin.correlation.core.index.mapper.CorrelationVectorFieldMapper; -import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; -import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryBuilder; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.resthandler.RestIndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.transport.TransportIndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.settings.EventsCorrelationSettings; -import org.opensearch.plugin.correlation.utils.CorrelationRuleIndices; -import org.opensearch.plugins.ActionPlugin; -import org.opensearch.plugins.EnginePlugin; -import org.opensearch.plugins.MapperPlugin; -import org.opensearch.plugins.Plugin; -import org.opensearch.plugins.SearchPlugin; -import org.opensearch.repositories.RepositoriesService; -import org.opensearch.rest.RestController; -import org.opensearch.rest.RestHandler; -import org.opensearch.script.ScriptService; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.watcher.ResourceWatcherService; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.function.Supplier; - -/** - * Plugin class for events-correlation-engine - */ -public class EventsCorrelationPlugin extends Plugin implements ActionPlugin, MapperPlugin, SearchPlugin, EnginePlugin { - - /** - * events-correlation-engine base uri - */ - public static final String PLUGINS_BASE_URI = "/_correlation"; - /** - * events-correlation-engine rules uri - */ - public static final String CORRELATION_RULES_BASE_URI = PLUGINS_BASE_URI + "/rules"; - - private CorrelationRuleIndices correlationRuleIndices; - - /** - * Default constructor - */ - public EventsCorrelationPlugin() {} - - @Override - public Collection createComponents( - Client client, - ClusterService clusterService, - ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, - ScriptService scriptService, - NamedXContentRegistry xContentRegistry, - Environment environment, - NodeEnvironment nodeEnvironment, - NamedWriteableRegistry namedWriteableRegistry, - IndexNameExpressionResolver indexNameExpressionResolver, - Supplier repositoriesServiceSupplier - ) { - correlationRuleIndices = new CorrelationRuleIndices(client, clusterService); - return 
List.of(correlationRuleIndices); - } - - @Override - public List getRestHandlers( - Settings settings, - RestController restController, - ClusterSettings clusterSettings, - IndexScopedSettings indexScopedSettings, - SettingsFilter settingsFilter, - IndexNameExpressionResolver indexNameExpressionResolver, - Supplier nodesInCluster - ) { - return List.of(new RestIndexCorrelationRuleAction()); - } - - @Override - public Map getMappers() { - return Collections.singletonMap(CorrelationVectorFieldMapper.CONTENT_TYPE, new VectorFieldMapper.TypeParser()); - } - - @Override - public Optional getCustomCodecServiceFactory(IndexSettings indexSettings) { - if (indexSettings.getValue(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING)) { - return Optional.of(CorrelationCodecService::new); - } - return Optional.empty(); - } - - @Override - public List> getQueries() { - return Collections.singletonList( - new QuerySpec<>( - CorrelationQueryBuilder.NAME_FIELD.getPreferredName(), - CorrelationQueryBuilder::new, - CorrelationQueryBuilder::parse - ) - ); - } - - @Override - public List> getActions() { - return List.of(new ActionPlugin.ActionHandler<>(IndexCorrelationRuleAction.INSTANCE, TransportIndexCorrelationRuleAction.class)); - } - - @Override - public List> getSettings() { - return List.of(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING, EventsCorrelationSettings.CORRELATION_TIME_WINDOW); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java deleted file mode 100644 index fef9200a73091..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index; - -import org.apache.lucene.index.VectorSimilarityFunction; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ToXContentFragment; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.mapper.MapperParsingException; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; - -/** - * Defines vector similarity function, m and ef_construction hyper parameters field mappings for correlation_vector type. 
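A hedged sketch of the `correlation_vector` mapping fragment that `CorrelationParamsContext.parse()` consumes, reconstructed from the `XContentBuilder` calls in `CorrelationVectorsEngineIT` above (field names and values come from that test; anything else is illustrative):

```java
// dimension 4, Euclidean similarity, HNSW hyper-parameters m=16 and ef_construction=128
String mapping = "{"
    + "  \"properties\": {"
    + "    \"lucene-field\": {"
    + "      \"type\": \"correlation_vector\","
    + "      \"dimension\": 4,"
    + "      \"correlation_ctx\": {"
    + "        \"similarityFunction\": \"EUCLIDEAN\","
    + "        \"parameters\": { \"m\": 16, \"ef_construction\": 128 }"
    + "      }"
    + "    }"
    + "  }"
    + "}";
```

Per the parsing code below, `similarityFunction` defaults to `EUCLIDEAN` and both keys are optional; an unknown similarity name raises a `MapperParsingException`.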
- * - * @opensearch.internal - */ -public class CorrelationParamsContext implements ToXContentFragment, Writeable { - - /** - * Vector Similarity Function field - */ - public static final String VECTOR_SIMILARITY_FUNCTION = "similarityFunction"; - /** - * Parameters field to define m and ef_construction - */ - public static final String PARAMETERS = "parameters"; - - private final VectorSimilarityFunction similarityFunction; - private final Map parameters; - - /** - * Parameterized ctor for CorrelationParamsContext - * @param similarityFunction Vector Similarity Function - * @param parameters Parameters to define m and ef_construction - */ - public CorrelationParamsContext(VectorSimilarityFunction similarityFunction, Map parameters) { - this.similarityFunction = similarityFunction; - this.parameters = parameters; - } - - /** - * Parameterized ctor for CorrelationParamsContext - * @param sin StreamInput - * @throws IOException IOException - */ - public CorrelationParamsContext(StreamInput sin) throws IOException { - this.similarityFunction = VectorSimilarityFunction.valueOf(sin.readString()); - if (sin.available() > 0) { - this.parameters = sin.readMap(); - } else { - this.parameters = null; - } - } - - /** - * Parse into CorrelationParamsContext - * @param in Object - * @return CorrelationParamsContext - */ - public static CorrelationParamsContext parse(Object in) { - if (!(in instanceof Map)) { - throw new MapperParsingException("Unable to parse CorrelationParamsContext"); - } - - @SuppressWarnings("unchecked") - Map contextMap = (Map) in; - VectorSimilarityFunction similarityFunction = VectorSimilarityFunction.EUCLIDEAN; - Map parameters = new HashMap<>(); - - if (contextMap.containsKey(VECTOR_SIMILARITY_FUNCTION)) { - Object value = contextMap.get(VECTOR_SIMILARITY_FUNCTION); - - if (value != null && !(value instanceof String)) { - throw new MapperParsingException(String.format(Locale.getDefault(), "%s must be a string", VECTOR_SIMILARITY_FUNCTION)); - } - - try { - similarityFunction = VectorSimilarityFunction.valueOf((String) value); - } catch (IllegalArgumentException ex) { - throw new MapperParsingException(String.format(Locale.getDefault(), "Invalid %s: %s", VECTOR_SIMILARITY_FUNCTION, value)); - } - } - if (contextMap.containsKey(PARAMETERS)) { - Object value = contextMap.get(PARAMETERS); - if (!(value instanceof Map)) { - throw new MapperParsingException("Unable to parse parameters for Correlation context"); - } - - @SuppressWarnings("unchecked") - Map valueMap = (Map) value; - parameters.putAll(valueMap); - } - return new CorrelationParamsContext(similarityFunction, parameters); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(VECTOR_SIMILARITY_FUNCTION, similarityFunction.name()); - if (params == null) { - builder.field(PARAMETERS, (String) null); - } else { - builder.startObject(PARAMETERS); - for (Map.Entry parameter : parameters.entrySet()) { - builder.field(parameter.getKey(), parameter.getValue()); - } - builder.endObject(); - } - builder.endObject(); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(similarityFunction.name()); - if (this.parameters != null) { - out.writeMap(parameters); - } - } - - /** - * get Vector Similarity Function - * @return Vector Similarity Function - */ - public VectorSimilarityFunction getSimilarityFunction() { - return similarityFunction; - } - - /** - * Get Parameters to 
define m and ef_construction - * @return Parameters to define m and ef_construction - */ - public Map getParameters() { - return parameters; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java deleted file mode 100644 index 61efd6b9a87ae..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index; - -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexableFieldType; -import org.apache.lucene.util.BytesRef; -import org.opensearch.common.io.stream.BytesStreamOutput; - -import java.io.IOException; - -/** - * Generic Vector Field defining a correlation vector name, float array. - * - * @opensearch.internal - */ -public class VectorField extends Field { - - /** - * Parameterized ctor for VectorField - * @param name name of the field - * @param value float array value for the field - * @param type type of the field - */ - public VectorField(String name, float[] value, IndexableFieldType type) { - super(name, new BytesRef(), type); - try { - final byte[] floatToByte = floatToByteArray(value); - this.setBytesValue(floatToByte); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - } - - /** - * converts float array based vector to byte array. - * @param input float array - * @return byte array - */ - protected static byte[] floatToByteArray(float[] input) throws IOException { - BytesStreamOutput objectStream = new BytesStreamOutput(); - objectStream.writeFloatArray(input); - return objectStream.bytes().toBytesRef().bytes; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java deleted file mode 100644 index 00b55eb75995c..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec; - -import org.apache.lucene.codecs.KnnVectorsFormat; -import org.apache.lucene.codecs.perfield.PerFieldKnnVectorsFormat; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.mapper.CorrelationVectorFieldMapper; - -import java.util.Locale; -import java.util.Map; -import java.util.Optional; -import java.util.function.BiFunction; -import java.util.function.Supplier; - -/** - * Class to define the hyper-parameters m and ef_construction for insert and store of correlation vectors into HNSW graphs based lucene index. 
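A minimal sketch of the per-field dispatch this class implements, assuming Lucene 9.9 on the classpath: non-correlation fields fall through to the default format supplier, while correlation vector fields get an HNSW format built from the mapping's `m` and `ef_construction`. The standalone method shape is illustrative; the real class resolves the field type through `MapperService`:

```java
import org.apache.lucene.codecs.KnnVectorsFormat;
import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat;

import java.util.Map;

class FormatResolutionSketch {
    static KnnVectorsFormat resolve(boolean isCorrelationField, Map<String, Object> params) {
        if (!isCorrelationField) {
            // default supplier; for the 9.9.0 codec version this is Lucene99HnswVectorsFormat::new
            return new Lucene99HnswVectorsFormat();
        }
        // fall back to Lucene's defaults (m=16, beamWidth=100) when a key is absent
        int m = params.containsKey("m")
            ? (int) params.get("m")
            : Lucene99HnswVectorsFormat.DEFAULT_MAX_CONN;
        int efConstruction = params.containsKey("ef_construction")
            ? (int) params.get("ef_construction")
            : Lucene99HnswVectorsFormat.DEFAULT_BEAM_WIDTH;
        return new Lucene99HnswVectorsFormat(m, efConstruction);
    }
}
```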
- * - * @opensearch.internal - */ -public abstract class BasePerFieldCorrelationVectorsFormat extends PerFieldKnnVectorsFormat { - /** - * the hyper-parameters for constructing HNSW graphs. - * HnswGraph.html - */ - public static final String METHOD_PARAMETER_M = "m"; - /** - * the hyper-parameters for constructing HNSW graphs. - * HnswGraph.html - */ - public static final String METHOD_PARAMETER_EF_CONSTRUCTION = "ef_construction"; - - private final Optional mapperService; - private final int defaultMaxConnections; - private final int defaultBeamWidth; - private final Supplier defaultFormatSupplier; - private final BiFunction formatSupplier; - - /** - * Parameterized ctor of BasePerFieldCorrelationVectorsFormat - * @param mapperService mapper service - * @param defaultMaxConnections default m - * @param defaultBeamWidth default ef_construction - * @param defaultFormatSupplier default format supplier - * @param formatSupplier format supplier - */ - public BasePerFieldCorrelationVectorsFormat( - Optional mapperService, - int defaultMaxConnections, - int defaultBeamWidth, - Supplier defaultFormatSupplier, - BiFunction formatSupplier - ) { - this.mapperService = mapperService; - this.defaultMaxConnections = defaultMaxConnections; - this.defaultBeamWidth = defaultBeamWidth; - this.defaultFormatSupplier = defaultFormatSupplier; - this.formatSupplier = formatSupplier; - } - - @Override - public KnnVectorsFormat getKnnVectorsFormatForField(String field) { - if (!isCorrelationVectorFieldType(field)) { - return defaultFormatSupplier.get(); - } - - var type = (CorrelationVectorFieldMapper.CorrelationVectorFieldType) mapperService.orElseThrow( - () -> new IllegalArgumentException( - String.format(Locale.getDefault(), "Cannot read field type for field [%s] because mapper service is not available", field) - ) - ).fieldType(field); - - var params = type.getCorrelationParams().getParameters(); - int maxConnections = getMaxConnections(params); - int beamWidth = getBeamWidth(params); - - return formatSupplier.apply(maxConnections, beamWidth); - } - - private boolean isCorrelationVectorFieldType(final String field) { - return mapperService.isPresent() - && mapperService.get().fieldType(field) instanceof CorrelationVectorFieldMapper.CorrelationVectorFieldType; - } - - private int getMaxConnections(final Map params) { - if (params != null && params.containsKey(METHOD_PARAMETER_M)) { - return (int) params.get(METHOD_PARAMETER_M); - } - return defaultMaxConnections; - } - - private int getBeamWidth(final Map params) { - if (params != null && params.containsKey(METHOD_PARAMETER_EF_CONSTRUCTION)) { - return (int) params.get(METHOD_PARAMETER_EF_CONSTRUCTION); - } - return defaultBeamWidth; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java deleted file mode 100644 index 09d5e1d2c19e3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation.core.index.codec; - -import org.apache.lucene.codecs.Codec; -import org.opensearch.index.codec.CodecService; -import org.opensearch.index.codec.CodecServiceConfig; -import org.opensearch.index.mapper.MapperService; - -/** - * custom Correlation Codec Service - * - * @opensearch.internal - */ -public class CorrelationCodecService extends CodecService { - - private final MapperService mapperService; - - /** - * Parameterized ctor for CorrelationCodecService - * @param codecServiceConfig Generic codec service config - */ - public CorrelationCodecService(CodecServiceConfig codecServiceConfig) { - super(codecServiceConfig.getMapperService(), codecServiceConfig.getIndexSettings(), codecServiceConfig.getLogger()); - mapperService = codecServiceConfig.getMapperService(); - } - - @Override - public Codec codec(String name) { - return CorrelationCodecVersion.current().getCorrelationCodecSupplier().apply(super.codec(name), mapperService); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java deleted file mode 100644 index 9dbb695f14b78..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec; - -import org.apache.lucene.backward_codecs.lucene99.Lucene99Codec; -import org.apache.lucene.codecs.Codec; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec; -import org.opensearch.plugin.correlation.core.index.codec.correlation990.PerFieldCorrelationVectorsFormat; - -import java.util.Optional; -import java.util.function.BiFunction; -import java.util.function.Supplier; - -/** - * CorrelationCodecVersion enum - * - * @opensearch.internal - */ -public enum CorrelationCodecVersion { - V_9_9_0( - "CorrelationCodec", - new Lucene99Codec(), - new PerFieldCorrelationVectorsFormat(Optional.empty()), - (userCodec, mapperService) -> new CorrelationCodec(userCodec, new PerFieldCorrelationVectorsFormat(Optional.of(mapperService))), - CorrelationCodec::new - ); - - private static final CorrelationCodecVersion CURRENT = V_9_9_0; - private final String codecName; - private final Codec defaultCodecDelegate; - private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; - private final BiFunction correlationCodecSupplier; - private final Supplier defaultCorrelationCodecSupplier; - - CorrelationCodecVersion( - String codecName, - Codec defaultCodecDelegate, - PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat, - BiFunction correlationCodecSupplier, - Supplier defaultCorrelationCodecSupplier - ) { - this.codecName = codecName; - this.defaultCodecDelegate = defaultCodecDelegate; - this.perFieldCorrelationVectorsFormat = perFieldCorrelationVectorsFormat; - this.correlationCodecSupplier = correlationCodecSupplier; - this.defaultCorrelationCodecSupplier = defaultCorrelationCodecSupplier; - } - - /** - * get codec name - * @return codec name - */ - 
public String getCodecName() { - return codecName; - } - - /** - * get default codec delegate - * @return default codec delegate - */ - public Codec getDefaultCodecDelegate() { - return defaultCodecDelegate; - } - - /** - * get correlation vectors format - * @return correlation vectors format - */ - public PerFieldCorrelationVectorsFormat getPerFieldCorrelationVectorsFormat() { - return perFieldCorrelationVectorsFormat; - } - - /** - * get correlation codec supplier - * @return correlation codec supplier - */ - public BiFunction getCorrelationCodecSupplier() { - return correlationCodecSupplier; - } - - /** - * get default correlation codec supplier - * @return default correlation codec supplier - */ - public Supplier getDefaultCorrelationCodecSupplier() { - return defaultCorrelationCodecSupplier; - } - - /** - * static method to get correlation codec version - * @return correlation codec version - */ - public static final CorrelationCodecVersion current() { - return CURRENT; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java deleted file mode 100644 index 022972e2e06c3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec.correlation990; - -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.FilterCodec; -import org.apache.lucene.codecs.KnnVectorsFormat; -import org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion; - -/** - * Correlation Codec class - * - * @opensearch.internal - */ -public class CorrelationCodec extends FilterCodec { - private static final CorrelationCodecVersion VERSION = CorrelationCodecVersion.V_9_9_0; - private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; - - /** - * ctor for CorrelationCodec - */ - public CorrelationCodec() { - this(VERSION.getDefaultCodecDelegate(), VERSION.getPerFieldCorrelationVectorsFormat()); - } - - /** - * Parameterized ctor for CorrelationCodec - * @param delegate codec delegate - * @param perFieldCorrelationVectorsFormat correlation vectors format - */ - public CorrelationCodec(Codec delegate, PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat) { - super(VERSION.getCodecName(), delegate); - this.perFieldCorrelationVectorsFormat = perFieldCorrelationVectorsFormat; - } - - @Override - public KnnVectorsFormat knnVectorsFormat() { - return perFieldCorrelationVectorsFormat; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java deleted file mode 100644 index 89cc0b614a1a5..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java +++ /dev/null @@ 
-1,35 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec.correlation990; - -import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat; - -import java.util.Optional; - -/** - * Class to define the hyper-parameters m and ef_construction for insert and store of correlation vectors into HNSW graphs based lucene index. - */ -public class PerFieldCorrelationVectorsFormat extends BasePerFieldCorrelationVectorsFormat { - - /** - * Parameterized ctor for PerFieldCorrelationVectorsFormat - * @param mapperService mapper service - */ - public PerFieldCorrelationVectorsFormat(final Optional mapperService) { - super( - mapperService, - Lucene99HnswVectorsFormat.DEFAULT_MAX_CONN, - Lucene99HnswVectorsFormat.DEFAULT_BEAM_WIDTH, - Lucene99HnswVectorsFormat::new, - Lucene99HnswVectorsFormat::new - ); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java deleted file mode 100644 index fc2a9de58a73a..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * custom Lucene9.5 codec package for events-correlation-engine - */ -package org.opensearch.plugin.correlation.core.index.codec.correlation990; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java deleted file mode 100644 index 862b7cd253f04..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/** - * custom codec package for events-correlation-engine - */ -package org.opensearch.plugin.correlation.core.index.codec; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java deleted file mode 100644 index 18c9dd222e2cf..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.mapper; - -import org.apache.lucene.codecs.KnnVectorsFormat; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.document.KnnFloatVectorField; -import org.apache.lucene.document.StoredField; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.opensearch.common.Explicit; -import org.opensearch.index.mapper.FieldMapper; -import org.opensearch.index.mapper.ParseContext; -import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; -import org.opensearch.plugin.correlation.core.index.VectorField; - -import java.io.IOException; -import java.util.Locale; -import java.util.Optional; - -/** - * Field mapper for the correlation vector type - * - * @opensearch.internal - */ -public class CorrelationVectorFieldMapper extends VectorFieldMapper { - - private static final int LUCENE_MAX_DIMENSION = KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS; - - private final FieldType vectorFieldType; - - /** - * Parameterized ctor for CorrelationVectorFieldMapper - * @param input Object containing name of the field, type and other details. 
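Before the constructor below, a hedged sketch of what this mapper's `parseCreateField` ends up adding to the Lucene document for one vector value, assuming Lucene 9.x (field name and values are illustrative):

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.KnnFloatVectorField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.VectorSimilarityFunction;
import org.opensearch.plugin.correlation.core.index.VectorField;

class IndexedFieldsSketch {
    static Document buildDoc() {
        float[] vector = { 1.0f, 2.0f, 3.0f, 4.0f };
        Document doc = new Document();

        // the HNSW-searchable field, typed with the mapping's dimension and similarity
        doc.add(new KnnFloatVectorField("lucene-field", vector,
            KnnFloatVectorField.createFieldType(4, VectorSimilarityFunction.EUCLIDEAN)));

        // with doc_values enabled, a frozen BINARY doc-values copy is added as well,
        // mirroring buildDocValuesFieldType(); VectorField (defined earlier in this
        // diff) serializes the float[] to bytes for that copy
        FieldType dvType = new FieldType();
        dvType.setDocValuesType(DocValuesType.BINARY);
        dvType.freeze();
        doc.add(new VectorField("lucene-field", vector, dvType));
        return doc;
    }
}
```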
- */ - public CorrelationVectorFieldMapper(final CreateLuceneFieldMapperInput input) { - super( - input.getName(), - input.getMappedFieldType(), - input.getMultiFields(), - input.getCopyTo(), - input.getIgnoreMalformed(), - input.isStored(), - input.isHasDocValues() - ); - - this.correlationParams = input.getCorrelationParams(); - final VectorSimilarityFunction vectorSimilarityFunction = this.correlationParams.getSimilarityFunction(); - - final int dimension = input.getMappedFieldType().getDimension(); - if (dimension > LUCENE_MAX_DIMENSION) { - throw new IllegalArgumentException( - String.format( - Locale.ROOT, - "Dimension value cannot be greater than [%s] but got [%s] for vector [%s]", - LUCENE_MAX_DIMENSION, - dimension, - input.getName() - ) - ); - } - - this.fieldType = KnnFloatVectorField.createFieldType(dimension, vectorSimilarityFunction); - - if (this.hasDocValues) { - this.vectorFieldType = buildDocValuesFieldType(); - } else { - this.vectorFieldType = null; - } - } - - private static FieldType buildDocValuesFieldType() { - FieldType field = new FieldType(); - field.setDocValuesType(DocValuesType.BINARY); - field.freeze(); - return field; - } - - @Override - protected void parseCreateField(ParseContext context, int dimension) throws IOException { - Optional arrayOptional = getFloatsFromContext(context, dimension); - - if (arrayOptional.isEmpty()) { - return; - } - final float[] array = arrayOptional.get(); - - KnnFloatVectorField point = new KnnFloatVectorField(name(), array, fieldType); - - context.doc().add(point); - if (fieldType.stored()) { - context.doc().add(new StoredField(name(), point.toString())); - } - if (hasDocValues && vectorFieldType != null) { - context.doc().add(new VectorField(name(), array, vectorFieldType)); - } - context.path().remove(); - } - - static class CreateLuceneFieldMapperInput { - String name; - - CorrelationVectorFieldType mappedFieldType; - - FieldMapper.MultiFields multiFields; - - FieldMapper.CopyTo copyTo; - - Explicit ignoreMalformed; - boolean stored; - boolean hasDocValues; - - CorrelationParamsContext correlationParams; - - public CreateLuceneFieldMapperInput( - String name, - CorrelationVectorFieldType mappedFieldType, - FieldMapper.MultiFields multiFields, - FieldMapper.CopyTo copyTo, - Explicit ignoreMalformed, - boolean stored, - boolean hasDocValues, - CorrelationParamsContext correlationParams - ) { - this.name = name; - this.mappedFieldType = mappedFieldType; - this.multiFields = multiFields; - this.copyTo = copyTo; - this.ignoreMalformed = ignoreMalformed; - this.stored = stored; - this.hasDocValues = hasDocValues; - this.correlationParams = correlationParams; - } - - public String getName() { - return name; - } - - public CorrelationVectorFieldType getMappedFieldType() { - return mappedFieldType; - } - - public FieldMapper.MultiFields getMultiFields() { - return multiFields; - } - - public FieldMapper.CopyTo getCopyTo() { - return copyTo; - } - - public Explicit getIgnoreMalformed() { - return ignoreMalformed; - } - - public boolean isStored() { - return stored; - } - - public boolean isHasDocValues() { - return hasDocValues; - } - - public CorrelationParamsContext getCorrelationParams() { - return correlationParams; - } - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java deleted file mode 100644 index 
5ac6d92792295..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.mapper; - -import org.apache.lucene.search.FieldExistsQuery; -import org.apache.lucene.search.Query; -import org.opensearch.common.Explicit; -import org.opensearch.common.xcontent.support.XContentMapValues; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.FieldMapper; -import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.Mapper; -import org.opensearch.index.mapper.MapperParsingException; -import org.opensearch.index.mapper.ParametrizedFieldMapper; -import org.opensearch.index.mapper.ParseContext; -import org.opensearch.index.mapper.TextSearchInfo; -import org.opensearch.index.mapper.ValueFetcher; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.QueryShardException; -import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; -import org.opensearch.search.lookup.SearchLookup; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Optional; - -/** - * Parameterized field mapper for Correlation Vector type - * - * @opensearch.internal - */ -public abstract class VectorFieldMapper extends ParametrizedFieldMapper { - - /** - * name of Correlation Vector type - */ - public static final String CONTENT_TYPE = "correlation_vector"; - /** - * dimension of the correlation vectors - */ - public static final String DIMENSION = "dimension"; - /** - * context e.g. 
parameters and vector similarity function of Correlation Vector type - */ - public static final String CORRELATION_CONTEXT = "correlation_ctx"; - - private static VectorFieldMapper toType(FieldMapper in) { - return (VectorFieldMapper) in; - } - - /** - * definition of VectorFieldMapper.Builder - */ - public static class Builder extends ParametrizedFieldMapper.Builder { - protected Boolean ignoreMalformed; - - protected final Parameter stored = Parameter.boolParam("store", false, m -> toType(m).stored, false); - protected final Parameter hasDocValues = Parameter.boolParam("doc_values", false, m -> toType(m).hasDocValues, true); - protected final Parameter dimension = new Parameter<>(DIMENSION, false, () -> -1, (n, c, o) -> { - if (o == null) { - throw new IllegalArgumentException("Dimension cannot be null"); - } - int value; - try { - value = XContentMapValues.nodeIntegerValue(o); - } catch (Exception ex) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "Unable to parse [dimension] from provided value [%s] for vector [%s]", o, name) - ); - } - if (value <= 0) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "Dimension value must be greater than 0 for vector: %s", name) - ); - } - return value; - }, m -> toType(m).dimension); - - protected final Parameter correlationParamsContext = new Parameter<>( - CORRELATION_CONTEXT, - false, - () -> null, - (n, c, o) -> CorrelationParamsContext.parse(o), - m -> toType(m).correlationParams - ); - - protected final Parameter> meta = Parameter.metaParam(); - - /** - * Parameterized ctor for VectorFieldMapper.Builder - * @param name name - */ - public Builder(String name) { - super(name); - } - - @Override - protected List> getParameters() { - return Arrays.asList(stored, hasDocValues, dimension, meta, correlationParamsContext); - } - - protected Explicit ignoreMalformed(BuilderContext context) { - if (ignoreMalformed != null) { - return new Explicit<>(ignoreMalformed, true); - } - if (context.indexSettings() != null) { - return new Explicit<>(IGNORE_MALFORMED_SETTING.get(context.indexSettings()), false); - } - return Defaults.IGNORE_MALFORMED; - } - - @Override - public ParametrizedFieldMapper build(BuilderContext context) { - final CorrelationParamsContext correlationParams = correlationParamsContext.getValue(); - final MultiFields multiFieldsBuilder = this.multiFieldsBuilder.build(this, context); - final CopyTo copyToBuilder = copyTo.build(); - final Explicit ignoreMalformed = ignoreMalformed(context); - final Map metaValue = meta.getValue(); - - final CorrelationVectorFieldType mappedFieldType = new CorrelationVectorFieldType( - buildFullName(context), - metaValue, - dimension.getValue(), - correlationParams - ); - - CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput createLuceneFieldMapperInput = - new CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput( - name, - mappedFieldType, - multiFieldsBuilder, - copyToBuilder, - ignoreMalformed, - stored.get(), - hasDocValues.get(), - correlationParams - ); - return new CorrelationVectorFieldMapper(createLuceneFieldMapperInput); - } - } - - /** - * deifintion of VectorFieldMapper.TypeParser - */ - public static class TypeParser implements Mapper.TypeParser { - - /** - * default constructor of VectorFieldMapper.TypeParser - */ - public TypeParser() {} - - @Override - public Mapper.Builder parse(String name, Map node, ParserContext context) throws MapperParsingException { - Builder builder = new VectorFieldMapper.Builder(name); - 
builder.parse(name, context, node); - - if (builder.dimension.getValue() == -1) { - throw new IllegalArgumentException(String.format(Locale.getDefault(), "Dimension value missing for vector: %s", name)); - } - return builder; - } - } - - /** - * deifintion of VectorFieldMapper.CorrelationVectorFieldType - */ - public static class CorrelationVectorFieldType extends MappedFieldType { - int dimension; - CorrelationParamsContext correlationParams; - - /** - * Parameterized ctor for VectorFieldMapper.CorrelationVectorFieldType - * @param name name of the field - * @param meta meta of the field - * @param dimension dimension of the field - */ - public CorrelationVectorFieldType(String name, Map meta, int dimension) { - this(name, meta, dimension, null); - } - - /** - * Parameterized ctor for VectorFieldMapper.CorrelationVectorFieldType - * @param name name of the field - * @param meta meta of the field - * @param dimension dimension of the field - * @param correlationParams correlation params for the field - */ - public CorrelationVectorFieldType( - String name, - Map meta, - int dimension, - CorrelationParamsContext correlationParams - ) { - super(name, false, false, true, TextSearchInfo.NONE, meta); - this.dimension = dimension; - this.correlationParams = correlationParams; - } - - @Override - public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String s) { - throw new UnsupportedOperationException("Correlation Vector do not support fields search"); - } - - @Override - public String typeName() { - return CONTENT_TYPE; - } - - @Override - public Query existsQuery(QueryShardContext context) { - return new FieldExistsQuery(name()); - } - - @Override - public Query termQuery(Object o, QueryShardContext context) { - throw new QueryShardException( - context, - String.format( - Locale.getDefault(), - "Correlation vector do not support exact searching, use Correlation queries instead: [%s]", - name() - ) - ); - } - - /** - * get dimension - * @return dimension - */ - public int getDimension() { - return dimension; - } - - /** - * get correlation params - * @return correlation params - */ - public CorrelationParamsContext getCorrelationParams() { - return correlationParams; - } - } - - protected Explicit ignoreMalformed; - protected boolean stored; - protected boolean hasDocValues; - protected Integer dimension; - protected CorrelationParamsContext correlationParams; - - /** - * Parameterized ctor for VectorFieldMapper - * @param simpleName name of field - * @param mappedFieldType field type of field - * @param multiFields multi fields - * @param copyTo copy to - * @param ignoreMalformed ignore malformed - * @param stored stored field - * @param hasDocValues has doc values - */ - public VectorFieldMapper( - String simpleName, - CorrelationVectorFieldType mappedFieldType, - FieldMapper.MultiFields multiFields, - FieldMapper.CopyTo copyTo, - Explicit ignoreMalformed, - boolean stored, - boolean hasDocValues - ) { - super(simpleName, mappedFieldType, multiFields, copyTo); - this.ignoreMalformed = ignoreMalformed; - this.stored = stored; - this.hasDocValues = hasDocValues; - this.dimension = mappedFieldType.getDimension(); - } - - @Override - protected VectorFieldMapper clone() { - return (VectorFieldMapper) super.clone(); - } - - @Override - protected String contentType() { - return CONTENT_TYPE; - } - - @Override - protected void parseCreateField(ParseContext parseContext) throws IOException { - parseCreateField(parseContext, fieldType().getDimension()); - } - - 
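A hedged sketch of the validation performed by `getFloatsFromContext` below, pulled out of the parser for illustration (the real helper reads values via `XContentParser` and throws from within document parsing):

```java
import java.util.List;

class VectorValidationSketch {
    static float[] validate(List<Float> values, int dimension) {
        float[] array = new float[values.size()];
        for (int i = 0; i < values.size(); i++) {
            float v = values.get(i);
            // NaN and infinity are rejected element by element
            if (Float.isNaN(v)) throw new IllegalArgumentException("Correlation vector values cannot be NaN");
            if (Float.isInfinite(v)) throw new IllegalArgumentException("Correlation vector values cannot be infinity");
            array[i] = v;
        }
        // the element count must match the mapped dimension exactly
        if (dimension != array.length) {
            throw new IllegalArgumentException(
                "Vector dimension mismatch. Expected: " + dimension + ", Given: " + array.length);
        }
        return array;
    }
}
```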
protected abstract void parseCreateField(ParseContext parseContext, int dimension) throws IOException; - - Optional getFloatsFromContext(ParseContext context, int dimension) throws IOException { - context.path().add(simpleName()); - - List vector = new ArrayList<>(); - XContentParser.Token token = context.parser().currentToken(); - float value; - if (token == XContentParser.Token.START_ARRAY) { - token = context.parser().nextToken(); - while (token != XContentParser.Token.END_ARRAY) { - value = context.parser().floatValue(); - - if (Float.isNaN(value)) { - throw new IllegalArgumentException("Correlation vector values cannot be NaN"); - } - - if (Float.isInfinite(value)) { - throw new IllegalArgumentException("Correlation vector values cannot be infinity"); - } - vector.add(value); - token = context.parser().nextToken(); - } - } else if (token == XContentParser.Token.VALUE_NUMBER) { - value = context.parser().floatValue(); - if (Float.isNaN(value)) { - throw new IllegalArgumentException("Correlation vector values cannot be NaN"); - } - - if (Float.isInfinite(value)) { - throw new IllegalArgumentException("Correlation vector values cannot be infinity"); - } - vector.add(value); - context.parser().nextToken(); - } else if (token == XContentParser.Token.VALUE_NULL) { - context.path().remove(); - return Optional.empty(); - } - - if (dimension != vector.size()) { - String errorMessage = String.format( - Locale.ROOT, - "Vector dimension mismatch. Expected: %d, Given: %d", - dimension, - vector.size() - ); - throw new IllegalArgumentException(errorMessage); - } - - float[] array = new float[vector.size()]; - int i = 0; - for (Float f : vector) { - array[i++] = f; - } - return Optional.of(array); - } - - @Override - protected boolean docValuesByDefault() { - return true; - } - - @Override - public ParametrizedFieldMapper.Builder getMergeBuilder() { - return new VectorFieldMapper.Builder(simpleName()).init(this); - } - - @Override - public boolean parsesArrayValue() { - return true; - } - - @Override - public CorrelationVectorFieldType fieldType() { - return (CorrelationVectorFieldType) super.fieldType(); - } - - @Override - protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { - super.doXContentBody(builder, includeDefaults, params); - if (includeDefaults || ignoreMalformed.explicit()) { - builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value()); - } - } - - /** - * Class for constants used in parent class VectorFieldMapper - */ - public static class Names { - public static final String IGNORE_MALFORMED = "ignore_malformed"; - } - - /** - * Class for constants used in parent class VectorFieldMapper - */ - public static class Defaults { - public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java deleted file mode 100644 index cfc0ffdfa81f1..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/** - * package to wrap Lucene KnnFloatVectorField and KnnFloatVectorQuery for Opensearch events-correlation-engine - */ -package org.opensearch.plugin.correlation.core.index; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java deleted file mode 100644 index e95b68e855cca..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java +++ /dev/null @@ -1,332 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.query; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.search.Query; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.Strings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.index.query.AbstractQueryBuilder; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.WithFieldName; -import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Objects; - -/** - * Constructs a query to get correlated events or documents for a particular event or document. - * - * @opensearch.internal - */ -public class CorrelationQueryBuilder extends AbstractQueryBuilder implements WithFieldName { - - private static final Logger log = LogManager.getLogger(CorrelationQueryBuilder.class); - protected static final ParseField VECTOR_FIELD = new ParseField("vector"); - protected static final ParseField K_FIELD = new ParseField("k"); - protected static final ParseField FILTER_FIELD = new ParseField("filter"); - /** - * max number of neighbors that can be retrieved. 
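The builder defined in this file takes a field name, a query vector, and k, with an optional filter added by the four-argument constructor below. A hypothetical usage sketch; the field name and vector values are made up, and the serialized form in the comment follows this class's doXContent:

```java
// Hypothetical usage of the CorrelationQueryBuilder defined in this file.
// doXContent serializes it roughly as:
//   { "event_vector": { "vector": [0.1, 0.2, 0.3], "k": 5 } }
float[] queryVector = new float[] { 0.1f, 0.2f, 0.3f };
CorrelationQueryBuilder query = new CorrelationQueryBuilder("event_vector", queryVector, 5);
```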
- */ - public static int K_MAX = 10000; - - /** - * name of the query - */ - public static final ParseField NAME_FIELD = new ParseField("correlation"); - - private String fieldName; - private float[] vector; - private int k = 0; - private double boost; - private QueryBuilder filter; - - private CorrelationQueryBuilder() {} - - /** - * parameterized ctor for CorrelationQueryBuilder - * @param fieldName field name for query - * @param vector query vector - * @param k number of nearby neighbors - */ - public CorrelationQueryBuilder(String fieldName, float[] vector, int k) { - this(fieldName, vector, k, null); - } - - /** - * parameterized ctor for CorrelationQueryBuilder - * @param fieldName field name for query - * @param vector query vector - * @param k number of nearby neighbors - * @param filter optional filter query - */ - public CorrelationQueryBuilder(String fieldName, float[] vector, int k, QueryBuilder filter) { - if (Strings.isNullOrEmpty(fieldName)) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "[%s] requires fieldName", NAME_FIELD.getPreferredName()) - ); - } - if (vector == null) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "[%s] requires query vector", NAME_FIELD.getPreferredName()) - ); - } - if (vector.length == 0) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "[%s] query vector is empty", NAME_FIELD.getPreferredName()) - ); - } - if (k <= 0) { - throw new IllegalArgumentException(String.format(Locale.getDefault(), "[%s] requires k > 0", NAME_FIELD.getPreferredName())); - } - if (k > K_MAX) { - throw new IllegalArgumentException(String.format(Locale.getDefault(), "[%s] requires k <= %d", NAME_FIELD.getPreferredName(), K_MAX)); - } - - this.fieldName = fieldName; - this.vector = vector; - this.k = k; - this.filter = filter; - } - - /** - * parameterized ctor for CorrelationQueryBuilder - * @param sin StreamInput - * @throws IOException IOException - */ - public CorrelationQueryBuilder(StreamInput sin) throws IOException { - super(sin); - this.fieldName = sin.readString(); - this.vector = sin.readFloatArray(); - this.k = sin.readInt(); - this.filter = sin.readOptionalNamedWriteable(QueryBuilder.class); - } - - private static float[] objectsToFloats(List objs) { - float[] vector = new float[objs.size()]; - for (int i = 0; i < objs.size(); ++i) { - vector[i] = ((Number) objs.get(i)).floatValue(); - } - return vector; - } - - /** - * parse into CorrelationQueryBuilder - * @param xcp XContentParser - * @return CorrelationQueryBuilder - */ - public static CorrelationQueryBuilder parse(XContentParser xcp) throws IOException { - String fieldName = null; - List vector = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - - int k = 0; - QueryBuilder filter = null; - String queryName = null; - String currentFieldName = null; - XContentParser.Token token; - while ((token = xcp.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = xcp.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - throwParsingExceptionOnMultipleFields(NAME_FIELD.getPreferredName(), xcp.getTokenLocation(), fieldName, currentFieldName); - fieldName = currentFieldName; - while ((token = xcp.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = xcp.currentName(); - } else if (token.isValue() || token == XContentParser.Token.START_ARRAY) { - if (VECTOR_FIELD.match(currentFieldName,
xcp.getDeprecationHandler())) { - vector = xcp.list(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { - boost = xcp.floatValue(); - } else if (K_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { - k = (Integer) NumberFieldMapper.NumberType.INTEGER.parse(xcp.objectBytes(), false); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { - queryName = xcp.text(); - } else { - throw new ParsingException( - xcp.getTokenLocation(), - "[" + NAME_FIELD.getPreferredName() + "] query does not support [" + currentFieldName + "]" - ); - } - } else if (token == XContentParser.Token.START_OBJECT) { - String tokenName = xcp.currentName(); - if (FILTER_FIELD.getPreferredName().equals(tokenName)) { - filter = parseInnerQueryBuilder(xcp); - } else { - throw new ParsingException( - xcp.getTokenLocation(), - "[" + NAME_FIELD.getPreferredName() + "] unknown token [" + token + "]" - ); - } - } else { - throw new ParsingException( - xcp.getTokenLocation(), - "[" + NAME_FIELD.getPreferredName() + "] unknown token [" + token + "] after [" + currentFieldName + "]" - ); - } - } - } else { - throwParsingExceptionOnMultipleFields(NAME_FIELD.getPreferredName(), xcp.getTokenLocation(), fieldName, xcp.currentName()); - fieldName = xcp.currentName(); - vector = xcp.list(); - } - } - - assert vector != null; - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(fieldName, objectsToFloats(vector), k, filter); - correlationQueryBuilder.queryName(queryName); - correlationQueryBuilder.boost(boost); - return correlationQueryBuilder; - } - - public void setFieldName(String fieldName) { - this.fieldName = fieldName; - } - - /** - * get field name - * @return field name - */ - @Override - public String fieldName() { - return fieldName; - } - - public void setVector(float[] vector) { - this.vector = vector; - } - - /** - * get query vector - * @return query vector - */ - public Object vector() { - return vector; - } - - public void setK(int k) { - this.k = k; - } - - /** - * get number of nearby neighbors - * @return number of nearby neighbors - */ - public int getK() { - return k; - } - - public void setBoost(double boost) { - this.boost = boost; - } - - /** - * get boost - * @return boost - */ - public double getBoost() { - return boost; - } - - public void setFilter(QueryBuilder filter) { - this.filter = filter; - } - - /** - * get optional filter - * @return optional filter - */ - public QueryBuilder getFilter() { - return filter; - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeString(fieldName); - out.writeFloatArray(vector); - out.writeInt(k); - out.writeOptionalNamedWriteable(filter); - } - - @Override - public void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(fieldName); - - builder.field(VECTOR_FIELD.getPreferredName(), vector); - builder.field(K_FIELD.getPreferredName(), k); - if (filter != null) { - builder.field(FILTER_FIELD.getPreferredName(), filter); - } - printBoostAndQueryName(builder); - builder.endObject(); - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - MappedFieldType mappedFieldType = context.fieldMapper(fieldName); - - if (!(mappedFieldType instanceof VectorFieldMapper.CorrelationVectorFieldType)) { - throw new IllegalArgumentException(String.format(Locale.getDefault(), "Field '%s' is not knn_vector type.", this.fieldName)); - } - - 
VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = - (VectorFieldMapper.CorrelationVectorFieldType) mappedFieldType; - int fieldDimension = correlationVectorFieldType.getDimension(); - - if (fieldDimension != vector.length) { - throw new IllegalArgumentException( - String.format( - Locale.getDefault(), - "Query vector has invalid dimension: %d. Dimension should be: %d", - vector.length, - fieldDimension - ) - ); - } - - String indexName = context.index().getName(); - CorrelationQueryFactory.CreateQueryRequest createQueryRequest = new CorrelationQueryFactory.CreateQueryRequest( - indexName, - this.fieldName, - this.vector, - this.k, - this.filter, - context - ); - return CorrelationQueryFactory.create(createQueryRequest); - } - - @Override - protected boolean doEquals(CorrelationQueryBuilder other) { - return Objects.equals(fieldName, other.fieldName) && Arrays.equals(vector, other.vector) && Objects.equals(k, other.k); - } - - @Override - protected int doHashCode() { - return Objects.hash(fieldName, vector, k); - } - - @Override - public String getWriteableName() { - return NAME_FIELD.getPreferredName(); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java deleted file mode 100644 index d5db299bfa3a5..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.query; - -import org.apache.lucene.search.KnnFloatVectorQuery; -import org.apache.lucene.search.Query; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryShardContext; - -import java.io.IOException; -import java.util.Optional; - -/** - * CorrelationQueryFactory util class is used to construct a Lucene KnnFloatVectorQuery. - * - * @opensearch.internal - */ -public class CorrelationQueryFactory { - - /** - * static method which takes input params to construct a Lucene KnnFloatVectorQuery. - * @param createQueryRequest object parameter containing inputs for constructing Lucene KnnFloatVectorQuery. - * @return generic Lucene Query object - */ - public static Query create(CreateQueryRequest createQueryRequest) { - final String indexName = createQueryRequest.getIndexName(); - final String fieldName = createQueryRequest.getFieldName(); - final int k = createQueryRequest.getK(); - final float[] vector = createQueryRequest.getVector(); - - if (createQueryRequest.getFilter().isPresent()) { - final QueryShardContext context = createQueryRequest.getContext() - .orElseThrow(() -> new RuntimeException("Shard context cannot be null")); - - try { - final Query filterQuery = createQueryRequest.getFilter().get().toQuery(context); - return new KnnFloatVectorQuery(fieldName, vector, k, filterQuery); - } catch (IOException ex) { - throw new RuntimeException("Cannot create knn query with filter", ex); - } - } - return new KnnFloatVectorQuery(fieldName, vector, k); - } - - /** - * class containing params to construct a Lucene KnnFloatVectorQuery. 
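CorrelationQueryFactory.create above reduces to one of two Lucene constructors: KnnFloatVectorQuery(field, vector, k) or, when a filter is present, KnnFloatVectorQuery(field, vector, k, filterQuery). A self-contained Lucene sketch of that query in isolation, assuming a Lucene version that ships KnnFloatVectorField and KnnFloatVectorQuery (as this plugin's imports indicate):

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.KnnFloatVectorField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.KnnFloatVectorQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;

public class KnnQuerySketch {
    public static void main(String[] args) throws Exception {
        try (ByteBuffersDirectory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            // Index two documents, each carrying a float vector field.
            for (float[] v : new float[][] { { 1f, 0f }, { 0f, 1f } }) {
                Document doc = new Document();
                doc.add(new KnnFloatVectorField("correlation_vector", v));
                writer.addDocument(doc);
            }
            writer.commit();
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                // Ask for the single nearest neighbor of (0.9, 0.1).
                TopDocs hits = new IndexSearcher(reader)
                    .search(new KnnFloatVectorQuery("correlation_vector", new float[] { 0.9f, 0.1f }, 1), 1);
                System.out.println("hits: " + hits.totalHits);
            }
        }
    }
}
```

The in-memory directory and two-dimensional vectors are purely illustrative; the plugin runs the same query against the mapped vector field of an OpenSearch shard.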
- * - * @opensearch.internal - */ - public static class CreateQueryRequest { - private String indexName; - - private String fieldName; - - private float[] vector; - - private int k; - - private QueryBuilder filter; - - private QueryShardContext context; - - /** - * Parameterized ctor for CreateQueryRequest - * @param indexName index name - * @param fieldName field name - * @param vector query vector - * @param k number of nearby neighbors - * @param filter additional filter query - * @param context QueryShardContext - */ - public CreateQueryRequest( - String indexName, - String fieldName, - float[] vector, - int k, - QueryBuilder filter, - QueryShardContext context - ) { - this.indexName = indexName; - this.fieldName = fieldName; - this.vector = vector; - this.k = k; - this.filter = filter; - this.context = context; - } - - /** - * get index name - * @return get index name - */ - public String getIndexName() { - return indexName; - } - - /** - * get field name - * @return get field name - */ - public String getFieldName() { - return fieldName; - } - - /** - * get vector - * @return get vector - */ - public float[] getVector() { - return vector; - } - - /** - * get number of nearby neighbors - * @return number of nearby neighbors - */ - public int getK() { - return k; - } - - /** - * get optional filter query - * @return get optional filter query - */ - public Optional getFilter() { - return Optional.ofNullable(filter); - } - - /** - * get optional query shard context - * @return get optional query shard context - */ - public Optional getContext() { - return Optional.ofNullable(context); - } - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java deleted file mode 100644 index 2cf5db786a60f..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * correlation query builder package - */ -package org.opensearch.plugin.correlation.core.index.query; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java deleted file mode 100644 index 82be787af5a72..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/** - * base package of events-correlation-engine - */ -package org.opensearch.plugin.correlation; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java deleted file mode 100644 index ab6f05ec0e6a3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.action; - -import org.opensearch.action.ActionType; - -/** - * Transport Action for indexing correlation rules. - * - * @opensearch.internal - */ -public class IndexCorrelationRuleAction extends ActionType { - - /** - * Instance of IndexCorrelationRuleAction - */ - public static final IndexCorrelationRuleAction INSTANCE = new IndexCorrelationRuleAction(); - /** - * Name of IndexCorrelationRuleAction - */ - public static final String NAME = "cluster:admin/correlation/rules"; - - private IndexCorrelationRuleAction() { - super(NAME, IndexCorrelationRuleResponse::new); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java deleted file mode 100644 index 3fe25d144059d..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.action; - -import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.rest.RestRequest; - -import java.io.IOException; - -/** - * A request to index correlation rules. 
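IndexCorrelationRuleAction above follows the standard OpenSearch ActionType pattern: a private constructor, a public INSTANCE singleton, and a NAME string used to route transport requests. A generic sketch of that pattern with hypothetical names (ExampleRuleAction and ExampleRuleResponse are not part of this plugin):

```java
import java.io.IOException;
import org.opensearch.action.ActionType;
import org.opensearch.core.action.ActionResponse;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;

// Hypothetical response type; stands in for something like IndexCorrelationRuleResponse.
class ExampleRuleResponse extends ActionResponse {
    ExampleRuleResponse() {}

    ExampleRuleResponse(StreamInput in) throws IOException {}

    @Override
    public void writeTo(StreamOutput out) throws IOException {}
}

// Hypothetical action illustrating the singleton ActionType pattern used above.
public class ExampleRuleAction extends ActionType<ExampleRuleResponse> {
    public static final ExampleRuleAction INSTANCE = new ExampleRuleAction();
    public static final String NAME = "cluster:admin/example/rules"; // illustrative name

    private ExampleRuleAction() {
        super(NAME, ExampleRuleResponse::new); // reader used to deserialize the response
    }
}
```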
- * - * @opensearch.internal - */ -public class IndexCorrelationRuleRequest extends ActionRequest { - - private String correlationRuleId; - - private CorrelationRule correlationRule; - - private RestRequest.Method method; - - /** - * Parameterized ctor for IndexCorrelationRuleRequest - * @param correlationRule correlation rule - * @param method Rest method of request PUT or POST - */ - public IndexCorrelationRuleRequest(CorrelationRule correlationRule, RestRequest.Method method) { - super(); - this.correlationRuleId = ""; - this.correlationRule = correlationRule; - this.method = method; - } - - /** - * Parameterized ctor for IndexCorrelationRuleRequest - * @param correlationRuleId correlation rule id - * @param correlationRule correlation rule - * @param method Rest method of request PUT or POST - */ - public IndexCorrelationRuleRequest(String correlationRuleId, CorrelationRule correlationRule, RestRequest.Method method) { - super(); - this.correlationRuleId = correlationRuleId; - this.correlationRule = correlationRule; - this.method = method; - } - - /** - * StreamInput ctor of IndexCorrelationRuleRequest - * @param sin StreamInput - * @throws IOException IOException - */ - public IndexCorrelationRuleRequest(StreamInput sin) throws IOException { - this(sin.readString(), CorrelationRule.readFrom(sin), sin.readEnum(RestRequest.Method.class)); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(correlationRuleId); - correlationRule.writeTo(out); - out.writeEnum(method); // mirror the StreamInput ctor, which reads the method back - } - - /** - * get correlation rule id - * @return correlation rule id - */ - public String getCorrelationRuleId() { - return correlationRuleId; - } - - /** - * get correlation rule - * @return correlation rule - */ - public CorrelationRule getCorrelationRule() { - return correlationRule; - } - - /** - * get Rest method - * @return Rest method - */ - public RestRequest.Method getMethod() { - return method; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java deleted file mode 100644 index 8102e6585825e..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.action; - -import org.opensearch.core.ParseField; -import org.opensearch.core.action.ActionResponse; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; - -import java.io.IOException; - -/** - * Transport Response for indexing correlation rules.
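As with the request above, every Writeable in this plugin pairs a StreamInput constructor with a writeTo method, and the two must read and write the same fields in the same order or deserialization on the receiving node will misalign. A minimal hypothetical Writeable showing the symmetry:

```java
import java.io.IOException;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.common.io.stream.Writeable;

// Hypothetical Writeable: the read side must mirror the write side field-for-field.
public class ExampleRule implements Writeable {
    private final String id;
    private final long version;

    public ExampleRule(String id, long version) {
        this.id = id;
        this.version = version;
    }

    public ExampleRule(StreamInput in) throws IOException {
        this.id = in.readString();    // 1st field: id
        this.version = in.readLong(); // 2nd field: version
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(id);    // 1st field: id
        out.writeLong(version); // 2nd field: version
    }
}
```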
- * - * @opensearch.internal - */ -public class IndexCorrelationRuleResponse extends ActionResponse implements ToXContentObject { - - private static final ParseField _ID = new ParseField("_id"); - private static final ParseField _VERSION = new ParseField("_version"); - - private String id; - - private Long version; - - private RestStatus status; - - private CorrelationRule correlationRule; - - /** - * Parameterized ctor for IndexCorrelationRuleResponse - * @param version version of rule - * @param status Rest status of indexing rule - * @param correlationRule correlation rule - */ - public IndexCorrelationRuleResponse(String id, Long version, RestStatus status, CorrelationRule correlationRule) { - super(); - this.id = id; - this.version = version; - this.status = status; - this.correlationRule = correlationRule; - } - - /** - * StreamInput ctor of IndexCorrelationRuleResponse - * @param sin StreamInput - * @throws IOException IOException - */ - public IndexCorrelationRuleResponse(StreamInput sin) throws IOException { - this(sin.readString(), sin.readLong(), sin.readEnum(RestStatus.class), CorrelationRule.readFrom(sin)); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject().field(_ID.getPreferredName(), id).field(_VERSION.getPreferredName(), version); - - builder.field("rule", correlationRule); - return builder.endObject(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(id); - out.writeLong(version); - out.writeEnum(status); - correlationRule.writeTo(out); - } - - /** - * get id - * @return id of rule - */ - public String getId() { - return id; - } - - /** - * get status - * @return Rest status of indexing rule - */ - public RestStatus getStatus() { - return status; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java deleted file mode 100644 index c01f2936a20ca..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Transport Actions, Requests and Responses for correlation rules - */ -package org.opensearch.plugin.correlation.rules.action; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java deleted file mode 100644 index 3797e0c7043dc..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation.rules.model; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Correlation Query DSL - * { - * "index": "s3_access_logs", - * "query": "aws.cloudtrail.eventName:ReplicateObject", - * "timestampField": "@timestamp", - * "tags": [ - * "s3" - * ] - * } - */ -public class CorrelationQuery implements Writeable, ToXContentObject { - - private static final Logger log = LogManager.getLogger(CorrelationQuery.class); - private static final ParseField INDEX_FIELD = new ParseField("index"); - private static final ParseField QUERY_FIELD = new ParseField("query"); - private static final ParseField TIMESTAMP_FIELD = new ParseField("timestampField"); - private static final ParseField TAGS_FIELD = new ParseField("tags"); - private static final ObjectParser PARSER = new ObjectParser<>("CorrelationQuery", CorrelationQuery::new); - - static { - PARSER.declareString(CorrelationQuery::setIndex, INDEX_FIELD); - PARSER.declareString(CorrelationQuery::setQuery, QUERY_FIELD); - PARSER.declareStringOrNull(CorrelationQuery::setTimestampField, TIMESTAMP_FIELD); - PARSER.declareField((xcp, query, context) -> { - List tags = new ArrayList<>(); - XContentParser.Token currentToken = xcp.currentToken(); - if (currentToken == XContentParser.Token.START_ARRAY) { - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - tags.add(xcp.text()); - } - } - query.setTags(tags); - }, TAGS_FIELD, ObjectParser.ValueType.STRING_ARRAY); - } - - private String index; - - private String query; - - private String timestampField; - - private List tags; - - private CorrelationQuery() { - this.timestampField = "_timestamp"; - } - - /** - * Parameterized ctor of Correlation Query - * @param index event index to correlate - * @param query query to filter relevant events for correlations from index - * @param timestampField timestamp field in the index - * @param tags tags to store additional metadata as part of correlation queries. - */ - public CorrelationQuery(String index, String query, String timestampField, List tags) { - this.index = index; - this.query = query; - this.timestampField = timestampField != null ? 
timestampField : "_timestamp"; - this.tags = tags; - } - - /** - * StreamInput ctor of Correlation Query - * @param sin StreamInput - * @throws IOException IOException - */ - public CorrelationQuery(StreamInput sin) throws IOException { - this(sin.readString(), sin.readString(), sin.readString(), sin.readStringList()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(index); - out.writeString(query); - out.writeString(timestampField); - out.writeStringCollection(tags); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(INDEX_FIELD.getPreferredName(), index) - .field(QUERY_FIELD.getPreferredName(), query) - .field(TIMESTAMP_FIELD.getPreferredName(), timestampField) - .field(TAGS_FIELD.getPreferredName(), tags); - return builder.endObject(); - } - - /** - * parse into CorrelationQuery - * @param xcp XContentParser - * @return CorrelationQuery - */ - public static CorrelationQuery parse(XContentParser xcp) { - return PARSER.apply(xcp, null); - } - - /** - * convert StreamInput to CorrelationQuery - * @param sin StreamInput - * @return CorrelationQuery - * @throws IOException IOException - */ - public static CorrelationQuery readFrom(StreamInput sin) throws IOException { - return new CorrelationQuery(sin); - } - - /** - * Set index - * @param index event index to correlate - */ - public void setIndex(String index) { - this.index = index; - } - - /** - * Get index - * @return event index to correlate - */ - public String getIndex() { - return index; - } - - /** - * Set query - * @param query query to filter relevant events for correlations from index - */ - public void setQuery(String query) { - this.query = query; - } - - /** - * Get query - * @return query to filter relevant events for correlations from index - */ - public String getQuery() { - return query; - } - - /** - * Set timestamp field - * @param timestampField timestamp field in the index - */ - public void setTimestampField(String timestampField) { - this.timestampField = timestampField != null ? timestampField : "_timestamp"; - } - - /** - * Get timestamp field - * @return timestamp field in the index - */ - public String getTimestampField() { - return timestampField; - } - - /** - * Set tags - * @param tags tags to store additional metadata as part of correlation queries. - */ - public void setTags(List tags) { - this.tags = tags; - } - - /** - * Get tags - * @return tags to store additional metadata as part of correlation queries. - */ - public List getTags() { - return tags; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java deleted file mode 100644 index 6978d7248e199..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation.rules.model; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -/** - * Correlation Rule DSL - * { - * "name": "s3 to app logs", - * "correlate": [ - * { - * "index": "s3_access_logs", - * "query": "aws.cloudtrail.eventName:ReplicateObject", - * "timestampField": "@timestamp", - * "tags": [ - * "s3" - * ] - * } - * ] - * } - * - * @opensearch.api - * @opensearch.experimental - */ -public class CorrelationRule implements Writeable, ToXContentObject { - - private static final Logger log = LogManager.getLogger(CorrelationRule.class); - - /** - * Correlation Rule Index - */ - public static final String CORRELATION_RULE_INDEX = ".opensearch-correlation-rules-config"; - - private static final ParseField ID_FIELD = new ParseField("id"); - private static final ParseField VERSION_FIELD = new ParseField("version"); - private static final ParseField NAME_FIELD = new ParseField("name"); - private static final ParseField CORRELATION_QUERIES_FIELD = new ParseField("correlate"); - private static final ObjectParser PARSER = new ObjectParser<>("CorrelationRule", CorrelationRule::new); - - static { - PARSER.declareString(CorrelationRule::setId, ID_FIELD); - PARSER.declareLong(CorrelationRule::setVersion, VERSION_FIELD); - PARSER.declareString(CorrelationRule::setName, NAME_FIELD); - PARSER.declareField((xcp, rule, context) -> { - List correlationQueries = new ArrayList<>(); - XContentParser.Token currentToken = xcp.currentToken(); - if (currentToken == XContentParser.Token.START_ARRAY) { - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - correlationQueries.add(CorrelationQuery.parse(xcp)); - } - } - rule.setCorrelationQueries(correlationQueries); - }, CORRELATION_QUERIES_FIELD, ObjectParser.ValueType.OBJECT_ARRAY); - } - - private String id; - - private Long version; - - private String name; - - private List correlationQueries; - - private CorrelationRule() {} - - /** - * Parameterized ctor of Correlation Rule - * @param name name of rule - * @param correlationQueries list of correlation queries part of rule - */ - public CorrelationRule(String name, List correlationQueries) { - this("", 1L, name, correlationQueries); - } - - /** - * Parameterized ctor of Correlation Rule - * @param id id of rule - * @param version version of rule - * @param name name of rule - * @param correlationQueries list of correlation queries part of rule - */ - public CorrelationRule(String id, Long version, String name, List correlationQueries) { - this.id = id; - this.version = version; - this.name = name; - this.correlationQueries = correlationQueries; - } - - /** - * StreamInput ctor of Correlation Rule - * @param sin StreamInput - * @throws IOException IOException - */ - public CorrelationRule(StreamInput sin) throws IOException { - this(sin.readString(), sin.readLong(), sin.readString(), sin.readList(CorrelationQuery::readFrom)); - } - - @Override - public XContentBuilder toXContent(XContentBuilder 
builder, Params params) throws IOException { - builder.startObject(); - - builder.field(ID_FIELD.getPreferredName(), id); - builder.field(VERSION_FIELD.getPreferredName(), version); - builder.field(NAME_FIELD.getPreferredName(), name); - - CorrelationQuery[] correlationQueries = new CorrelationQuery[] {}; - correlationQueries = this.correlationQueries.toArray(correlationQueries); - builder.field(CORRELATION_QUERIES_FIELD.getPreferredName(), correlationQueries); - return builder.endObject(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(id); - out.writeLong(version); - out.writeString(name); - - out.writeList(correlationQueries); // size-prefixed, mirroring readList in the StreamInput ctor - } - - /** - * parse into CorrelationRule - * @param xcp XContentParser - * @param id id of rule - * @param version version of rule - * @return CorrelationRule - */ - public static CorrelationRule parse(XContentParser xcp, String id, Long version) { - return PARSER.apply(xcp, null); - } - - /** - * convert StreamInput to CorrelationRule - * @param sin StreamInput - * @return CorrelationRule - * @throws IOException IOException - */ - public static CorrelationRule readFrom(StreamInput sin) throws IOException { - return new CorrelationRule(sin); - } - - /** - * set id - * @param id id of rule - */ - public void setId(String id) { - this.id = id; - } - - /** - * get id - * @return id of rule - */ - public String getId() { - return id; - } - - /** - * set version - * @param version version of rule - */ - public void setVersion(Long version) { - this.version = version; - } - - /** - * get version - * @return version of rule - */ - public Long getVersion() { - return version; - } - - /** - * set name - * @param name name of rule - */ - public void setName(String name) { - this.name = name; - } - - /** - * get name - * @return name of rule - */ - public String getName() { - return name; - } - - /** - * set correlation queries - * @param correlationQueries set correlation queries for the rule - */ - public void setCorrelationQueries(List correlationQueries) { - this.correlationQueries = correlationQueries; - } - - /** - * get correlation queries - * @return correlation queries for the rule - */ - public List getCorrelationQueries() { - return correlationQueries; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - CorrelationRule that = (CorrelationRule) o; - return id.equals(that.id) - && version.equals(that.version) - && name.equals(that.name) - && correlationQueries.equals(that.correlationQueries); - } - - @Override - public int hashCode() { - return Objects.hash(id, version, name, correlationQueries); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java deleted file mode 100644 index b04b7be3c62e3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -/** - * data models for correlation rules - */ -package org.opensearch.plugin.correlation.rules.model; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java deleted file mode 100644 index 3b2b7eb02ae5f..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.resthandler; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.client.node.NodeClient; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.plugin.correlation.EventsCorrelationPlugin; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.rest.BaseRestHandler; -import org.opensearch.rest.BytesRestResponse; -import org.opensearch.rest.RestChannel; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestResponse; -import org.opensearch.rest.action.RestResponseListener; - -import java.io.IOException; -import java.util.List; -import java.util.Locale; - -/** - * Rest action for indexing correlation rules. 
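The handler that follows registers two routes, POST on the rules base URI for create and PUT with a rule_id path parameter for update, and turns the request body into a transport action call. A skeletal sketch of that shape; the path here is a placeholder, since the real handler builds its routes from EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI:

```java
import java.io.IOException;
import java.util.List;
import org.opensearch.client.node.NodeClient;
import org.opensearch.rest.BaseRestHandler;
import org.opensearch.rest.RestRequest;

// Skeletal REST handler with a hypothetical base path.
public class ExampleRestHandler extends BaseRestHandler {
    @Override
    public String getName() {
        return "example_rest_handler";
    }

    @Override
    public List<Route> routes() {
        return List.of(
            new Route(RestRequest.Method.POST, "/_plugins/_example/rules"),
            new Route(RestRequest.Method.PUT, "/_plugins/_example/rules/{rule_id}")
        );
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
        String id = request.param("rule_id", ""); // empty id means "create new"
        // A real handler would parse the body here and dispatch a transport action;
        // this placeholder consumer does nothing with the channel.
        return channel -> {};
    }
}
```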
- * - * @opensearch.api - */ -public class RestIndexCorrelationRuleAction extends BaseRestHandler { - - private static final Logger log = LogManager.getLogger(RestIndexCorrelationRuleAction.class); - - /** - * Default constructor - */ - public RestIndexCorrelationRuleAction() {} - - @Override - public String getName() { - return "index_correlation_rule_action"; - } - - @Override - public List routes() { - return List.of( - new Route(RestRequest.Method.POST, EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI), - new Route( - RestRequest.Method.PUT, - String.format(Locale.ROOT, "%s/{%s}", EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI, "rule_id") - ) - ); - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - log.debug(String.format(Locale.ROOT, "%s %s", request.method(), EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI)); - - String id = request.param("rule_id", ""); - - XContentParser xcp = request.contentParser(); - - CorrelationRule correlationRule = CorrelationRule.parse(xcp, id, 1L); - IndexCorrelationRuleRequest indexCorrelationRuleRequest = new IndexCorrelationRuleRequest(id, correlationRule, request.method()); - return channel -> client.execute( - IndexCorrelationRuleAction.INSTANCE, - indexCorrelationRuleRequest, - indexCorrelationRuleResponse(channel, request.method()) - ); - } - - private RestResponseListener indexCorrelationRuleResponse( - RestChannel channel, - RestRequest.Method restMethod - ) { - return new RestResponseListener<>(channel) { - @Override - public RestResponse buildResponse(IndexCorrelationRuleResponse response) throws Exception { - RestStatus returnStatus = RestStatus.CREATED; - if (restMethod == RestRequest.Method.PUT) { - returnStatus = RestStatus.OK; - } - - BytesRestResponse restResponse = new BytesRestResponse( - returnStatus, - response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS) - ); - - if (restMethod == RestRequest.Method.POST) { - String location = String.format( - Locale.ROOT, - "%s/%s", - EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI, - response.getId() - ); - restResponse.addHeader("Location", location); - } - - return restResponse; - } - }; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java deleted file mode 100644 index 607ec355801ad..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/** - * Rest Handlers for correlation rules - */ -package org.opensearch.plugin.correlation.rules.resthandler; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java deleted file mode 100644 index 7b4fb670c4aee..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.transport; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.admin.indices.create.CreateIndexResponse; -import org.opensearch.action.index.IndexRequest; -import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.action.support.WriteRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.client.Client; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.action.ActionListener; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.plugin.correlation.utils.CorrelationRuleIndices; -import org.opensearch.plugin.correlation.utils.IndexUtils; -import org.opensearch.rest.RestRequest; -import org.opensearch.tasks.Task; -import org.opensearch.transport.TransportService; - -import java.io.IOException; -import java.util.Locale; - -/** - * Transport Action for indexing correlation rules. 
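The transport action that follows chains three asynchronous steps, create the rules index if missing, update its mapping if stale, then index the rule document, and each hop is an ActionListener whose onFailure funnels into a single error path. One link of such a chain, as a sketch:

```java
import java.util.function.Consumer;
import org.opensearch.action.admin.indices.create.CreateIndexResponse;
import org.opensearch.core.action.ActionListener;

final class ListenerSketch {
    // One link of the async chain used in the action below: run the next step on
    // success, route any failure to a single error handler.
    static ActionListener<CreateIndexResponse> step(Runnable nextStep, Consumer<Exception> onError) {
        return new ActionListener<>() {
            @Override
            public void onResponse(CreateIndexResponse response) {
                nextStep.run();
            }

            @Override
            public void onFailure(Exception e) {
                onError.accept(e);
            }
        };
    }
}
```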
- * - * @opensearch.internal - */ -public class TransportIndexCorrelationRuleAction extends HandledTransportAction { - - private static final Logger log = LogManager.getLogger(TransportIndexCorrelationRuleAction.class); - - private final Client client; - - private final CorrelationRuleIndices correlationRuleIndices; - - private final ClusterService clusterService; - - /** - * Parameterized ctor for Transport Action - * @param transportService TransportService - * @param client OS client - * @param actionFilters ActionFilters - * @param clusterService ClusterService - * @param correlationRuleIndices CorrelationRuleIndices which manages lifecycle of correlation rule index - */ - @Inject - public TransportIndexCorrelationRuleAction( - TransportService transportService, - Client client, - ActionFilters actionFilters, - ClusterService clusterService, - CorrelationRuleIndices correlationRuleIndices - ) { - super(IndexCorrelationRuleAction.NAME, transportService, actionFilters, IndexCorrelationRuleRequest::new); - this.client = client; - this.clusterService = clusterService; - this.correlationRuleIndices = correlationRuleIndices; - } - - @Override - protected void doExecute(Task task, IndexCorrelationRuleRequest request, ActionListener listener) { - AsyncIndexCorrelationRuleAction asyncAction = new AsyncIndexCorrelationRuleAction(request, listener); - asyncAction.start(); - } - - private class AsyncIndexCorrelationRuleAction { - private final IndexCorrelationRuleRequest request; - - private final ActionListener listener; - - AsyncIndexCorrelationRuleAction(IndexCorrelationRuleRequest request, ActionListener listener) { - this.request = request; - this.listener = listener; - } - - void start() { - try { - if (correlationRuleIndices.correlationRuleIndexExists() == false) { - try { - correlationRuleIndices.initCorrelationRuleIndex(new ActionListener<>() { - @Override - public void onResponse(CreateIndexResponse response) { - try { - onCreateMappingsResponse(response); - indexCorrelationRule(); - } catch (IOException e) { - onFailures(e); - } - } - - @Override - public void onFailure(Exception e) { - onFailures(e); - } - }); - } catch (IOException e) { - onFailures(e); - } - } else if (!IndexUtils.correlationRuleIndexUpdated) { - IndexUtils.updateIndexMapping( - CorrelationRule.CORRELATION_RULE_INDEX, - CorrelationRuleIndices.correlationRuleIndexMappings(), - clusterService.state(), - client.admin().indices(), - new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse response) { - onUpdateMappingsResponse(response); - try { - indexCorrelationRule(); - } catch (IOException e) { - onFailures(e); - } - } - - @Override - public void onFailure(Exception e) { - onFailures(e); - } - } - ); - } else { - indexCorrelationRule(); - } - } catch (IOException ex) { - onFailures(ex); - } - } - - void indexCorrelationRule() throws IOException { - IndexRequest indexRequest; - if (request.getMethod() == RestRequest.Method.POST) { - indexRequest = new IndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).setRefreshPolicy( - WriteRequest.RefreshPolicy.IMMEDIATE - ) - .source(request.getCorrelationRule().toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) - .timeout(TimeValue.timeValueSeconds(60)); - } else { - indexRequest = new IndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).setRefreshPolicy( - WriteRequest.RefreshPolicy.IMMEDIATE - ) - .source(request.getCorrelationRule().toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) - 
.id(request.getCorrelationRuleId()) - .timeout(TimeValue.timeValueSeconds(60)); - } - - client.index(indexRequest, new ActionListener<>() { - @Override - public void onResponse(IndexResponse response) { - if (response.status().equals(RestStatus.CREATED) || response.status().equals(RestStatus.OK)) { - CorrelationRule ruleResponse = request.getCorrelationRule(); - ruleResponse.setId(response.getId()); - onOperation(ruleResponse); - } else { - onFailures(new OpenSearchStatusException(response.toString(), RestStatus.INTERNAL_SERVER_ERROR)); - } - } - - @Override - public void onFailure(Exception e) { - onFailures(e); - } - }); - } - - private void onCreateMappingsResponse(CreateIndexResponse response) throws IOException { - if (response.isAcknowledged()) { - log.info(String.format(Locale.ROOT, "Created %s with mappings.", CorrelationRule.CORRELATION_RULE_INDEX)); - IndexUtils.correlationRuleIndexUpdated(); - } else { - log.error(String.format(Locale.ROOT, "Create %s mappings call not acknowledged.", CorrelationRule.CORRELATION_RULE_INDEX)); - throw new OpenSearchStatusException( - String.format(Locale.getDefault(), "Create %s mappings call not acknowledged", CorrelationRule.CORRELATION_RULE_INDEX), - RestStatus.INTERNAL_SERVER_ERROR - ); - } - } - - private void onUpdateMappingsResponse(AcknowledgedResponse response) { - if (response.isAcknowledged()) { - log.info(String.format(Locale.ROOT, "Updated %s with mappings.", CorrelationRule.CORRELATION_RULE_INDEX)); - IndexUtils.correlationRuleIndexUpdated(); - } else { - log.error(String.format(Locale.ROOT, "Update %s mappings call not acknowledged.", CorrelationRule.CORRELATION_RULE_INDEX)); - throw new OpenSearchStatusException( - String.format(Locale.getDefault(), "Update %s mappings call not acknowledged", CorrelationRule.CORRELATION_RULE_INDEX), - RestStatus.INTERNAL_SERVER_ERROR - ); - } - } - - private void onOperation(CorrelationRule correlationRule) { - finishHim(correlationRule, null); - } - - private void onFailures(Exception t) { - finishHim(null, t); - } - - private void finishHim(CorrelationRule correlationRule, Exception t) { - if (t != null) { - listener.onFailure(t); - } else { - listener.onResponse( - new IndexCorrelationRuleResponse( - correlationRule.getId(), - correlationRule.getVersion(), - request.getMethod() == RestRequest.Method.POST ? RestStatus.CREATED : RestStatus.OK, - correlationRule - ) - ); - } - } - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java deleted file mode 100644 index 7a47efbb9bb45..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Transport Actions for correlation rules.
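The EventsCorrelationSettings class that follows declares an index-scoped boolean and a dynamic, node-scoped time window. Declaring and reading a setting of the first kind looks like this (a sketch mirroring IS_CORRELATION_INDEX_SETTING below):

```java
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;

final class SettingsSketch {
    // Mirrors the index-scoped boolean declared below: defaults to false unless
    // the index was created with "index.correlation": true.
    static final Setting<Boolean> IS_CORRELATION_INDEX =
        Setting.boolSetting("index.correlation", false, Setting.Property.IndexScope);

    static boolean isCorrelationIndex(Settings indexSettings) {
        return IS_CORRELATION_INDEX.get(indexSettings);
    }
}
```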
- */ -package org.opensearch.plugin.correlation.rules.transport; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java deleted file mode 100644 index 2e2dbbffbeaa2..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.settings; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.unit.TimeValue; - -import java.util.concurrent.TimeUnit; - -import static org.opensearch.common.settings.Setting.Property.IndexScope; - -/** - * Settings for events-correlation-engine. - * - * @opensearch.api - * @opensearch.experimental - */ -public class EventsCorrelationSettings { - /** - * Correlation Index setting name - */ - public static final String CORRELATION_INDEX = "index.correlation"; - /** - * Boolean setting to check if an OS index is a correlation index. - */ - public static final Setting IS_CORRELATION_INDEX_SETTING = Setting.boolSetting(CORRELATION_INDEX, false, IndexScope); - /** - * Global time window setting for Correlations - */ - public static final Setting CORRELATION_TIME_WINDOW = Setting.positiveTimeSetting( - "plugins.security_analytics.correlation_time_window", - new TimeValue(5, TimeUnit.MINUTES), - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - /** - * Default constructor - */ - public EventsCorrelationSettings() {} -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java deleted file mode 100644 index 795291cd0de2e..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Settings for events-correlation-engine - */ -package org.opensearch.plugin.correlation.settings; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java deleted file mode 100644 index 3656bd413733a..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation.utils; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.action.admin.indices.create.CreateIndexRequest; -import org.opensearch.action.admin.indices.create.CreateIndexResponse; -import org.opensearch.client.Client; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.action.ActionListener; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.Objects; - -/** - * Correlation Rule Index manager - * - * @opensearch.internal - */ -public class CorrelationRuleIndices { - private static final Logger log = LogManager.getLogger(CorrelationRuleIndices.class); - - private final Client client; - - private final ClusterService clusterService; - - /** - * Parameterized ctor for CorrelationRuleIndices - * @param client OS Client - * @param clusterService ClusterService - */ - public CorrelationRuleIndices(Client client, ClusterService clusterService) { - this.client = client; - this.clusterService = clusterService; - } - - /** - * get correlation rule index mappings - * @return mappings of correlation rule index - * @throws IOException IOException - */ - public static String correlationRuleIndexMappings() throws IOException { - return new String( - Objects.requireNonNull(CorrelationRuleIndices.class.getClassLoader().getResourceAsStream("mappings/correlation-rules.json")) - .readAllBytes(), - Charset.defaultCharset() - ); - } - - /** - * init the correlation rule index - * @param actionListener listener - * @throws IOException IOException - */ - public void initCorrelationRuleIndex(ActionListener actionListener) throws IOException { - if (correlationRuleIndexExists() == false) { - CreateIndexRequest indexRequest = new CreateIndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).mapping( - correlationRuleIndexMappings() - ).settings(Settings.builder().put("index.hidden", true).build()); - client.admin().indices().create(indexRequest, actionListener); - } - } - - /** - * check if correlation rule index exists - * @return boolean - */ - public boolean correlationRuleIndexExists() { - ClusterState clusterState = clusterService.state(); - return clusterState.getRoutingTable().hasIndex(CorrelationRule.CORRELATION_RULE_INDEX); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java deleted file mode 100644 index 362be3d2932e3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation.utils; - -import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.client.IndicesAdminClient; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.action.ActionListener; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.XContentParser; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; - -import static org.opensearch.core.ParseField.CommonFields._META; - -/** - * Index Management utils - * - * @opensearch.internal - */ -public class IndexUtils { - private static final Integer NO_SCHEMA_VERSION = 0; - private static final String SCHEMA_VERSION = "schema_version"; - - /** - * manages the mappings lifecycle for correlation rule index - */ - public static Boolean correlationRuleIndexUpdated = false; - - private IndexUtils() {} - - /** - * updates the status of correlationRuleIndexUpdated to true - */ - public static void correlationRuleIndexUpdated() { - correlationRuleIndexUpdated = true; - } - - /** - * util method which decides based on schema version whether to update an index. - * @param index IndexMetadata - * @param mapping new mappings - * @return Boolean - * @throws IOException IOException - */ - public static Boolean shouldUpdateIndex(IndexMetadata index, String mapping) throws IOException { - Integer oldVersion = NO_SCHEMA_VERSION; - Integer newVersion = getSchemaVersion(mapping); - - Map<String, Object> indexMapping = index.mapping().sourceAsMap(); - if (indexMapping != null - && indexMapping.containsKey(_META.getPreferredName()) - && indexMapping.get(_META.getPreferredName()) instanceof HashMap) { - Map<String, Object> metaData = (HashMap<String, Object>) indexMapping.get(_META.getPreferredName()); - if (metaData.containsKey(SCHEMA_VERSION)) { - oldVersion = (Integer) metaData.get(SCHEMA_VERSION); - } - } - return newVersion > oldVersion; - } - - /** - * Gets the schema version for the mapping - * @param mapping mappings as input - * @return schema version - * @throws IOException IOException - */ - public static Integer getSchemaVersion(String mapping) throws IOException { - XContentParser xcp = MediaTypeRegistry.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, mapping); - - while (!xcp.isClosed()) { - XContentParser.Token token = xcp.currentToken(); - if (token != null && token != XContentParser.Token.END_OBJECT && token != XContentParser.Token.START_OBJECT) { - if (!Objects.equals(xcp.currentName(), _META.getPreferredName())) { - xcp.nextToken(); - xcp.skipChildren(); - } else { - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - switch (xcp.currentName()) { - case SCHEMA_VERSION: - int version = xcp.intValue(); - if (version < 0) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "%s cannot be negative", SCHEMA_VERSION) - ); - } - return version; - default: - xcp.nextToken(); - } - } - } - } - xcp.nextToken(); - } - return NO_SCHEMA_VERSION; - } - - /** - * updates the mappings for the index.
- * @param index index for which mapping needs to be updated - * @param mapping new mappings - * @param clusterState ClusterState - * @param client Admin client - * @param actionListener listener - * @throws IOException IOException - */ - public static void updateIndexMapping( - String index, - String mapping, - ClusterState clusterState, - IndicesAdminClient client, - ActionListener<AcknowledgedResponse> actionListener - ) throws IOException { - if (clusterState.metadata().indices().containsKey(index)) { - if (shouldUpdateIndex(clusterState.metadata().index(index), mapping)) { - PutMappingRequest putMappingRequest = new PutMappingRequest(index).source(mapping, MediaTypeRegistry.JSON); - client.putMapping(putMappingRequest, actionListener); - } else { - actionListener.onResponse(new AcknowledgedResponse(true)); - } - } - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java deleted file mode 100644 index 798196c47df20..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * utils package for events-correlation-engine - */ -package org.opensearch.plugin.correlation.utils; diff --git a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec deleted file mode 100644 index 013c17e4a9736..0000000000000 --- a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ /dev/null @@ -1 +0,0 @@ -org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec diff --git a/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json b/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json deleted file mode 100644 index 7741b160eca24..0000000000000 --- a/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "_meta" : { - "schema_version": 1 - }, - "properties": { - "name": { - "type": "text", - "analyzer" : "whitespace", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "correlate": { - "type": "nested", - "properties": { - "index": { - "type": "text", - "analyzer" : "whitespace", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "query": { - "type": "text", - "analyzer" : "whitespace", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "tags": { - "type": "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "timestampField": { - "type": "text", - "analyzer" : "whitespace", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - } - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java deleted file mode 100644 index 005ffa2097b03..0000000000000
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation; - -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -public class EventsCorrelationPluginTests extends OpenSearchTestCase { - - public void testDummy() { - Assert.assertEquals(1, 1); - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java deleted file mode 100644 index 19ce3b33514d8..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index; - -import org.apache.lucene.index.VectorSimilarityFunction; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.mapper.MapperParsingException; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.opensearch.plugin.correlation.core.index.CorrelationParamsContext.PARAMETERS; -import static org.opensearch.plugin.correlation.core.index.CorrelationParamsContext.VECTOR_SIMILARITY_FUNCTION; - -/** - * Unit tests for CorrelationParamsContext - */ -public class CorrelationParamsContextTests extends OpenSearchTestCase { - - /** - * Test reading from and writing to streams - */ - public void testStreams() throws IOException { - int efConstruction = 321; - int m = 12; - - Map<String, Object> parameters = new HashMap<>(); - parameters.put("m", m); - parameters.put("ef_construction", efConstruction); - - CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters); - - BytesStreamOutput streamOutput = new BytesStreamOutput(); - context.writeTo(streamOutput); - - CorrelationParamsContext copy = new CorrelationParamsContext(streamOutput.bytes().streamInput()); - Assert.assertEquals(context.getSimilarityFunction(), copy.getSimilarityFunction()); - Assert.assertEquals(context.getParameters(), copy.getParameters()); - } - - /** - * test get vector similarity function - */ - public void testVectorSimilarityFunction() { - int efConstruction = 321; - int m = 12; - - Map<String, Object> parameters = new HashMap<>(); - parameters.put("m", m); - parameters.put("ef_construction", efConstruction); - - CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters); - Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, context.getSimilarityFunction()); - } -
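// Aside (editor's sketch, not part of the deleted file): the tests in CorrelationParamsContextTests
// all lean on the same Writeable round-trip, so a minimal standalone example of the pattern may help.
// It assumes only the CorrelationParamsContext API visible in this diff (the StreamInput constructor,
// writeTo, getSimilarityFunction, getParameters); BytesStreamOutput is the in-memory stream used above.
//
//     Map<String, Object> params = Map.<String, Object>of("m", 12, "ef_construction", 321);
//     CorrelationParamsContext original = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, params);
//     BytesStreamOutput out = new BytesStreamOutput();    // write side: buffer the wire format
//     original.writeTo(out);
//     CorrelationParamsContext copy = new CorrelationParamsContext(out.bytes().streamInput());    // read side
//     assert copy.getSimilarityFunction() == original.getSimilarityFunction();
//     assert copy.getParameters().equals(original.getParameters());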
- /** - * test get parameters - */ - public void testParameters() { - int efConstruction = 321; - int m = 12; - - Map<String, Object> parameters = new HashMap<>(); - parameters.put("m", m); - parameters.put("ef_construction", efConstruction); - - CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters); - Assert.assertEquals(parameters, context.getParameters()); - } - - /** - * test parse method with invalid input - * @throws IOException IOException - */ - public void testParse_Invalid() throws IOException { - // Invalid input type - Integer invalidIn = 12; - expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(invalidIn)); - - // Invalid vector similarity function - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(CorrelationParamsContext.VECTOR_SIMILARITY_FUNCTION, 0) - .endObject(); - - final Map<String, Object> in2 = xContentBuilderToMap(xContentBuilder); - expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(in2)); - - // Invalid parameters - xContentBuilder = XContentFactory.jsonBuilder().startObject().field(PARAMETERS, 0).endObject(); - - final Map<String, Object> in4 = xContentBuilderToMap(xContentBuilder); - expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(in4)); - } - - /** - * test parse with null parameters - * @throws IOException IOException - */ - public void testParse_NullParameters() throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN) - .field(PARAMETERS, (String) null) - .endObject(); - Map<String, Object> in = xContentBuilderToMap(xContentBuilder); - Assert.assertThrows(MapperParsingException.class, () -> { CorrelationParamsContext.parse(in); }); - } - - /** - * test parse method - * @throws IOException IOException - */ - public void testParse_Valid() throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN) - .startObject(PARAMETERS) - .field("m", 2) - .field("ef_construction", 128) - .endObject() - .endObject(); - - Map<String, Object> in = xContentBuilderToMap(xContentBuilder); - CorrelationParamsContext context = CorrelationParamsContext.parse(in); - Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, context.getSimilarityFunction()); - Assert.assertEquals(Map.of("m", 2, "ef_construction", 128), context.getParameters()); - } - - /** - * test toXContent method - * @throws IOException IOException - */ - public void testToXContent() throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN) - .startObject(PARAMETERS) - .field("m", 2) - .field("ef_construction", 128) - .endObject() - .endObject(); - - Map<String, Object> in = xContentBuilderToMap(xContentBuilder); - CorrelationParamsContext context = CorrelationParamsContext.parse(in); - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder = context.toXContent(builder, ToXContent.EMPTY_PARAMS); - - Map<String, Object> out = xContentBuilderToMap(builder); - Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN.name(), out.get(VECTOR_SIMILARITY_FUNCTION)); - } - - private Map<String, Object> xContentBuilderToMap(XContentBuilder xContentBuilder) { - return XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(); - } -} diff --git
a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java deleted file mode 100644 index 32c71dcd37196..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index; - -import org.apache.lucene.document.FieldType; -import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; -import org.opensearch.common.Randomness; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Random; - -/** - * Unit tests for VectorField - */ -public class VectorFieldTests extends OpenSearchTestCase { - - private final Random random = Randomness.get(); - - /** - * test VectorField ctor - */ - public void testVectorField_ctor() { - VectorField field = new VectorField("test-field", new float[] { 1.0f, 1.0f }, new FieldType()); - Assert.assertEquals("test-field", field.name()); - } - - /** - * test float vector to array serializer - * @throws IOException IOException - */ - public void testVectorAsArraySerializer() throws IOException { - final float[] vector = getArrayOfRandomFloats(20); - - final BytesStreamOutput objectStream = new BytesStreamOutput(); - objectStream.writeFloatArray(vector); - final byte[] serializedVector = objectStream.bytes().toBytesRef().bytes; - - final byte[] actualSerializedVector = VectorField.floatToByteArray(vector); - - Assert.assertNotNull(actualSerializedVector); - Assert.assertArrayEquals(serializedVector, actualSerializedVector); - - final float[] actualDeserializedVector = byteToFloatArray(actualSerializedVector); - Assert.assertNotNull(actualDeserializedVector); - Assert.assertArrayEquals(vector, actualDeserializedVector, 0.1f); - } - - /** - * test byte array to float vector failures - */ - public void testByteToFloatArrayFailures() { - final byte[] serializedVector = "test-dummy".getBytes(StandardCharsets.UTF_8); - expectThrows(OpenSearchException.class, () -> { byteToFloatArray(serializedVector); }); - } - - private float[] getArrayOfRandomFloats(int length) { - float[] vector = new float[length]; - for (int i = 0; i < length; ++i) { - vector[i] = random.nextFloat(); - } - return vector; - } - - private static float[] byteToFloatArray(byte[] byteStream) { - try (BytesStreamInput objectStream = new BytesStreamInput(byteStream)) { - return objectStream.readFloatArray(); - } catch (IOException ex) { - throw ExceptionsHelper.convertToOpenSearchException(ex); - } - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java deleted file mode 100644 index 7223b450a136c..0000000000000 ---
a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec.correlation990; - -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.document.KnnFloatVectorField; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.SerialMergeScheduler; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.store.Directory; -import org.apache.lucene.tests.index.RandomIndexWriter; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; -import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; -import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryFactory; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Map; -import java.util.Optional; -import java.util.function.Function; - -import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_EF_CONSTRUCTION; -import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_M; -import static org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion.V_9_9_0; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -/** - * Unit tests for custom correlation codec - */ -public class CorrelationCodecTests extends OpenSearchTestCase { - - private static final String FIELD_NAME_ONE = "test_vector_one"; - private static final String FIELD_NAME_TWO = "test_vector_two"; - - /** - * test correlation vector index - * @throws Exception Exception - */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8329") - public void testCorrelationVectorIndex() throws Exception { - Function<MapperService, PerFieldCorrelationVectorsFormat> perFieldCorrelationVectorsProvider = - mapperService -> new PerFieldCorrelationVectorsFormat(Optional.of(mapperService)); - Function<PerFieldCorrelationVectorsFormat, CorrelationCodec> correlationCodecProvider = (correlationVectorsFormat -> new CorrelationCodec( - V_9_9_0.getDefaultCodecDelegate(), - correlationVectorsFormat - )); - testCorrelationVectorIndex(correlationCodecProvider, perFieldCorrelationVectorsProvider); - } - - private void testCorrelationVectorIndex( - final Function<PerFieldCorrelationVectorsFormat, CorrelationCodec> codecProvider, - final Function<MapperService, PerFieldCorrelationVectorsFormat> perFieldCorrelationVectorsProvider - ) throws Exception { - final MapperService mapperService = mock(MapperService.class); - final CorrelationParamsContext correlationParamsContext = new CorrelationParamsContext( - VectorSimilarityFunction.EUCLIDEAN, - Map.of(METHOD_PARAMETER_M, 16, METHOD_PARAMETER_EF_CONSTRUCTION, 256) - ); - - final VectorFieldMapper.CorrelationVectorFieldType mappedFieldType1 = new VectorFieldMapper.CorrelationVectorFieldType( - FIELD_NAME_ONE, - Map.of(), - 3, - correlationParamsContext - ); - final
VectorFieldMapper.CorrelationVectorFieldType mappedFieldType2 = new VectorFieldMapper.CorrelationVectorFieldType( - FIELD_NAME_TWO, - Map.of(), - 2, - correlationParamsContext - ); - when(mapperService.fieldType(eq(FIELD_NAME_ONE))).thenReturn(mappedFieldType1); - when(mapperService.fieldType(eq(FIELD_NAME_TWO))).thenReturn(mappedFieldType2); - - var perFieldCorrelationVectorsFormatSpy = spy(perFieldCorrelationVectorsProvider.apply(mapperService)); - final Codec codec = codecProvider.apply(perFieldCorrelationVectorsFormatSpy); - - Directory dir = newFSDirectory(createTempDir()); - IndexWriterConfig iwc = newIndexWriterConfig(); - iwc.setMergeScheduler(new SerialMergeScheduler()); - iwc.setCodec(codec); - - final FieldType luceneFieldType = KnnFloatVectorField.createFieldType(3, VectorSimilarityFunction.EUCLIDEAN); - float[] array = { 1.0f, 3.0f, 4.0f }; - KnnFloatVectorField vectorField = new KnnFloatVectorField(FIELD_NAME_ONE, array, luceneFieldType); - RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); - Document doc = new Document(); - doc.add(vectorField); - writer.addDocument(doc); - writer.commit(); - IndexReader reader = writer.getReader(); - writer.close(); - - verify(perFieldCorrelationVectorsFormatSpy).getKnnVectorsFormatForField(eq(FIELD_NAME_ONE)); - - IndexSearcher searcher = new IndexSearcher(reader); - Query query = CorrelationQueryFactory.create( - new CorrelationQueryFactory.CreateQueryRequest("dummy", FIELD_NAME_ONE, new float[] { 1.0f, 0.0f, 0.0f }, 1, null, null) - ); - - assertEquals(1, searcher.count(query)); - - reader.close(); - dir.close(); - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java deleted file mode 100644 index 674f35069a742..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation.core.index.mapper; - -import org.apache.lucene.document.KnnFloatVectorField; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.FieldExistsQuery; -import org.opensearch.Version; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Explicit; -import org.opensearch.common.settings.IndexScopedSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.ContentPath; -import org.opensearch.index.mapper.FieldMapper; -import org.opensearch.index.mapper.Mapper; -import org.opensearch.index.mapper.MapperParsingException; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.ParseContext; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.QueryShardException; -import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; -import org.opensearch.search.lookup.SearchLookup; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import org.mockito.Mockito; - -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Unit tests for correlation vector field mapper - */ -public class CorrelationVectorFieldMapperTests extends OpenSearchTestCase { - - private static final String CORRELATION_VECTOR_TYPE = "correlation_vector"; - private static final String DIMENSION_FIELD_NAME = "dimension"; - private static final String TYPE_FIELD_NAME = "type"; - - /** - * test builder construction from parse of correlation params context - * @throws IOException IOException - */ - public void testBuilder_parse_fromCorrelationParamsContext() throws IOException { - String fieldName = "test-field-name"; - String indexName = "test-index-name"; - Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); - - VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); - - int efConstruction = 321; - int m = 12; - int dimension = 10; - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, dimension) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .startObject("parameters") - .field("m", m) - .field("ef_construction", efConstruction) - .endObject() - .endObject() - .endObject(); - - VectorFieldMapper.Builder builder = (VectorFieldMapper.Builder) typeParser.parse( - fieldName, - XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), - buildParserContext(indexName, settings) - ); - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(settings, new ContentPath()); - builder.build(builderContext); - - Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, builder.correlationParamsContext.getValue().getSimilarityFunction()); - Assert.assertEquals(321, 
builder.correlationParamsContext.getValue().getParameters().get("ef_construction")); - - XContentBuilder xContentBuilderEmptyParams = XContentFactory.jsonBuilder() - .startObject() - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, dimension) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .endObject() - .endObject(); - - VectorFieldMapper.Builder builderEmptyParams = (VectorFieldMapper.Builder) typeParser.parse( - fieldName, - XContentHelper.convertToMap(BytesReference.bytes(xContentBuilderEmptyParams), true, xContentBuilderEmptyParams.contentType()) - .v2(), - buildParserContext(indexName, settings) - ); - - Assert.assertEquals( - VectorSimilarityFunction.EUCLIDEAN, - builderEmptyParams.correlationParamsContext.getValue().getSimilarityFunction() - ); - Assert.assertTrue(builderEmptyParams.correlationParamsContext.getValue().getParameters().isEmpty()); - } - - /** - * test type parser construction throws an error for invalid dimension of correlation vectors - * @throws IOException IOException - */ - public void testTypeParser_parse_fromCorrelationParamsContext_InvalidDimension() throws IOException { - String fieldName = "test-field-name"; - String indexName = "test-index-name"; - Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); - - VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); - - int efConstruction = 321; - int m = 12; - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, 2000) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .startObject("parameters") - .field("m", m) - .field("ef_construction", efConstruction) - .endObject() - .endObject() - .endObject(); - - VectorFieldMapper.Builder builder = (VectorFieldMapper.Builder) typeParser.parse( - fieldName, - XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), - buildParserContext(indexName, settings) - ); - - expectThrows(IllegalArgumentException.class, () -> builder.build(new Mapper.BuilderContext(settings, new ContentPath()))); - } - - /** - * test type parser construction error for invalid vector similarity function - * @throws IOException IOException - */ - public void testTypeParser_parse_fromCorrelationParamsContext_InvalidVectorSimilarityFunction() throws IOException { - String fieldName = "test-field-name"; - String indexName = "test-index-name"; - Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); - - VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); - - int efConstruction = 321; - int m = 12; - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, 2000) - .startObject("correlation_ctx") - .field("similarityFunction", "invalid") - .startObject("parameters") - .field("m", m) - .field("ef_construction", efConstruction) - .endObject() - .endObject() - .endObject(); - - expectThrows( - MapperParsingException.class, - () -> typeParser.parse( - fieldName, - XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), - buildParserContext(indexName, settings) - ) - ); - } - - /** - * test parseCreateField in CorrelationVectorFieldMapper -
* @throws IOException IOException - */ - public void testCorrelationVectorFieldMapper_parseCreateField() throws IOException { - String fieldName = "test-field-name"; - int dimension = 10; - float[] testVector = createInitializedFloatArray(dimension, 1.0f); - CorrelationParamsContext correlationParamsContext = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, Map.of()); - - VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = new VectorFieldMapper.CorrelationVectorFieldType( - fieldName, - Map.of(), - dimension, - correlationParamsContext - ); - - CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput input = new CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput( - fieldName, - correlationVectorFieldType, - FieldMapper.MultiFields.empty(), - FieldMapper.CopyTo.empty(), - new Explicit<>(true, true), - false, - false, - correlationParamsContext - ); - - ParseContext.Document document = new ParseContext.Document(); - ContentPath contentPath = new ContentPath(); - ParseContext parseContext = mock(ParseContext.class); - when(parseContext.doc()).thenReturn(document); - when(parseContext.path()).thenReturn(contentPath); - - CorrelationVectorFieldMapper correlationVectorFieldMapper = Mockito.spy(new CorrelationVectorFieldMapper(input)); - doReturn(Optional.of(testVector)).when(correlationVectorFieldMapper).getFloatsFromContext(parseContext, dimension); - - correlationVectorFieldMapper.parseCreateField(parseContext, dimension); - - List<IndexableField> fields = document.getFields(); - assertEquals(1, fields.size()); - IndexableField field = fields.get(0); - - Assert.assertTrue(field instanceof KnnFloatVectorField); - KnnFloatVectorField knnFloatVectorField = (KnnFloatVectorField) field; - Assert.assertArrayEquals(testVector, knnFloatVectorField.vectorValue(), 0.001f); - } - - /** - * test CorrelationVectorFieldType subclass - */ - public void testCorrelationVectorFieldType() { - String fieldName = "test-field-name"; - int dimension = 10; - QueryShardContext context = mock(QueryShardContext.class); - SearchLookup searchLookup = mock(SearchLookup.class); - - VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = new VectorFieldMapper.CorrelationVectorFieldType( - fieldName, - Map.of(), - dimension - ); - Assert.assertThrows(QueryShardException.class, () -> { correlationVectorFieldType.termQuery(new Object(), context); }); - Assert.assertThrows( - UnsupportedOperationException.class, - () -> { correlationVectorFieldType.valueFetcher(context, searchLookup, ""); } - ); - Assert.assertTrue(correlationVectorFieldType.existsQuery(context) instanceof FieldExistsQuery); - Assert.assertEquals(VectorFieldMapper.CONTENT_TYPE, correlationVectorFieldType.typeName()); - } - - /** - * test constants in VectorFieldMapper - */ - public void testVectorFieldMapperConstants() { - Assert.assertNotNull(VectorFieldMapper.Defaults.IGNORE_MALFORMED); - Assert.assertNotNull(VectorFieldMapper.Names.IGNORE_MALFORMED); - } - - private IndexMetadata buildIndexMetaData(String index, Settings settings) { - return IndexMetadata.builder(index) - .settings(settings) - .numberOfShards(1) - .numberOfReplicas(0) - .version(7) - .mappingVersion(0) - .settingsVersion(0) - .aliasesVersion(0) - .creationDate(0) - .build(); - } - - private Mapper.TypeParser.ParserContext buildParserContext(String index, Settings settings) { - IndexSettings indexSettings = new IndexSettings( - buildIndexMetaData(index, settings), - Settings.EMPTY, - new IndexScopedSettings(Settings.EMPTY, new
HashSet<>(IndexScopedSettings.BUILT_IN_INDEX_SETTINGS)) - ); - - MapperService mapperService = mock(MapperService.class); - when(mapperService.getIndexSettings()).thenReturn(indexSettings); - - return new Mapper.TypeParser.ParserContext( - null, - mapperService, - type -> new VectorFieldMapper.TypeParser(), - Version.CURRENT, - null, - null, - null - ); - } - - private static float[] createInitializedFloatArray(int dimension, float value) { - float[] array = new float[dimension]; - Arrays.fill(array, value); - return array; - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java deleted file mode 100644 index 3e567d0c04e53..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.query; - -import org.apache.lucene.search.KnnFloatVectorQuery; -import org.opensearch.Version; -import org.opensearch.cluster.ClusterModule; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.common.Strings; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.index.Index; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; -import org.opensearch.plugins.SearchPlugin; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.List; -import java.util.Optional; - -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Unit tests for Correlation Query Builder - */ -public class CorrelationQueryBuilderTests extends OpenSearchTestCase { - - private static final String FIELD_NAME = "myvector"; - private static final int K = 1; - private static final TermQueryBuilder TERM_QUERY = QueryBuilders.termQuery("field", "value"); - private static final float[] QUERY_VECTOR = new float[] { 1.0f, 2.0f, 3.0f, 4.0f }; - - /** - * test invalid number of nearby neighbors - */ - public void testInvalidK() { - float[] queryVector = { 1.0f, 1.0f }; - - expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, -K)); - expectThrows(IllegalArgumentException.class, () -> new 
CorrelationQueryBuilder(FIELD_NAME, queryVector, 0)); - expectThrows( - IllegalArgumentException.class, - () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, CorrelationQueryBuilder.K_MAX + 1) - ); - } - - /** - * test empty vector scenario - */ - public void testEmptyVector() { - final float[] queryVector = null; - expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, 1)); - final float[] queryVector1 = new float[] {}; - expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector1, 1)); - } - - /** - * test serde with xcontent - * @throws IOException IOException - */ - public void testFromXContent() throws IOException { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - builder.startObject(correlationQueryBuilder.fieldName()); - builder.field(CorrelationQueryBuilder.VECTOR_FIELD.getPreferredName(), correlationQueryBuilder.vector()); - builder.field(CorrelationQueryBuilder.K_FIELD.getPreferredName(), correlationQueryBuilder.getK()); - builder.endObject(); - builder.endObject(); - XContentParser contentParser = createParser(builder); - contentParser.nextToken(); - CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser); - Assert.assertEquals(actualBuilder, correlationQueryBuilder); - } - - /** - * test serde with xcontent - * @throws IOException IOException - */ - public void testFromXContentFromString() throws IOException { - String correlationQuery = "{\n" - + "  \"myvector\" : {\n" - + "    \"vector\" : [\n" - + "      1.0,\n" - + "      2.0,\n" - + "      3.0,\n" - + "      4.0\n" - + "    ],\n" - + "    \"k\" : 1,\n" - + "    \"boost\" : 1.0\n" - + "  }\n" - + "}"; - XContentParser contentParser = createParser(JsonXContent.jsonXContent, correlationQuery); - contentParser.nextToken(); - CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser); - Assert.assertEquals(correlationQuery.replace("\n", "").replace(" ", ""), Strings.toString(MediaTypeRegistry.JSON, actualBuilder)); - } - - /** - * test serde with xcontent with filters - * @throws IOException IOException - */ - public void testFromXContentWithFilters() throws IOException { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, TERM_QUERY); - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - builder.startObject(correlationQueryBuilder.fieldName()); - builder.field(CorrelationQueryBuilder.VECTOR_FIELD.getPreferredName(), correlationQueryBuilder.vector()); - builder.field(CorrelationQueryBuilder.K_FIELD.getPreferredName(), correlationQueryBuilder.getK()); - builder.field(CorrelationQueryBuilder.FILTER_FIELD.getPreferredName(), correlationQueryBuilder.getFilter()); - builder.endObject(); - builder.endObject(); - XContentParser contentParser = createParser(builder); - contentParser.nextToken(); - CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser); - Assert.assertEquals(actualBuilder, correlationQueryBuilder); - } - - /** - * test conversion to KnnFloatVectorQuery logic - * @throws IOException IOException - */ - public void testDoToQuery() throws IOException { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - Index dummyIndex = new Index("dummy", "dummy"); - QueryShardContext
mockQueryShardContext = mock(QueryShardContext.class); - VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class); - when(mockQueryShardContext.index()).thenReturn(dummyIndex); - when(mockCorrVectorField.getDimension()).thenReturn(4); - when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField); - KnnFloatVectorQuery query = (KnnFloatVectorQuery) correlationQueryBuilder.doToQuery(mockQueryShardContext); - Assert.assertEquals(FIELD_NAME, query.getField()); - Assert.assertArrayEquals(QUERY_VECTOR, query.getTargetCopy(), 0.1f); - Assert.assertEquals(K, query.getK()); - } - - /** - * test conversion to KnnFloatVectorQuery logic with filter - * @throws IOException IOException - */ - public void testDoToQueryWithFilter() throws IOException { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, TERM_QUERY); - Index dummyIndex = new Index("dummy", "dummy"); - QueryShardContext mockQueryShardContext = mock(QueryShardContext.class); - VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class); - when(mockQueryShardContext.index()).thenReturn(dummyIndex); - when(mockCorrVectorField.getDimension()).thenReturn(4); - when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField); - KnnFloatVectorQuery query = (KnnFloatVectorQuery) correlationQueryBuilder.doToQuery(mockQueryShardContext); - Assert.assertEquals(FIELD_NAME, query.getField()); - Assert.assertArrayEquals(QUERY_VECTOR, query.getTargetCopy(), 0.1f); - Assert.assertEquals(K, query.getK()); - Assert.assertEquals(TERM_QUERY.toQuery(mockQueryShardContext), query.getFilter()); - } - - /** - * test conversion to KnnFloatVectorQuery logic failure with invalid dimensions - */ - public void testDoToQueryInvalidDimensions() { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - Index dummyIndex = new Index("dummy", "dummy"); - QueryShardContext mockQueryShardContext = mock(QueryShardContext.class); - VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class); - when(mockQueryShardContext.index()).thenReturn(dummyIndex); - when(mockCorrVectorField.getDimension()).thenReturn(400); - when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField); - expectThrows(IllegalArgumentException.class, () -> correlationQueryBuilder.doToQuery(mockQueryShardContext)); - } - - /** - * test conversion to KnnFloatVectorQuery logic failure with invalid field type - */ - public void testDoToQueryInvalidFieldType() { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - Index dummyIndex = new Index("dummy", "dummy"); - QueryShardContext mockQueryShardContext = mock(QueryShardContext.class); - NumberFieldMapper.NumberFieldType mockCorrVectorField = mock(NumberFieldMapper.NumberFieldType.class); - when(mockQueryShardContext.index()).thenReturn(dummyIndex); - when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField); - expectThrows(IllegalArgumentException.class, () -> correlationQueryBuilder.doToQuery(mockQueryShardContext)); - } - - /** - * test serialization of Correlation Query Builder - * @throws Exception if serialization fails - */ - public void
testSerialization() throws Exception { - assertSerialization(Optional.empty()); - assertSerialization(Optional.of(TERM_QUERY)); - } - - private void assertSerialization(final Optional<QueryBuilder> queryBuilderOptional) throws IOException { - final CorrelationQueryBuilder builder = queryBuilderOptional.isPresent() - ? new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, queryBuilderOptional.get()) - : new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.CURRENT); - output.writeNamedWriteable(builder); - - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.CURRENT); - final QueryBuilder deserializedQuery = in.readNamedWriteable(QueryBuilder.class); - - assertNotNull(deserializedQuery); - assertTrue(deserializedQuery instanceof CorrelationQueryBuilder); - final CorrelationQueryBuilder deserializedKnnQueryBuilder = (CorrelationQueryBuilder) deserializedQuery; - assertEquals(FIELD_NAME, deserializedKnnQueryBuilder.fieldName()); - assertArrayEquals(QUERY_VECTOR, (float[]) deserializedKnnQueryBuilder.vector(), 0.0f); - assertEquals(K, deserializedKnnQueryBuilder.getK()); - if (queryBuilderOptional.isPresent()) { - assertNotNull(deserializedKnnQueryBuilder.getFilter()); - assertEquals(queryBuilderOptional.get(), deserializedKnnQueryBuilder.getFilter()); - } else { - assertNull(deserializedKnnQueryBuilder.getFilter()); - } - } - } - } - - @Override - protected NamedXContentRegistry xContentRegistry() { - List<NamedXContentRegistry.Entry> list = ClusterModule.getNamedXWriteables(); - SearchPlugin.QuerySpec<TermQueryBuilder> spec = new SearchPlugin.QuerySpec<>( - TermQueryBuilder.NAME, - TermQueryBuilder::new, - TermQueryBuilder::fromXContent - ); - list.add(new NamedXContentRegistry.Entry(QueryBuilder.class, spec.getName(), (p, c) -> spec.getParser().fromXContent(p))); - NamedXContentRegistry registry = new NamedXContentRegistry(list); - return registry; - } - - @Override - protected NamedWriteableRegistry writableRegistry() { - final List<NamedWriteableRegistry.Entry> entries = ClusterModule.getNamedWriteables(); - entries.add( - new NamedWriteableRegistry.Entry( - QueryBuilder.class, - CorrelationQueryBuilder.NAME_FIELD.getPreferredName(), - CorrelationQueryBuilder::new - ) - ); - entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new)); - return new NamedWriteableRegistry(entries); - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java deleted file mode 100644 index 45cb47b05b5c2..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -package org.opensearch.plugin.correlation.settings; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.plugin.correlation.EventsCorrelationPlugin; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; -import org.junit.Before; - -import java.util.List; -import java.util.concurrent.TimeUnit; - -/** - * Unit tests for Correlation Engine settings - */ -public class EventsCorrelationSettingsTests extends OpenSearchTestCase { - - private EventsCorrelationPlugin plugin; - - @Before - public void setup() { - plugin = new EventsCorrelationPlugin(); - } - - /** - * test all plugin settings returned - */ - public void testAllPluginSettingsReturned() { - List<Setting<?>> expectedSettings = List.of( - EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING, - EventsCorrelationSettings.CORRELATION_TIME_WINDOW - ); - - List<Setting<?>> settings = plugin.getSettings(); - Assert.assertTrue(settings.containsAll(expectedSettings)); - } - - /** - * test settings get value - */ - public void testSettingsGetValue() { - Settings settings = Settings.builder().put("index.correlation", true).build(); - Assert.assertEquals(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING.get(settings), true); - settings = Settings.builder() - .put("plugins.security_analytics.correlation_time_window", new TimeValue(10, TimeUnit.MINUTES)) - .build(); - Assert.assertEquals(EventsCorrelationSettings.CORRELATION_TIME_WINDOW.get(settings), new TimeValue(10, TimeUnit.MINUTES)); - } -} diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 4f30ea9ea7e22..2948ca12904f5 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -89,7 +89,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.2.2' + api 'org.apache.xmlbeans:xmlbeans:5.3.0' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 deleted file mode 100644 index 613c1028dbd6d..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -586ffe10ae9864e19e85c24bd060790a70586f72 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 new file mode 100644 index 0000000000000..4dbb0149da890 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 @@ -0,0 +1 @@ +f93c3ba820d7240b7fec4ec5bc35e7223cc6fc1f \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 74f199820262e..ad12ec9003e64 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,11 +44,11 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.51.0' + api 'com.azure:azure-core:1.54.1' api 'com.azure:azure-json:1.3.0' api 'com.azure:azure-xml:1.1.0' - api 'com.azure:azure-storage-common:12.27.1' - api 'com.azure:azure-core-http-netty:1.15.5' + api 'com.azure:azure-storage-common:12.28.0' + api 'com.azure:azure-core-http-netty:1.15.7' api
"io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -61,8 +61,8 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.17.2' - api 'com.nimbusds:oauth2-oidc-sdk:11.19.1' + api 'com.microsoft.azure:msal4j:1.18.0' + api 'com.nimbusds:oauth2-oidc-sdk:11.20.1' api 'com.nimbusds:nimbus-jose-jwt:9.41.1' api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' @@ -108,7 +108,6 @@ thirdPartyAudit { // Optional and not enabled by Elasticsearch 'com.google.common.util.concurrent.internal.InternalFutureFailureAccess', 'com.google.common.util.concurrent.internal.InternalFutures', - 'com.azure.core.credential.ProofOfPossessionOptions', 'com.azure.storage.internal.avro.implementation.AvroObject', 'com.azure.storage.internal.avro.implementation.AvroReader', 'com.azure.storage.internal.avro.implementation.AvroReaderFactory', diff --git a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 deleted file mode 100644 index 7200f59af2f9a..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ff5d0aedf75ca45ec0ace24673f790d2f7a57096 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 new file mode 100644 index 0000000000000..9246d0dd8443a --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 @@ -0,0 +1 @@ +9ae0cc4a8ff02a0146510ec9e1c06ab48950a66b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 deleted file mode 100644 index 2f5239cc26148..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -44d99705d3759e2ad7ee8110f811d4ed304a6a7c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 new file mode 100644 index 0000000000000..d72f835c69903 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 @@ -0,0 +1 @@ +a83247eeeb7f63f891e725228d54c3c24132c66a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 deleted file mode 100644 index d7602da1418d1..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c477c5d8c0f2076da1c5345c1097be6a319fe7c4 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 new file mode 100644 index 0000000000000..ed932cd0a07e9 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 @@ -0,0 +1 @@ +3c5b7de96c68947ab74cc7925b27ca2b9f6b91d0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 deleted file mode 100644 index 
b5219ee17e9fa..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6211e3d71d0388929babaa0ff0951b30d001852 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 new file mode 100644 index 0000000000000..292259e9d862d --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 @@ -0,0 +1 @@ +a47e4e9257a5d9cdb8282c331278492968e06250 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 deleted file mode 100644 index 7d83b0e8ca639..0000000000000 --- a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58db85a807a56ae76baffa519772271ad5808195 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 new file mode 100644 index 0000000000000..7527d31eb1d37 --- /dev/null +++ b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 @@ -0,0 +1 @@ +8d1ecd62d31945534a7cd63062c3c48ff0df9c43 \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index 970388498ee26..c7eae3eaa220b 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -88,6 +88,7 @@ import fixture.azure.AzureHttpHandler; import reactor.core.scheduler.Schedulers; +import reactor.netty.http.HttpResources; import static java.nio.charset.StandardCharsets.UTF_8; import static org.opensearch.repositories.azure.AzureRepository.Repository.CONTAINER_SETTING; @@ -142,6 +143,7 @@ public void tearDown() throws Exception { @AfterClass public static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java index 3356e5174592a..0433a13baec2c 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java @@ -49,6 +49,7 @@ import java.util.List; import reactor.core.scheduler.Schedulers; +import reactor.netty.http.HttpResources; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -57,6 +58,7 @@ public class AzureRepositorySettingsTests extends OpenSearchTestCase { @AfterClass public static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index 9cff5bc2c30f1..324a20c9030c6 100644 --- 
a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -43,7 +43,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.test.OpenSearchTestCase; -import org.junit.After; import org.junit.AfterClass; import java.io.IOException; @@ -71,19 +70,10 @@ public class AzureStorageServiceTests extends OpenSearchTestCase { @AfterClass public static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } - @After - public void tearDown() throws Exception { - try { - // Properly shut down resources - HttpResources.disposeLoopsAndConnectionsLater().block(); - } finally { - super.tearDown(); - } - } - public void testReadSecuredSettings() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) diff --git a/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 deleted file mode 100644 index bf45716c5b8ce..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle new file mode 100644 index 0000000000000..47f62b2b8c3f3 --- /dev/null +++ b/plugins/transport-grpc/build.gradle @@ -0,0 +1,168 @@ +import org.gradle.api.attributes.java.TargetJvmEnvironment + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +opensearchplugin { + description 'gRPC based transport implementation' + classname 'org.opensearch.transport.grpc.GrpcPlugin' +} + +dependencies { + compileOnly "com.google.code.findbugs:jsr305:3.0.2" + runtimeOnly "com.google.guava:guava:${versions.guava}" + implementation "com.google.errorprone:error_prone_annotations:2.24.1" + implementation "com.google.guava:failureaccess:1.0.1" + implementation "io.grpc:grpc-api:${versions.grpc}" + implementation "io.grpc:grpc-core:${versions.grpc}" + implementation "io.grpc:grpc-netty-shaded:${versions.grpc}" + implementation "io.grpc:grpc-protobuf-lite:${versions.grpc}" + implementation "io.grpc:grpc-protobuf:${versions.grpc}" + implementation "io.grpc:grpc-services:${versions.grpc}" + implementation "io.grpc:grpc-stub:${versions.grpc}" + implementation "io.grpc:grpc-util:${versions.grpc}" + implementation "io.perfmark:perfmark-api:0.26.0" +} + +tasks.named("dependencyLicenses").configure { + mapping from: /grpc-.*/, to: 'grpc' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.helpers.FormattingTuple', + 'org.slf4j.helpers.MessageFormatter', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.spi.LocationAwareLogger', + + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonToken', + 'com.google.protobuf.util.Durations', + 'com.google.protobuf.util.Timestamps', + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 
'com.google.protobuf.nano.MessageNano', + 'com.google.rpc.Status', + 'com.google.rpc.Status$Builder', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration' + ) + + ignoreViolations( + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$1', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$2', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$3', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$4', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$6', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 
'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess' + ) +} diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 b/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 new file mode 100644 index 0000000000000..67723f6f51248 --- /dev/null +++ b/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 @@ -0,0 +1 @@ +32b299e45105aa9b0df8279c74dc1edfcf313ff0 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt b/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt b/plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 new file mode 100644 index 0000000000000..4798b37e20691 --- /dev/null +++ b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 @@ -0,0 +1 @@ +1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt b/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/transport-grpc/licenses/failureaccess-NOTICE.txt b/plugins/transport-grpc/licenses/failureaccess-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/grpc-LICENSE.txt b/plugins/transport-grpc/licenses/grpc-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-grpc/licenses/grpc-NOTICE.txt b/plugins/transport-grpc/licenses/grpc-NOTICE.txt new file mode 100644 index 0000000000000..f70c5620cf75a --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-NOTICE.txt @@ -0,0 +1,62 @@ +Copyright 2014 The gRPC Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +----------------------------------------------------------------------- + +This product contains a modified portion of 'OkHttp', an open source +HTTP & SPDY client for Android and Java applications, which can be obtained +at: + + * LICENSE: + * okhttp/third_party/okhttp/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/square/okhttp + * LOCATION_IN_GRPC: + * okhttp/third_party/okhttp + +This product contains a modified portion of 'Envoy', an open source +cloud-native high-performance edge/middle/service proxy, which can be +obtained at: + + * LICENSE: + * xds/third_party/envoy/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/envoy/NOTICE + * HOMEPAGE: + * https://www.envoyproxy.io + * LOCATION_IN_GRPC: + * xds/third_party/envoy + +This product contains a modified portion of 'protoc-gen-validate (PGV)', +an open source protoc plugin to generate polyglot message validators, +which can be obtained at: + + * LICENSE: + * xds/third_party/protoc-gen-validate/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/protoc-gen-validate/NOTICE + * HOMEPAGE: + * https://github.com/envoyproxy/protoc-gen-validate + * LOCATION_IN_GRPC: + * xds/third_party/protoc-gen-validate + +This product contains a modified portion of 'udpa', +an open source universal data plane API, which can be obtained at: + + * LICENSE: + * xds/third_party/udpa/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/cncf/udpa + * LOCATION_IN_GRPC: + * xds/third_party/udpa diff --git a/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 
b/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e20345d29e914 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 @@ -0,0 +1 @@ +b0fd51a1c029785d1c9ae2cfc80a296b60dfcfdb \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..53fa705a66129 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 @@ -0,0 +1 @@ +8ea4186fbdcc5432664364ed53e03cf0d458c3ec \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e861b41837f33 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 @@ -0,0 +1 @@ +35b28e0d57874021cd31e76dd4a795f76a82471e \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..b2401f9752829 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a53064b896adcfefe74362a33e111492351dfc03 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..c4edf923791e5 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 @@ -0,0 +1 @@ +6c2a0b0640544b9010a42bcf76f2791116a75c9d \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..118464f8f48ff --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 @@ -0,0 +1 @@ +d58ee1cf723b4b5536d44b67e328c163580a8d98 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..c3261b012e502 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 @@ -0,0 +1 @@ +2d195570e9256d1357d584146a8e6b19587d4044 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 b/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 new file mode 100644 index 0000000000000..27d5304e326df --- /dev/null +++ b/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 @@ -0,0 +1 @@ +818e780da2c66c63bbb6480fef1f3855eeafa3e4 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/guava-LICENSE.txt b/plugins/transport-grpc/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/transport-grpc/licenses/guava-NOTICE.txt b/plugins/transport-grpc/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 new file mode 100644 index 0000000000000..abf1becd13298 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 @@ -0,0 +1 @@ +ef65452adaf20bf7d12ef55913aba24037b82738 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt b/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt b/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt
new file mode 100644
index 0000000000000..7d74b6569cf64
--- /dev/null
+++ b/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt
@@ -0,0 +1,40 @@
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+-----------------------------------------------------------------------
+
+This product contains a modified portion of 'Catapult', an open source
+Trace Event viewer for Chrome, Linux, and Android applications, which can
+be obtained at:
+
+  * LICENSE:
+    * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/catapult/LICENSE (New BSD License)
+  * HOMEPAGE:
+    * https://github.com/catapult-project/catapult
+
+This product contains a modified portion of 'Polymer', a library for Web
+Components, which can be obtained at:
+  * LICENSE:
+    * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/polymer/LICENSE (New BSD License)
+  * HOMEPAGE:
+    * https://github.com/Polymer/polymer
+
+
+This product contains a modified portion of 'ASM', an open source
+Java Bytecode library, which can be obtained at:
+
+  * LICENSE:
+    * agent/src/main/resources/io/perfmark/agent/third_party/asm/LICENSE (BSD style License)
+  * HOMEPAGE:
+    * https://asm.ow2.io/
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
new file mode 100644
index 0000000000000..0a464e135350b
--- /dev/null
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.transport.grpc;
+
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORTS;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT;
+
+/**
+ * Main class for the gRPC plugin.
+ */
+public final class GrpcPlugin extends Plugin implements NetworkPlugin {
+
+    /**
+     * Creates a new GrpcPlugin instance.
+     */
+    public GrpcPlugin() {}
+
+    @Override
+    public Map<String, Supplier<AuxTransport>> getAuxTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        CircuitBreakerService circuitBreakerService,
+        NetworkService networkService,
+        ClusterSettings clusterSettings,
+        Tracer tracer
+    ) {
+        return Collections.singletonMap(
+            GRPC_TRANSPORT_SETTING_KEY,
+            () -> new Netty4GrpcServerTransport(settings, Collections.emptyList(), networkService)
+        );
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return List.of(
+            SETTING_GRPC_PORTS,
+            SETTING_GRPC_HOST,
+            SETTING_GRPC_PUBLISH_HOST,
+            SETTING_GRPC_BIND_HOST,
+            SETTING_GRPC_WORKER_COUNT,
+            SETTING_GRPC_PUBLISH_PORT
+        );
+    }
+}
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
new file mode 100644
index 0000000000000..61c0722772b92
--- /dev/null
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
@@ -0,0 +1,277 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport.grpc;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.transport.PortsRange;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.transport.BoundTransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.transport.BindTransportException;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+
+import io.grpc.BindableService;
+import io.grpc.InsecureServerCredentials;
+import io.grpc.Server;
+import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder;
+import io.grpc.netty.shaded.io.netty.channel.EventLoopGroup;
+import io.grpc.netty.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import io.grpc.netty.shaded.io.netty.channel.socket.nio.NioServerSocketChannel;
+import io.grpc.protobuf.services.HealthStatusManager;
+import io.grpc.protobuf.services.ProtoReflectionService;
+
+import static java.util.Collections.emptyList;
+import static org.opensearch.common.settings.Setting.intSetting;
+import static org.opensearch.common.settings.Setting.listSetting;
+import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory;
+import static org.opensearch.transport.Transport.resolveTransportPublishPort;
+
+/**
+ * Netty4 gRPC server implemented as a LifecycleComponent.
+ * Services injected through BindableService list.
+ */
+public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
+    private static final Logger logger = LogManager.getLogger(Netty4GrpcServerTransport.class);
+
+    /**
+     * Type key for configuring settings of this auxiliary transport.
+     */
+    public static final String GRPC_TRANSPORT_SETTING_KEY = "experimental-transport-grpc";
+
+    /**
+     * Port range on which to bind.
+     * Note this setting is configured through AffixSetting AUX_TRANSPORT_PORTS where the aux transport type matches the GRPC_TRANSPORT_SETTING_KEY.
+     */
+    public static final Setting<PortsRange> SETTING_GRPC_PORTS = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(
+        GRPC_TRANSPORT_SETTING_KEY
+    );
+
+    /**
+     * Port published to peers for this server.
+     */
+    public static final Setting<Integer> SETTING_GRPC_PUBLISH_PORT = intSetting("grpc.publish_port", -1, -1, Setting.Property.NodeScope);
+
+    /**
+     * Host list to bind and publish.
+     * For distinct bind/publish hosts configure SETTING_GRPC_BIND_HOST + SETTING_GRPC_PUBLISH_HOST separately.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_HOST = listSetting(
+        "grpc.host",
+        emptyList(),
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Host list to bind.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_BIND_HOST = listSetting(
+        "grpc.bind_host",
+        SETTING_GRPC_HOST,
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Host list published to peers.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_PUBLISH_HOST = listSetting(
+        "grpc.publish_host",
+        SETTING_GRPC_HOST,
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Configure size of thread pool backing this transport server.
+     */
+    public static final Setting<Integer> SETTING_GRPC_WORKER_COUNT = new Setting<>(
+        "grpc.netty.worker_count",
+        (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)),
+        (s) -> Setting.parseInt(s, 1, "grpc.netty.worker_count"),
+        Setting.Property.NodeScope
+    );
+
+    private final Settings settings;
+    private final NetworkService networkService;
+    private final List<BindableService> services;
+    private final CopyOnWriteArrayList<Server> servers = new CopyOnWriteArrayList<>();
+    private final String[] bindHosts;
+    private final String[] publishHosts;
+    private final PortsRange port;
+    private final int nettyEventLoopThreads;
+
+    private volatile BoundTransportAddress boundAddress;
+    private volatile EventLoopGroup eventLoopGroup;
+
+    /**
+     * Creates a new Netty4GrpcServerTransport instance.
+     * @param settings the configured settings.
+     * @param services the gRPC compatible services to be registered with the server.
+     * @param networkService the bind/publish addresses.
+     */
+    public Netty4GrpcServerTransport(Settings settings, List<BindableService> services, NetworkService networkService) {
+        this.settings = Objects.requireNonNull(settings);
+        this.services = Objects.requireNonNull(services);
+        this.networkService = Objects.requireNonNull(networkService);
+
+        final List<String> httpBindHost = SETTING_GRPC_BIND_HOST.get(settings);
+        this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : httpBindHost).toArray(
+            Strings.EMPTY_ARRAY
+        );
+
+        final List<String> httpPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
+        this.publishHosts = (httpPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : httpPublishHost)
+            .toArray(Strings.EMPTY_ARRAY);
+
+        this.port = SETTING_GRPC_PORTS.get(settings);
+        this.nettyEventLoopThreads = SETTING_GRPC_WORKER_COUNT.get(settings);
+    }
+
+    BoundTransportAddress boundAddress() {
+        return this.boundAddress;
+    }
+
+    @Override
+    protected void doStart() {
+        boolean success = false;
+        try {
+            this.eventLoopGroup = new NioEventLoopGroup(nettyEventLoopThreads, daemonThreadFactory(settings, "grpc_event_loop"));
+            bindServer();
+            success = true;
+            logger.info("Started gRPC server on port {}", port);
+        } finally {
+            if (!success) {
+                doStop();
+            }
+        }
+    }
+
+    @Override
+    protected void doStop() {
+        for (Server server : servers) {
+            if (server != null) {
+                server.shutdown();
+                try {
+                    server.awaitTermination(30, TimeUnit.SECONDS);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    logger.warn("Interrupted while shutting down gRPC server");
+                } finally {
+                    server.shutdownNow();
+                }
+            }
+        }
+        if (eventLoopGroup != null) {
+            try {
+                eventLoopGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).await();
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                logger.warn("Failed to shut down event loop group");
+            }
+        }
+    }
+
+    @Override
+    protected void doClose() {
+
+    }
+
+    private void bindServer() {
+        InetAddress[] hostAddresses;
+        try {
+            hostAddresses = networkService.resolveBindHostAddresses(bindHosts);
+        } catch (IOException e) {
+            throw new BindTransportException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e);
+        }
+
+        List<TransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length);
+        for (InetAddress address : hostAddresses) {
+            boundAddresses.add(bindAddress(address, port));
+        }
+
+        final InetAddress publishInetAddress;
+        try {
+            publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts);
+        } catch (Exception e) {
+            throw new BindTransportException("Failed to resolve publish address", e);
+        }
+
+        final int publishPort = resolveTransportPublishPort(SETTING_GRPC_PUBLISH_PORT.get(settings), boundAddresses, publishInetAddress);
+        if (publishPort < 0) {
+            throw new BindTransportException(
+                "Failed to auto-resolve grpc publish port, multiple bound addresses "
+                    + boundAddresses
+                    + " with distinct ports and none of them matched the publish address ("
+                    + publishInetAddress
+                    + "). "
+                    + "Please specify a unique port by setting "
+                    + SETTING_GRPC_PORTS.getKey()
+                    + " or "
+                    + SETTING_GRPC_PUBLISH_PORT.getKey()
+            );
+        }
+
+        TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
+        this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress);
+        logger.info("{}", boundAddress);
+    }
+
+    private TransportAddress bindAddress(InetAddress hostAddress, PortsRange portRange) {
+        AtomicReference<Exception> lastException = new AtomicReference<>();
+        AtomicReference<TransportAddress> addr = new AtomicReference<>();
+
+        boolean success = portRange.iterate(portNumber -> {
+            try {
+
+                final InetSocketAddress address = new InetSocketAddress(hostAddress, portNumber);
+                final NettyServerBuilder serverBuilder = NettyServerBuilder.forAddress(address, InsecureServerCredentials.create())
+                    .bossEventLoopGroup(eventLoopGroup)
+                    .workerEventLoopGroup(eventLoopGroup)
+                    .channelType(NioServerSocketChannel.class)
+                    .addService(new HealthStatusManager().getHealthService())
+                    .addService(ProtoReflectionService.newInstance());
+
+                services.forEach(serverBuilder::addService);
+
+                Server srv = serverBuilder.build().start();
+                servers.add(srv);
+                addr.set(new TransportAddress(hostAddress, portNumber));
+                logger.debug("Bound gRPC to address {{}}", address);
+                return true;
+            } catch (Exception e) {
+                lastException.set(e);
+                return false;
+            }
+        });
+
+        if (!success) {
+            throw new RuntimeException("Failed to bind to " + hostAddress + " on ports " + portRange, lastException.get());
+        }

+        return addr.get();
+    }
+}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
similarity index 59%
rename from plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java
rename to plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
index 4fdc622c3d886..4a5d9d02b5b91 100644
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
@@ -7,6 +7,7 @@
  */

 /**
- * correlation field mapper package
+ * gRPC transport implementation for OpenSearch.
+ * Provides network communication using the gRPC protocol.
  */
-package org.opensearch.plugin.correlation.core.index.mapper;
+package org.opensearch.transport.grpc;
diff --git a/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..398de576b6c5a
--- /dev/null
+++ b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant codeBase "${codebase.grpc-netty-shaded}" {
+   // for reading the system-wide configuration for the backlog of established sockets
+   permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read";
+
+   // netty makes and accepts socket connections
+   permission java.net.SocketPermission "*", "accept,connect";
+
+   // Netty sets custom classloader for some of its internal threads
+   permission java.lang.RuntimePermission "*", "setContextClassLoader";
+};
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
new file mode 100644
index 0000000000000..ebeff62c2c23c
--- /dev/null
+++ b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport.grpc;
+
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.OpenSearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Before;
+
+import java.util.List;
+
+import io.grpc.BindableService;
+
+import static org.hamcrest.Matchers.emptyArray;
+import static org.hamcrest.Matchers.not;
+
+public class Netty4GrpcServerTransportTests extends OpenSearchTestCase {
+
+    private NetworkService networkService;
+    private List<BindableService> services;
+
+    @Before
+    public void setup() {
+        networkService = new NetworkService(List.of());
+        services = List.of();
+    }
+
+    public void test() {
+        try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) {
+            transport.start();
+
+            MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray()));
+            assertNotNull(transport.boundAddress().publishAddress().address());
+
+            transport.stop();
+        }
+    }
+
+    private static Settings createSettings() {
+        return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORTS.getKey(), getPortRange()).build();
+    }
+}
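Because Netty4GrpcServerTransport registers grpc-java's stock health and reflection services (HealthStatusManager, ProtoReflectionService), a plain gRPC client can probe a running server. A minimal sketch, assuming grpc-services and a channel provider such as grpc-netty-shaded on the client classpath, and a server already bound on localhost:9400 (a hypothetical port):

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;
    import io.grpc.health.v1.HealthCheckRequest;
    import io.grpc.health.v1.HealthCheckResponse;
    import io.grpc.health.v1.HealthGrpc;

    // Hypothetical client-side probe; not part of this change.
    public class GrpcHealthProbe {
        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 9400)
                .usePlaintext() // the server is built with InsecureServerCredentials
                .build();
            try {
                HealthCheckResponse response = HealthGrpc.newBlockingStub(channel)
                    .check(HealthCheckRequest.newBuilder().setService("").build()); // "" queries whole-server status
                System.out.println(response.getStatus()); // expect SERVING once doStart() has bound
            } finally {
                channel.shutdownNow();
            }
        }
    }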
diff --git a/release-notes/opensearch.release-notes-1.3.20.md b/release-notes/opensearch.release-notes-1.3.20.md
new file mode 100644
index 0000000000000..b3cc89fb37985
--- /dev/null
+++ b/release-notes/opensearch.release-notes-1.3.20.md
@@ -0,0 +1,15 @@
+## 2024-12-10 Version 1.3.20 Release Notes
+
+### Dependencies
+- Bump `icu4j` from 62.1 to 62.2 ([#15469](https://github.com/opensearch-project/OpenSearch/pull/15469))
+- Bump `org.bouncycastle:bc-fips` from 1.0.2.4 to 1.0.2.5 ([#13446](https://github.com/opensearch-project/OpenSearch/pull/13446))
+- Bump `Netty` from 4.1.112.Final to 4.1.115.Final ([#16661](https://github.com/opensearch-project/OpenSearch/pull/16661))
+- Bump `avro` from 1.11.3 to 1.11.4 ([#16773](https://github.com/opensearch-project/OpenSearch/pull/16773))
+- Bump `commonsio` to 2.16.0 ([#16780](https://github.com/opensearch-project/OpenSearch/pull/16780))
+- Bump `protobuf-java` to 3.25.5 ([#16792](https://github.com/opensearch-project/OpenSearch/pull/16792))
+- Bump `snappy-java` to 1.1.10.7 ([#16792](https://github.com/opensearch-project/OpenSearch/pull/16792))
+- Bump `mime4j-core` to 0.8.11 ([#16810](https://github.com/opensearch-project/OpenSearch/pull/16810))
+
+### Fixed
+- Update help output for _cat ([#14722](https://github.com/opensearch-project/OpenSearch/pull/14722))
+- Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#16101](https://github.com/opensearch-project/OpenSearch/pull/16101))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
index 9ec39660a4928..c840276ee1157 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
@@ -1,8 +1,9 @@
 ---
 # The test setup includes:
-# - Create flat_object mapping for flat_object_doc_values_test index
-# - Index 9 example documents
-# - Search tests about doc_values and index
+# - 1.Create flat_object mapping for flat_object_doc_values_test index
+# - 2.Index 9 example documents
+# - 3.Search tests about doc_values and index
+# - 4.Fetch doc_value from flat_object field

 setup:
   - skip:
@@ -786,3 +787,48 @@ teardown:

     - length: { hits.hits: 1 }
     - match: { hits.hits.0._source.order: "order8" }
+
+  # Stored Fields with exact dot path.
+  - do:
+      search:
+        body: {
+          _source: false,
+          query: {
+            bool: {
+              must: [
+                {
+                  term: {
+                    order: "order0"
+                  }
+                }
+              ]
+            }
+          },
+          stored_fields: "_none_",
+          docvalue_fields: [ "issue.labels.name","order" ]
+        }
+
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0.fields: { "order" : [ "order0" ], "issue.labels.name": [ "abc0" ] } }
+
+  - do:
+      search:
+        body: {
+          _source: false,
+          query: {
+            bool: {
+              must: [
+                {
+                  term: {
+                    order: "order0"
+                  }
+                }
+              ]
+            }
+          },
+          stored_fields: "_none_",
+          docvalue_fields: [ "issue.labels.name" ]
+        }
+
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0.fields: { "issue.labels.name": [ "abc0" ] } }
diff --git a/server/build.gradle b/server/build.gradle
index f1679ccfbec30..8dd23491ccd69 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -74,60 +74,46 @@ dependencies {

   compileOnly project(':libs:opensearch-plugin-classloader')
   testRuntimeOnly project(':libs:opensearch-plugin-classloader')

-  // lucene
-  api "org.apache.lucene:lucene-core:${versions.lucene}"
-  api "org.apache.lucene:lucene-analysis-common:${versions.lucene}"
-  api "org.apache.lucene:lucene-backward-codecs:${versions.lucene}"
-  api "org.apache.lucene:lucene-grouping:${versions.lucene}"
-  api "org.apache.lucene:lucene-highlighter:${versions.lucene}"
-  api "org.apache.lucene:lucene-join:${versions.lucene}"
-  api "org.apache.lucene:lucene-memory:${versions.lucene}"
-  api "org.apache.lucene:lucene-misc:${versions.lucene}"
-  api "org.apache.lucene:lucene-queries:${versions.lucene}"
-  api "org.apache.lucene:lucene-queryparser:${versions.lucene}"
-  api "org.apache.lucene:lucene-sandbox:${versions.lucene}"
-  api "org.apache.lucene:lucene-spatial-extras:${versions.lucene}"
-  api "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
-  api "org.apache.lucene:lucene-suggest:${versions.lucene}"
+  api libs.bundles.lucene

   // utilities
   api project(":libs:opensearch-cli")

   // time handling, remove with java 8 time
-  api "joda-time:joda-time:${versions.joda}"
+  api libs.jodatime

   // percentiles aggregation
-  api "com.tdunning:t-digest:${versions.tdigest}"
+  api libs.tdigest
   // percentile ranks aggregation
-  api "org.hdrhistogram:HdrHistogram:${versions.hdrhistogram}"
+  api libs.hdrhistogram

   // lucene spatial
-  api "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional
- api "org.locationtech.jts:jts-core:${versions.jts}", optional + api libs.spatial4j, optional + api libs.jtscore, optional // logging - api "org.apache.logging.log4j:log4j-api:${versions.log4j}" - api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" - api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional - annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" + api libs.log4japi + api libs.log4jjul + api libs.log4jcore, optional + annotationProcessor libs.log4jcore annotationProcessor project(':libs:opensearch-common') // jna - api "net.java.dev.jna:jna:${versions.jna}" + api libs.jna // jcraft - api "com.jcraft:jzlib:${versions.jzlib}" + api libs.jzlib // reactor - api "io.projectreactor:reactor-core:${versions.reactor}" - api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + api libs.reactorcore + api libs.reactivestreams // protobuf - api "com.google.protobuf:protobuf-java:${versions.protobuf}" - api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" + api libs.protobuf + api libs.jakartaannotation // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap - implementation 'org.roaringbitmap:RoaringBitmap:1.3.0' + api libs.roaringbitmap testImplementation(project(":test:framework")) { // tests use the locally compiled version of server diff --git a/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 deleted file mode 100644 index fd952034f3742..0000000000000 --- a/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c2503cfaba37249e20ea877555cb52ee89d1ae1 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2b9a8cf6e43fd --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 @@ -0,0 +1 @@ +86836497e35c1ab33259d9864ceb280c0016075e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 deleted file mode 100644 index 2993134edd610..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68fe98c94e9644a584ea1bf525e68d9406fc61ec \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 b/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..89d6ddbec3eec --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d0e79d06a0ed021663737e4df777ab7b80cd28c4 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.12.0.jar.sha1 b/server/licenses/lucene-core-9.12.0.jar.sha1 deleted file mode 100644 index e55f896dedb63..0000000000000 --- a/server/licenses/lucene-core-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fdb055d569bb20bfce9618fe2b01c29bab7f290c \ No newline at end of file diff --git a/server/licenses/lucene-core-9.12.1.jar.sha1 b/server/licenses/lucene-core-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2521c91a81d64 --- /dev/null +++ b/server/licenses/lucene-core-9.12.1.jar.sha1 @@ -0,0 +1 @@ +91447c90c1180122142773b5baddaf8547124794 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.12.0.jar.sha1 b/server/licenses/lucene-grouping-9.12.0.jar.sha1 deleted file mode 100644 index 48388974bb38f..0000000000000 --- 
a/server/licenses/lucene-grouping-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ccf99f8db57aa97b2c1f95c5cc2a11156a043921 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.12.1.jar.sha1 b/server/licenses/lucene-grouping-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..61d7ff62ac3cc --- /dev/null +++ b/server/licenses/lucene-grouping-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e4bc3d0aa7eec4f41b4f350de0263a8d5625d2b3 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.12.0.jar.sha1 b/server/licenses/lucene-highlighter-9.12.0.jar.sha1 deleted file mode 100644 index 3d457579da892..0000000000000 --- a/server/licenses/lucene-highlighter-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e93429f66fbcd3b58d81f01223d6ce5688047296 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.12.1.jar.sha1 b/server/licenses/lucene-highlighter-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..57fc10a58b806 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.12.1.jar.sha1 @@ -0,0 +1 @@ +2eeedfcec47dd65969f36e88931ed452291dd43e \ No newline at end of file diff --git a/server/licenses/lucene-join-9.12.0.jar.sha1 b/server/licenses/lucene-join-9.12.0.jar.sha1 deleted file mode 100644 index c5f6d16598a60..0000000000000 --- a/server/licenses/lucene-join-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14c802d6955eaf11107375a2ada8fe8ec53b3e01 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.12.1.jar.sha1 b/server/licenses/lucene-join-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..8d46f20c39974 --- /dev/null +++ b/server/licenses/lucene-join-9.12.1.jar.sha1 @@ -0,0 +1 @@ +3c5e9ff2925a8373ae0d35c1d0a7b2465cebec9f \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.12.0.jar.sha1 b/server/licenses/lucene-memory-9.12.0.jar.sha1 deleted file mode 100644 index e7ac44089c006..0000000000000 --- a/server/licenses/lucene-memory-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ffe090339540876b40df792aee51a42af6b3f37f \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.12.1.jar.sha1 b/server/licenses/lucene-memory-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..55de1c9322aa3 --- /dev/null +++ b/server/licenses/lucene-memory-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e80eecfb1dcc324140387c8357c81e12c2a01937 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.12.0.jar.sha1 b/server/licenses/lucene-misc-9.12.0.jar.sha1 deleted file mode 100644 index afb546be4e032..0000000000000 --- a/server/licenses/lucene-misc-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad17704ee90eb926b6d3105f7027485cdadbecd9 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.12.1.jar.sha1 b/server/licenses/lucene-misc-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..86982eb1c900c --- /dev/null +++ b/server/licenses/lucene-misc-9.12.1.jar.sha1 @@ -0,0 +1 @@ +4e65d01d1c23f3f49dc325d552701bbefafee7ee \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.12.0.jar.sha1 b/server/licenses/lucene-queries-9.12.0.jar.sha1 deleted file mode 100644 index e24756e38dad2..0000000000000 --- a/server/licenses/lucene-queries-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ac2a62b0b55c5725bb65f0c5454f9f8a401cf43 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.12.1.jar.sha1 b/server/licenses/lucene-queries-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..f2087ec8eb623 --- /dev/null +++ b/server/licenses/lucene-queries-9.12.1.jar.sha1 @@ -0,0 
+1 @@ +14f24315041b686683dba4bc679ca7dc6a505906 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.12.0.jar.sha1 b/server/licenses/lucene-queryparser-9.12.0.jar.sha1 deleted file mode 100644 index e93e00a063dd0..0000000000000 --- a/server/licenses/lucene-queryparser-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55959399373876f4c184944315458dc6b88fbd81 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.12.1.jar.sha1 b/server/licenses/lucene-queryparser-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..489e6719da342 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.12.1.jar.sha1 @@ -0,0 +1 @@ +aa6df09a99f8881d843e9863aa1713dc9f3ed24f \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.12.0.jar.sha1 b/server/licenses/lucene-sandbox-9.12.0.jar.sha1 deleted file mode 100644 index a3fd8446e0dbc..0000000000000 --- a/server/licenses/lucene-sandbox-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f65882536d681c11a1cbc920e5679201101e3603 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.12.1.jar.sha1 b/server/licenses/lucene-sandbox-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..c1d613e23f1fe --- /dev/null +++ b/server/licenses/lucene-sandbox-9.12.1.jar.sha1 @@ -0,0 +1 @@ +1a66485629d60779f039fc26360f4374ef1496e7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 deleted file mode 100644 index b0f11fb667faf..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d00cc7cc2279822ef6740f0677cafacfb439fa8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..c38b794ce9948 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 @@ -0,0 +1 @@ +0a7379410eff21676472adc8ea76a57891ec83c2 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 deleted file mode 100644 index 858eee25ac191..0000000000000 --- a/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e3092632ca1d4427d3ebb2c866ac89d90f5b61ec \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 b/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..bc327a8cec830 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d2fdea4edabb1f616f494999651c43abfd0aa124 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.12.0.jar.sha1 b/server/licenses/lucene-suggest-9.12.0.jar.sha1 deleted file mode 100644 index 973a7726d845d..0000000000000 --- a/server/licenses/lucene-suggest-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1c6636499317ebe498f3490a1ec8b86b8a363dd \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.12.1.jar.sha1 b/server/licenses/lucene-suggest-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..961f6da619149 --- /dev/null +++ b/server/licenses/lucene-suggest-9.12.1.jar.sha1 @@ -0,0 +1 @@ +0660e0996ec7653fe0c13c608137e264645eecac \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java 
b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java index 32d5b3db85629..a7cb4847b45e5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java @@ -8,9 +8,15 @@ package org.opensearch.action.admin.cluster.shards; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.opensearch.action.admin.indices.datastream.DataStreamTestCase; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.pagination.PageParams; +import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; @@ -20,15 +26,19 @@ import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.search.SearchService.NO_TIMEOUT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) -public class TransportCatShardsActionIT extends OpenSearchIntegTestCase { +public class TransportCatShardsActionIT extends DataStreamTestCase { public void testCatShardsWithSuccessResponse() throws InterruptedException { internalCluster().startClusterManagerOnlyNodes(1); @@ -125,4 +135,334 @@ public void onFailure(Exception e) { latch.await(); } + public void testListShardsWithHiddenIndex() throws Exception { + final int numShards = 1; + final int numReplicas = 1; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(2); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + ensureGreen(); + + // Verify result for a default query: "_list/shards" + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, 100); + ActionFuture listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 2, true); + + // Verify result when hidden index is explicitly queried: "_list/shards" + listShardsRequest = getListShardsTransportRequest(new String[] { "test-hidden-idx" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 2, true); + + // Verify result when hidden index is queried with wildcard: "_list/shards*" + // Since the ClusterStateAction underneath is invoked with lenientExpandOpen IndicesOptions, + // Wildcards for hidden indices should not get 
resolved. + listShardsRequest = getListShardsTransportRequest(new String[] { "test-hidden-idx*" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 0, false); + } + + public void testListShardsWithClosedIndex() throws Exception { + final int numShards = 1; + final int numReplicas = 1; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(2); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + ensureGreen(); + + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verify result for a default query: "_list/shards" + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, 100); + ActionFuture listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 2, false); + + // Verify result when closed index is explicitly queried: "_list/shards" + listShardsRequest = getListShardsTransportRequest(new String[] { "test-closed-idx" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 2, false); + + // Verify result when closed index is queried with wildcard: "_list/shards*" + // Since the ClusterStateAction underneath is invoked with lenientExpandOpen IndicesOptions, + // Wildcards for closed indices should not get resolved. 
+ listShardsRequest = getListShardsTransportRequest(new String[] { "test-closed-idx*" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 0, false); + } + + public void testListShardsWithClosedAndHiddenIndices() throws InterruptedException, ExecutionException { + final int numIndices = 4; + final int numShards = 1; + final int numReplicas = 2; + final int pageSize = 100; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verifying response for default queries: /_list/shards + // all the shards should be part of response, however stats should not be displayed for closed index + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, pageSize); + ActionFuture listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx"))); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-hidden-idx"))); + assertEquals(numIndices * numShards * (numReplicas + 1), listShardsResponse.get().getResponseShards().size()); + assertFalse( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .anyMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-closed-idx")) + ); + assertEquals( + (numIndices - 1) * numShards * (numReplicas + 1), + listShardsResponse.get().getIndicesStatsResponse().getShards().length + ); + + // Verifying responses when hidden indices are explicitly queried: /_list/shards/test-hidden-idx + // Shards for hidden index should appear in response along with stats + listShardsRequest.setIndices(List.of("test-hidden-idx").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().allMatch(shard -> shard.getIndexName().equals("test-hidden-idx"))); + assertTrue( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-hidden-idx")) + ); + assertEquals( + listShardsResponse.get().getResponseShards().size(), + listShardsResponse.get().getIndicesStatsResponse().getShards().length + ); + + // Verifying responses when hidden indices are queried with wildcards: /_list/shards/test-hidden-idx* + // 
Shards for hidden index should not appear in response with stats. + listShardsRequest.setIndices(List.of("test-hidden-idx*").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + + // Explicitly querying for closed index: /_list/shards/test-closed-idx + // should output closed shards without stats. + listShardsRequest.setIndices(List.of("test-closed-idx").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx"))); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + + // Querying for closed index with wildcards: /_list/shards/test-closed-idx* + // should not output any closed shards. + listShardsRequest.setIndices(List.of("test-closed-idx*").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + } + + public void testListShardsWithClosedIndicesAcrossPages() throws InterruptedException, ExecutionException { + final int numIndices = 4; + final int numShards = 1; + final int numReplicas = 2; + final int pageSize = numShards * (numReplicas + 1); + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test-open-idx-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-open-idx-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + // close index "test-closed-idx-1" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx-1")).get(); + ensureGreen(); + // close index "test-closed-idx-2" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx-2")).get(); + ensureGreen(); + + // Verifying response for default queries: /_list/shards + List responseShardRouting = new ArrayList<>(); + List responseShardStats = new ArrayList<>(); + String nextToken = null; + CatShardsRequest listShardsRequest; + ActionFuture listShardsResponse; + do { + listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, nextToken, pageSize); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + nextToken = listShardsResponse.get().getPageToken().getNextToken(); + responseShardRouting.addAll(listShardsResponse.get().getResponseShards()); + responseShardStats.addAll(List.of(listShardsResponse.get().getIndicesStatsResponse().getShards())); + } while (nextToken != 
null); + + assertTrue(responseShardRouting.stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx-1"))); + assertTrue(responseShardRouting.stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx-2"))); + assertEquals(numIndices * numShards * (numReplicas + 1), responseShardRouting.size()); + // ShardsStats should only appear for 2 open indices + assertFalse( + responseShardStats.stream().anyMatch(shardStats -> shardStats.getShardRouting().getIndexName().contains("test-closed-idx")) + ); + assertEquals(2 * numShards * (numReplicas + 1), responseShardStats.size()); + } + + public void testListShardsWithDataStream() throws Exception { + final int numDataNodes = 3; + String dataStreamName = "logs-test"; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(numDataNodes); + // Create an index template for data streams. + createDataStreamIndexTemplate("data-stream-template", List.of("logs-*")); + // Create data streams matching the "logs-*" index pattern. + createDataStream(dataStreamName); + ensureGreen(); + // Verifying default query's result. Data stream should have created a hidden backing index in the + // background and all the corresponding shards should appear in the response along with stats. + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, numDataNodes * numDataNodes); + ActionFuture listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), dataStreamName, numDataNodes + 1, true); + // Verifying result when data stream is directly queried. Again, all the shards with stats should appear + listShardsRequest = getListShardsTransportRequest(new String[] { dataStreamName }, numDataNodes * numDataNodes); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), dataStreamName, numDataNodes + 1, true); + } + + public void testListShardsWithAliases() throws Exception { + final int numShards = 1; + final int numReplicas = 1; + final String aliasName = "test-alias"; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + ensureGreen(); + + // Point test alias to both the indices (one being hidden while the other is closed) + final IndicesAliasesRequest request = new IndicesAliasesRequest().origin("allowed"); + request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test-closed-idx").alias(aliasName)); + assertAcked(client().admin().indices().aliases(request).actionGet()); + + request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test-hidden-idx").alias(aliasName)); + assertAcked(client().admin().indices().aliases(request).actionGet()); + + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verifying result when an alias is explicitly queried. 
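+ // Both the closed and the hidden index resolve through the alias, so shards for both appear in the response, while stats are only returned for the open (hidden) index.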
+ CatShardsRequest listShardsRequest = getListShardsTransportRequest(new String[] { aliasName }, 100); + ActionFuture listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue( + listShardsResponse.get() + .getResponseShards() + .stream() + .allMatch(shard -> shard.getIndexName().equals("test-hidden-idx") || shard.getIndexName().equals("test-closed-idx")) + ); + assertTrue( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-hidden-idx")) + ); + assertEquals(4, listShardsResponse.get().getResponseShards().size()); + assertEquals(2, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + } + + private void assertSingleIndexResponseShards( + CatShardsResponse catShardsResponse, + String indexNamePattern, + final int totalNumShards, + boolean shardStatsExist + ) { + assertTrue(catShardsResponse.getResponseShards().stream().allMatch(shard -> shard.getIndexName().contains(indexNamePattern))); + assertEquals(totalNumShards, catShardsResponse.getResponseShards().size()); + if (shardStatsExist) { + assertTrue( + Arrays.stream(catShardsResponse.getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().contains(indexNamePattern)) + ); + } + assertEquals(shardStatsExist ? totalNumShards : 0, catShardsResponse.getIndicesStatsResponse().getShards().length); + } + + private CatShardsRequest getListShardsTransportRequest(String[] indices, final int pageSize) { + return getListShardsTransportRequest(indices, null, pageSize); + } + + private CatShardsRequest getListShardsTransportRequest(String[] indices, String nextToken, final int pageSize) { + CatShardsRequest listShardsRequest = new CatShardsRequest(); + listShardsRequest.setCancelAfterTimeInterval(NO_TIMEOUT); + listShardsRequest.setIndices(indices); + listShardsRequest.setPageParams(new PageParams(nextToken, PageParams.PARAM_ASC_SORT_VALUE, pageSize)); + return listShardsRequest; + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index 70124c8c46700..377f99cd8b791 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -33,12 +33,21 @@ package org.opensearch.discovery; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; import org.opensearch.cluster.coordination.JoinHelper; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.coordination.PublicationTransportHandler; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Randomness; import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import 
org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.disruption.ServiceDisruptionScheme; @@ -46,10 +55,15 @@ import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.junit.Assert; +import java.util.Arrays; import java.util.HashSet; +import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; @@ -250,4 +264,142 @@ public void testNodeNotReachableFromClusterManager() throws Exception { ensureStableCluster(3); } + /** + * Tests the scenario wherein a cluster-state containing new repository metadata, introduced by a node-join from a + * repository-configured node, fails at the commit stage and is followed by a cluster-manager switch. The new + * cluster-manager then performs another round of node-joins with the new cluster-state, since the previous attempt + * had a successful publish. + */ + public void testElectClusterManagerRemotePublicationConfigurationNodeJoinCommitFails() throws Exception { + final String remoteStateRepoName = "remote-state-repo"; + final String remoteRoutingTableRepoName = "routing-table-repo"; + + Settings remotePublicationSettings = buildRemotePublicationNodeAttributes( + remoteStateRepoName, + ReloadableFsRepository.TYPE, + remoteRoutingTableRepoName, + ReloadableFsRepository.TYPE + ); + internalCluster().startClusterManagerOnlyNodes(3); + internalCluster().startDataOnlyNodes(3); + + String clusterManagerNode = internalCluster().getClusterManagerName(); + List<String> nonClusterManagerNodes = Arrays.stream(internalCluster().getNodeNames()) + .filter(node -> !node.equals(clusterManagerNode)) + .collect(Collectors.toList()); + + ensureStableCluster(6); + + MockTransportService clusterManagerTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + clusterManagerNode + ); + logger.info("Blocking Cluster Manager Commit Request on all nodes"); + // Make the commit request fail on the cluster-manager's send path itself for every other node. Each node then + // sees a successful publish operation but a failed commit operation. This comes into play once the new node joins. + nonClusterManagerNodes.forEach(node -> { + TransportService targetTransportService = internalCluster().getInstance(TransportService.class, node); + clusterManagerTransportService.addSendBehavior(targetTransportService, (connection, requestId, action, request, options) -> { + if (action.equals(PublicationTransportHandler.COMMIT_STATE_ACTION_NAME)) { + logger.info("--> preventing {} request", PublicationTransportHandler.COMMIT_STATE_ACTION_NAME); + throw new FailedToCommitClusterStateException("Blocking Commit"); + } + connection.sendRequest(requestId, action, request, options); + }); + }); + + logger.info("Starting Node with remote publication settings"); + // Start a node with remote-publication repositories configured. This leads to the active cluster-manager creating + // a new cluster-state event for the node-join, with the new repositories set up in the cluster metadata.
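+ // With the send behavior installed above, the commit round of this publication fails on every non-cluster-manager node, while the publish round itself succeeds.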
+ internalCluster().startDataOnlyNodes(1, remotePublicationSettings, Boolean.TRUE); + + // Checking if publish succeeded on the nodes before shutting down the blocked cluster-manager + assertBusy(() -> { + String randomNode = nonClusterManagerNodes.get(Randomness.get().nextInt(nonClusterManagerNodes.size())); + PersistedStateRegistry registry = internalCluster().getInstance(PersistedStateRegistry.class, randomNode); + + ClusterState state = registry.getPersistedState(PersistedStateRegistry.PersistedStateType.LOCAL).getLastAcceptedState(); + RepositoriesMetadata repositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); + Boolean isRemoteStateRepoConfigured = Boolean.FALSE; + Boolean isRemoteRoutingTableRepoConfigured = Boolean.FALSE; + + assertNotNull(repositoriesMetadata); + assertNotNull(repositoriesMetadata.repositories()); + + for (RepositoryMetadata repo : repositoriesMetadata.repositories()) { + if (repo.name().equals(remoteStateRepoName)) { + isRemoteStateRepoConfigured = Boolean.TRUE; + } else if (repo.name().equals(remoteRoutingTableRepoName)) { + isRemoteRoutingTableRepoConfigured = Boolean.TRUE; + } + } + // Asserting that the metadata is present in the persisted cluster-state + assertTrue(isRemoteStateRepoConfigured); + assertTrue(isRemoteRoutingTableRepoConfigured); + + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, randomNode); + + isRemoteStateRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteStateRepoName); + isRemoteRoutingTableRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteRoutingTableRepoName); + + // Asserting that the metadata is not present in the repository service. + Assert.assertFalse(isRemoteStateRepoConfigured); + Assert.assertFalse(isRemoteRoutingTableRepoConfigured); + }); + + logger.info("Stopping current Cluster Manager"); + // Stop the current cluster-manager, whose outbound commit paths were blocked. This forces a new election onto the + // nodes where the new cluster-state was published but never committed. + internalCluster().stopCurrentClusterManagerNode(); + + // We expect the repository validations to be skipped in this case, so the node-joins succeed. The validations are + // skipped because, even though the cluster-state was updated in the persisted registry, the repository service was + // never updated since the commit attempt failed. + ensureStableCluster(6); + + String randomNode = nonClusterManagerNodes.get(Randomness.get().nextInt(nonClusterManagerNodes.size())); + + // Checking if the final cluster-state is updated.
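+ // After the new election, both repositories should be present in the cluster metadata as well as in the RepositoriesService of the nodes.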
+ RepositoriesMetadata repositoriesMetadata = internalCluster().getInstance(ClusterService.class, randomNode) + .state() + .metadata() + .custom(RepositoriesMetadata.TYPE); + + Boolean isRemoteStateRepoConfigured = Boolean.FALSE; + Boolean isRemoteRoutingTableRepoConfigured = Boolean.FALSE; + + for (RepositoryMetadata repo : repositoriesMetadata.repositories()) { + if (repo.name().equals(remoteStateRepoName)) { + isRemoteStateRepoConfigured = Boolean.TRUE; + } else if (repo.name().equals(remoteRoutingTableRepoName)) { + isRemoteRoutingTableRepoConfigured = Boolean.TRUE; + } + } + + Assert.assertTrue("RemoteState Repo is not set in RepositoriesMetadata", isRemoteStateRepoConfigured); + Assert.assertTrue("RemoteRoutingTable Repo is not set in RepositoriesMetadata", isRemoteRoutingTableRepoConfigured); + + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, randomNode); + + isRemoteStateRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteStateRepoName); + isRemoteRoutingTableRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteRoutingTableRepoName); + + Assert.assertTrue("RemoteState Repo is not set in RepositoryService", isRemoteStateRepoConfigured); + Assert.assertTrue("RemoteRoutingTable Repo is not set in RepositoryService", isRemoteRoutingTableRepoConfigured); + } + + private Boolean isRepoPresentInRepositoryService(RepositoriesService repositoriesService, String repoName) { + try { + Repository remoteStateRepo = repositoriesService.repository(repoName); + if (Objects.nonNull(remoteStateRepo)) { + return Boolean.TRUE; + } + } catch (RepositoryMissingException e) { + return Boolean.FALSE; + } + + return Boolean.FALSE; + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index c91c4d7bbb63b..1d01f717aad1f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -26,6 +26,8 @@ import org.opensearch.index.compositeindex.datacube.DataCubeDateTimeUnit; import org.opensearch.index.compositeindex.datacube.DateDimension; import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; @@ -41,6 +43,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -56,7 +59,7 @@ public class StarTreeMapperIT extends OpenSearchIntegTestCase { .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) .build(); - private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean ipdim) { + private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean wildcard) { try { return jsonBuilder().startObject()
.startObject("composite") @@ -68,7 +71,7 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool .endObject() .startArray("ordered_dimensions") .startObject() - .field("name", getDim(invalidDim, ipdim)) + .field("name", getDim(invalidDim, wildcard)) .endObject() .startObject() .field("name", "keyword_dv") @@ -102,6 +105,195 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool .field("type", "keyword") .field("doc_values", false) .endObject() + .startObject("ip_no_dv") + .field("type", "ip") + .field("doc_values", false) + .endObject() + .startObject("ip") + .field("type", "ip") + .field("doc_values", true) + .endObject() + .startObject("wildcard") + .field("type", "wildcard") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static XContentBuilder createNestedTestMapping() { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startObject("date_dimension") + .field("name", "timestamp") + .endObject() + .startArray("ordered_dimensions") + .startObject() + .field("name", "nested.nested1.status") + .endObject() + .startObject() + .field("name", "nested.nested1.keyword_dv") + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "nested3.numeric_dv") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("nested3") + .startObject("properties") + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("nested") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("nested-not-startree") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() + .startObject("ip") + .field("type", "ip") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static XContentBuilder createNestedTestMappingForArray() { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startObject("date_dimension") + .field("name", "timestamp") + .endObject() + .startArray("ordered_dimensions") + .startObject() + .field("name", "status") + .endObject() + .startObject() + .field("name", "nested.nested1.keyword_dv") + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "nested3.numeric_dv") + 
.endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("status") + .field("type", "integer") + .endObject() + .startObject("nested3") + .startObject("properties") + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("nested") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("nested-not-startree") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() .startObject("ip") .field("type", "ip") .field("doc_values", false) @@ -362,11 +554,11 @@ private XContentBuilder getMappingWithDuplicateFields(boolean isDuplicateDim, bo return mapping; } - private static String getDim(boolean hasDocValues, boolean isKeyword) { + private static String getDim(boolean hasDocValues, boolean isWildCard) { if (hasDocValues) { - return random().nextBoolean() ? "numeric" : "keyword"; - } else if (isKeyword) { - return "ip"; + return random().nextBoolean() ? "numeric" : random().nextBoolean() ? 
"keyword" : "ip_no_dv"; + } else if (isWildCard) { + return "wildcard"; } return "numeric_dv"; } @@ -467,6 +659,46 @@ public void testValidCompositeIndexWithDates() { } } + public void testValidCompositeIndexWithNestedFields() { + prepareCreate(TEST_INDEX).setMapping(createNestedTestMapping()).setSettings(settings).get(); + Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); + for (IndicesService service : dataNodeInstances) { + final Index index = resolveIndex("test"); + if (service.hasIndex(index)) { + IndexService indexService = service.indexService(index); + Set fts = indexService.mapperService().getCompositeFieldTypes(); + + for (CompositeMappedFieldType ft : fts) { + assertTrue(ft instanceof StarTreeMapper.StarTreeFieldType); + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) ft; + assertEquals("timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR), + DataCubeDateTimeUnit.HALF_HOUR_OF_DAY + ); + for (int i = 0; i < dateDim.getIntervals().size(); i++) { + assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName()); + } + assertEquals("nested.nested1.status", starTreeFieldType.getDimensions().get(1).getField()); + assertTrue(starTreeFieldType.getDimensions().get(1) instanceof NumericDimension); + assertEquals("nested.nested1.keyword_dv", starTreeFieldType.getDimensions().get(2).getField()); + assertTrue(starTreeFieldType.getDimensions().get(2) instanceof OrdinalDimension); + assertEquals("nested3.numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals( + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, + starTreeFieldType.getStarTreeConfig().getBuildMode() + ); + assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); + } + } + } + } + public void testValidCompositeIndexWithDuplicateDates() { prepareCreate(TEST_INDEX).setMapping(createDateTestMapping(true)).setSettings(settings).get(); Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); @@ -555,11 +787,156 @@ public void testCompositeIndexWithArraysInCompositeField() throws IOException { () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() ); assertEquals( - "object mapping for [_doc] with array for [numeric_dv] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [_doc] with array for [numeric_dv] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", ex.getMessage() ); } + public void testCompositeIndexWithArraysInNestedCompositeField() throws IOException { + // here nested.nested1.status is part of the composite field but "nested" field itself is an array + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // Attempt to index a document with an array field + XContentBuilder doc = 
jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startArray("nested") + .startObject() + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .endArray() + .endObject() + .endArray() + .endObject(); + // Indexing should fail as the "nested" array lies on a composite-field path + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() + ); + assertEquals( + "object mapping for [_doc] with array for [nested] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", + ex.getMessage() + ); + } + + public void testCompositeIndexWithArraysInChildNestedCompositeField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // here nested.nested1.status is part of the composite field but "nested.nested1" field is an array + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startObject("nested") + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .endArray() + .endObject() + .endObject(); + // Indexing should fail as the "nested.nested1" array lies on a composite-field path + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() + ); + assertEquals( + "object mapping for [nested] with array for [nested1] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", + ex.getMessage() + ); + } + + public void testCompositeIndexWithArraysInNestedCompositeFieldSameNameAsNormalField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMappingForArray()).get(); + // here "status" is part of the composite field, while the "nested.nested1.status" array field is not + // part of the composite field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startObject("nested") + .startObject("nested1") + .startArray("status") + .value(10) + .value(20) + .value(30) + .endArray() + .endObject() + .endObject() + .field("status", "200") + .endObject(); + // Index the document and refresh + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get(); + + assertEquals(RestStatus.CREATED, indexResponse.status()); + + client().admin().indices().prepareRefresh(TEST_INDEX).get(); + // Verify the document was indexed + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get(); + + assertEquals(1, searchResponse.getHits().getTotalHits().value); + + // Verify the values in the indexed document + SearchHit hit = searchResponse.getHits().getAt(0); + assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp")); + + int values = Integer.parseInt((String) hit.getSourceAsMap().get("status")); + assertEquals(200, values); + } + + public void testCompositeIndexWithNestedArraysInNonCompositeField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // Attempt to index a document with an array field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp",
"2023-06-01T12:00:00Z") + .startObject("nested-not-startree") + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 20) + .endObject() + .startObject() + .field("status", 30) + .endObject() + .endArray() + .endObject() + .endObject(); + + // Index the document and refresh + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get(); + + assertEquals(RestStatus.CREATED, indexResponse.status()); + + client().admin().indices().prepareRefresh(TEST_INDEX).get(); + // Verify the document was indexed + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get(); + + assertEquals(1, searchResponse.getHits().getTotalHits().value); + + // Verify the values in the indexed document + SearchHit hit = searchResponse.getHits().getAt(0); + assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp")); + + List values = (List) ((Map) (hit.getSourceAsMap().get("nested-not-startree"))).get("nested1"); + assertEquals(3, values.size()); + int i = 1; + for (Object val : values) { + Map valMap = (Map) val; + assertEquals(10 * i, valMap.get("status")); + i++; + } + } + public void testCompositeIndexWithArraysInNonCompositeField() throws IOException { prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); // Attempt to index a document with an array field @@ -748,7 +1125,7 @@ public void testUnsupportedDim() { () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, true)).get() ); assertEquals( - "Failed to parse mapping [_doc]: unsupported field type associated with dimension [ip] as part of star tree field [startree-1]", + "Failed to parse mapping [_doc]: unsupported field type associated with dimension [wildcard] as part of star tree field [startree-1]", ex.getMessage() ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java index a1b512c326ac5..f660695af9965 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java @@ -8,14 +8,20 @@ package org.opensearch.indices.replication; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; import java.nio.file.Path; +import java.util.List; +import java.util.Set; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SearchReplicaReplicationIT extends SegmentReplicationBaseIT { @@ -82,4 +88,47 @@ public void testReplication() throws Exception { waitForSearchableDocs(docCount, primary, replica); } + public void testSegmentReplicationStatsResponseWithSearchReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final List nodes = 
internalCluster().startDataOnlyNodes(2); + createIndex( + INDEX_NAME, + Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .put("number_of_search_only_replicas", 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + ensureGreen(INDEX_NAME); + + final int docCount = 5; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + waitForSearchableDocs(docCount, nodes); + + SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() + .indices() + .prepareSegmentReplicationStats(INDEX_NAME) + .setDetailed(true) + .execute() + .actionGet(); + + // Verify the number of indices + assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().size()); + // Verify total shards + assertEquals(2, segmentReplicationStatsResponse.getTotalShards()); + // Verify the number of primary shards + assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).size()); + + SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0); + Set replicaStats = perGroupStats.getReplicaStats(); + // Verify the number of replica stats + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replicaStat : replicaStats) { + assertNotNull(replicaStat.getCurrentReplicationState()); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java index ef7da395d2151..79caef1f45a26 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java @@ -82,8 +82,7 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testSimpleTimeout() throws Exception { - final int numDocs = 1000; - for (int i = 0; i < numDocs; i++) { + for (int i = 0; i < 32; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } refresh("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 40c9301ef4bce..d200b9177353a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -314,7 +314,7 @@ public void testSearchCancellationWithBackpressureDisabled() throws InterruptedE assertNull("SearchShardTask shouldn't have cancelled for monitor_only mode", caughtException); } - private static class ExceptionCatchingListener implements ActionListener { + public static class ExceptionCatchingListener implements ActionListener { private final CountDownLatch latch; private Exception exception = null; @@ -333,7 +333,11 @@ public void onFailure(Exception e) { latch.countDown(); } - private Exception getException() { + public CountDownLatch getLatch() { + return latch; + } + + public Exception getException() { return exception; } } @@ -349,7 +353,7 @@ private Supplier descriptionSupplier(String description) { return () -> description; } - interface TaskFactory { + public interface TaskFactory { T createTask(long id, String type, String 
action, String description, TaskId parentTaskId, Map headers); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 2ce96092203e8..60a6e59014e11 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -1023,7 +1023,7 @@ public void testDocValueFields() throws Exception { .startObject("ip_field") .field("type", "ip") .endObject() - .startObject("flat_object_field") + .startObject("flat_object_field1") .field("type", "flat_object") .endObject() .endObject() @@ -1050,9 +1050,11 @@ public void testDocValueFields() throws Exception { .field("boolean_field", true) .field("binary_field", new byte[] { 42, 100 }) .field("ip_field", "::1") - .field("flat_object_field") + .field("flat_object_field1") .startObject() + .field("fooa", "bara") .field("foo", "bar") + .field("foob", "barb") .endObject() .endObject() ) @@ -1075,7 +1077,7 @@ public void testDocValueFields() throws Exception { .addDocValueField("boolean_field") .addDocValueField("binary_field") .addDocValueField("ip_field") - .addDocValueField("flat_object_field"); + .addDocValueField("flat_object_field1.foo"); SearchResponse searchResponse = builder.get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -1097,7 +1099,7 @@ public void testDocValueFields() throws Exception { "keyword_field", "binary_field", "ip_field", - "flat_object_field" + "flat_object_field1.foo" ) ) ); @@ -1116,7 +1118,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field1.foo").getValue(), equalTo("bar")); builder = client().prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"); searchResponse = builder.get(); @@ -1139,8 +1141,7 @@ public void testDocValueFields() throws Exception { "text_field", "keyword_field", "binary_field", - "ip_field", - "flat_object_field" + "ip_field" ) ) ); @@ -1160,7 +1161,6 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1176,7 +1176,7 @@ public void testDocValueFields() throws Exception { .addDocValueField("boolean_field", "use_field_mapping") .addDocValueField("binary_field", "use_field_mapping") .addDocValueField("ip_field", "use_field_mapping") - .addDocValueField("flat_object_field", "use_field_mapping"); + .addDocValueField("flat_object_field1.foo", null); ; searchResponse = builder.get(); @@ 
-1199,7 +1199,7 @@ public void testDocValueFields() throws Exception { "keyword_field", "binary_field", "ip_field", - "flat_object_field" + "flat_object_field1.foo" ) ) ); @@ -1219,7 +1219,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field1.foo").getValue(), equalTo("bar")); builder = client().prepareSearch() .setQuery(matchAllQuery()) diff --git a/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java new file mode 100644 index 0000000000000..6b68a83da94e2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java @@ -0,0 +1,434 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchTask; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.backpressure.SearchBackpressureIT.ExceptionCatchingListener; +import org.opensearch.search.backpressure.SearchBackpressureIT.TaskFactory; +import org.opensearch.search.backpressure.SearchBackpressureIT.TestResponse; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; 
+import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.threadpool.ThreadPool.Names.SAME; +import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; +import static org.hamcrest.Matchers.instanceOf; + +public class WorkloadManagementIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + final static String PUT = "PUT"; + final static String MEMORY = "MEMORY"; + final static String CPU = "CPU"; + final static String ENABLED = "enabled"; + final static String DELETE = "DELETE"; + private static final TimeValue TIMEOUT = new TimeValue(1, TimeUnit.SECONDS); + + public WorkloadManagementIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Collection> nodePlugins() { + final List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(TestClusterUpdatePlugin.class); + return plugins; + } + + @Before + public final void setupNodeSettings() { + Settings request = Settings.builder() + .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_REJECTION_THRESHOLD.getKey(), 0.8) + .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD.getKey(), 0.9) + .put(WorkloadManagementSettings.NODE_LEVEL_CPU_REJECTION_THRESHOLD.getKey(), 0.8) + .put(WorkloadManagementSettings.NODE_LEVEL_CPU_CANCELLATION_THRESHOLD.getKey(), 0.9) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + } + + @After + public final void cleanupNodeSettings() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testHighCPUInEnforcedMode() throws InterruptedException { + Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighCPUInMonitorMode() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) + ) + ); + 
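// The WLM mode setting is not switched to enabled here, so breaching the thresholds should not cancel the task. +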
updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNull(caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighMemoryInEnforcedMode() throws InterruptedException { + Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id()); + assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighMemoryInMonitorMode() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id()); + assertNull("SearchTask should not have been cancelled in monitor mode", caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testNoCancellation() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.8, ResourceType.MEMORY, 0.8) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNull(caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public Exception executeQueryGroupTask(String resourceType, String queryGroupId) throws InterruptedException { + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute( + TestQueryGroupTaskTransportAction.ACTION, + new TestQueryGroupTaskRequest( + resourceType, + queryGroupId, + (TaskFactory) (id, type, action, description, parentTaskId, headers) -> new SearchTask( + id, + type, + action, + () -> description, + parentTaskId, + headers + ) + ), + listener + ); + assertTrue(listener.getLatch().await(TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS)); + return listener.getException(); + } + + public void updateQueryGroupInClusterState(String method, QueryGroup queryGroup) throws InterruptedException { + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestClusterUpdateTransportAction.ACTION, new TestClusterUpdateRequest(queryGroup, method), listener); + assertTrue(listener.getLatch().await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + assertEquals(0, listener.getLatch().getCount()); + } + + public static class TestClusterUpdateRequest extends ClusterManagerNodeRequest { + private final String method; + private final QueryGroup queryGroup; + + public TestClusterUpdateRequest(QueryGroup queryGroup, String method) { + this.method = method; + this.queryGroup = queryGroup; + } + + public TestClusterUpdateRequest(StreamInput in) throws IOException { + super(in); +
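// Deserialization mirrors writeTo below: the method string is read first, followed by the QueryGroup. +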
this.method = in.readString(); + this.queryGroup = new QueryGroup(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(method); + queryGroup.writeTo(out); + } + + public QueryGroup getQueryGroup() { + return queryGroup; + } + + public String getMethod() { + return method; + } + } + + public static class TestClusterUpdateTransportAction extends TransportClusterManagerNodeAction { + public static final ActionType ACTION = new ActionType<>("internal::test_cluster_update_action", TestResponse::new); + + @Inject + public TestClusterUpdateTransportAction( + ThreadPool threadPool, + TransportService transportService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService + ) { + super( + ACTION.name(), + transportService, + clusterService, + threadPool, + actionFilters, + TestClusterUpdateRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + return SAME; + } + + @Override + protected TestResponse read(StreamInput in) throws IOException { + return new TestResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(TestClusterUpdateRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation( + TestClusterUpdateRequest request, + ClusterState clusterState, + ActionListener listener + ) { + clusterService.submitStateUpdateTask("query-group-persistence-service", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + Map currentGroups = currentState.metadata().queryGroups(); + QueryGroup queryGroup = request.getQueryGroup(); + String id = queryGroup.get_id(); + String method = request.getMethod(); + Metadata metadata; + if (method.equals(PUT)) { // create + metadata = Metadata.builder(currentState.metadata()).put(queryGroup).build(); + } else { // delete + metadata = Metadata.builder(currentState.metadata()).remove(currentGroups.get(id)).build(); + } + return ClusterState.builder(currentState).metadata(metadata).build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(new TestResponse()); + } + }); + } + } + + public static class TestQueryGroupTaskRequest extends ActionRequest { + private final String type; + private final String queryGroupId; + private TaskFactory taskFactory; + + public TestQueryGroupTaskRequest(String type, String queryGroupId, TaskFactory taskFactory) { + this.type = type; + this.queryGroupId = queryGroupId; + this.taskFactory = taskFactory; + } + + public TestQueryGroupTaskRequest(StreamInput in) throws IOException { + super(in); + this.type = in.readString(); + this.queryGroupId = in.readString(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return taskFactory.createTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(type); + out.writeString(queryGroupId); + } + + public 
String getType() { + return type; + } + + public String getQueryGroupId() { + return queryGroupId; + } + } + + public static class TestQueryGroupTaskTransportAction extends HandledTransportAction { + public static final ActionType ACTION = new ActionType<>("internal::test_query_group_task_action", TestResponse::new); + private final ThreadPool threadPool; + + @Inject + public TestQueryGroupTaskTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) { + super(ACTION.name(), transportService, actionFilters, TestQueryGroupTaskRequest::new); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, TestQueryGroupTaskRequest request, ActionListener listener) { + threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, request.getQueryGroupId()); + threadPool.executor(ThreadPool.Names.SEARCH).execute(() -> { + try { + CancellableTask cancellableTask = (CancellableTask) task; + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + assertEquals(request.getQueryGroupId(), ((QueryGroupTask) task).getQueryGroupId()); + long startTime = System.nanoTime(); + while (System.nanoTime() - startTime < TIMEOUT.getNanos()) { + doWork(request); + if (cancellableTask.isCancelled()) { + break; + } + } + if (cancellableTask.isCancelled()) { + throw new TaskCancelledException(cancellableTask.getReasonCancelled()); + } else { + listener.onResponse(new TestResponse()); + } + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private void doWork(TestQueryGroupTaskRequest request) throws InterruptedException { + switch (request.getType()) { + case "CPU": + long i = 0, j = 1, k = 1, iterations = 1000; + do { + j += i; + k *= j; + i++; + } while (i < iterations); + break; + case "MEMORY": + int bytesToAllocate = (int) (Runtime.getRuntime().totalMemory() * 0.01); + Byte[] bytes = new Byte[bytesToAllocate]; + int[] ints = new int[bytesToAllocate]; + break; + } + } + } + + public static class TestClusterUpdatePlugin extends Plugin implements ActionPlugin { + @Override + public List> getActions() { + return Arrays.asList( + new ActionHandler<>(TestClusterUpdateTransportAction.ACTION, TestClusterUpdateTransportAction.class), + new ActionHandler<>(TestQueryGroupTaskTransportAction.ACTION, TestQueryGroupTaskTransportAction.class) + ); + } + + @Override + public List> getClientActions() { + return Arrays.asList(TestClusterUpdateTransportAction.ACTION, TestQueryGroupTaskTransportAction.ACTION); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java index 7b36b7a10f4f2..01efa96a7369e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java @@ -18,6 +18,8 @@ import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.support.TimeoutTaskCancellationUtility; import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.breaker.ResponseLimitBreachedException; import org.opensearch.common.breaker.ResponseLimitSettings; import org.opensearch.common.inject.Inject; @@ -27,6 +29,7 @@ import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; +import 
java.util.List; import java.util.Objects; import static org.opensearch.common.breaker.ResponseLimitSettings.LimitEntity.SHARDS; @@ -98,9 +101,6 @@ public void onResponse(ClusterStateResponse clusterStateResponse) { shardsRequest.getPageParams(), clusterStateResponse ); - String[] indices = Objects.isNull(paginationStrategy) - ? shardsRequest.getIndices() - : paginationStrategy.getRequestedIndices().toArray(new String[0]); catShardsResponse.setNodes(clusterStateResponse.getState().getNodes()); catShardsResponse.setResponseShards( Objects.isNull(paginationStrategy) @@ -108,8 +108,12 @@ public void onResponse(ClusterStateResponse clusterStateResponse) { : paginationStrategy.getRequestedEntities() ); catShardsResponse.setPageToken(Objects.isNull(paginationStrategy) ? null : paginationStrategy.getResponseToken()); + + String[] indices = Objects.isNull(paginationStrategy) + ? shardsRequest.getIndices() + : filterClosedIndices(clusterStateResponse.getState(), paginationStrategy.getRequestedIndices()); // For paginated queries, if strategy outputs no shards to be returned, avoid fetching IndicesStats. - if (shouldSkipIndicesStatsRequest(paginationStrategy)) { + if (shouldSkipIndicesStatsRequest(paginationStrategy, indices)) { catShardsResponse.setIndicesStatsResponse(IndicesStatsResponse.getEmptyResponse()); cancellableListener.onResponse(catShardsResponse); return; @@ -166,7 +170,19 @@ private void validateRequestLimit( } } - private boolean shouldSkipIndicesStatsRequest(ShardPaginationStrategy paginationStrategy) { - return Objects.nonNull(paginationStrategy) && paginationStrategy.getRequestedEntities().isEmpty(); + private boolean shouldSkipIndicesStatsRequest(ShardPaginationStrategy paginationStrategy, String[] indices) { + return Objects.nonNull(paginationStrategy) && (indices == null || indices.length == 0); + } + + /** + * Will be used by paginated query (_list/shards) to filter out closed indices (only consider OPEN) before fetching + * IndicesStats. Since pagination strategy always passes concrete indices to TransportIndicesStatsAction, + * the default behaviour of StrictExpandOpenAndForbidClosed leads to errors if closed indices are encountered. 
+ */ + private String[] filterClosedIndices(ClusterState clusterState, List strategyIndices) { + return strategyIndices.stream().filter(index -> { + IndexMetadata metadata = clusterState.metadata().indices().get(index); + return metadata != null && metadata.getState().equals(IndexMetadata.State.CLOSE) == false; + }).toArray(String[]::new); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java index fc97d67c6c3af..44408c5043fcf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java @@ -21,7 +21,6 @@ import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.IndexService; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; @@ -38,7 +37,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Transport action for shard segment replication operation. This transport action does not actually @@ -96,11 +97,11 @@ protected SegmentReplicationStatsResponse newResponse( ) { String[] shards = request.shards(); final List shardsToFetch = Arrays.stream(shards).map(Integer::valueOf).collect(Collectors.toList()); - // organize replica responses by allocationId. final Map replicaStats = new HashMap<>(); // map of index name to list of replication group stats. final Map> primaryStats = new HashMap<>(); + for (SegmentReplicationShardStatsResponse response : responses) { if (response != null) { if (response.getReplicaStats() != null) { @@ -109,6 +110,7 @@ protected SegmentReplicationStatsResponse newResponse( replicaStats.putIfAbsent(shardRouting.allocationId().getId(), response.getReplicaStats()); } } + if (response.getPrimaryStats() != null) { final ShardId shardId = response.getPrimaryStats().getShardId(); if (shardsToFetch.isEmpty() || shardsToFetch.contains(shardId.getId())) { @@ -126,15 +128,20 @@ protected SegmentReplicationStatsResponse newResponse( } } } - // combine the replica stats to the shard stat entry in each group. 
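Editor's note: the filterClosedIndices helper just added above is easy to verify in isolation, so here is a minimal, self-contained Java sketch of the same open-indices filter. A plain map stands in for the ClusterState index metadata, and all names in the sketch (ClosedIndexFilterSketch, State, filterClosed) are illustrative rather than part of this PR.

import java.util.List;
import java.util.Map;

class ClosedIndexFilterSketch {
    enum State { OPEN, CLOSE }

    // Keep only indices that exist in the metadata map and are not closed,
    // mirroring the filter above with a plain map in place of ClusterState.
    static String[] filterClosed(Map<String, State> indexStates, List<String> requested) {
        return requested.stream()
            .filter(index -> indexStates.get(index) == State.OPEN)
            .toArray(String[]::new);
    }

    public static void main(String[] args) {
        Map<String, State> states = Map.of("logs-open", State.OPEN, "logs-closed", State.CLOSE);
        // Prints [logs-open]: closed and unknown indices never reach the stats request.
        System.out.println(List.of(filterClosed(states, List.of("logs-open", "logs-closed", "missing"))));
    }
}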
- for (Map.Entry> entry : primaryStats.entrySet()) { - for (SegmentReplicationPerGroupStats group : entry.getValue()) { - for (SegmentReplicationShardStats replicaStat : group.getReplicaStats()) { - replicaStat.setCurrentReplicationState(replicaStats.getOrDefault(replicaStat.getAllocationId(), null)); - } - } - } - return new SegmentReplicationStatsResponse(totalShards, successfulShards, failedShards, primaryStats, shardFailures); + + Map> replicationStats = primaryStats.entrySet() + .stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue() + .stream() + .map(groupStats -> updateGroupStats(groupStats, replicaStats)) + .collect(Collectors.toList()) + ) + ); + + return new SegmentReplicationStatsResponse(totalShards, successfulShards, failedShards, replicationStats, shardFailures); } @Override @@ -144,9 +151,8 @@ protected SegmentReplicationStatsRequest readRequestFrom(StreamInput in) throws @Override protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplicationStatsRequest request, ShardRouting shardRouting) { - IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); ShardId shardId = shardRouting.shardId(); + IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() == false) { return null; @@ -156,11 +162,7 @@ protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplication return new SegmentReplicationShardStatsResponse(pressureService.getStatsForShard(indexShard)); } - // return information about only on-going segment replication events. - if (request.activeOnly()) { - return new SegmentReplicationShardStatsResponse(targetService.getOngoingEventSegmentReplicationState(shardId)); - } - return new SegmentReplicationShardStatsResponse(targetService.getSegmentReplicationState(shardId)); + return new SegmentReplicationShardStatsResponse(getSegmentReplicationState(shardId, request.activeOnly())); } @Override @@ -181,4 +183,83 @@ protected ClusterBlockException checkRequestBlock( ) { return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); } + + private SegmentReplicationPerGroupStats updateGroupStats( + SegmentReplicationPerGroupStats groupStats, + Map replicaStats + ) { + // Update the SegmentReplicationState for each of the replicas + Set updatedReplicaStats = groupStats.getReplicaStats() + .stream() + .peek(replicaStat -> replicaStat.setCurrentReplicationState(replicaStats.getOrDefault(replicaStat.getAllocationId(), null))) + .collect(Collectors.toSet()); + + // Compute search replica stats + Set searchReplicaStats = computeSearchReplicaStats(groupStats.getShardId(), replicaStats); + + // Combine ReplicaStats and SearchReplicaStats + Set combinedStats = Stream.concat(updatedReplicaStats.stream(), searchReplicaStats.stream()) + .collect(Collectors.toSet()); + + return new SegmentReplicationPerGroupStats(groupStats.getShardId(), combinedStats, groupStats.getRejectedRequestCount()); + } + + private Set computeSearchReplicaStats( + ShardId shardId, + Map replicaStats + ) { + return replicaStats.values() + .stream() + .filter(segmentReplicationState -> segmentReplicationState.getShardRouting().shardId().equals(shardId)) + .filter(segmentReplicationState -> segmentReplicationState.getShardRouting().isSearchOnly()) + .map(segmentReplicationState -> { + ShardRouting 
shardRouting = segmentReplicationState.getShardRouting(); + SegmentReplicationShardStats segmentReplicationStats = computeSegmentReplicationShardStats(shardRouting); + segmentReplicationStats.setCurrentReplicationState(segmentReplicationState); + return segmentReplicationStats; + }) + .collect(Collectors.toSet()); + } + + SegmentReplicationShardStats computeSegmentReplicationShardStats(ShardRouting shardRouting) { + ShardId shardId = shardRouting.shardId(); + SegmentReplicationState completedSegmentReplicationState = targetService.getlatestCompletedEventSegmentReplicationState(shardId); + SegmentReplicationState ongoingSegmentReplicationState = targetService.getOngoingEventSegmentReplicationState(shardId); + + return new SegmentReplicationShardStats( + shardRouting.allocationId().getId(), + 0, + calculateBytesRemainingToReplicate(ongoingSegmentReplicationState), + 0, + getCurrentReplicationLag(ongoingSegmentReplicationState), + getLastCompletedReplicationLag(completedSegmentReplicationState) + ); + } + + private SegmentReplicationState getSegmentReplicationState(ShardId shardId, boolean isActiveOnly) { + if (isActiveOnly) { + return targetService.getOngoingEventSegmentReplicationState(shardId); + } else { + return targetService.getSegmentReplicationState(shardId); + } + } + + private long calculateBytesRemainingToReplicate(SegmentReplicationState ongoingSegmentReplicationState) { + if (ongoingSegmentReplicationState == null) { + return 0; + } + return ongoingSegmentReplicationState.getIndex() + .fileDetails() + .stream() + .mapToLong(index -> index.length() - index.recovered()) + .sum(); + } + + private long getCurrentReplicationLag(SegmentReplicationState ongoingSegmentReplicationState) { + return ongoingSegmentReplicationState != null ? ongoingSegmentReplicationState.getTimer().time() : 0; + } + + private long getLastCompletedReplicationLag(SegmentReplicationState completedSegmentReplicationState) { + return completedSegmentReplicationState != null ? 
completedSegmentReplicationState.getTimer().time() : 0; + } } diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 819e09312a0df..558b7370749d5 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -430,6 +430,13 @@ private ClusterState getStateFromLocalNode(GetTermVersionResponse termVersionRes if (remoteClusterStateService != null && termVersionResponse.isStatePresentInRemote()) { try { + logger.info( + () -> new ParameterizedMessage( + "Term version checker downloading full cluster state for term {}, version {}", + termVersion.getTerm(), + termVersion.getVersion() + ) + ); ClusterStateTermVersion clusterStateTermVersion = termVersionResponse.getClusterStateTermVersion(); Optional clusterMetadataManifest = remoteClusterStateService .getClusterMetadataManifestByTermVersion( @@ -454,7 +461,7 @@ private ClusterState getStateFromLocalNode(GetTermVersionResponse termVersionRes return clusterStateFromRemote; } } catch (Exception e) { - logger.trace("Error while fetching from remote cluster state", e); + logger.error("Error while fetching from remote cluster state", e); } } return null; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java index 1cab739a20838..22861e0ba5c31 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java @@ -98,7 +98,7 @@ private GetTermVersionResponse buildResponse(GetTermVersionRequest request, Clus ClusterStateTermVersion termVersion = new ClusterStateTermVersion(state); if (discovery instanceof Coordinator) { Coordinator coordinator = (Coordinator) discovery; - if (coordinator.isRemotePublicationEnabled()) { + if (coordinator.canDownloadFullStateFromRemote()) { return new GetTermVersionResponse(termVersion, coordinator.isRemotePublicationEnabled()); } } diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 53b1d990f9a0c..9f1dcbe8fb587 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -35,7 +35,9 @@ import org.opensearch.cli.Command; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; import org.opensearch.env.Environment; import org.opensearch.http.HttpTransportSettings; import org.opensearch.plugins.PluginInfo; @@ -71,6 +73,9 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath; import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_PORT_DEFAULTS; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORTS; +import static 
org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING; /** * Initializes SecurityManager with necessary permissions. @@ -402,6 +407,7 @@ static void addFilePermissions(Permissions policy, Environment environment) thro private static void addBindPermissions(Permissions policy, Settings settings) { addSocketPermissionForHttp(policy, settings); addSocketPermissionForTransportProfiles(policy, settings); + addSocketPermissionForAux(policy, settings); } /** @@ -416,6 +422,29 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S addSocketPermissionForPortRange(policy, httpRange); } + /** + * Add dynamic {@link SocketPermission} based on AffixSetting AUX_TRANSPORT_PORTS. + * If an auxiliary transport type is enabled but has no corresponding port range setting fall back to AUX_PORT_DEFAULTS. + * + * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to. + * @param settings the {@link Settings} instance to read the gRPC settings from + */ + private static void addSocketPermissionForAux(final Permissions policy, final Settings settings) { + Set portsRanges = new HashSet<>(); + for (String auxType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) { + Setting auxTypePortSettings = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(auxType); + if (auxTypePortSettings.exists(settings)) { + portsRanges.add(auxTypePortSettings.get(settings)); + } else { + portsRanges.add(new PortsRange(AUX_PORT_DEFAULTS)); + } + } + + for (PortsRange portRange : portsRanges) { + addSocketPermissionForPortRange(policy, portRange.getPortRangeString()); + } + } + /** * Add dynamic {@link SocketPermission} based on transport settings. This method will first check if there is a port range specified in * the transport profile specified by {@code profileSettings} and will fall back to {@code settings}. 
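Editor's note: addSocketPermissionForAux above applies a simple per-type fallback: use the transport type's explicit port-range setting when one exists, otherwise fall back to AUX_PORT_DEFAULTS. The sketch below isolates that pattern with a plain map standing in for Settings; the default range value and the transport type names are assumptions made for illustration only.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class AuxPortFallbackSketch {
    // Stand-in for AUX_PORT_DEFAULTS; this concrete range is an assumption.
    static final String DEFAULT_PORT_RANGE = "9400-9500";

    // Resolve one port range per enabled transport type, falling back to the
    // shared default when no explicit per-type setting exists.
    static Set<String> resolvePortRanges(Set<String> enabledTypes, Map<String, String> perTypeSettings) {
        Set<String> ranges = new HashSet<>();
        for (String type : enabledTypes) {
            ranges.add(perTypeSettings.getOrDefault(type, DEFAULT_PORT_RANGE));
        }
        return ranges;
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        settings.put("my-aux-transport", "9100-9199"); // hypothetical transport type
        // "my-aux-transport" keeps its explicit range; "other" falls back to 9400-9500.
        System.out.println(resolvePortRanges(Set.of("my-aux-transport", "other"), settings));
    }
}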
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 6fee2037501e7..ef0f49b8ae394 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -1906,4 +1906,12 @@ public boolean isRemotePublicationEnabled() { } return false; } + + public boolean canDownloadFullStateFromRemote() { + if (remoteClusterStateService != null) { + return remoteClusterStateService.isRemotePublicationEnabled() && remoteClusterStateService.canDownloadFromRemoteForReadAPI(); + } + return false; + } + } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 7275d72f2db9f..4ad5b80038048 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -258,7 +258,7 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest } if (applyFullState == true) { - logger.debug( + logger.info( () -> new ParameterizedMessage( "Downloading full cluster state for term {}, version {}, stateUUID {}", manifest.getClusterTerm(), diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index a05938c176678..7999faece52ca 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -247,11 +247,17 @@ void balance() { final Map nodePrimaryShardCount = calculateNodePrimaryShardCount(remoteRoutingNodes); int totalPrimaryShardCount = nodePrimaryShardCount.values().stream().reduce(0, Integer::sum); - totalPrimaryShardCount += routingNodes.unassigned().getNumPrimaries(); - int avgPrimaryPerNode = (totalPrimaryShardCount + routingNodes.size() - 1) / routingNodes.size(); + int unassignedRemotePrimaryShardCount = 0; + for (ShardRouting shard : routingNodes.unassigned()) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) && shard.primary()) { + unassignedRemotePrimaryShardCount++; + } + } + totalPrimaryShardCount += unassignedRemotePrimaryShardCount; + final int avgPrimaryPerNode = (totalPrimaryShardCount + remoteRoutingNodes.size() - 1) / remoteRoutingNodes.size(); - ArrayDeque sourceNodes = new ArrayDeque<>(); - ArrayDeque targetNodes = new ArrayDeque<>(); + final ArrayDeque sourceNodes = new ArrayDeque<>(); + final ArrayDeque targetNodes = new ArrayDeque<>(); for (RoutingNode node : remoteRoutingNodes) { if (nodePrimaryShardCount.get(node.nodeId()) > avgPrimaryPerNode) { sourceNodes.add(node); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index d0b6f812e9ee2..6489f3cb33ce0 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -233,6 +233,13 @@ public ClusterState state() { return clusterState; } + /** + * Returns true if the 
appliedClusterState is not null + */ + public boolean isStateInitialised() { + return this.state.get() != null; + } + /** * Returns true if the appliedClusterState is not null */ diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index 1a79161d223e2..b4f2250f6dec9 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -183,6 +183,13 @@ public ClusterState state() { return clusterApplierService.state(); } + /** + * Returns true if the state in appliedClusterState is not null + */ + public boolean isStateInitialised() { + return clusterApplierService.isStateInitialised(); + } + /** * The state that is persisted to store but may not be applied to cluster. * @return ClusterState diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index bb8da190a6f35..5d55fb52c323d 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -80,6 +80,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING; + /** * A module to handle registering and binding all network related classes. * @@ -157,6 +160,8 @@ public final class NetworkModule { private final Map> transportFactories = new HashMap<>(); private final Map> transportHttpFactories = new HashMap<>(); + private final Map> transportAuxFactories = new HashMap<>(); + private final List transportInterceptors = new ArrayList<>(); /** @@ -222,6 +227,18 @@ public NetworkModule( registerHttpTransport(entry.getKey(), entry.getValue()); } + Map> auxTransportFactory = plugin.getAuxTransports( + settings, + threadPool, + circuitBreakerService, + networkService, + clusterSettings, + tracer + ); + for (Map.Entry> entry : auxTransportFactory.entrySet()) { + registerAuxTransport(entry.getKey(), entry.getValue()); + } + Map> transportFactory = plugin.getTransports( settings, threadPool, @@ -305,6 +322,12 @@ private void registerHttpTransport(String key, Supplier fac } } + private void registerAuxTransport(String key, Supplier factory) { + if (transportAuxFactories.putIfAbsent(key, factory) != null) { + throw new IllegalArgumentException("transport for name: " + key + " is already registered"); + } + } + /** * Register an allocation command. *
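Editor's note: the registerAuxTransport method above relies on Map.putIfAbsent returning the previous mapping when the key is already taken, which turns duplicate detection into a single map call. A tiny sketch of that idiom follows, with string suppliers standing in for transport factories; every name in it is hypothetical.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

class RegistryGuardSketch {
    private final Map<String, Supplier<String>> factories = new HashMap<>();

    // putIfAbsent returns the previous value when the key exists, so a
    // duplicate registration is detected and rejected atomically for this map.
    void register(String key, Supplier<String> factory) {
        if (factories.putIfAbsent(key, factory) != null) {
            throw new IllegalArgumentException("transport for name: " + key + " is already registered");
        }
    }

    public static void main(String[] args) {
        RegistryGuardSketch registry = new RegistryGuardSketch();
        registry.register("aux-a", () -> "first transport");
        try {
            registry.register("aux-a", () -> "duplicate"); // fails fast
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}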

@@ -346,6 +369,25 @@ public Supplier<HttpServerTransport> getHttpServerTransportSupplier() { return factory; } + /** + * Optional client/server transports that run in parallel to HttpServerTransport. + * Multiple transport types can be registered and enabled via AUX_TRANSPORT_TYPES_SETTING. + * An IllegalStateException is thrown if a transport type is enabled but not registered. + */ + public List<NetworkPlugin.AuxTransport> getAuxServerTransportList() { + List<NetworkPlugin.AuxTransport> serverTransportSuppliers = new ArrayList<>(); + + for (String transportType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) { + final Supplier<NetworkPlugin.AuxTransport> factory = transportAuxFactories.get(transportType); + if (factory == null) { + throw new IllegalStateException("Unsupported " + AUX_TRANSPORT_TYPES_KEY + " [" + transportType + "]"); + } + serverTransportSuppliers.add(factory.get()); + } + + return serverTransportSuppliers; + } + public Supplier<Transport> getTransportSupplier() { final String name; if (TRANSPORT_TYPE_SETTING.exists(settings)) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 04a19e32c4ebc..c27efa080ac4e 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -149,6 +149,7 @@ import org.opensearch.node.resource.tracker.ResourceTrackerSettings; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; +import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.PluginsService; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; @@ -362,6 +363,7 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED, NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION, NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME, + NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING, HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS, HttpTransportSettings.SETTING_CORS_ENABLED, HttpTransportSettings.SETTING_CORS_MAX_AGE, @@ -738,6 +740,8 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING, RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, RemoteClusterStateService.REMOTE_PUBLICATION_SETTING, + RemoteClusterStateService.REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API, + INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/common/time/DateUtils.java b/server/src/main/java/org/opensearch/common/time/DateUtils.java index 7ab395a1117e7..e5a019b58f7da 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtils.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtils.java @@ -272,6 +272,30 @@ public static Instant clampToNanosRange(Instant instant) { return instant; } + static final Instant INSTANT_LONG_MIN_VALUE = Instant.ofEpochMilli(Long.MIN_VALUE); + static final Instant INSTANT_LONG_MAX_VALUE = Instant.ofEpochMilli(Long.MAX_VALUE); + + /** + * Clamps the given {@link Instant} to the valid epoch millisecond range. + * + * - If the input is before {@code Long.MIN_VALUE}, it returns {@code Instant.ofEpochMilli(Long.MIN_VALUE)}.
+ * - If the input is after {@code Long.MAX_VALUE}, it returns {@code Instant.ofEpochMilli(Long.MAX_VALUE)}. + * - Otherwise, it returns the input as-is. + * + * @param instant the {@link Instant} to clamp + * @return the clamped {@link Instant} + * @throws NullPointerException if the input is {@code null} + */ + public static Instant clampToMillisRange(Instant instant) { + if (instant.isBefore(INSTANT_LONG_MIN_VALUE)) { + return INSTANT_LONG_MIN_VALUE; + } + if (instant.isAfter(INSTANT_LONG_MAX_VALUE)) { + return INSTANT_LONG_MAX_VALUE; + } + return instant; + } + /** * convert a long value to a java time instant * the long value resembles the nanoseconds since the epoch diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index c5fc6d5cae6a7..778ab3e56cf76 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -129,6 +129,7 @@ public class RemoteClusterStateService implements Closeable { * Gates the functionality of remote publication. */ public static final String REMOTE_PUBLICATION_SETTING_KEY = "cluster.remote_store.publication.enabled"; + public static final String REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API_KEY = "cluster.remote_state.download.serve_read_api.enabled"; public static final Setting REMOTE_PUBLICATION_SETTING = Setting.boolSetting( REMOTE_PUBLICATION_SETTING_KEY, @@ -137,6 +138,13 @@ public class RemoteClusterStateService implements Closeable { Property.Dynamic ); + public static final Setting REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API = Setting.boolSetting( + REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API_KEY, + true, + Property.NodeScope, + Property.Dynamic + ); + /** * Used to specify if cluster state metadata should be published to remote store */ @@ -235,6 +243,9 @@ public static RemoteClusterStateValidationMode parseString(String mode) { + "indices, coordination metadata updated : [{}], settings metadata updated : [{}], templates metadata " + "updated : [{}], custom metadata updated : [{}], indices routing updated : [{}]"; private volatile AtomicBoolean isPublicationEnabled; + + private volatile AtomicBoolean downloadFromRemoteForReadAPI; + private final String remotePathPrefix; private final RemoteClusterStateCache remoteClusterStateCache; @@ -281,6 +292,8 @@ public RemoteClusterStateService( && RemoteStoreNodeAttribute.isRemoteRoutingTableConfigured(settings) ); clusterSettings.addSettingsUpdateConsumer(REMOTE_PUBLICATION_SETTING, this::setRemotePublicationSetting); + this.downloadFromRemoteForReadAPI = new AtomicBoolean(clusterSettings.get(REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API)); + clusterSettings.addSettingsUpdateConsumer(REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API, this::setRemoteDownloadForReadAPISetting); this.remotePathPrefix = CLUSTER_REMOTE_STORE_STATE_PATH_PREFIX.get(settings); this.remoteRoutingTableService = RemoteRoutingTableServiceFactory.getService( repositoriesService, @@ -1124,6 +1137,14 @@ private void setRemotePublicationSetting(boolean remotePublicationSetting) { } } + private void setRemoteDownloadForReadAPISetting(boolean remoteDownloadForReadAPISetting) { + this.downloadFromRemoteForReadAPI.set(remoteDownloadForReadAPISetting); + } + + public boolean canDownloadFromRemoteForReadAPI() { + return this.downloadFromRemoteForReadAPI.get(); + } + // Package private for unit test RemoteRoutingTableService 
getRemoteRoutingTableService() { return this.remoteRoutingTableService; @@ -1473,8 +1494,22 @@ public ClusterState getClusterStateForManifest( try { ClusterState stateFromCache = remoteClusterStateCache.getState(clusterName, manifest); if (stateFromCache != null) { + logger.trace( + () -> new ParameterizedMessage( + "Found cluster state in cache for term {} and version {}", + manifest.getClusterTerm(), + manifest.getStateVersion() + ) + ); return stateFromCache; } + logger.info( + () -> new ParameterizedMessage( + "Cluster state not found in cache for term {} and version {}", + manifest.getClusterTerm(), + manifest.getStateVersion() + ) + ); final ClusterState clusterState; final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 991fbf12072be..7f78ae0b9d2ff 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -62,6 +62,7 @@ import org.opensearch.telemetry.tracing.channels.TraceableRestChannel; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BindTransportException; +import org.opensearch.transport.Transport; import java.io.IOException; import java.net.InetAddress; @@ -71,7 +72,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -192,7 +192,25 @@ protected void bindServer() { throw new BindTransportException("Failed to resolve publish address", e); } - final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress); + final int publishPort = Transport.resolveTransportPublishPort( + SETTING_HTTP_PUBLISH_PORT.get(settings), + boundAddresses, + publishInetAddress + ); + if (publishPort < 0) { + throw new BindHttpException( + "Failed to auto-resolve http publish port, multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). 
" + + "Please specify a unique port by setting " + + SETTING_HTTP_PORT.getKey() + + " or " + + SETTING_HTTP_PUBLISH_PORT.getKey() + ); + } + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); logger.info("{}", boundAddress); @@ -258,47 +276,6 @@ protected void doClose() {} */ protected abstract void stopInternal(); - // package private for tests - static int resolvePublishPort(Settings settings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings); - - if (publishPort < 0) { - for (TransportAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.address().getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final Set ports = new HashSet<>(); - for (TransportAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next(); - } - } - - if (publishPort < 0) { - throw new BindHttpException( - "Failed to auto-resolve http publish port, multiple bound addresses " - + boundAddresses - + " with distinct ports and none of them matched the publish address (" - + publishInetAddress - + "). " - + "Please specify a unique port by setting " - + SETTING_HTTP_PORT.getKey() - + " or " - + SETTING_HTTP_PUBLISH_PORT.getKey() - ); - } - return publishPort; - } - public void onException(HttpChannel channel, Exception e) { channel.handleException(e); if (lifecycle.started() == false) { diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java index 904d6a7aba5c6..ca52d8bf4bca0 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java @@ -33,9 +33,11 @@ import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; import org.opensearch.index.compositeindex.datacube.startree.index.CompositeIndexValues; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.opensearch.index.mapper.CompositeMappedFieldType; import org.opensearch.index.mapper.DocCountFieldMapper; -import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -44,6 +46,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -262,22 +265,38 @@ public SortedSetDocValues getSortedSet(FieldInfo field) { return DocValues.emptySortedSet(); } }); - } - // TODO : change this logic to evaluate for sortedNumericField specifically - else { + } else if 
(isSortedNumericField(compositeField)) { fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { @Override public SortedNumericDocValues getSortedNumeric(FieldInfo field) { return DocValues.emptySortedNumeric(); } }); + } else { + throw new IllegalStateException( + String.format(Locale.ROOT, "Unsupported DocValues field associated with the composite field : %s", compositeField) + ); } } compositeFieldSet.remove(compositeField); } private boolean isSortedSetField(String field) { - return mapperService.fieldType(field) instanceof KeywordFieldMapper.KeywordFieldType; + MappedFieldType ft = mapperService.fieldType(field); + assert ft.isAggregatable(); + return ft.fielddataBuilder( + "", + () -> { throw new UnsupportedOperationException("SearchLookup not available"); } + ) instanceof SortedSetOrdinalsIndexFieldData.Builder; + } + + private boolean isSortedNumericField(String field) { + MappedFieldType ft = mapperService.fieldType(field); + assert ft.isAggregatable(); + return ft.fielddataBuilder( + "", + () -> { throw new UnsupportedOperationException("SearchLookup not available"); } + ) instanceof IndexNumericFieldData.Builder; } @Override @@ -370,5 +389,4 @@ private static SegmentWriteState getSegmentWriteState(SegmentWriteState segmentW segmentWriteState.segmentSuffix ); } - } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java index e834706e2fa9d..b1e78d78d3ad2 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java @@ -24,7 +24,8 @@ import java.util.stream.Collectors; import static org.opensearch.index.compositeindex.datacube.DateDimension.CALENDAR_INTERVALS; -import static org.opensearch.index.compositeindex.datacube.KeywordDimension.KEYWORD; +import static org.opensearch.index.compositeindex.datacube.IpDimension.IP; +import static org.opensearch.index.compositeindex.datacube.OrdinalDimension.ORDINAL; /** * Dimension factory class mainly used to parse and create dimension from the mappings @@ -44,8 +45,10 @@ public static Dimension parseAndCreateDimension( return parseAndCreateDateDimension(name, dimensionMap, c); case NumericDimension.NUMERIC: return new NumericDimension(name); - case KEYWORD: - return new KeywordDimension(name); + case ORDINAL: + return new OrdinalDimension(name); + case IP: + return new IpDimension(name); default: throw new IllegalArgumentException( String.format(Locale.ROOT, "unsupported field type associated with dimension [%s] as part of star tree field", name) @@ -69,8 +72,10 @@ public static Dimension parseAndCreateDimension( return parseAndCreateDateDimension(name, dimensionMap, c); case NUMERIC: return new NumericDimension(name); - case KEYWORD: - return new KeywordDimension(name); + case ORDINAL: + return new OrdinalDimension(name); + case IP: + return new IpDimension(name); default: throw new IllegalArgumentException( String.format(Locale.ROOT, "unsupported field type associated with star tree dimension [%s]", name) diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java index d327f8ca1fa1e..f7911e72f36fc 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java +++ 
b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java @@ -30,8 +30,14 @@ public enum DimensionType { DATE, /** - * Represents a keyword dimension type. - * This is used for dimensions that contain keyword ordinals. + * Represents dimension types which use ordinals. + * This is used for dimensions that contain sortedSet ordinals. */ - KEYWORD + ORDINAL, + + /** + * Represents an IP dimension type. + * This is used for dimensions that contain IP ordinals. + */ + IP } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java new file mode 100644 index 0000000000000..9c3682bd2e0ea --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube; + +import org.apache.lucene.index.DocValuesType; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.function.Consumer; + +/** + * Composite index ip dimension class + * + * @opensearch.experimental + */ +@ExperimentalApi +public class IpDimension implements Dimension { + public static final String IP = "ip"; + private final String field; + + public IpDimension(String field) { + this.field = field; + } + + @Override + public String getField() { + return field; + } + + @Override + public int getNumSubDimensions() { + return 1; + } + + @Override + public void setDimensionValues(Long value, Consumer<Long> dimSetter) { + // This will set the ip dimension value's ordinal + dimSetter.accept(value); + } + + @Override + public List<String> getSubDimensionNames() { + return List.of(field); + } + + @Override + public DocValuesType getDocValuesType() { + return DocValuesType.SORTED_SET; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CompositeDataCubeFieldType.NAME, field); + builder.field(CompositeDataCubeFieldType.TYPE, IP); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IpDimension dimension = (IpDimension) o; + return Objects.equals(field, dimension.getField()); + } + + @Override + public int hashCode() { + return Objects.hash(field); + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java similarity index 87% rename from server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java rename to server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java index 58e248fd548d6..9cb4cd78bdaac 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java @@ -24,11 +24,11 @@ * @opensearch.experimental */ 
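Editor's note: to make the rename concrete, here is a compact sketch of the type-string dispatch after this change: "ordinal" takes over from the old "keyword" constant and "ip" joins it, both backed by SORTED_SET ordinals. The enum and parse method below are illustrative stand-ins, not DimensionFactory's real signature.

import java.util.Locale;

class DimensionParseSketch {
    enum Kind { DATE, NUMERIC, ORDINAL, IP }

    // Dispatch on the mapping's type string; "ordinal" replaces the old
    // "keyword" constant, and "ip" is the newly supported ordinal-backed type.
    static Kind parse(String type) {
        switch (type) {
            case "date":
                return Kind.DATE;
            case "numeric":
                return Kind.NUMERIC;
            case "ordinal":
                return Kind.ORDINAL;
            case "ip":
                return Kind.IP;
            default:
                throw new IllegalArgumentException(
                    String.format(Locale.ROOT, "unsupported star tree dimension type [%s]", type)
                );
        }
    }

    public static void main(String[] args) {
        System.out.println(parse("ordinal")); // ORDINAL
        System.out.println(parse("ip"));      // IP
    }
}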
@ExperimentalApi -public class KeywordDimension implements Dimension { - public static final String KEYWORD = "keyword"; +public class OrdinalDimension implements Dimension { + public static final String ORDINAL = "ordinal"; private final String field; - public KeywordDimension(String field) { + public OrdinalDimension(String field) { this.field = field; } @@ -62,7 +62,7 @@ public DocValuesType getDocValuesType() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(CompositeDataCubeFieldType.NAME, field); - builder.field(CompositeDataCubeFieldType.TYPE, KEYWORD); + builder.field(CompositeDataCubeFieldType.TYPE, ORDINAL); builder.endObject(); return builder; } @@ -71,7 +71,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - KeywordDimension dimension = (KeywordDimension) o; + OrdinalDimension dimension = (OrdinalDimension) o; return Objects.equals(field, dimension.getField()); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java index e538be5d5bece..e46cf6f56b36e 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java @@ -152,7 +152,7 @@ private static MetricStat validateStarTreeMetricSupport( MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat(); field = ((MetricAggregatorFactory) aggregatorFactory).getField(); - if (supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { + if (field != null && supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { return metricStat; } } diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 7fbb38c47572c..effee53d7cf63 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -122,7 +122,7 @@ public enum Resolution { MILLISECONDS(CONTENT_TYPE, NumericType.DATE) { @Override public long convert(Instant instant) { - return instant.toEpochMilli(); + return clampToValidRange(instant).toEpochMilli(); } @Override @@ -132,7 +132,7 @@ public Instant toInstant(long value) { @Override public Instant clampToValidRange(Instant instant) { - return instant; + return DateUtils.clampToMillisRange(instant); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java index 827792cdb1091..48da9b30ac1b0 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java @@ -43,6 +43,7 @@ import java.util.List; import static java.util.Collections.emptyList; +import static org.opensearch.index.mapper.FlatObjectFieldMapper.DOC_VALUE_NO_MATCH; /** * Value fetcher that loads from doc values. 
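Editor's note: the date change above is subtle: DateFieldMapper.Resolution.MILLISECONDS.convert now clamps through DateUtils.clampToMillisRange before calling toEpochMilli, which previously threw for extreme dates. A minimal, self-contained version of the guard follows; it is a sketch, not the production code.

import java.time.Instant;

class ClampToMillisSketch {
    static final Instant MIN = Instant.ofEpochMilli(Long.MIN_VALUE);
    static final Instant MAX = Instant.ofEpochMilli(Long.MAX_VALUE);

    // Clamp before converting, so toEpochMilli can no longer overflow a long.
    static Instant clampToMillisRange(Instant instant) {
        if (instant.isBefore(MIN)) {
            return MIN;
        }
        if (instant.isAfter(MAX)) {
            return MAX;
        }
        return instant;
    }

    public static void main(String[] args) {
        // Instant.MAX.toEpochMilli() throws ArithmeticException: long overflow;
        // clamping first makes the conversion total.
        System.out.println(clampToMillisRange(Instant.MAX).toEpochMilli()); // Long.MAX_VALUE
        System.out.println(clampToMillisRange(Instant.EPOCH).toEpochMilli()); // 0
    }
}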
@@ -70,7 +71,10 @@ public List fetchValues(SourceLookup lookup) throws IOException { } List result = new ArrayList(leaf.docValueCount()); for (int i = 0, count = leaf.docValueCount(); i < count; ++i) { - result.add(leaf.nextValue()); + Object value = leaf.nextValue(); + if (value != DOC_VALUE_NO_MATCH) { + result.add(value); + } } return result; } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index 50ff816695156..134baa70f80c2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -661,12 +661,22 @@ private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapp throws IOException { XContentParser parser = context.parser(); XContentParser.Token token; + String path = context.path().pathAsText(arrayFieldName); + boolean isNested = path.contains(".") || context.mapperService().isCompositeIndexFieldNestedField(path); // block array values for composite index fields - if (context.indexSettings().isCompositeIndex() && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName)) { + // Assume original index has 2 fields - status , nested.nested1.status + // case 1 : if status is part of composite index and nested.nested1.status is not part of composite index, + // then nested.nested1.status/nested.nested1/nested array should not be blocked + // case 2 : if nested.nested1.status is part of composite index and status is not part of composite index, + // then arrays in nested/nested.nested1 and nested.nested1.status fields should be blocked + // but arrays in status should not be blocked + if (context.indexSettings().isCompositeIndex() + && ((isNested == false && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName)) + || (isNested && context.mapperService().isCompositeIndexFieldNestedField(path)))) { throw new MapperParsingException( String.format( Locale.ROOT, - "object mapping for [%s] with array for [%s] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [%s] with array for [%s] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", mapper.name(), arrayFieldName ) diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index 0ccdb40f9d33a..13063a4761006 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -28,6 +28,7 @@ import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.JsonToStringXContentParser; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; @@ -36,11 +37,13 @@ import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.lookup.SearchLookup; import 
java.io.IOException; import java.io.UncheckedIOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -63,6 +66,7 @@ public final class FlatObjectFieldMapper extends DynamicKeyFieldMapper { public static final String CONTENT_TYPE = "flat_object"; + public static final Object DOC_VALUE_NO_MATCH = new Object(); /** * In flat_object field mapper, field type is similar to keyword field type @@ -272,7 +276,7 @@ NamedAnalyzer normalizer() { @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { failIfNoDocValues(); - return new SortedSetOrdinalsIndexFieldData.Builder(name(), CoreValuesSourceType.BYTES); + return new SortedSetOrdinalsIndexFieldData.Builder(valueFieldType().name(), CoreValuesSourceType.BYTES); } @Override @@ -304,6 +308,30 @@ protected String parseSourceValue(Object value) { }; } + @Override + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); + } + if (timeZone != null) { + throw new IllegalArgumentException( + "Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones" + ); + } + if (mappedFieldTypeName != null) { + return new FlatObjectDocValueFormat(mappedFieldTypeName + DOT_SYMBOL + name() + EQUAL_SYMBOL); + } else { + throw new IllegalArgumentException( + "Field [" + name() + "] of type [" + typeName() + "] does not support doc_value in root field" + ); + } + } + + @Override + public boolean isAggregatable() { + return false; + } + @Override public Object valueForDisplay(Object value) { if (value == null) { @@ -530,6 +558,39 @@ public Query wildcardQuery( return valueFieldType().wildcardQuery(rewriteValue(value), method, caseInsensitve, context); } + /** + * A doc_value formatter for flat_object field. 
+ */ + public class FlatObjectDocValueFormat implements DocValueFormat { + private static final String NAME = "flat_object"; + private final String prefix; + + public FlatObjectDocValueFormat(String prefix) { + this.prefix = prefix; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) {} + + @Override + public Object format(BytesRef value) { + String parsedValue = inputToString(value); + if (parsedValue.startsWith(prefix) == false) { + return DOC_VALUE_NO_MATCH; + } + return parsedValue.substring(prefix.length()); + } + + @Override + public BytesRef parseBytesRef(String value) { + return new BytesRef((String) valueFieldType.rewriteForDocValue(rewriteValue(value))); + } + } } private final ValueFieldMapper valueFieldMapper; diff --git a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java index e23a48f94f450..1283aa302c111 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java @@ -52,6 +52,7 @@ import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.network.NetworkAddress; +import org.opensearch.index.compositeindex.datacube.DimensionType; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -68,6 +69,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.BiFunction; import java.util.function.Supplier; @@ -161,6 +163,11 @@ public IpFieldMapper build(BuilderContext context) { ); } + @Override + public Optional getSupportedDataCubeDimensionType() { + return Optional.of(DimensionType.IP); + } + } public static final TypeParser PARSER = new TypeParser((n, c) -> { diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index df14a5811f6a0..90e43c818e137 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -259,7 +259,7 @@ public KeywordFieldMapper build(BuilderContext context) { @Override public Optional getSupportedDataCubeDimensionType() { - return Optional.of(DimensionType.KEYWORD); + return Optional.of(DimensionType.ORDINAL); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 84b0b1d69432d..5a7c6a0102052 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -228,6 +228,7 @@ public enum MergeReason { private volatile Set compositeMappedFieldTypes; private volatile Set fieldsPartOfCompositeMappings; + private volatile Set nestedFieldsPartOfCompositeMappings; public MapperService( IndexSettings indexSettings, @@ -554,10 +555,29 @@ private synchronized Map internalMerge(DocumentMapper ma private void buildCompositeFieldLookup() { Set fieldsPartOfCompositeMappings = new HashSet<>(); + Set nestedFieldsPartOfCompositeMappings = new HashSet<>(); + for (CompositeMappedFieldType fieldType : compositeMappedFieldTypes) { 
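Editor's note: a stripped-down sketch of the prefix filtering behind the FlatObjectDocValueFormat defined just above. Each stored value is kept only when it carries the field's own "path=" prefix; anything else maps to a NO_MATCH sentinel that the patched DocValueFetcher (earlier in this diff) silently skips. Plain Strings stand in for BytesRef, and the sample values are invented.

import java.util.List;
import java.util.stream.Collectors;

class FlatObjectFormatSketch {
    // Sentinel compared by identity, like DOC_VALUE_NO_MATCH above.
    static final Object NO_MATCH = new Object();

    // A stored value is kept only when it starts with this field's prefix;
    // values belonging to other subfields map to the sentinel and are dropped.
    static Object format(String prefix, String storedValue) {
        if (storedValue.startsWith(prefix) == false) {
            return NO_MATCH;
        }
        return storedValue.substring(prefix.length());
    }

    public static void main(String[] args) {
        List<String> docValues = List.of("catalog.title=Lucene", "catalog.author=Doug");
        List<Object> fetched = docValues.stream()
            .map(value -> format("catalog.title=", value))
            .filter(value -> value != NO_MATCH)
            .collect(Collectors.toList());
        System.out.println(fetched); // [Lucene]
    }
}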
fieldsPartOfCompositeMappings.addAll(fieldType.fields()); + + for (String field : fieldType.fields()) { + String[] parts = field.split("\\."); + if (parts.length > 1) { + StringBuilder path = new StringBuilder(); + for (int i = 0; i < parts.length; i++) { + if (i == 0) { + path.append(parts[i]); + } else { + path.append(".").append(parts[i]); + } + nestedFieldsPartOfCompositeMappings.add(path.toString()); + } + } + } } + this.fieldsPartOfCompositeMappings = fieldsPartOfCompositeMappings; + this.nestedFieldsPartOfCompositeMappings = nestedFieldsPartOfCompositeMappings; } private boolean assertSerialization(DocumentMapper mapper) { @@ -690,6 +710,11 @@ public boolean isFieldPartOfCompositeIndex(String field) { return fieldsPartOfCompositeMappings.contains(field); } + public boolean isCompositeIndexFieldNestedField(String field) { + return nestedFieldsPartOfCompositeMappings.contains(field); + + } + public ObjectMapper getObjectMapper(String name) { return this.mapper == null ? null : this.mapper.objectMappers().get(name); } diff --git a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java index fb97f8c309a70..757de65248d33 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java @@ -16,6 +16,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; @@ -290,6 +291,16 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, return new SourceFieldMatchQuery(builder.build(), phrasePrefixQuery, this, context); } + @Override + public Query termQuery(Object value, QueryShardContext context) { + return new ConstantScoreQuery(super.termQuery(value, context)); + } + + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + return new ConstantScoreQuery(super.termQueryCaseInsensitive(value, context)); + } + private List> getTermsFromTokenStream(TokenStream stream) throws IOException { final List> termArray = new ArrayList<>(); TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index 40f05a8b76755..7b361e12330a3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -23,6 +23,7 @@ import org.opensearch.search.lookup.SearchLookup; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -431,8 +432,46 @@ private static boolean isBuilderAllowedForMetric(Mapper.Builder builder) { return builder.isDataCubeMetricSupported(); } - private Optional findMapperBuilderByName(String field, List mappersBuilders) { - return mappersBuilders.stream().filter(builder -> builder.name().equals(field)).findFirst(); + private Optional findMapperBuilderByName(String name, List mappersBuilders) { + String[] parts = name.split("\\."); + + // Start with the top-level builders + Optional currentBuilder = 
mappersBuilders.stream()
+            .filter(builder -> builder.name().equals(parts[0]))
+            .findFirst();
+
+        // If we can't find the first part, or if there's only one part, return the result
+        if (currentBuilder.isEmpty() || parts.length == 1) {
+            return currentBuilder;
+        }
+
+        // Navigate through the nested structure
+        try {
+            Mapper.Builder builder = currentBuilder.get();
+            for (int i = 1; i < parts.length; i++) {
+                List<Mapper.Builder> childBuilders = getChildBuilders(builder);
+                int finalI = i;
+                builder = childBuilders.stream()
+                    .filter(b -> b.name().equals(parts[finalI]))
+                    .findFirst()
+                    .orElseThrow(
+                        () -> new IllegalArgumentException(
+                            String.format(Locale.ROOT, "Could not find nested field [%s] in path [%s]", parts[finalI], name)
+                        )
+                    );
+            }
+            return Optional.of(builder);
+        } catch (Exception e) {
+            return Optional.empty();
+        }
+    }
+
+    // Helper method to get child builders from a parent builder
+    private List<Mapper.Builder> getChildBuilders(Mapper.Builder builder) {
+        if (builder instanceof ObjectMapper.Builder) {
+            return ((ObjectMapper.Builder) builder).mappersBuilders;
+        }
+        return Collections.emptyList();
+    }

     public Builder(String name, ObjectMapper.Builder objBuilder) {
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index c78ee6711dcda..704a23890b07a 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++ b/server/src/main/java/org/opensearch/node/Node.java
@@ -1216,6 +1216,9 @@ protected Node(
             SearchExecutionStatsCollector.makeWrapper(responseCollectorService)
         );
         final HttpServerTransport httpServerTransport = newHttpTransport(networkModule);
+
+        pluginComponents.addAll(newAuxTransports(networkModule));
+
         final IndexingPressureService indexingPressureService = new IndexingPressureService(settings, clusterService);
         // Going forward, IndexingPressureService will have required constructs for exposing listeners/interfaces for plugin
         // development. Then we can deprecate Getter and Setter for IndexingPressureService in ClusterService (#478).
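A note on the StarTreeMapper change above: findMapperBuilderByName now accepts dot-delimited field names and descends one path segment at a time through the child builders of object mappers, yielding an empty Optional when any segment cannot be resolved. The standalone sketch below illustrates the same descent over a simplified tree; the Node record and resolve method are illustrative stand-ins for Mapper.Builder / ObjectMapper.Builder, not OpenSearch API.

import java.util.Map;
import java.util.Optional;

// Sketch of dotted-path resolution in the spirit of findMapperBuilderByName above:
// split on '.', look up the first segment among the top-level entries, then descend
// through children one segment at a time.
public class DottedPathResolver {

    // Stand-in for Mapper.Builder; children plays the role of an object mapper's child builders.
    record Node(String name, Map<String, Node> children) {}

    static Optional<Node> resolve(String dottedName, Map<String, Node> topLevel) {
        String[] parts = dottedName.split("\\.");
        Node current = topLevel.get(parts[0]);
        for (int i = 1; current != null && i < parts.length; i++) {
            current = current.children().get(parts[i]); // null when the segment does not exist
        }
        // an empty Optional mirrors the catch-and-return-empty behavior in the diff
        return Optional.ofNullable(current);
    }

    public static void main(String[] args) {
        Node zip = new Node("zip", Map.of());
        Node address = new Node("address", Map.of("zip", zip));
        Node user = new Node("user", Map.of("address", address));
        Map<String, Node> topLevel = Map.of("user", user);
        System.out.println(resolve("user.address.zip", topLevel).isPresent()); // true
        System.out.println(resolve("user.phone", topLevel).isPresent());       // false
    }
}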
@@ -2113,6 +2116,10 @@ protected HttpServerTransport newHttpTransport(NetworkModule networkModule) {
         return networkModule.getHttpServerTransportSupplier().get();
     }

+    protected List<AuxTransport> newAuxTransports(NetworkModule networkModule) {
+        return networkModule.getAuxServerTransportList();
+    }
+
     private static class LocalNodeFactory implements Function<BoundTransportAddress, DiscoveryNode> {
         private final SetOnce<DiscoveryNode> localNode = new SetOnce<>();
         private final String persistentNodeId;
diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java
index c1c041ce01198..fb97cf40d90d6 100644
--- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java
+++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java
@@ -21,6 +21,7 @@
 import org.opensearch.repositories.RepositoriesService;
 import org.opensearch.repositories.Repository;
 import org.opensearch.repositories.RepositoryException;
+import org.opensearch.repositories.RepositoryMissingException;
 import org.opensearch.threadpool.ThreadPool;

 import java.util.ArrayList;
@@ -183,6 +184,20 @@ public RepositoriesMetadata updateRepositoriesMetadata(DiscoveryNode joiningNode
         boolean repositoryAlreadyPresent = false;
         for (RepositoryMetadata existingRepositoryMetadata : existingRepositories.repositories()) {
             if (newRepositoryMetadata.name().equals(existingRepositoryMetadata.name())) {
+                try {
+                    // Handles the case where, during a previous node-join attempt, the publish operation succeeded
+                    // but the commit operation failed: the cluster state may then contain repository metadata that
+                    // was never applied to the repository service, which can lead to assertion failures down the line.
+                    repositoriesService.get().repository(newRepositoryMetadata.name());
+                } catch (RepositoryMissingException e) {
+                    logger.warn(
+                        "Skipping repositories metadata checks: Remote repository [{}] is in the cluster state but not present "
+                            + "in the repository service.",
+                        newRepositoryMetadata.name()
+                    );
+                    break;
+                }
+
                 try {
                     // This will help in handling two scenarios -
                     // 1.
When a fresh cluster is formed and a node tries to join the cluster, the repository
diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
index 138ef6f71280d..516aa94534f94 100644
--- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
@@ -31,9 +31,13 @@
 package org.opensearch.plugins;

+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.transport.PortsRange;
 import org.opensearch.common.util.BigArrays;
 import org.opensearch.common.util.PageCacheRecycler;
 import org.opensearch.common.util.concurrent.ThreadContext;
@@ -49,8 +53,12 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Function;
 import java.util.function.Supplier;

+import static java.util.Collections.emptyList;
+import static org.opensearch.common.settings.Setting.affixKeySetting;
+
 /**
  * Plugin for extending network and transport related classes
  *
@@ -58,6 +66,49 @@
  */
 public interface NetworkPlugin {

+    /**
+     * Auxiliary transports are lifecycle components with an associated port range.
+     * These pluggable client/server transport implementations have their lifecycle managed by Node.
+     *
+     * Auxiliary transports are additionally defined by a port range on which they bind. Opening permissions on these
+     * ports is awkward as {@link org.opensearch.bootstrap.Security} is configured prior to Node initialization during
+     * bootstrap. To allow pluggable AuxTransports access to configurable port ranges we require the port range to be
+     * provided through an {@link org.opensearch.common.settings.Setting.AffixSetting} of the form
+     * 'AUX_SETTINGS_PREFIX.{aux-transport-key}.ports'.
+     */
+    abstract class AuxTransport extends AbstractLifecycleComponent {
+        public static final String AUX_SETTINGS_PREFIX = "aux.transport.";
+        public static final String AUX_TRANSPORT_TYPES_KEY = AUX_SETTINGS_PREFIX + "types";
+        public static final String AUX_PORT_DEFAULTS = "9400-9500";
+        public static final Setting.AffixSetting<PortsRange> AUX_TRANSPORT_PORTS = affixKeySetting(
+            AUX_SETTINGS_PREFIX,
+            "ports",
+            key -> new Setting<>(key, AUX_PORT_DEFAULTS, PortsRange::new, Setting.Property.NodeScope)
+        );
+
+        public static final Setting<List<String>> AUX_TRANSPORT_TYPES_SETTING = Setting.listSetting(
+            AUX_TRANSPORT_TYPES_KEY,
+            emptyList(),
+            Function.identity(),
+            Setting.Property.NodeScope
+        );
+    }
+
+    /**
+     * Auxiliary transports are optional and run in parallel to the default HttpServerTransport.
+     * Returns a map of AuxTransport suppliers.
+     */
+    @ExperimentalApi
+    default Map<String, Supplier<AuxTransport>> getAuxTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        CircuitBreakerService circuitBreakerService,
+        NetworkService networkService,
+        ClusterSettings clusterSettings,
+        Tracer tracer
+    ) {
+        return Collections.emptyMap();
+    }
+
     /**
      * Returns a list of {@link TransportInterceptor} instances that are used to intercept incoming and outgoing
      * transport (inter-node) requests.
This must not return null
diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java
index b6030f4ded5e5..4ff699e8017ba 100644
--- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java
+++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java
@@ -86,6 +86,8 @@ public class PluginInfo implements Writeable, ToXContentObject {
     private final String classname;
     private final String customFolderName;
     private final List<String> extendedPlugins;
+    // the subset of extendedPlugins whose dependency is declared optional ("pluginName;optional=true")
+    private final List<String> optionalExtendedPlugins;
     private final boolean hasNativeController;

     /**
@@ -149,7 +151,11 @@ public PluginInfo(
         this.javaVersion = javaVersion;
         this.classname = classname;
         this.customFolderName = customFolderName;
-        this.extendedPlugins = Collections.unmodifiableList(extendedPlugins);
+        this.extendedPlugins = extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList());
+        this.optionalExtendedPlugins = extendedPlugins.stream()
+            .filter(PluginInfo::isOptionalExtension)
+            .map(s -> s.split(";")[0])
+            .collect(Collectors.toUnmodifiableList());
         this.hasNativeController = hasNativeController;
     }

@@ -209,6 +215,16 @@ public PluginInfo(final StreamInput in) throws IOException {
         this.customFolderName = in.readString();
         this.extendedPlugins = in.readStringList();
         this.hasNativeController = in.readBoolean();
+        if (in.getVersion().onOrAfter(Version.V_2_19_0)) {
+            this.optionalExtendedPlugins = in.readStringList();
+        } else {
+            this.optionalExtendedPlugins = new ArrayList<>();
+        }
+    }
+
+    static boolean isOptionalExtension(String extendedPlugin) {
+        String[] dependency = extendedPlugin.split(";");
+        return dependency.length > 1 && "optional=true".equals(dependency[1]);
     }

     @Override
@@ -234,6 +250,9 @@ This works for currently supported range notations (=,~)
         }
         out.writeStringCollection(extendedPlugins);
         out.writeBoolean(hasNativeController);
+        if (out.getVersion().onOrAfter(Version.V_2_19_0)) {
+            out.writeStringCollection(optionalExtendedPlugins);
+        }
     }

     /**
@@ -417,8 +436,17 @@ public String getFolderName() {
      *
      * @return the names of the plugins extended
      */
     public List<String> getExtendedPlugins() {
-        return extendedPlugins;
+        return extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList());
+    }
+
+    /**
+     * Checks whether the dependency on the given extended plugin is optional,
+     * i.e. it was declared as "pluginName;optional=true".
+     */
+    public boolean isExtendedPluginOptional(String extendedPlugin) {
+        return optionalExtendedPlugins.contains(extendedPlugin);
     }

     /**
@@ -493,6 +521,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             builder.field("custom_foldername", customFolderName);
             builder.field("extended_plugins", extendedPlugins);
             builder.field("has_native_controller", hasNativeController);
+            builder.field("optional_extended_plugins", optionalExtendedPlugins);
         }
         builder.endObject();
diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java
index f08c9c738f1b4..9bc1f1334122e 100644
--- a/server/src/main/java/org/opensearch/plugins/PluginsService.java
+++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java
@@ -524,7 +524,13 @@ private static void addSortedBundle(
         for (String dependency : bundle.plugin.getExtendedPlugins()) {
             Bundle depBundle = bundles.get(dependency);
             if (depBundle == null) {
-                throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]");
+                if (bundle.plugin.isExtendedPluginOptional(dependency)) {
+                    logger.warn(
+                        "Missing plugin [{}], dependency of [{}]; some features of [{}] may not function without it being installed.",
+                        dependency,
+                        name,
+                        name
+                    );
+                    continue;
+                } else {
+                    throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]");
+                }
             }
             addSortedBundle(depBundle, bundles, sortedBundles, dependencyStack);
             assert sortedBundles.contains(depBundle);
@@ -653,6 +659,9 @@ static void checkBundleJarHell(Set<URL> classpath, Bundle bundle, Map<String, Set<URL>> transitiveUrls) {
         Set<URL> urls = new HashSet<>();
         for (String extendedPlugin : exts) {
             Set<URL> pluginUrls = transitiveUrls.get(extendedPlugin);
+            if (pluginUrls == null && bundle.plugin.isExtendedPluginOptional(extendedPlugin)) {
+                continue;
+            }
             assert pluginUrls != null : "transitive urls should have already been set for " + extendedPlugin;

             Set<URL> intersection = new HashSet<>(urls);
@@ -704,6 +713,10 @@ private Plugin loadBundle(Bundle bundle, Map<String, Plugin> loaded) {
         List<ClassLoader> extendedLoaders = new ArrayList<>();
         for (String extendedPluginName : bundle.plugin.getExtendedPlugins()) {
             Plugin extendedPlugin = loaded.get(extendedPluginName);
+            if (extendedPlugin == null && bundle.plugin.isExtendedPluginOptional(extendedPluginName)) {
+                // extended plugin is optional and is not installed
+                continue;
+            }
             assert extendedPlugin != null;
             if (ExtensiblePlugin.class.isInstance(extendedPlugin) == false) {
                 throw new IllegalStateException("Plugin [" + name + "] cannot extend non-extensible plugin [" + extendedPluginName + "]");
diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java
index 9aec81536dbd0..49065be0abb25 100644
--- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java
+++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java
@@ -80,6 +80,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -904,6 +905,12 @@ public void ensureValidSystemRepositoryUpdate(RepositoryMetadata newRepositoryMe
         Settings newRepositoryMetadataSettings = newRepositoryMetadata.settings();
         Settings currentRepositoryMetadataSettings = currentRepositoryMetadata.settings();

+        assert Objects.nonNull(repository) : String.format(
+            Locale.ROOT,
+            "repository [%s] not present in RepositoryService",
+            currentRepositoryMetadata.name()
+        );
+
         List<String> restrictedSettings = repository.getRestrictedSystemRepositorySettings()
             .stream()
             .map(setting -> setting.getKey())
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
index a82c05dab0b44..998ae5e4791b7 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
@@ -2994,7 +2994,12 @@ public String startVerification() {
      */
     private BlobContainer testContainer(String seed) {
         BlobPath testBlobPath;
-        if (prefixModeVerification == true) {
+
+        if (prefixModeVerification == true
+            && (clusterService.isStateInitialised() == false
+                || clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.V_2_17_0))) {
+            // During the
remote store node bootstrap, the cluster state is not initialised + // Otherwise, the cluster state is initialised and available with the min node version information PathInput pathInput = PathInput.builder().basePath(basePath()).indexUUID(seed).build(); testBlobPath = PathType.HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java index d862b2c2784de..41344fd06cbbc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java @@ -104,6 +104,6 @@ public String getStatsSubtype() { } public String getField() { - return config.fieldContext().field(); + return config.fieldContext() != null ? config.fieldContext().field() : null; } } diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 269a4c87dfb72..59d3b110aeca8 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -198,7 +198,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); - map.put(Names.REMOTE_STATE_READ, ThreadPoolType.SCALING); + map.put(Names.REMOTE_STATE_READ, ThreadPoolType.FIXED); map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); map.put(Names.REMOTE_STATE_CHECKSUM, ThreadPoolType.FIXED); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); @@ -306,7 +306,7 @@ public ThreadPool( ); builders.put( Names.REMOTE_STATE_READ, - new ScalingExecutorBuilder(Names.REMOTE_STATE_READ, 1, boundedBy(4 * allocatedProcessors, 4, 32), TimeValue.timeValueMinutes(5)) + new FixedExecutorBuilder(settings, Names.REMOTE_STATE_READ, boundedBy(4 * allocatedProcessors, 4, 32), 120000) ); builders.put( Names.INDEX_SEARCHER, diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index f56cd146ce953..f80a29872a78d 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -521,38 +521,8 @@ private BoundTransportAddress createBoundTransportAddress(ProfileSettings profil throw new BindTransportException("Failed to resolve publish address", e); } - final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); - final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); - return new BoundTransportAddress(transportBoundAddresses, publishAddress); - } - - // package private for tests - static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = profileSettings.publishPort; - - // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress - if (publishPort < 0) { - for (InetSocketAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.getAddress(); - if (boundInetAddress.isAnyLocalAddress() || 
boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final Set ports = new HashSet<>(); - for (InetSocketAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next(); - } - } - - if (publishPort < 0) { + final int publishPort = Transport.resolvePublishPort(profileSettings.publishPort, boundAddresses, publishInetAddress); + if (publishPort == -1) { String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName; throw new BindTransportException( "Failed to auto-resolve publish port" @@ -568,7 +538,9 @@ static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { + if (publishPort < 0) { + for (InetSocketAddress boundAddress : boundAddresses) { + InetAddress boundInetAddress = boundAddress.getAddress(); + if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { + publishPort = boundAddress.getPort(); + break; + } + } + } + + if (publishPort < 0) { + final Set ports = new HashSet<>(); + for (InetSocketAddress boundAddress : boundAddresses) { + ports.add(boundAddress.getPort()); + } + if (ports.size() == 1) { + publishPort = ports.iterator().next(); + } + } + + return publishPort; + } + + static int resolveTransportPublishPort(int publishPort, List boundAddresses, InetAddress publishInetAddress) { + return Transport.resolvePublishPort( + publishPort, + boundAddresses.stream().map(TransportAddress::address).collect(Collectors.toList()), + publishInetAddress + ); + } + /** * A unidirectional connection to a {@link DiscoveryNode} * diff --git a/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java new file mode 100644 index 0000000000000..ea455d607f058 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java @@ -0,0 +1,595 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.replication; + +import org.opensearch.Version; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlock; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardIterator; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationTimer; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TransportSegmentReplicationStatsActionTests extends OpenSearchTestCase { + @Mock + private ClusterService clusterService; + @Mock + private TransportService transportService; + @Mock + private IndicesService indicesService; + @Mock + private SegmentReplicationTargetService targetService; + @Mock + private ActionFilters actionFilters; + @Mock + private IndexNameExpressionResolver indexNameExpressionResolver; + @Mock + private SegmentReplicationPressureService pressureService; + @Mock + private IndexShard indexShard; + @Mock + private IndexService indexService; + + private TransportSegmentReplicationStatsAction action; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this); + super.setUp(); + action = new TransportSegmentReplicationStatsAction( + clusterService, + transportService, + indicesService, + targetService, + actionFilters, + indexNameExpressionResolver, + pressureService + ); + } + + public void testShardReturnsAllTheShardsForTheIndex() { + SegmentReplicationStatsRequest segmentReplicationStatsRequest = mock(SegmentReplicationStatsRequest.class); + String[] concreteIndices = new String[] { "test-index" }; + ClusterState clusterState = mock(ClusterState.class); + RoutingTable routingTables 
= mock(RoutingTable.class); + ShardsIterator shardsIterator = mock(ShardIterator.class); + + when(clusterState.routingTable()).thenReturn(routingTables); + when(routingTables.allShardsIncludingRelocationTargets(any())).thenReturn(shardsIterator); + assertEquals(shardsIterator, action.shards(clusterState, segmentReplicationStatsRequest, concreteIndices)); + } + + public void testShardOperationWithPrimaryShard() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.primary()).thenReturn(true); + when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService); + when(indexService.getShard(shardId.id())).thenReturn(indexShard); + when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled()); + + SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting); + + assertNotNull(response); + verify(pressureService).getStatsForShard(any()); + } + + public void testShardOperationWithReplicaShard() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + request.activeOnly(false); + SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.primary()).thenReturn(false); + when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService); + when(indexService.getShard(shardId.id())).thenReturn(indexShard); + when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled()); + when(targetService.getSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState); + + SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting); + + assertNotNull(response); + assertNull(response.getPrimaryStats()); + assertNotNull(response.getReplicaStats()); + verify(targetService).getSegmentReplicationState(shardId); + } + + public void testShardOperationWithReplicaShardActiveOnly() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + request.activeOnly(true); + SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.primary()).thenReturn(false); + when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService); + when(indexService.getShard(shardId.id())).thenReturn(indexShard); + when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled()); + when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState); + + SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting); + + assertNotNull(response); + assertNull(response.getPrimaryStats()); + assertNotNull(response.getReplicaStats()); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testComputeBytesRemainingToReplicateWhenCompletedAndOngoingStateNotNull() { + ShardRouting shardRouting = 
mock(ShardRouting.class); + SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class); + SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + AllocationId allocationId = AllocationId.newInitializing(); + ReplicationTimer replicationTimerCompleted = mock(ReplicationTimer.class); + ReplicationTimer replicationTimerOngoing = mock(ReplicationTimer.class); + long time1 = 10; + long time2 = 15; + ReplicationLuceneIndex replicationLuceneIndex = new ReplicationLuceneIndex(); + replicationLuceneIndex.addFileDetail("name1", 10, false); + replicationLuceneIndex.addFileDetail("name2", 15, false); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.allocationId()).thenReturn(allocationId); + when(targetService.getlatestCompletedEventSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState); + when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState); + when(completedSegmentReplicationState.getTimer()).thenReturn(replicationTimerCompleted); + when(onGoingSegmentReplicationState.getTimer()).thenReturn(replicationTimerOngoing); + when(replicationTimerOngoing.time()).thenReturn(time1); + when(replicationTimerCompleted.time()).thenReturn(time2); + when(onGoingSegmentReplicationState.getIndex()).thenReturn(replicationLuceneIndex); + + SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting); + + assertNotNull(segmentReplicationShardStats); + assertEquals(25, segmentReplicationShardStats.getBytesBehindCount()); + assertEquals(10, segmentReplicationShardStats.getCurrentReplicationLagMillis()); + assertEquals(15, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis()); + + verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testCalculateBytesRemainingToReplicateWhenNoCompletedState() { + ShardRouting shardRouting = mock(ShardRouting.class); + SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + AllocationId allocationId = AllocationId.newInitializing(); + ReplicationTimer replicationTimerOngoing = mock(ReplicationTimer.class); + long time1 = 10; + ReplicationLuceneIndex replicationLuceneIndex = new ReplicationLuceneIndex(); + replicationLuceneIndex.addFileDetail("name1", 10, false); + replicationLuceneIndex.addFileDetail("name2", 15, false); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.allocationId()).thenReturn(allocationId); + when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState); + when(onGoingSegmentReplicationState.getTimer()).thenReturn(replicationTimerOngoing); + when(replicationTimerOngoing.time()).thenReturn(time1); + when(onGoingSegmentReplicationState.getIndex()).thenReturn(replicationLuceneIndex); + + SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting); + + assertNotNull(segmentReplicationShardStats); + assertEquals(25, segmentReplicationShardStats.getBytesBehindCount()); + assertEquals(10, segmentReplicationShardStats.getCurrentReplicationLagMillis()); + assertEquals(0, 
segmentReplicationShardStats.getLastCompletedReplicationTimeMillis()); + + verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testCalculateBytesRemainingToReplicateWhenNoOnGoingState() { + ShardRouting shardRouting = mock(ShardRouting.class); + SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + AllocationId allocationId = AllocationId.newInitializing(); + ReplicationTimer replicationTimerCompleted = mock(ReplicationTimer.class); + long time2 = 15; + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.allocationId()).thenReturn(allocationId); + when(targetService.getlatestCompletedEventSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState); + when(completedSegmentReplicationState.getTimer()).thenReturn(replicationTimerCompleted); + when(replicationTimerCompleted.time()).thenReturn(time2); + + SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting); + + assertNotNull(segmentReplicationShardStats); + assertEquals(0, segmentReplicationShardStats.getBytesBehindCount()); + assertEquals(0, segmentReplicationShardStats.getCurrentReplicationLagMillis()); + assertEquals(15, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis()); + + verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testCalculateBytesRemainingToReplicateWhenNoCompletedAndOngoingState() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + AllocationId allocationId = AllocationId.newInitializing(); + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.allocationId()).thenReturn(allocationId); + + SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting); + + assertNotNull(segmentReplicationShardStats); + assertEquals(0, segmentReplicationShardStats.getBytesBehindCount()); + assertEquals(0, segmentReplicationShardStats.getCurrentReplicationLagMillis()); + assertEquals(0, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis()); + + verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testNewResponseWhenAllReplicasReturnResponseCombinesTheResults() { + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + List shardFailures = new ArrayList<>(); + String[] shards = { "0", "1" }; + request.shards(shards); + + int totalShards = 6; + int successfulShards = 6; + int failedShard = 0; + String allocIdOne = "allocIdOne"; + String allocIdTwo = "allocIdTwo"; + String allocIdThree = "allocIdThree"; + String allocIdFour = "allocIdFour"; + String allocIdFive = "allocIdFive"; + String allocIdSix = "allocIdSix"; + + ShardId shardId0 = mock(ShardId.class); + ShardRouting primary0 = mock(ShardRouting.class); + ShardRouting replica0 = mock(ShardRouting.class); + ShardRouting searchReplica0 = mock(ShardRouting.class); + + ShardId shardId1 = mock(ShardId.class); + ShardRouting primary1 = mock(ShardRouting.class); + ShardRouting replica1 = mock(ShardRouting.class); + 
ShardRouting searchReplica1 = mock(ShardRouting.class); + + when(shardId0.getId()).thenReturn(0); + when(shardId0.getIndexName()).thenReturn("test-index-1"); + when(primary0.shardId()).thenReturn(shardId0); + when(replica0.shardId()).thenReturn(shardId0); + when(searchReplica0.shardId()).thenReturn(shardId0); + + when(shardId1.getId()).thenReturn(1); + when(shardId1.getIndexName()).thenReturn("test-index-1"); + when(primary1.shardId()).thenReturn(shardId1); + when(replica1.shardId()).thenReturn(shardId1); + when(searchReplica1.shardId()).thenReturn(shardId1); + + AllocationId allocationIdOne = mock(AllocationId.class); + AllocationId allocationIdTwo = mock(AllocationId.class); + AllocationId allocationIdThree = mock(AllocationId.class); + AllocationId allocationIdFour = mock(AllocationId.class); + AllocationId allocationIdFive = mock(AllocationId.class); + AllocationId allocationIdSix = mock(AllocationId.class); + + when(allocationIdOne.getId()).thenReturn(allocIdOne); + when(allocationIdTwo.getId()).thenReturn(allocIdTwo); + when(allocationIdThree.getId()).thenReturn(allocIdThree); + when(allocationIdFour.getId()).thenReturn(allocIdFour); + when(allocationIdFive.getId()).thenReturn(allocIdFive); + when(allocationIdSix.getId()).thenReturn(allocIdSix); + when(primary0.allocationId()).thenReturn(allocationIdOne); + when(replica0.allocationId()).thenReturn(allocationIdTwo); + when(searchReplica0.allocationId()).thenReturn(allocationIdThree); + when(primary1.allocationId()).thenReturn(allocationIdFour); + when(replica1.allocationId()).thenReturn(allocationIdFive); + when(searchReplica1.allocationId()).thenReturn(allocationIdSix); + + when(primary0.isSearchOnly()).thenReturn(false); + when(replica0.isSearchOnly()).thenReturn(false); + when(searchReplica0.isSearchOnly()).thenReturn(true); + when(primary1.isSearchOnly()).thenReturn(false); + when(replica1.isSearchOnly()).thenReturn(false); + when(searchReplica1.isSearchOnly()).thenReturn(true); + + Set segmentReplicationShardStats0 = new HashSet<>(); + SegmentReplicationShardStats segmentReplicationShardStatsOfReplica0 = new SegmentReplicationShardStats(allocIdTwo, 0, 0, 0, 0, 0); + segmentReplicationShardStats0.add(segmentReplicationShardStatsOfReplica0); + + Set segmentReplicationShardStats1 = new HashSet<>(); + SegmentReplicationShardStats segmentReplicationShardStatsOfReplica1 = new SegmentReplicationShardStats(allocIdFive, 0, 0, 0, 0, 0); + segmentReplicationShardStats1.add(segmentReplicationShardStatsOfReplica1); + + SegmentReplicationPerGroupStats segmentReplicationPerGroupStats0 = new SegmentReplicationPerGroupStats( + shardId0, + segmentReplicationShardStats0, + 0 + ); + + SegmentReplicationPerGroupStats segmentReplicationPerGroupStats1 = new SegmentReplicationPerGroupStats( + shardId1, + segmentReplicationShardStats1, + 0 + ); + + SegmentReplicationState segmentReplicationState0 = mock(SegmentReplicationState.class); + SegmentReplicationState searchReplicaSegmentReplicationState0 = mock(SegmentReplicationState.class); + SegmentReplicationState segmentReplicationState1 = mock(SegmentReplicationState.class); + SegmentReplicationState searchReplicaSegmentReplicationState1 = mock(SegmentReplicationState.class); + + when(segmentReplicationState0.getShardRouting()).thenReturn(replica0); + when(searchReplicaSegmentReplicationState0.getShardRouting()).thenReturn(searchReplica0); + when(segmentReplicationState1.getShardRouting()).thenReturn(replica1); + when(searchReplicaSegmentReplicationState1.getShardRouting()).thenReturn(searchReplica1); + + 
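+        // The responses below interleave per-group stats (reported by primaries) with per-shard
+        // replication states (reported by replicas and search replicas); newResponse must group
+        // them back together by index name and shard id.
+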
List responses = List.of( + new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats0), + new SegmentReplicationShardStatsResponse(segmentReplicationState0), + new SegmentReplicationShardStatsResponse(searchReplicaSegmentReplicationState0), + new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats1), + new SegmentReplicationShardStatsResponse(segmentReplicationState1), + new SegmentReplicationShardStatsResponse(searchReplicaSegmentReplicationState1) + ); + + SegmentReplicationStatsResponse response = action.newResponse( + request, + totalShards, + successfulShards, + failedShard, + responses, + shardFailures, + ClusterState.EMPTY_STATE + ); + + List responseStats = response.getReplicationStats().get("test-index-1"); + SegmentReplicationPerGroupStats primStats0 = responseStats.get(0); + Set replicaStats0 = primStats0.getReplicaStats(); + assertEquals(2, replicaStats0.size()); + for (SegmentReplicationShardStats replicaStat : replicaStats0) { + if (replicaStat.getAllocationId().equals(allocIdTwo)) { + assertEquals(segmentReplicationState0, replicaStat.getCurrentReplicationState()); + } + + if (replicaStat.getAllocationId().equals(allocIdThree)) { + assertEquals(searchReplicaSegmentReplicationState0, replicaStat.getCurrentReplicationState()); + } + } + + SegmentReplicationPerGroupStats primStats1 = responseStats.get(1); + Set replicaStats1 = primStats1.getReplicaStats(); + assertEquals(2, replicaStats1.size()); + for (SegmentReplicationShardStats replicaStat : replicaStats1) { + if (replicaStat.getAllocationId().equals(allocIdFive)) { + assertEquals(segmentReplicationState1, replicaStat.getCurrentReplicationState()); + } + + if (replicaStat.getAllocationId().equals(allocIdSix)) { + assertEquals(searchReplicaSegmentReplicationState1, replicaStat.getCurrentReplicationState()); + } + } + } + + public void testNewResponseWhenShardsToFetchEmptyAndResponsesContainsNull() { + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + List shardFailures = new ArrayList<>(); + String[] shards = {}; + request.shards(shards); + + int totalShards = 3; + int successfulShards = 3; + int failedShard = 0; + String allocIdOne = "allocIdOne"; + String allocIdTwo = "allocIdTwo"; + ShardId shardIdOne = mock(ShardId.class); + ShardId shardIdTwo = mock(ShardId.class); + ShardId shardIdThree = mock(ShardId.class); + ShardRouting shardRoutingOne = mock(ShardRouting.class); + ShardRouting shardRoutingTwo = mock(ShardRouting.class); + ShardRouting shardRoutingThree = mock(ShardRouting.class); + when(shardIdOne.getId()).thenReturn(1); + when(shardIdTwo.getId()).thenReturn(2); + when(shardIdThree.getId()).thenReturn(3); + when(shardRoutingOne.shardId()).thenReturn(shardIdOne); + when(shardRoutingTwo.shardId()).thenReturn(shardIdTwo); + when(shardRoutingThree.shardId()).thenReturn(shardIdThree); + AllocationId allocationId = mock(AllocationId.class); + when(allocationId.getId()).thenReturn(allocIdOne); + when(shardRoutingTwo.allocationId()).thenReturn(allocationId); + when(shardIdOne.getIndexName()).thenReturn("test-index"); + + Set segmentReplicationShardStats = new HashSet<>(); + SegmentReplicationShardStats segmentReplicationShardStatsOfReplica = new SegmentReplicationShardStats(allocIdOne, 0, 0, 0, 0, 0); + segmentReplicationShardStats.add(segmentReplicationShardStatsOfReplica); + SegmentReplicationPerGroupStats segmentReplicationPerGroupStats = new SegmentReplicationPerGroupStats( + shardIdOne, + segmentReplicationShardStats, + 0 + ); + + SegmentReplicationState 
segmentReplicationState = mock(SegmentReplicationState.class); + SegmentReplicationShardStats segmentReplicationShardStatsFromSearchReplica = mock(SegmentReplicationShardStats.class); + when(segmentReplicationShardStatsFromSearchReplica.getAllocationId()).thenReturn("alloc2"); + when(segmentReplicationState.getShardRouting()).thenReturn(shardRoutingTwo); + + List responses = new ArrayList<>(); + responses.add(null); + responses.add(new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats)); + responses.add(new SegmentReplicationShardStatsResponse(segmentReplicationState)); + + SegmentReplicationStatsResponse response = action.newResponse( + request, + totalShards, + successfulShards, + failedShard, + responses, + shardFailures, + ClusterState.EMPTY_STATE + ); + + List responseStats = response.getReplicationStats().get("test-index"); + SegmentReplicationPerGroupStats primStats = responseStats.get(0); + Set segRpShardStatsSet = primStats.getReplicaStats(); + + for (SegmentReplicationShardStats segRpShardStats : segRpShardStatsSet) { + if (segRpShardStats.getAllocationId().equals(allocIdOne)) { + assertEquals(segmentReplicationState, segRpShardStats.getCurrentReplicationState()); + } + + if (segRpShardStats.getAllocationId().equals(allocIdTwo)) { + assertEquals(segmentReplicationShardStatsFromSearchReplica, segRpShardStats); + } + } + } + + public void testShardOperationWithSegRepDisabled() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + + when(shardRouting.shardId()).thenReturn(shardId); + when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService); + when(indexService.getShard(shardId.id())).thenReturn(indexShard); + when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepDisabled()); + + SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting); + + assertNull(response); + } + + public void testGlobalBlockCheck() { + ClusterBlock writeClusterBlock = new ClusterBlock( + 1, + "uuid", + "", + true, + true, + true, + RestStatus.OK, + EnumSet.of(ClusterBlockLevel.METADATA_WRITE) + ); + + ClusterBlock readClusterBlock = new ClusterBlock( + 1, + "uuid", + "", + true, + true, + true, + RestStatus.OK, + EnumSet.of(ClusterBlockLevel.METADATA_READ) + ); + + ClusterBlocks.Builder builder = ClusterBlocks.builder(); + builder.addGlobalBlock(writeClusterBlock); + ClusterState metadataWriteBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build(); + assertNull(action.checkGlobalBlock(metadataWriteBlockedState, new SegmentReplicationStatsRequest())); + + builder = ClusterBlocks.builder(); + builder.addGlobalBlock(readClusterBlock); + ClusterState metadataReadBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build(); + assertNotNull(action.checkGlobalBlock(metadataReadBlockedState, new SegmentReplicationStatsRequest())); + } + + public void testIndexBlockCheck() { + ClusterBlock writeClusterBlock = new ClusterBlock( + 1, + "uuid", + "", + true, + true, + true, + RestStatus.OK, + EnumSet.of(ClusterBlockLevel.METADATA_WRITE) + ); + + ClusterBlock readClusterBlock = new ClusterBlock( + 1, + "uuid", + "", + true, + true, + true, + RestStatus.OK, + EnumSet.of(ClusterBlockLevel.METADATA_READ) + ); + + String indexName = "test"; + ClusterBlocks.Builder builder = ClusterBlocks.builder(); + 
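+        // a metadata WRITE block must not reject the stats request, while a metadata READ block must
+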
builder.addIndexBlock(indexName, writeClusterBlock); + ClusterState metadataWriteBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build(); + assertNull(action.checkRequestBlock(metadataWriteBlockedState, new SegmentReplicationStatsRequest(), new String[] { indexName })); + + builder = ClusterBlocks.builder(); + builder.addIndexBlock(indexName, readClusterBlock); + ClusterState metadataReadBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build(); + assertNotNull(action.checkRequestBlock(metadataReadBlockedState, new SegmentReplicationStatsRequest(), new String[] { indexName })); + } + + private IndexSettings createIndexSettingsWithSegRepEnabled() { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + return new IndexSettings(IndexMetadata.builder("test").settings(settings).build(), settings); + } + + private IndexSettings createIndexSettingsWithSegRepDisabled() { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + return new IndexSettings(IndexMetadata.builder("test").settings(settings).build(), settings); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index f6fb203bfe1a9..9590e5615d451 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -55,6 +55,7 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -1378,6 +1379,72 @@ public void testJoinRemoteStoreClusterWithRemotePublicationNodeInMixedMode() { JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); } + public void testUpdatesClusterStateWithRepositoryMetadataNotInSync() throws Exception { + Map newNodeAttributes = new HashMap<>(); + newNodeAttributes.putAll(remoteStateNodeAttributes(CLUSTER_STATE_REPO)); + newNodeAttributes.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); + + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + RepositoriesService repositoriesService = mock(RepositoriesService.class); + when(repositoriesService.repository(any())).thenThrow(RepositoryMissingException.class); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, null); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, 
+ allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + newNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final RepositoryMetadata clusterStateRepo = buildRepositoryMetadata(clusterManagerNode, CLUSTER_STATE_REPO); + final RepositoryMetadata routingTableRepo = buildRepositoryMetadata(clusterManagerNode, ROUTING_TABLE_REPO); + List repositoriesMetadata = new ArrayList<>() { + { + add(clusterStateRepo); + add(routingTableRepo); + } + }; + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata))) + .build(); + + final DiscoveryNode joiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + newNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(joiningNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validatePublicationRepositoryMetadata(result.resultingState, clusterManagerNode); + + } + private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) throws Exception { diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index 6a03a1f79bcde..a7f18aabf8436 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -194,7 +194,7 @@ public AllocationService createRemoteCapableAllocationService() { } public AllocationService createRemoteCapableAllocationService(String excludeNodes) { - Settings settings = Settings.builder().put("cluster.routing.allocation.exclude.node_id", excludeNodes).build(); + Settings settings = Settings.builder().put("cluster.routing.allocation.exclude._id", excludeNodes).build(); return new MockAllocationService( randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()), new TestGatewayAllocator(), diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java index e1c0a7eff1f6e..e55a9de160114 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java @@ -25,25 +25,51 @@ public class RemoteShardsRebalanceShardsTests extends RemoteShardsBalancerBaseTe * Post rebalance primaries should be balanced across all the nodes. 
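+ * The test first excludes half of the remote-capable nodes to force a skewed allocation, then removes
+ * the exclusion and verifies that primaries rebalance evenly across the pool.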
*/ public void testShardAllocationAndRebalance() { - int localOnlyNodes = 20; - int remoteCapableNodes = 40; - int localIndices = 40; - int remoteIndices = 80; + final int localOnlyNodes = 20; + final int remoteCapableNodes = 40; + final int halfRemoteCapableNodes = remoteCapableNodes / 2; + final int localIndices = 40; + final int remoteIndices = 80; ClusterState clusterState = createInitialCluster(localOnlyNodes, remoteCapableNodes, localIndices, remoteIndices); - AllocationService service = this.createRemoteCapableAllocationService(); + final StringBuilder excludeNodes = new StringBuilder(); + for (int i = 0; i < halfRemoteCapableNodes; i++) { + excludeNodes.append(getNodeId(i, true)); + if (i != (remoteCapableNodes / 2 - 1)) { + excludeNodes.append(", "); + } + } + AllocationService service = this.createRemoteCapableAllocationService(excludeNodes.toString()); clusterState = allocateShardsAndBalance(clusterState, service); RoutingNodes routingNodes = clusterState.getRoutingNodes(); RoutingAllocation allocation = getRoutingAllocation(clusterState, routingNodes); - final Map nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true); - final Map nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false); + Map nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true); + Map nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false); int avgPrimariesPerNode = getTotalShardCountAcrossNodes(nodePrimariesCounter) / remoteCapableNodes; - // Primary and replica are balanced post first reroute + // Primary and replica are balanced after first allocating unassigned + for (RoutingNode node : routingNodes) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getNodePool(node))) { + if (Integer.parseInt(node.nodeId().split("-")[4]) < halfRemoteCapableNodes) { + assertEquals(0, (int) nodePrimariesCounter.getOrDefault(node.nodeId(), 0)); + } else { + assertEquals(avgPrimariesPerNode * 2, (int) nodePrimariesCounter.get(node.nodeId())); + } + assertTrue(nodeReplicaCounter.getOrDefault(node.nodeId(), 0) >= 0); + } + } + + // Remove exclude constraint and rebalance + service = this.createRemoteCapableAllocationService(); + clusterState = allocateShardsAndBalance(clusterState, service); + routingNodes = clusterState.getRoutingNodes(); + allocation = getRoutingAllocation(clusterState, routingNodes); + nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true); + nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false); for (RoutingNode node : routingNodes) { if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getNodePool(node))) { - assertInRange(nodePrimariesCounter.get(node.nodeId()), avgPrimariesPerNode, remoteCapableNodes - 1); - assertTrue(nodeReplicaCounter.get(node.nodeId()) >= 0); + assertEquals(avgPrimariesPerNode, (int) nodePrimariesCounter.get(node.nodeId())); + assertTrue(nodeReplicaCounter.getOrDefault(node.nodeId(), 0) >= 0); } } } diff --git a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java index 98a79f3ca38dc..cb691f2177f6d 100644 --- a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java @@ -260,4 +260,21 @@ public void testRoundYear() { long startOf1996 = 
Year.of(1996).atDay(1).atStartOfDay().toInstant(ZoneOffset.UTC).toEpochMilli(); assertThat(DateUtils.roundYear(endOf1996), is(startOf1996)); } + + public void testClampToMillisRange() { + Instant normalInstant = Instant.now(); + assertEquals(normalInstant, DateUtils.clampToMillisRange(normalInstant)); + + Instant beforeMinInstant = DateUtils.INSTANT_LONG_MIN_VALUE.minusMillis(1); + assertEquals(DateUtils.INSTANT_LONG_MIN_VALUE, DateUtils.clampToMillisRange(beforeMinInstant)); + + Instant afterMaxInstant = DateUtils.INSTANT_LONG_MAX_VALUE.plusMillis(1); + assertEquals(DateUtils.INSTANT_LONG_MAX_VALUE, DateUtils.clampToMillisRange(afterMaxInstant)); + + assertEquals(DateUtils.INSTANT_LONG_MIN_VALUE, DateUtils.clampToMillisRange(DateUtils.INSTANT_LONG_MIN_VALUE)); + + assertEquals(DateUtils.INSTANT_LONG_MAX_VALUE, DateUtils.clampToMillisRange(DateUtils.INSTANT_LONG_MAX_VALUE)); + + assertThrows(NullPointerException.class, () -> DateUtils.clampToMillisRange(null)); + } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index be07aa0d05e9f..e3684178a18ea 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -2354,6 +2354,14 @@ public void testReadLatestClusterStateFromCache() throws IOException { .getState(clusterState.getClusterName().value(), expectedManifest); assertEquals(stateFromCache.getMetadata(), state.getMetadata()); + ClusterState stateFromCache2 = remoteClusterStateService.getClusterStateForManifest( + clusterState.getClusterName().value(), + expectedManifest, + "nodeA", + true + ); + assertEquals(stateFromCache2.getMetadata(), state.getMetadata()); + final ClusterMetadataManifest notExistMetadata = ClusterMetadataManifest.builder() .indices(List.of()) .clusterTerm(1L) diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java index c34f13041cb11..a4295289c3109 100644 --- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java @@ -59,6 +59,7 @@ import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.Transport; import org.junit.After; import org.junit.Before; @@ -70,8 +71,6 @@ import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; -import static org.opensearch.http.AbstractHttpServerTransport.resolvePublishPort; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class AbstractHttpServerTransportTests extends OpenSearchTestCase { @@ -101,47 +100,40 @@ public void testHttpPublishPort() throws Exception { int boundPort = randomIntBetween(9000, 9100); int otherBoundPort = randomIntBetween(9200, 9300); - int publishPort = resolvePublishPort( - Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080).build(), - randomAddresses(), - getByName("127.0.0.2") - ); + int publishPort = Transport.resolveTransportPublishPort(9080, randomAddresses(), getByName("127.0.0.2")); assertThat("Publish port should be explicitly set to 9080", publishPort, equalTo(9080)); - 
publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort)); - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)), getByName("127.0.0.3") ); assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort)); - final BindHttpException e = expectThrows( - BindHttpException.class, - () -> resolvePublishPort( - Settings.EMPTY, - asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), - getByName("127.0.0.3") - ) + publishPort = Transport.resolveTransportPublishPort( + -1, + asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), + getByName("127.0.0.3") ); - assertThat(e.getMessage(), containsString("Failed to auto-resolve http publish port")); + assertThat(publishPort, equalTo(-1)); - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort)); if (NetworkUtils.SUPPORTS_V6) { - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("::1") ); diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java index 402ed1dbee98a..5603fe4e30f9f 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; @@ -25,6 +26,7 @@ import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.network.InetAddresses; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; @@ -36,6 +38,8 @@ import org.opensearch.index.mapper.NumberFieldMapper; import java.io.IOException; +import java.net.InetAddress; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -65,12 +69,15 @@ public void testStarTreeKeywordDocValues() throws IOException { doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new 
SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); iw.addDocument(doc); doc = new Document(); doc.add(new StringField("_id", "2", Field.Store.NO)); doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); + iw.addDocument(doc); iw.flush(); iw.deleteDocuments(new Term("_id", "2")); @@ -80,12 +87,14 @@ public void testStarTreeKeywordDocValues() throws IOException { doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); iw.addDocument(doc); doc = new Document(); doc.add(new StringField("_id", "4", Field.Store.NO)); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); iw.addDocument(doc); iw.flush(); iw.deleteDocuments(new Term("_id", "4")); @@ -166,6 +175,9 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException { doc.add(new SortedSetDocValuesField("keyword2", new BytesRef(keyword2Value))); map.put(keyword1Value + "-" + keyword2Value, sndvValue + map.getOrDefault(keyword1Value + "-" + keyword2Value, 0)); + doc.add( + new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10." 
+ i)))) + ); iw.addDocument(doc); documents.put(id, doc); } @@ -221,9 +233,7 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException { SortedSetStarTreeValuesIterator k1 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( "keyword1" ); - SortedSetStarTreeValuesIterator k2 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( - "keyword2" - ); + SortedSetStarTreeValuesIterator k2 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator("ip1"); for (StarTreeDocument starDoc : actualStarTreeDocuments) { String keyword1 = null; if (starDoc.dimensions[0] != null) { @@ -232,7 +242,11 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException { String keyword2 = null; if (starDoc.dimensions[1] != null) { - keyword2 = k2.lookupOrd(starDoc.dimensions[1]).utf8ToString(); + BytesRef encoded = k2.lookupOrd(starDoc.dimensions[1]); + InetAddress address = InetAddressPoint.decode( + Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length) + ); + keyword2 = InetAddresses.toAddrString(address); } double metric = (double) starDoc.metrics[0]; if (map.containsKey(keyword1 + "-" + keyword2)) { @@ -254,21 +268,28 @@ public void testStarKeywordDocValuesWithMissingDocs() throws IOException { Document doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); + iw.addDocument(doc); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); iw.addDocument(doc); iw.forceMerge(1); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); + iw.addDocument(doc); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); + iw.addDocument(doc); iw.forceMerge(1); iw.close(); @@ -340,11 +361,14 @@ public void testStarKeywordDocValuesWithMissingDocsInSegment() throws IOExceptio doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); iw.addDocument(doc); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); + iw.addDocument(doc); 
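// Editor's note: the ip1 assertions in these tests hinge on a lossless round-trip —
// an address is indexed as the fixed-width sortable bytes produced by
// InetAddressPoint.encode and recovered from an ordinal's BytesRef via
// InetAddressPoint.decode. A small sketch using only APIs this test already imports:
BytesRef encodedIp = new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10")));
InetAddress roundTripped = InetAddressPoint.decode(
    Arrays.copyOfRange(encodedIp.bytes, encodedIp.offset, encodedIp.offset + encodedIp.length)
);
assert "10.10.10.10".equals(InetAddresses.toAddrString(roundTripped)); // encode/decode preserves the address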
iw.forceMerge(1); iw.close(); @@ -538,7 +562,7 @@ protected XContentBuilder getMapping() throws IOException { b.field("name", "keyword1"); b.endObject(); b.startObject(); - b.field("name", "keyword2"); + b.field("name", "ip1"); b.endObject(); b.endArray(); b.startArray("metrics"); @@ -566,6 +590,9 @@ protected XContentBuilder getMapping() throws IOException { b.startObject("keyword2"); b.field("type", "keyword"); b.endObject(); + b.startObject("ip1"); + b.field("type", "ip"); + b.endObject(); b.endObject(); }); } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java index 440268f1f803c..70cc20fe4a9f6 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java @@ -20,10 +20,11 @@ import org.opensearch.index.codec.composite.LuceneDocValuesConsumerFactory; import org.opensearch.index.codec.composite.composite912.Composite912DocValuesFormat; import org.opensearch.index.compositeindex.datacube.Dimension; -import org.opensearch.index.compositeindex.datacube.KeywordDimension; +import org.opensearch.index.compositeindex.datacube.IpDimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; @@ -426,7 +427,7 @@ public void testFlushFlowForKeywords() throws IOException { ); List metricsWithField = List.of(0, 1, 2, 3, 4, 5); - compositeField = getStarTreeFieldWithKeywordField(); + compositeField = getStarTreeFieldWithKeywordField(random().nextBoolean()); SortedSetStarTreeValuesIterator d1sndv = new SortedSetStarTreeValuesIterator(getSortedSetMock(dimList, docsWithField)); SortedSetStarTreeValuesIterator d2sndv = new SortedSetStarTreeValuesIterator(getSortedSetMock(dimList2, docsWithField2)); SortedNumericStarTreeValuesIterator m1sndv = new SortedNumericStarTreeValuesIterator( @@ -531,9 +532,9 @@ private StarTreeField getStarTreeFieldWithMultipleMetrics() { return new StarTreeField("sf", dims, metrics, c); } - private StarTreeField getStarTreeFieldWithKeywordField() { - Dimension d1 = new KeywordDimension("field1"); - Dimension d2 = new KeywordDimension("field3"); + private StarTreeField getStarTreeFieldWithKeywordField(boolean isIp) { + Dimension d1 = isIp ? new IpDimension("field1") : new OrdinalDimension("field1"); + Dimension d2 = isIp ? 
new IpDimension("field3") : new OrdinalDimension("field3"); Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); Metric m2 = new Metric("field2", List.of(MetricStat.VALUE_COUNT)); Metric m3 = new Metric("field2", List.of(MetricStat.AVG)); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java index be16961e781db..74ecff04076b1 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java @@ -1831,7 +1831,7 @@ public void testMergeFlowWithKeywords() throws IOException { List metricsList2 = List.of(0L, 1L, 2L, 3L, 4L); List metricsWithField2 = List.of(0, 1, 2, 3, 4); - compositeField = getStarTreeFieldWithKeywords(); + compositeField = getStarTreeFieldWithKeywords(random().nextBoolean()); StarTreeValues starTreeValues = getStarTreeValuesWithKeywords( getSortedSetMock(dimList, docsWithField), getSortedSetMock(dimList2, docsWithField2), diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java index 9c9beaea4f52c..cca987b6f9b16 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java @@ -32,10 +32,11 @@ import org.opensearch.index.compositeindex.datacube.DataCubeDateTimeUnit; import org.opensearch.index.compositeindex.datacube.DateDimension; import org.opensearch.index.compositeindex.datacube.Dimension; -import org.opensearch.index.compositeindex.datacube.KeywordDimension; +import org.opensearch.index.compositeindex.datacube.IpDimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; @@ -352,9 +353,9 @@ protected StarTreeMetadata getStarTreeMetadata( ); } - protected StarTreeField getStarTreeFieldWithKeywords() { - Dimension d1 = new KeywordDimension("field1"); - Dimension d2 = new KeywordDimension("field3"); + protected StarTreeField getStarTreeFieldWithKeywords(boolean ip) { + Dimension d1 = ip ? new IpDimension("field1") : new OrdinalDimension("field1"); + Dimension d2 = ip ? 
new IpDimension("field3") : new OrdinalDimension("field3"); Metric m1 = new Metric("field2", List.of(MetricStat.VALUE_COUNT, MetricStat.SUM)); List dims = List.of(d1, d2); List metrics = List.of(m1); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 98bcaa3a1a46b..9032e2cdaed16 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -156,7 +156,6 @@ public void testIgnoreMalformedLegacy() throws IOException { "failed to parse date field [2016-03-99] with format [strict_date_optional_time||epoch_millis]" ); testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648"); - testIgnoreMalformedForValue("-522000000", "long overflow"); } public void testIgnoreMalformed() throws IOException { @@ -170,7 +169,6 @@ public void testIgnoreMalformed() throws IOException { "failed to parse date field [2016-03-99] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]" ); testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648"); - testIgnoreMalformedForValue("-522000000", "long overflow"); } private void testIgnoreMalformedForValue(String value, String expectedCause) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index 15b16f4610062..52091d571ee72 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -31,20 +31,32 @@ package org.opensearch.index.mapper; +import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -71,8 +83,12 @@ import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; import java.time.ZoneOffset; +import java.util.Arrays; import java.util.Collections; +import java.util.List; +import java.util.Locale; import static org.hamcrest.CoreMatchers.is; 
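// Editor's note: the overflow test added below exercises the exact edges of the
// epoch-millis range. For reference, its boundary strings are precisely the ISO
// renderings of the long extremes (a quick sanity check, assuming java.time semantics):
boolean minBoundary = java.time.Instant.ofEpochMilli(Long.MIN_VALUE).toString().equals("-292275055-05-16T16:47:04.192Z");
boolean maxBoundary = java.time.Instant.ofEpochMilli(Long.MAX_VALUE).toString().equals("+292278994-08-17T07:12:55.807Z");
// Dates beyond these bounds cannot be represented as a long of millis, which is why the
// "long overflow" ignore_malformed expectations were removed above: such values are now
// clamped rather than surfaced as arithmetic exceptions.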
import static org.apache.lucene.document.LongPoint.pack; @@ -490,4 +506,187 @@ public void testParseSourceValueNanos() throws IOException { MappedFieldType nullValueMapper = fieldType(Resolution.NANOSECONDS, "strict_date_time||epoch_millis", nullValueDate); assertEquals(Collections.singletonList(nullValueDate), fetchSourceValue(nullValueMapper, null)); } + + public void testDateResolutionForOverflow() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); + + DateFieldType ft = new DateFieldType( + "test_date", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||strict_date_optional_time"), + Resolution.MILLISECONDS, + null, + Collections.emptyMap() + ); + + List dates = Arrays.asList( + null, + "2020-01-01T00:00:00Z", + null, + "2021-01-01T00:00:00Z", + "+292278994-08-17T07:12:55.807Z", + null, + "-292275055-05-16T16:47:04.192Z" + ); + + int numNullDates = 0; + long minDateValue = Long.MAX_VALUE; + long maxDateValue = Long.MIN_VALUE; + + for (int i = 0; i < dates.size(); i++) { + ParseContext.Document doc = new ParseContext.Document(); + String dateStr = dates.get(i); + + if (dateStr != null) { + long timestamp = Resolution.MILLISECONDS.convert(DateFormatters.from(ft.dateTimeFormatter().parse(dateStr)).toInstant()); + doc.add(new LongPoint(ft.name(), timestamp)); + doc.add(new SortedNumericDocValuesField(ft.name(), timestamp)); + doc.add(new StoredField(ft.name(), timestamp)); + doc.add(new StoredField("id", i)); + minDateValue = Math.min(minDateValue, timestamp); + maxDateValue = Math.max(maxDateValue, timestamp); + } else { + numNullDates++; + doc.add(new StoredField("id", i)); + } + w.addDocument(doc); + } + + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = new IndexSearcher(reader); + + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + QueryShardContext context = new QueryShardContext( + 0, + new IndexSettings(IndexMetadata.builder("foo").settings(indexSettings).build(), indexSettings), + BigArrays.NON_RECYCLING_INSTANCE, + null, + null, + null, + null, + null, + xContentRegistry(), + writableRegistry(), + null, + null, + () -> nowInMillis, + null, + null, + () -> true, + null + ); + + Query rangeQuery = ft.rangeQuery( + "-292275055-05-16T16:47:04.192Z", + "+292278994-08-17T07:12:55.807Z", + true, + true, + null, + null, + null, + context + ); + + TopDocs topDocs = searcher.search(rangeQuery, dates.size()); + assertEquals("Number of non-null date documents", dates.size() - numNullDates, topDocs.totalHits.value); + + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + org.apache.lucene.document.Document doc = reader.document(scoreDoc.doc); + IndexableField dateField = doc.getField(ft.name()); + if (dateField != null) { + long dateValue = dateField.numericValue().longValue(); + assertTrue( + "Date value " + dateValue + " should be within valid range", + dateValue >= minDateValue && dateValue <= maxDateValue + ); + } + } + + DateFieldType ftWithNullValue = new DateFieldType( + "test_date", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||strict_date_optional_time"), + Resolution.MILLISECONDS, + "2020-01-01T00:00:00Z", + Collections.emptyMap() + ); + + Query nullValueQuery = ftWithNullValue.termQuery("2020-01-01T00:00:00Z", 
context); + topDocs = searcher.search(nullValueQuery, dates.size()); + assertEquals("Documents matching the 2020-01-01 date", 1, topDocs.totalHits.value); + + IOUtils.close(reader, w, dir); + } + + public void testDateFieldTypeWithNulls() throws IOException { + DateFieldType ft = new DateFieldType( + "domainAttributes.dueDate", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||date_optional_time"), + Resolution.MILLISECONDS, + null, + Collections.emptyMap() + ); + + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); + + int nullDocs = 3500; + int datedDocs = 50; + + for (int i = 0; i < nullDocs; i++) { + ParseContext.Document doc = new ParseContext.Document(); + doc.add(new StringField("domainAttributes.firmId", "12345678910111213", Field.Store.YES)); + w.addDocument(doc); + } + + for (int i = 1; i <= datedDocs; i++) { + ParseContext.Document doc = new ParseContext.Document(); + String dateStr = String.format(Locale.ROOT, "2022-03-%02dT15:40:58.324", (i % 30) + 1); + long timestamp = Resolution.MILLISECONDS.convert(DateFormatters.from(ft.dateTimeFormatter().parse(dateStr)).toInstant()); + doc.add(new StringField("domainAttributes.firmId", "12345678910111213", Field.Store.YES)); + doc.add(new LongPoint(ft.name(), timestamp)); + doc.add(new SortedNumericDocValuesField(ft.name(), timestamp)); + doc.add(new StoredField(ft.name(), timestamp)); + w.addDocument(doc); + } + + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = new IndexSearcher(reader); + + BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder(); + queryBuilder.add(new TermQuery(new Term("domainAttributes.firmId", "12345678910111213")), BooleanClause.Occur.MUST); + + Sort sort = new Sort(new SortField(ft.name(), SortField.Type.DOC, false)); + + for (int i = 0; i < 100; i++) { + TopDocs topDocs = searcher.search(queryBuilder.build(), nullDocs + datedDocs, sort); + assertEquals("Total hits should match total documents", nullDocs + datedDocs, topDocs.totalHits.value); + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + org.apache.lucene.document.Document doc = reader.document(scoreDoc.doc); + IndexableField dateField = doc.getField(ft.name()); + if (dateField != null) { + long dateValue = dateField.numericValue().longValue(); + Instant dateInstant = Instant.ofEpochMilli(dateValue); + assertTrue( + "Date should be in March 2022", + dateInstant.isAfter(Instant.parse("2022-03-01T00:00:00Z")) + && dateInstant.isBefore(Instant.parse("2022-04-01T00:00:00Z")) + ); + } + } + } + IOUtils.close(reader, w, dir); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java index afd9e994ce3ae..7e6aa00c87290 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java @@ -21,6 +21,7 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.DocValueFormat; import java.io.IOException; @@ -397,6 +398,27 @@ public void testDeduplicationValue() throws IOException { assertEquals(new BytesRef("field.labels=3"), fieldValueAndPaths[4].binaryValue()); } + public void testFetchDocValues() throws IOException { + MapperService mapperService = 
createMapperService(fieldMapping(b -> b.field("type", "flat_object"))); + { + // test valueWithPathField + MappedFieldType ft = mapperService.fieldType("field.name"); + DocValueFormat format = ft.docValueFormat(null, null); + String storedValue = "field.field.name=1234"; + + Object object = format.format(new BytesRef(storedValue)); + assertEquals("1234", object); + } + + { + // test valueField + MappedFieldType ft = mapperService.fieldType("field"); + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> ft.docValueFormat(null, null)); + assertEquals("Field [field] of type [flat_object] does not support doc_value in root field", throwable.getMessage()); + } + + } + @Override protected void registerParameters(ParameterChecker checker) throws IOException { // In the future we will want to make sure parameter updates are covered. diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java index 580f8cccc9af5..d9f0fd6657085 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java @@ -15,11 +15,13 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -28,6 +30,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.SourceFieldMatchQuery; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.search.MatchQuery; import org.junit.Before; @@ -391,7 +394,7 @@ public void testPhraseQuery() throws IOException { assertThat(q, is(expectedQuery)); Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext); - assertThat(q4, is(new TermQuery(new Term("field", "singleton")))); + assertThat(q4, is(new ConstantScoreQuery(new TermQuery(new Term("field", "singleton"))))); Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext); expectedQuery = new SourceFieldMatchQuery( @@ -447,4 +450,22 @@ public void testPhraseQuery() throws IOException { ); assertThat(q6, is(expectedQuery)); } + + public void testTermQuery() throws Exception { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", textFieldName); + b.field("analyzer", "my_stop_analyzer"); // "standard" will be replaced with MockSynonymAnalyzer + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + + Query q = new TermQueryBuilder("field", "foo").rewrite(queryShardContext).toQuery(queryShardContext); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), q); + + q = new TermQueryBuilder("field", "foo").caseInsensitive(true).rewrite(queryShardContext).toQuery(queryShardContext); + 
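// Editor's note: context for the ConstantScoreQuery wrapping asserted just below —
// match_only_text indexes documents without term frequencies or positions, so
// relevance scores carry no information and term queries (case-sensitive or not) are
// wrapped to return a constant score. Minimal sketch of the wrapping:
Query wrapped = new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))); // every hit scores the same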
assertEquals(new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "foo"))), q); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java index 51234fa04ddc2..0170cdde8b21c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java @@ -8,7 +8,11 @@ package org.opensearch.index.mapper; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.TermQuery; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; public class MatchOnlyTextFieldTypeTests extends TextFieldTypeTests { @@ -28,4 +32,18 @@ TextFieldMapper.TextFieldType createFieldType(boolean searchable) { ParametrizedFieldMapper.Parameter.metaParam().get() ); } + + @Override + public void testTermQuery() { + MappedFieldType ft = createFieldType(true); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), ft.termQuery("foo", null)); + assertEquals( + new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "fOo"))), + ft.termQueryCaseInsensitive("fOo", null) + ); + + MappedFieldType unsearchable = createFieldType(false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); + assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 8ec34b3eb660c..684704ad65b0a 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -111,7 +111,7 @@ public void testCompositeIndexWithArraysInCompositeField() throws IOException { () -> mapper.parse(source(b -> b.startArray("status").value(0).value(1).endArray())) ); assertEquals( - "object mapping for [_doc] with array for [status] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [_doc] with array for [status] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", ex.getMessage() ); ParsedDocument doc = mapper.parse(source(b -> b.startArray("size").value(0).value(1).endArray())); @@ -284,6 +284,33 @@ public void testValidStarTreeDateDims() throws IOException { } } + public void testValidStarTreeNestedFields() throws IOException { + MapperService mapperService = createMapperService(getMinMappingWithNestedField()); + Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedDimensionFields = Arrays.asList("@timestamp_minute", "@timestamp_half-hour"); + assertEquals(expectedDimensionFields, dateDim.getSubDimensionNames()); + List expectedTimeUnits 
= Arrays.asList( + new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR), + DataCubeDateTimeUnit.HALF_HOUR_OF_DAY + ); + for (int i = 0; i < expectedTimeUnits.size(); i++) { + assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName()); + } + assertEquals("nested.status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("nested.status", starTreeFieldType.getMetrics().get(0).getField()); + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); + } + } + public void testInValidStarTreeMinDims() throws IOException { MapperParsingException ex = expectThrows( MapperParsingException.class, @@ -1047,6 +1074,56 @@ private XContentBuilder getMinMappingWith2StarTrees() throws IOException { }); } + private XContentBuilder getMinMappingWithNestedField() throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.endObject(); + b.startObject(); + b.field("name", "nested.status"); + b.endObject(); + b.endArray(); + + b.startArray("metrics"); + b.startObject(); + b.field("name", "nested.status"); + b.endObject(); + b.startObject(); + b.field("name", "metric_field"); + b.endObject(); + b.endArray(); + + b.endObject(); + b.endObject(); + + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("nested"); + b.startObject("properties"); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("metric_field"); + b.field("type", "integer"); + b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); + b.endObject(); + }); + } + private XContentBuilder getInvalidMapping( boolean singleDim, boolean invalidSkipDims, @@ -1085,6 +1162,9 @@ private XContentBuilder getInvalidMapping( b.startObject(); b.field("name", "keyword1"); b.endObject(); + b.startObject(); + b.field("name", "ip1"); + b.endObject(); } b.endArray(); b.startArray("metrics"); @@ -1117,7 +1197,7 @@ private XContentBuilder getInvalidMapping( if (!invalidDimType) { b.field("type", "integer"); } else { - b.field("type", "ip"); + b.field("type", "wildcard"); } b.endObject(); b.startObject("metric_field"); @@ -1130,6 +1210,9 @@ private XContentBuilder getInvalidMapping( b.startObject("keyword1"); b.field("type", "keyword"); b.endObject(); + b.startObject("ip1"); + b.field("type", "ip"); + b.endObject(); b.endObject(); }); } diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java index 12c7dc870c104..76294d85c64d4 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java @@ -44,6 +44,7 @@ import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; +import 
java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.ArrayList; @@ -55,6 +56,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class PluginInfoTests extends OpenSearchTestCase { @@ -281,6 +283,30 @@ public void testReadFromPropertiesJvmMissingClassname() throws Exception { assertThat(e.getMessage(), containsString("property [classname] is missing")); } + public void testExtendedPluginsSingleOptionalExtension() throws IOException { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin", + "extended.plugins", + "foo;optional=true" + ); + PluginInfo info = PluginInfo.readFromProperties(pluginDir); + assertThat(info.getExtendedPlugins(), contains("foo")); + assertThat(info.isExtendedPluginOptional("foo"), is(true)); + } + public void testExtendedPluginsSingleExtension() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); PluginTestUtil.writePluginProperties( @@ -302,6 +328,7 @@ public void testExtendedPluginsSingleExtension() throws Exception { ); PluginInfo info = PluginInfo.readFromProperties(pluginDir); assertThat(info.getExtendedPlugins(), contains("foo")); + assertThat(info.isExtendedPluginOptional("foo"), is(false)); } public void testExtendedPluginsMultipleExtensions() throws Exception { diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index bd9ee33856f14..f5702fa1a7ade 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -361,7 +361,7 @@ public void testSortBundlesNoDeps() throws Exception { assertThat(sortedBundles, Matchers.contains(bundle1, bundle2, bundle3)); } - public void testSortBundlesMissingDep() throws Exception { + public void testSortBundlesMissingRequiredDep() throws Exception { Path pluginDir = createTempDir(); PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", "MyPlugin", Collections.singletonList("dne"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); @@ -372,6 +372,33 @@ public void testSortBundlesMissingDep() throws Exception { assertEquals("Missing plugin [dne], dependency of [foo]", e.getMessage()); } + public void testSortBundlesMissingOptionalDep() throws Exception { + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(PluginsService.class))) { + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "[.test] warning", + "org.opensearch.plugins.PluginsService", + Level.WARN, + "Missing plugin [dne], dependency of [foo]" + ) + ); + Path pluginDir = createTempDir(); + PluginInfo info = new PluginInfo( + "foo", + "desc", + "1.0", + Version.CURRENT, + "1.8", + "MyPlugin", + Collections.singletonList("dne;optional=true"), + false + ); + PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); + PluginsService.sortBundles(Collections.singleton(bundle)); + mockLogAppender.assertAllExpectationsMatched(); + } + } + public void 
testSortBundlesCommonDep() throws Exception { Path pluginDir = createTempDir(); Set bundles = new LinkedHashSet<>(); // control iteration order diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java index 12e83cbbadd5d..05f48eb9243af 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -28,18 +28,27 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; import org.opensearch.index.codec.composite.composite912.Composite912Codec; import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; @@ -49,14 +58,17 @@ import org.opensearch.search.aggregations.metrics.InternalSum; import org.opensearch.search.aggregations.metrics.InternalValueCount; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; import org.opensearch.search.aggregations.metrics.MinAggregationBuilder; import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Random; @@ -69,6 +81,8 @@ import static org.opensearch.search.aggregations.AggregationBuilders.min; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class MetricAggregatorTests extends AggregatorTestCase { @@ -267,6 +281,110 @@ public void testStarTreeDocValues() throws IOException { ); } + 
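// Editor's note: the mock-driven cases below probe when an aggregation can be served
// from the star tree. A hedged, illustrative reduction of the rule they exercise
// (the helper name is hypothetical; Metric#getField and Metric#getMetrics are used
// elsewhere in this change): the aggregated field must be materialized with the
// requested stat, the factory must be a metric aggregation, and there must be no
// sub-aggregations.
static boolean canUseStarTreeSketch(String field, MetricStat stat, List<Metric> supported, boolean hasSubAggs) {
    return !hasSubAggs && supported.stream().anyMatch(m -> m.getField().equals(field) && m.getMetrics().contains(stat));
}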
CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + + QueryShardContext queryShardContext = queryShardContextMock( + indexSearcher, + mapperServiceMock(), + createIndexSettings(), + circuitBreakerService, + new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService).withCircuitBreaking() + ); + + MetricAggregatorFactory aggregatorFactory = mock(MetricAggregatorFactory.class); + when(aggregatorFactory.getSubFactories()).thenReturn(AggregatorFactories.EMPTY); + when(aggregatorFactory.getField()).thenReturn(FIELD_NAME); + when(aggregatorFactory.getMetricStat()).thenReturn(MetricStat.SUM); + + // Case when field and metric type in aggregation are fully supported by star tree. + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + true + ); + + // Case when the field is not supported by star tree + SumAggregationBuilder invalidFieldSumAggBuilder = sum("_name").field("hello"); + testCase( + indexSearcher, + query, + queryBuilder, + invalidFieldSumAggBuilder, + starTree, + supportedDimensions, + Collections.emptyList(), + verifyAggregation(InternalSum::getValue), + invalidFieldSumAggBuilder.build(queryShardContext, null), + false // Invalid fields will return null StarTreeQueryContext which will not cause early termination by leaf collector + ); + + // Case when metric type in aggregation is not supported by star tree but the field is supported. + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + // Case when field is not present in supported metrics + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { mock(MetricAggregatorFactory.class) }); + when(aggregatorFactory.getSubFactories()).thenReturn(aggregatorFactories); + + // Case when sub aggregations are present + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + // Case when aggregation factory is not metric aggregation + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + mock(ValuesSourceAggregatorFactory.class), + false + ); + ir.close(); directory.close(); } @@ -287,6 +405,21 @@ private void testC CompositeIndexFieldInfo starTree, List supportedDimensions, BiConsumer verify + ) throws IOException { + testCase(searcher, query, queryBuilder, aggBuilder, starTree, supportedDimensions, Collections.emptyList(), verify, null, true); + 
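// Editor's note: assertCollectorEarlyTermination (threaded through the new overload
// below) encodes the key star-tree invariant — when the tree can answer the query, the
// per-document leaf collector must never run. Sketch of the assertion it gates,
// mirroring the AggregatorTestCase change later in this diff:
if (compositeIndexFieldInfo != null && assertCollectorEarlyTermination) {
    assertEquals(0, countingAggregator.collectCounter.get()); // served from pre-aggregated star-tree values
}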
} + + private void testCase( + IndexSearcher searcher, + Query query, + QueryBuilder queryBuilder, + T aggBuilder, + CompositeIndexFieldInfo starTree, + List supportedDimensions, + List supportedMetrics, + BiConsumer verify, + AggregatorFactory aggregatorFactory, + boolean assertCollectorEarlyTermination ) throws IOException { V starTreeAggregation = searchAndReduceStarTree( createIndexSettings(), @@ -296,8 +429,11 @@ private void testC aggBuilder, starTree, supportedDimensions, + supportedMetrics, DEFAULT_MAX_BUCKETS, false, + aggregatorFactory, + assertCollectorEarlyTermination, DEFAULT_MAPPED_FIELD ); V expectedAggregation = searchAndReduceStarTree( @@ -308,8 +444,11 @@ private void testC aggBuilder, null, null, + null, DEFAULT_MAX_BUCKETS, false, + aggregatorFactory, + assertCollectorEarlyTermination, DEFAULT_MAPPED_FIELD ); verify.accept(expectedAggregation, starTreeAggregation); diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java index b03cb5ac7bb9d..c1cb19b9576e4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -87,7 +87,8 @@ public void testStarTreeFilterWithDocsInSVDFieldButNoStarNode() throws IOExcepti testStarTreeFilter(10, false); } - private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + private Directory createStarTreeIndex(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension, List docs) + throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); conf.setCodec(getCodec(maxLeafDoc, skipStarNodeCreationForSDVDimension)); @@ -95,7 +96,6 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); int totalDocs = 100; - List docs = new ArrayList<>(); for (int i = 0; i < totalDocs; i++) { Document doc = new Document(); doc.add(new SortedNumericDocValuesField(SNDV, i)); @@ -110,6 +110,15 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS } iw.forceMerge(1); iw.close(); + return directory; + } + + private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + List docs = new ArrayList<>(); + + Directory directory = createStarTreeIndex(maxLeafDoc, skipStarNodeCreationForSDVDimension, docs); + + int totalDocs = docs.size(); DirectoryReader ir = DirectoryReader.open(directory); initValuesSourceRegistry(); diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index b4726bab50198..23c21648b1263 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -156,7 +156,6 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); - sizes.put(ThreadPool.Names.REMOTE_STATE_READ, n -> ThreadPool.boundedBy(4 * n, 4, 
32)); return sizes.get(threadPoolName).apply(numberOfProcessors); } diff --git a/server/src/test/java/org/opensearch/transport/PublishPortTests.java b/server/src/test/java/org/opensearch/transport/PublishPortTests.java index 6a41409f6f181..2e5a57c4cdd60 100644 --- a/server/src/test/java/org/opensearch/transport/PublishPortTests.java +++ b/server/src/test/java/org/opensearch/transport/PublishPortTests.java @@ -43,8 +43,6 @@ import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; -import static org.opensearch.transport.TcpTransport.resolvePublishPort; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class PublishPortTests extends OpenSearchTestCase { @@ -73,48 +71,44 @@ public void testPublishPort() throws Exception { } - int publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(settings, profile), + int publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(settings, profile).publishPort, randomAddresses(), getByName("127.0.0.2") ); assertThat("Publish port should be explicitly set", publishPort, equalTo(useProfile ? 9080 : 9081)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)), getByName("127.0.0.3") ); assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort)); - try { - resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), - asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), - getByName("127.0.0.3") - ); - fail("Expected BindTransportException as publish_port not specified and non-unique port of bound addresses"); - } catch (BindTransportException e) { - assertThat(e.getMessage(), containsString("Failed to auto-resolve publish port")); - } + int resPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, + asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), + getByName("127.0.0.3") + ); + assertThat("as publish_port not specified and non-unique port of bound addresses", resPort, equalTo(-1)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort)); if (NetworkUtils.SUPPORTS_V6) { - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("::1") 
); diff --git a/settings.gradle b/settings.gradle index 035fe69eda7e9..a24da40069b90 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.develocity" version "3.18.2" + id "com.gradle.develocity" version "3.19" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index f531a3c6ade5a..bb2b7ebafdf81 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -72,14 +72,14 @@ dependencies { api "org.eclipse.jetty:jetty-server:${versions.jetty}" api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}" api 'org.apache.zookeeper:zookeeper:3.9.3' - api "org.apache.commons:commons-text:1.12.0" + api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" - api "ch.qos.logback:logback-core:1.5.12" - api "ch.qos.logback:logback-classic:1.5.12" + api "ch.qos.logback:logback-core:1.5.16" + api "ch.qos.logback:logback-classic:1.5.15" api "org.jboss.xnio:xnio-nio:3.8.16.Final" - api 'org.jline:jline:3.27.1' + api 'org.jline:jline:3.28.0' api 'org.apache.commons:commons-configuration2:2.11.0' - api 'com.nimbusds:nimbus-jose-jwt:9.46' + api 'com.nimbusds:nimbus-jose-jwt:9.47' api ('org.apache.kerby:kerb-admin:2.1.0') { exclude group: "org.jboss.xnio" exclude group: "org.jline" diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index e1728c4476699..27142b298db52 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -93,6 +93,7 @@ import org.opensearch.index.cache.query.DisabledQueryCache; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -348,7 +349,9 @@ protected CountingAggregator createCountingAggregator( IndexSettings indexSettings, CompositeIndexFieldInfo starTree, List supportedDimensions, + List supportedMetrics, MultiBucketConsumer bucketConsumer, + AggregatorFactory aggregatorFactory, MappedFieldType... fieldTypes ) throws IOException { SearchContext searchContext; @@ -360,7 +363,9 @@ protected CountingAggregator createCountingAggregator( queryBuilder, starTree, supportedDimensions, + supportedMetrics, bucketConsumer, + aggregatorFactory, fieldTypes ); } else { @@ -389,7 +394,9 @@ protected SearchContext createSearchContextWithStarTreeContext( QueryBuilder queryBuilder, CompositeIndexFieldInfo starTree, List supportedDimensions, + List supportedMetrics, MultiBucketConsumer bucketConsumer, + AggregatorFactory aggregatorFactory, MappedFieldType... 
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index fa5fb736f518f..7b2c653e9bdb2 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -2322,10 +2322,24 @@ public List<String> startNodes(int numOfNodes, Settings settings) {
         return startNodes(Collections.nCopies(numOfNodes, settings).toArray(new Settings[0]));
     }
 
+    /**
+     * Starts multiple nodes with the given settings and returns their names. When
+     * {@code waitForNodeJoin} is true, does not block on cluster formation.
+     */
+    public List<String> startNodes(int numOfNodes, Settings settings, Boolean waitForNodeJoin) {
+        return startNodes(waitForNodeJoin, Collections.nCopies(numOfNodes, settings).toArray(new Settings[0]));
+    }
+
     /**
      * Starts multiple nodes with the given settings and returns their names
      */
     public synchronized List<String> startNodes(Settings... extraSettings) {
+        return startNodes(false, extraSettings);
+    }
+
+    /**
+     * Starts multiple nodes with the given settings and returns their names. When
+     * {@code waitForNodeJoin} is true, does not block on cluster formation.
+     */
+    public synchronized List<String> startNodes(Boolean waitForNodeJoin, Settings... extraSettings) {
         final int newClusterManagerCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isClusterManagerNode).count());
         final int defaultMinClusterManagerNodes;
         if (autoManageClusterManagerNodes) {
@@ -2377,7 +2391,7 @@ public synchronized List<String> startNodes(Settings... extraSettings) {
             nodes.add(nodeAndClient);
         }
         startAndPublishNodesAndClients(nodes);
-        if (autoManageClusterManagerNodes) {
+        if (autoManageClusterManagerNodes && !waitForNodeJoin) {
             validateClusterFormed();
         }
         return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList());
@@ -2422,6 +2436,10 @@ public List<String> startDataOnlyNodes(int numNodes, Settings settings) {
         return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build());
     }
 
+    public List<String> startDataOnlyNodes(int numNodes, Settings settings, Boolean ignoreNodeJoin) {
+        return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build(), ignoreNodeJoin);
+    }
+
     public List<String> startSearchOnlyNodes(int numNodes) {
         return startSearchOnlyNodes(numNodes, Settings.EMPTY);
     }
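The new `waitForNodeJoin`/`ignoreNodeJoin` flag lets a test start nodes without blocking on `validateClusterFormed()`, which is useful when the test wants to observe or interfere with the join itself. A sketch of plausible usage from an integration test; the explicit `ensureStableCluster` follow-up is an assumption about how a caller would re-synchronize afterwards:

```java
// Start two data-only nodes but skip the cluster-formed barrier.
List<String> nodeNames = internalCluster().startDataOnlyNodes(2, Settings.EMPTY, true);

// ... exercise behaviour that races with the node join ...

// Re-synchronize once the racy window has been covered.
ensureStableCluster(3); // e.g. 1 cluster-manager + 2 data nodes (illustrative count)
```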
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 1ee856d3092f0..1c26ea4ca2c91 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -214,6 +214,8 @@
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
+import reactor.util.annotation.NonNull;
+
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
 import static org.opensearch.common.unit.TimeValue.timeValueMillis;
@@ -2915,6 +2917,43 @@ protected static Settings buildRemoteStoreNodeAttributes(
         return settings.build();
     }
 
+    protected Settings buildRemotePublicationNodeAttributes(
+        @NonNull String remoteStateRepoName,
+        @NonNull String remoteStateRepoType,
+        @NonNull String routingTableRepoName,
+        @NonNull String routingTableRepoType
+    ) {
+        String remoteStateRepositoryTypeAttributeKey = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
+            remoteStateRepoName
+        );
+        String routingTableRepositoryTypeAttributeKey = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
+            routingTableRepoName
+        );
+        String remoteStateRepositorySettingsAttributeKeyPrefix = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            remoteStateRepoName
+        );
+        String routingTableRepositorySettingsAttributeKeyPrefix = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            routingTableRepoName
+        );
+
+        return Settings.builder()
+            .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, remoteStateRepoName)
+            .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName)
+            .put(remoteStateRepositoryTypeAttributeKey, remoteStateRepoType)
+            .put(routingTableRepositoryTypeAttributeKey, routingTableRepoType)
+            .put(remoteStateRepositorySettingsAttributeKeyPrefix + "location", randomRepoPath().toAbsolutePath())
+            .put(routingTableRepositorySettingsAttributeKeyPrefix + "location", randomRepoPath().toAbsolutePath())
+            .build();
+    }
+
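A sketch of how an integration test might consume the new helper; the repository names are placeholders, "fs" is the stock filesystem repository type, and the `startClusterManagerOnlyNode` call is just one plausible way to apply the resulting settings:

```java
Settings remotePublicationAttributes = buildRemotePublicationNodeAttributes(
    "remote-state-repo",   // remote cluster-state repository (hypothetical name)
    "fs",                  // its repository type
    "routing-table-repo",  // routing-table repository (hypothetical name)
    "fs"                   // its repository type
);
internalCluster().startClusterManagerOnlyNode(remotePublicationAttributes);
```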
     public static String resolvePath(IndexId indexId, String shardId) {
         PathType pathType = PathType.fromCode(indexId.getShardPathType());
         RemoteStorePathStrategy.SnapshotShardPathInput shardPathInput = new RemoteStorePathStrategy.SnapshotShardPathInput.Builder()