diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index b6acb886dc327..65fd9e7281ad1 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -1,47 +1,4 @@
 BWC_VERSION:
-  - "7.0.0"
-  - "7.0.1"
-  - "7.1.0"
-  - "7.1.1"
-  - "7.2.0"
-  - "7.2.1"
-  - "7.3.0"
-  - "7.3.1"
-  - "7.3.2"
-  - "7.4.0"
-  - "7.4.1"
-  - "7.4.2"
-  - "7.5.0"
-  - "7.5.1"
-  - "7.5.2"
-  - "7.6.0"
-  - "7.6.1"
-  - "7.6.2"
-  - "7.7.0"
-  - "7.7.1"
-  - "7.8.0"
-  - "7.8.1"
-  - "7.9.0"
-  - "7.9.1"
-  - "7.9.2"
-  - "7.9.3"
-  - "7.10.0"
-  - "7.10.1"
-  - "7.10.2"
-  - "1.0.0"
-  - "1.1.0"
-  - "1.2.0"
-  - "1.2.1"
-  - "1.2.2"
-  - "1.2.3"
-  - "1.2.4"
-  - "1.2.5"
-  - "1.3.0"
-  - "1.3.1"
-  - "1.3.2"
-  - "1.3.3"
-  - "1.3.4"
-  - "1.3.5"
   - "2.0.0"
   - "2.0.1"
   - "2.0.2"
@@ -53,4 +10,5 @@ BWC_VERSION:
   - "2.3.0"
   - "2.3.1"
   - "2.4.0"
+  - "2.4.1"
   - "2.5.0"
diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml
index a54822c1311c1..5435da8419f5e 100644
--- a/.github/workflows/gradle-check.yml
+++ b/.github/workflows/gradle-check.yml
@@ -81,9 +81,16 @@ jobs:
         if: ${{ github.event_name == 'pull_request_target' && env.result != 'SUCCESS' }}
         run: |
           TEST_FAILURES=`curl -s "${{ env.workflow_url }}/testReport/api/json?tree=suites\[cases\[status,className,name\]\]" | jq -r '.. | objects | select(.status=="FAILED",.status=="REGRESSION") | (.className + "." + .name)' | uniq -c | sort -n -r | head -n 10`
-          echo "test_failures<<EOF" >> $GITHUB_ENV
-          echo "$TEST_FAILURES" >> $GITHUB_ENV
-          echo "EOF" >> $GITHUB_ENV
+          if [[ "$TEST_FAILURES" != "" ]]
+          then
+            echo "test_failures<<EOF" >> $GITHUB_ENV
+            echo "" >> $GITHUB_ENV
+            echo "* **TEST FAILURES:**" >> $GITHUB_ENV
+            echo '```' >> $GITHUB_ENV
+            echo "$TEST_FAILURES" >> $GITHUB_ENV
+            echo '```' >> $GITHUB_ENV
+            echo "EOF" >> $GITHUB_ENV
+          fi

       - name: Create Comment Flaky
         if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }}
@@ -92,16 +99,10 @@ jobs:
           issue-number: ${{ env.pr_number }}
           body: |
             ### Gradle Check (Jenkins) Run Completed with:
-            * **RESULT:** ${{ env.result }} :grey_exclamation:
-            * **FLAKY TEST FAILURES:**
-            The following tests failed but succeeded upon retry:
-            ```
-            ${{ env.test_failures }}
-            ```
+            * **RESULT:** ${{ env.result }} :grey_exclamation: ${{ env.test_failures }}
             * **URL:** ${{ env.workflow_url }}
             * **CommitID:** ${{ env.pr_from_sha }}
-            Please examine the workflow log, locate, and copy-paste the failure below, then iterate to green.
-            Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?
+            Please review all [flaky tests](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) that succeeded after retry and create an issue if one does not already exist to track the flaky failure.

       - name: Create Comment Failure
         if: ${{ github.event_name == 'pull_request_target' && failure() }}
@@ -110,12 +111,8 @@ jobs:
           issue-number: ${{ env.pr_number }}
           body: |
             ### Gradle Check (Jenkins) Run Completed with:
-            * **RESULT:** ${{ env.result }} :x:
-            * **FAILURES:**
-            ```
-            ${{ env.test_failures }}
-            ```
+            * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }}
             * **URL:** ${{ env.workflow_url }}
             * **CommitID:** ${{ env.pr_from_sha }}
-            Please examine the workflow log, locate, and copy-paste the failure below, then iterate to green.
+            Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green.
             Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?
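For context on the workflow change above: GitHub Actions treats a `name<<DELIMITER ... DELIMITER` block appended to the file at `$GITHUB_ENV` as a single multiline environment value for later steps. A minimal sketch of the same write, expressed in Java rather than the workflow's shell (the failure line and class name are illustrative only, not from this PR):

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public final class GithubEnvExample {
    public static void main(String[] args) throws Exception {
        String testFailures = "2 org.opensearch.SomeIT.testSomething"; // hypothetical jq output
        if (!testFailures.isEmpty()) { // mirrors the new `if [[ "$TEST_FAILURES" != "" ]]` guard
            // name<<EOF ... EOF is the documented multiline-value syntax for $GITHUB_ENV
            String entry = "test_failures<<EOF\n* **TEST FAILURES:**\n" + testFailures + "\nEOF\n";
            Files.writeString(Path.of(System.getenv("GITHUB_ENV")), entry, StandardOpenOption.CREATE, StandardOpenOption.APPEND);
        }
    }
}
```

The guard means the comment template's `${{ env.test_failures }}` expands to nothing when there were no failures, instead of rendering an empty code block.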
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9d75e9e9db577..c1de24d8fc376 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bumps `com.diffplug.spotless` from 6.10.0 to 6.11.0 ([#4547](https://github.com/opensearch-project/OpenSearch/pull/4547))
 - Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548))
 - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550))
+- Bumps `commons-compress` from 1.21 to 1.22
+- Bumps `jcodings` from 1.0.57 to 1.0.58 ([#5233](https://github.com/opensearch-project/OpenSearch/pull/5233))
+- Bumps `google-http-client-jackson2` from 1.35.0 to 1.42.3 ([#5234](https://github.com/opensearch-project/OpenSearch/pull/5234))
+- Bumps `maxmind-db` from 2.0.0 to 2.1.0 ([#5236](https://github.com/opensearch-project/OpenSearch/pull/5236))
+- Bumps `azure-core` from 1.33.0 to 1.34.0 ([#5235](https://github.com/opensearch-project/OpenSearch/pull/5235))
+- Bumps `azure-core-http-netty` from 1.12.4 to 1.12.7 ([#5235](https://github.com/opensearch-project/OpenSearch/pull/5235))
+- Bumps `spock-core` from 2.1-groovy-3.0 to 2.3-groovy-3.0 ([#5315](https://github.com/opensearch-project/OpenSearch/pull/5315))
+- Bumps `json-schema-validator` from 1.0.69 to 1.0.73 ([#5316](https://github.com/opensearch-project/OpenSearch/pull/5316))
+- Bumps `proto-google-common-protos` from 2.8.0 to 2.10.0 ([#5318](https://github.com/opensearch-project/OpenSearch/pull/5318))
+- Bumps `protobuf-java` from 3.21.7 to 3.21.9 ([#5319](https://github.com/opensearch-project/OpenSearch/pull/5319))
 - Update Apache Lucene to 9.5.0-snapshot-a4ef70f ([#4979](https://github.com/opensearch-project/OpenSearch/pull/4979))
 - Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973))
@@ -43,6 +53,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638))
 - Use ReplicationFailedException instead of OpensearchException in ReplicationTarget ([#4725](https://github.com/opensearch-project/OpenSearch/pull/4725))
 - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459))
+- Support remote translog transfer for request level durability ([#4480](https://github.com/opensearch-project/OpenSearch/pull/4480))

 ### Deprecated
@@ -64,7 +75,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Fixed
 - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827))
 - Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944))
-- Add jvm option to allow security manager ([#5194](https://github.com/opensearch-project/OpenSearch/pull/5194))
+- Reject bulk requests with invalid actions ([#5299](https://github.com/opensearch-project/OpenSearch/issues/5299))
+
 ### Security

 ## [Unreleased 2.x]
@@ -76,6 +88,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bumps `commons-compress` from 1.21 to 1.22 ([#5104](https://github.com/opensearch-project/OpenSearch/pull/5104))
 - Bump `opencensus-contrib-http-util` from 0.18.0 to 0.31.1 ([#3633](https://github.com/opensearch-project/OpenSearch/pull/3633))
 - Bump `geoip2` from 3.0.1 to 3.0.2 ([#5103](https://github.com/opensearch-project/OpenSearch/pull/5103))
+- Bump gradle-extra-configurations-plugin from 7.0.0 to 8.0.0 ([#4808](https://github.com/opensearch-project/OpenSearch/pull/4808))
 ### Changed
 ### Deprecated
 ### Removed
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 6f85088104021..adf69a533fcc9 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -105,7 +105,7 @@ dependencies {
   api 'commons-codec:commons-codec:1.15'
   api 'org.apache.commons:commons-compress:1.21'
   api 'org.apache.ant:ant:1.10.12'
-  api 'com.netflix.nebula:gradle-extra-configurations-plugin:7.0.0'
+  api 'com.netflix.nebula:gradle-extra-configurations-plugin:8.0.0'
   api 'com.netflix.nebula:nebula-publishing-plugin:4.6.0'
   api 'com.netflix.nebula:gradle-info-plugin:11.3.3'
   api 'org.apache.rat:apache-rat:0.13'
@@ -118,7 +118,7 @@ dependencies {
   api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2'
   api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}"
   api 'org.apache.maven:maven-model:3.6.2'
-  api 'com.networknt:json-schema-validator:1.0.69'
+  api 'com.networknt:json-schema-validator:1.0.73'
   api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}"

   testFixturesApi "junit:junit:${props.getProperty('junit')}"
@@ -127,7 +127,7 @@ dependencies {
   testFixturesApi gradleTestKit()
   testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.33.2'
   testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}"
-  integTestImplementation('org.spockframework:spock-core:2.1-groovy-3.0') {
+  integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') {
     exclude module: "groovy"
   }
 }
diff --git a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/package-info.java b/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/package-info.java
deleted file mode 100644
index 40c4b60f6deb0..0000000000000
--- a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/package-info.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to OpenSearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. OpenSearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-/**
- * Adding a sample package level javadoc to pass javadoc validation
- * on reaper package.
- * TODO - Need to add package description
- */
-package org.elasticsearch.gradle.reaper;
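The `json-schema-validator` bump in buildSrc/build.gradle above is the networknt library buildSrc uses for JSON schema validation. A minimal usage sketch, assuming the library's 1.0.x API; the schema and document below are made up for illustration:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.networknt.schema.JsonSchema;
import com.networknt.schema.JsonSchemaFactory;
import com.networknt.schema.SpecVersion;
import com.networknt.schema.ValidationMessage;

import java.util.Set;

public final class SchemaCheck {
    public static void main(String[] args) throws Exception {
        // Build a draft-7 validator from an inline schema requiring a "name" field
        JsonSchema schema = JsonSchemaFactory.getInstance(SpecVersion.VersionFlag.V7)
            .getSchema("{\"type\":\"object\",\"required\":[\"name\"]}");
        JsonNode document = new ObjectMapper().readTree("{\"age\": 42}");
        Set<ValidationMessage> errors = schema.validate(document);
        errors.forEach(e -> System.out.println(e.getMessage())); // reports the missing "name"
    }
}
```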
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java
index 3f65abcc25d17..cddd03ccc2019 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java
@@ -105,7 +105,7 @@ public class BwcVersions {
     private final Map<Integer, List<Version>> groupByMajor;
     private final Map<Version, UnreleasedVersionInfo> unreleased;

-    public class UnreleasedVersionInfo {
+    public static class UnreleasedVersionInfo {
         public final Version version;
         public final String branch;
         public final String gradleProjectPath;
@@ -149,13 +149,7 @@ public BwcVersions(SortedSet<String> allVersions, Version currentVersionPropert
         groupByMajor = allVersions.stream()
             // We only care about the last 2 majors when it comes to BWC.
-            // It might take us time to remove the older ones from versionLines, so we allow them to exist.
-            // Adjust the major number since OpenSearch 1.x is released after predecessor version 7.x
-            .filter(
-                version -> (version.getMajor() == 1 ? 7 : version.getMajor()) > (currentVersion.getMajor() == 1
-                    ? 7
-                    : currentVersion.getMajor()) - 2
-            )
+            .filter(version -> version.getMajor() > currentVersion.getMajor() - 2)
             .collect(Collectors.groupingBy(Version::getMajor, Collectors.toList()));

         assertCurrentVersionMatchesParsed(currentVersionProperty);
@@ -174,9 +168,7 @@ public BwcVersions(SortedSet<String> allVersions, Version currentVersionPropert

     private void assertNoOlderThanTwoMajors() {
         Set<Integer> majors = groupByMajor.keySet();
-        // until OpenSearch 3.0 we will need to carry three major support
-        // (1, 7, 6) && (2, 1, 7) since OpenSearch 1.0 === Legacy 7.x
-        int numSupportedMajors = (currentVersion.getMajor() < 3) ? 3 : 2;
+        int numSupportedMajors = 2;
         if (majors.size() != numSupportedMajors && currentVersion.getMinor() != 0 && currentVersion.getRevision() != 0) {
             throw new IllegalStateException("Expected exactly 2 majors in parsed versions but found: " + majors);
         }
@@ -207,7 +199,7 @@ public void forPreviousUnreleased(Consumer<UnreleasedVersionInfo> consumer) {
             .map(version -> new UnreleasedVersionInfo(version, getBranchFor(version), getGradleProjectPathFor(version)))
             .collect(Collectors.toList());

-        collect.forEach(uvi -> consumer.accept(uvi));
+        collect.forEach(consumer);
     }

     private String getGradleProjectPathFor(Version version) {
@@ -271,18 +263,9 @@ public List<Version> getUnreleased() {
         // The current version is being worked, is always unreleased
         unreleased.add(currentVersion);

-        // No unreleased versions for 1.0.0
-        // todo remove this hack
-        if (currentVersion.equals(Version.fromString("1.0.0"))) {
-            return unmodifiableList(unreleased);
-        }
-
         // the tip of the previous major is unreleased for sure, be it a minor or a bugfix
         if (currentVersion.getMajor() != 1) {
-            final Version latestOfPreviousMajor = getLatestVersionByKey(
-                this.groupByMajor,
-                currentVersion.getMajor() == 1 ? 7 : currentVersion.getMajor() - 1
-            );
+            final Version latestOfPreviousMajor = getLatestVersionByKey(this.groupByMajor, currentVersion.getMajor() - 1);
             unreleased.add(latestOfPreviousMajor);
             if (latestOfPreviousMajor.getRevision() == 0) {
                 // if the previous major is a x.y.0 release, then the tip of the minor before that (y-1) is also unreleased
@@ -311,7 +294,7 @@ public List<Version> getUnreleased() {
             }
         }

-        return unmodifiableList(unreleased.stream().sorted().distinct().collect(Collectors.toList()));
+        return unreleased.stream().sorted().distinct().collect(Collectors.toUnmodifiableList());
     }

     private Version getLatestInMinor(int major, int minor) {
@@ -342,7 +325,7 @@ private Map<Integer, List<Version>> getReleasedMajorGroupedByMinor() {

     public void compareToAuthoritative(List<Version> authoritativeReleasedVersions) {
         Set<Version> notReallyReleased = new HashSet<>(getReleased());
-        notReallyReleased.removeAll(authoritativeReleasedVersions);
+        authoritativeReleasedVersions.forEach(notReallyReleased::remove);
         if (notReallyReleased.isEmpty() == false) {
             throw new IllegalStateException(
                 "out-of-date released versions"
@@ -370,32 +353,21 @@ private List<Version> getReleased() {
             .stream()
             .flatMap(Collection::stream)
             .filter(each -> unreleased.contains(each) == false)
-            // this is to make sure we only consider OpenSearch versions
-            // TODO remove this filter once legacy ES versions are no longer supported
-            .filter(v -> v.onOrAfter("1.0.0"))
             .collect(Collectors.toList());
     }

     public List<Version> getIndexCompatible() {
         int currentMajor = currentVersion.getMajor();
         int prevMajor = getPreviousMajor(currentMajor);
-        List<Version> result = Stream.concat(groupByMajor.get(prevMajor).stream(), groupByMajor.get(currentMajor).stream())
+        return Stream.concat(groupByMajor.get(prevMajor).stream(), groupByMajor.get(currentMajor).stream())
             .filter(version -> version.equals(currentVersion) == false)
-            .collect(Collectors.toList());
-        if (currentMajor == 1) {
-            // add 6.x compatible for OpenSearch 1.0.0
-            return unmodifiableList(Stream.concat(groupByMajor.get(prevMajor - 1).stream(), result.stream()).collect(Collectors.toList()));
-        } else if (currentMajor == 2) {
-            // add 7.x compatible for OpenSearch 2.0.0
-            return unmodifiableList(Stream.concat(groupByMajor.get(7).stream(), result.stream()).collect(Collectors.toList()));
-        }
-        return unmodifiableList(result);
+            .collect(Collectors.toUnmodifiableList());
     }

     public List<Version> getWireCompatible() {
         List<Version> wireCompat = new ArrayList<>();
         int currentMajor = currentVersion.getMajor();
-        int lastMajor = currentMajor == 1 ? 6 : currentMajor == 2 ? 7 : currentMajor - 1;
+        int lastMajor = currentMajor - 1;
         List<Version> lastMajorList = groupByMajor.get(lastMajor);
         if (lastMajorList == null) {
             throw new IllegalStateException("Expected to find a list of versions for version: " + lastMajor);
@@ -405,20 +377,6 @@ public List<Version> getWireCompatible() {
             wireCompat.add(lastMajorList.get(i));
         }

-        // if current is OpenSearch 1.0.0 add all of the 7.x line:
-        if (currentMajor == 1) {
-            List<Version> previousMajor = groupByMajor.get(7);
-            for (Version v : previousMajor) {
-                wireCompat.add(v);
-            }
-        } else if (currentMajor == 2) {
-            // add all of the 1.x line:
-            List<Version> previousMajor = groupByMajor.get(1);
-            for (Version v : previousMajor) {
-                wireCompat.add(v);
-            }
-        }
-
         wireCompat.addAll(groupByMajor.get(currentMajor));
         wireCompat.remove(currentVersion);
         wireCompat.sort(Version::compareTo);
@@ -438,7 +396,7 @@ public List<Version> getUnreleasedWireCompatible() {
     }

     private int getPreviousMajor(int currentMajor) {
-        return currentMajor == 1 ? 7 : currentMajor - 1;
+        return currentMajor - 1;
     }

 }
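With the 1.x-to-legacy-7.x masking removed, the BWC filter above reduces to "keep the current and previous major". A self-contained sketch of that grouping logic, using a stand-in `Version` record rather than the buildSrc class:

```java
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public final class BwcFilterSketch {
    record Version(int major, int minor, int revision) {}

    public static void main(String[] args) {
        int currentMajor = 3;
        List<Version> all = List.of(
            new Version(1, 3, 5),  // dropped: older than currentMajor - 1
            new Version(2, 4, 1),  // kept: previous major
            new Version(3, 0, 0)   // kept: current major
        );
        // Same shape as the simplified filter: keep only the last two majors, grouped by major
        Map<Integer, List<Version>> groupByMajor = all.stream()
            .filter(v -> v.major() > currentMajor - 2)
            .collect(Collectors.groupingBy(Version::major, Collectors.toList()));
        System.out.println(groupByMajor.keySet()); // prints [2, 3]
    }
}
```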
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java
index eb3cd4d089417..87a565e6f4431 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java
@@ -51,8 +51,7 @@

 import java.util.Arrays;
 import java.util.Comparator;
-import java.util.List;
-import java.util.stream.Collectors;
+import java.util.Objects;

 /**
  * A plugin to manage getting and extracting distributions of OpenSearch.
@@ -71,12 +70,6 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
     private static final String SNAPSHOT_REPO_NAME = "opensearch-snapshots";
     public static final String DISTRO_EXTRACTED_CONFIG_PREFIX = "opensearch_distro_extracted_";

-    // for downloading Elasticsearch OSS distributions to run BWC
-    private static final String FAKE_IVY_GROUP_ES = "elasticsearch-distribution";
-    private static final String DOWNLOAD_REPO_NAME_ES = "elasticsearch-downloads";
-    private static final String SNAPSHOT_REPO_NAME_ES = "elasticsearch-snapshots";
-    private static final String FAKE_SNAPSHOT_IVY_GROUP_ES = "elasticsearch-distribution-snapshot";
-
     private static final String RELEASE_PATTERN_LAYOUT = "/core/opensearch/[revision]/[module]-min-[revision](-[classifier]).[ext]";
     private static final String SNAPSHOT_PATTERN_LAYOUT =
         "/snapshots/core/opensearch/[revision]/[module]-min-[revision](-[classifier])-latest.[ext]";
@@ -159,35 +152,20 @@ private DistributionDependency resolveDependencyNotation(Project p, OpenSearchDi
         return distributionsResolutionStrategiesContainer.stream()
             .sorted(Comparator.comparingInt(DistributionResolution::getPriority))
             .map(r -> r.getResolver().resolve(p, distribution))
-            .filter(d -> d != null)
+            .filter(Objects::nonNull)
             .findFirst()
             .orElseGet(() -> DistributionDependency.of(dependencyNotation(distribution)));
     }

     private static void addIvyRepo(Project project, String name, String url, String group, String... patternLayout) {
-        final List<IvyArtifactRepository> repos = Arrays.stream(patternLayout).map(pattern -> project.getRepositories().ivy(repo -> {
-            repo.setName(name);
-            repo.setUrl(url);
-            repo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
-            repo.patternLayout(layout -> layout.artifact(pattern));
-        })).collect(Collectors.toList());
-
         project.getRepositories().exclusiveContent(exclusiveContentRepository -> {
             exclusiveContentRepository.filter(config -> config.includeGroup(group));
-            exclusiveContentRepository.forRepositories(repos.toArray(new IvyArtifactRepository[0]));
-        });
-    }
-
-    private static void addIvyRepo2(Project project, String name, String url, String group) {
-        IvyArtifactRepository ivyRepo = project.getRepositories().ivy(repo -> {
-            repo.setName(name);
-            repo.setUrl(url);
-            repo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
-            repo.patternLayout(layout -> layout.artifact("/downloads/elasticsearch/elasticsearch-oss-[revision](-[classifier]).[ext]"));
-        });
-        project.getRepositories().exclusiveContent(exclusiveContentRepository -> {
-            exclusiveContentRepository.filter(config -> config.includeGroup(group));
-            exclusiveContentRepository.forRepositories(ivyRepo);
+            exclusiveContentRepository.forRepositories(Arrays.stream(patternLayout).map(pattern -> project.getRepositories().ivy(repo -> {
+                repo.setName(name);
+                repo.setUrl(url);
+                repo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
+                repo.patternLayout(layout -> layout.artifact(pattern));
+            })).toArray(IvyArtifactRepository[]::new));
         });
     }

@@ -211,9 +189,6 @@ private static void setupDownloadServiceRepo(Project project) {
         );
         addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://artifacts.opensearch.org", FAKE_SNAPSHOT_IVY_GROUP, SNAPSHOT_PATTERN_LAYOUT);
         }
-
-        addIvyRepo2(project, DOWNLOAD_REPO_NAME_ES, "https://artifacts-no-kpi.elastic.co", FAKE_IVY_GROUP_ES);
-        addIvyRepo2(project, SNAPSHOT_REPO_NAME_ES, "https://snapshots-no-kpi.elastic.co", FAKE_SNAPSHOT_IVY_GROUP_ES);
     }

     /**
@@ -222,16 +197,12 @@ private static void setupDownloadServiceRepo(Project project) {
      * The returned object is suitable to be passed to {@link DependencyHandler}.
      * The concrete type of the object will be a set of maven coordinates as a {@link String}.
      * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial
-     * coordinates that resolve to the Elastic download service through an ivy repository.
+     * coordinates that resolve to the OpenSearch download service through an ivy repository.
      */
     private String dependencyNotation(OpenSearchDistribution distribution) {
         Version distroVersion = Version.fromString(distribution.getVersion());
         if (distribution.getType() == Type.INTEG_TEST_ZIP) {
-            if (distroVersion.onOrAfter("1.0.0")) {
-                return "org.opensearch.distribution.integ-test-zip:opensearch:" + distribution.getVersion() + "@zip";
-            } else {
-                return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip";
-            }
+            return "org.opensearch.distribution.integ-test-zip:opensearch:" + distribution.getVersion() + "@zip";
         }

         String extension = distribution.getType().toString();
@@ -239,42 +210,24 @@ private String dependencyNotation(OpenSearchDistribution distribution) {
         if (distribution.getType() == Type.ARCHIVE) {
             extension = distribution.getPlatform() == Platform.WINDOWS ? "zip" : "tar.gz";
-            if (distroVersion.onOrAfter("1.0.0")) {
-                switch (distribution.getArchitecture()) {
-                    case ARM64:
-                        classifier = ":" + distribution.getPlatform() + "-arm64";
-                        break;
-                    case X64:
-                        classifier = ":" + distribution.getPlatform() + "-x64";
-                        break;
-                    case S390X:
-                        classifier = ":" + distribution.getPlatform() + "-s390x";
-                        break;
-                    default:
-                        throw new IllegalArgumentException("Unsupported architecture: " + distribution.getArchitecture());
-                }
-            } else if (distroVersion.onOrAfter("7.0.0")) {
-                classifier = ":" + distribution.getPlatform() + "-x86_64";
-            } else {
-                classifier = "";
+            switch (distribution.getArchitecture()) {
+                case ARM64:
+                    classifier = ":" + distribution.getPlatform() + "-arm64";
+                    break;
+                case X64:
+                    classifier = ":" + distribution.getPlatform() + "-x64";
+                    break;
+                case S390X:
+                    classifier = ":" + distribution.getPlatform() + "-s390x";
+                    break;
+                default:
+                    throw new IllegalArgumentException("Unsupported architecture: " + distribution.getArchitecture());
             }
         } else if (distribution.getType() == Type.DEB) {
-            if (distroVersion.onOrAfter("7.0.0")) {
-                classifier = ":amd64";
-            } else {
-                classifier = "";
-            }
-        } else if (distribution.getType() == Type.RPM && distroVersion.before("7.0.0")) {
-            classifier = "";
+            classifier = ":amd64";
         }

-        String group;
-        if (distroVersion.onOrAfter("1.0.0")) {
-            group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP;
-            return group + ":opensearch" + ":" + distribution.getVersion() + classifier + "@" + extension;
-        } else {
-            group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP_ES : FAKE_IVY_GROUP_ES;
-            return group + ":elasticsearch-oss" + ":" + distribution.getVersion() + classifier + "@" + extension;
-        }
+        String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP;
+        return group + ":opensearch" + ":" + distribution.getVersion() + classifier + "@" + extension;
     }
 }
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java
index 0f5348d5a8dcf..86823b82a379f 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java
@@ -32,7 +32,6 @@
 package org.opensearch.gradle.testclusters;

 import org.opensearch.gradle.FileSupplier;
-import org.opensearch.gradle.Jdk;
 import org.opensearch.gradle.PropertyNormalization;
 import org.opensearch.gradle.ReaperService;
 import org.opensearch.gradle.http.WaitForHttpResource;
@@ -75,7 +74,6 @@ public class OpenSearchCluster implements TestClusterConfiguration, Named {
     private final String path;
     private final String clusterName;
     private final NamedDomainObjectContainer<OpenSearchNode> nodes;
-    private final Jdk bwcJdk;
     private final File workingDirBase;
     private final LinkedHashMap<String, Predicate<TestClusterConfiguration>> waitConditions = new LinkedHashMap<>();
     private final Project project;
@@ -92,8 +90,7 @@ public OpenSearchCluster(
         ReaperService reaper,
         File workingDirBase,
         FileSystemOperations fileSystemOperations,
-        ArchiveOperations archiveOperations,
-        Jdk bwcJdk
+        ArchiveOperations archiveOperations
     ) {
         this.path = project.getPath();
         this.clusterName = clusterName;
@@ -103,7 +100,6 @@ public OpenSearchCluster(
         this.archiveOperations = archiveOperations;
         this.workingDirBase = workingDirBase;
         this.nodes = project.container(OpenSearchNode.class);
-        this.bwcJdk = bwcJdk;

         // Always add the first node
         String zone = hasZoneProperty() ? "zone-1" : "";
@@ -167,7 +163,6 @@ private void addNode(String nodeName, String zoneName) {
             fileSystemOperations,
             archiveOperations,
             workingDirBase,
-            bwcJdk,
             zoneName
         );
         // configure the cluster name eagerly
keystorePassword = ""; private boolean preserveDataDir = false; - private final Config opensearchConfig; - private final Config legacyESConfig; - private Config currentConfig; - private String zone; + private final Path configFile; + private final Path stdoutFile; + private final Path stderrFile; + private final Path stdinFile; + private final String zone; OpenSearchNode( String path, @@ -186,7 +185,6 @@ public class OpenSearchNode implements TestClusterConfiguration { FileSystemOperations fileSystemOperations, ArchiveOperations archiveOperations, File workingDirBase, - Jdk bwcJdk, String zone ) { this.path = path; @@ -195,7 +193,6 @@ public class OpenSearchNode implements TestClusterConfiguration { this.reaper = reaper; this.fileSystemOperations = fileSystemOperations; this.archiveOperations = archiveOperations; - this.bwcJdk = bwcJdk; workingDir = workingDirBase.toPath().resolve(safeName(name)).toAbsolutePath(); confPathRepo = workingDir.resolve("repo"); confPathData = workingDir.resolve("data"); @@ -203,107 +200,16 @@ public class OpenSearchNode implements TestClusterConfiguration { transportPortFile = confPathLogs.resolve("transport.ports"); httpPortsFile = confPathLogs.resolve("http.ports"); tmpDir = workingDir.resolve("tmp"); + configFile = workingDir.resolve("config/opensearch.yml"); + stdoutFile = confPathLogs.resolve("opensearch.stdout.log"); + stderrFile = confPathLogs.resolve("opensearch.stderr.log"); + stdinFile = workingDir.resolve("opensearch.stdin"); waitConditions.put("ports files", this::checkPortsFilesExistWithDelay); setTestDistribution(TestDistribution.INTEG_TEST); setVersion(VersionProperties.getOpenSearch()); - opensearchConfig = Config.getOpenSearchConfig(workingDir); - legacyESConfig = Config.getLegacyESConfig(workingDir); - currentConfig = opensearchConfig; this.zone = zone; } - /* - * An object to contain the configuration needed to install - * either an OpenSearch or an elasticsearch distribution on - * this test node. - * - * This is added to be able to run BWC testing against a - * cluster running elasticsearch. - * - * legacyESConfig will be removed in a future release. 
- */ - private static class Config { - final String distroName; - final String command; - final String keystoreTool; - final String pluginTool; - final String envTempDir; - final String envJavaOpts; - final String envPathConf; - final Path configFile; - final Path stdoutFile; - final Path stderrFile; - final Path stdinFile; - - Config( - String distroName, - String command, - String keystoreTool, - String pluginTool, - String envTempDir, - String envJavaOpts, - String envPathConf, - Path configFile, - Path stdoutFile, - Path stderrFile, - Path stdinFile - ) { - this.distroName = distroName; - this.command = command; - this.keystoreTool = keystoreTool; - this.pluginTool = pluginTool; - this.envTempDir = envTempDir; - this.envJavaOpts = envJavaOpts; - this.envPathConf = envPathConf; - this.configFile = configFile; - this.stdoutFile = stdoutFile; - this.stderrFile = stderrFile; - this.stdinFile = stdinFile; - } - - static Config getOpenSearchConfig(Path workingDir) { - Path confPathLogs = workingDir.resolve("logs"); - return new Config( - "OpenSearch", - "opensearch", - "opensearch-keystore", - "opensearch-plugin", - "OPENSEARCH_TMPDIR", - "OPENSEARCH_JAVA_OPTS", - "OPENSEARCH_PATH_CONF", - workingDir.resolve("config/opensearch.yml"), - confPathLogs.resolve("opensearch.stdout.log"), - confPathLogs.resolve("opensearch.stderr.log"), - workingDir.resolve("opensearch.stdin") - ); - } - - static Config getLegacyESConfig(Path workingDir) { - Path confPathLogs = workingDir.resolve("logs"); - return new Config( - "Elasticsearch", - "elasticsearch", - "elasticsearch-keystore", - "elasticsearch-plugin", - "ES_TMPDIR", - "ES_JAVA_OPTS", - "ES_PATH_CONF", - workingDir.resolve("config/elasticsearch.yml"), - confPathLogs.resolve("es.stdout.log"), - confPathLogs.resolve("es.stderr.log"), - workingDir.resolve("es.stdin") - ); - } - } - - private void applyConfig() { - if (getVersion().onOrAfter("1.0.0")) { - currentConfig = opensearchConfig; - } else { - currentConfig = legacyESConfig; - } - } - @Input @Optional public String getName() { @@ -321,7 +227,6 @@ public void setVersion(String version) { checkFrozen(); distributions.clear(); doSetVersion(version); - applyConfig(); } @Override @@ -331,7 +236,6 @@ public void setVersions(List versions) { for (String version : versions) { doSetVersion(version); } - applyConfig(); } private void doSetVersion(String version) { @@ -528,7 +432,7 @@ public void jvmArgs(String... 
values) { @Internal public Path getConfigDir() { - return currentConfig.configFile.getParent(); + return configFile.getParent(); } @Override @@ -555,7 +459,7 @@ public void freeze() { * @return stream of log lines */ public Stream logLines() throws IOException { - return Files.lines(currentConfig.stdoutFile, StandardCharsets.UTF_8); + return Files.lines(stdoutFile, StandardCharsets.UTF_8); } @Override @@ -601,23 +505,17 @@ public synchronized void start() { } if (pluginsToInstall.isEmpty() == false) { - if (getVersion().onOrAfter("7.6.0")) { - logToProcessStdout("installing " + pluginsToInstall.size() + " plugins in a single transaction"); - final String[] arguments = Stream.concat(Stream.of("install", "--batch"), pluginsToInstall.stream()).toArray(String[]::new); - runOpenSearchBinScript(currentConfig.pluginTool, arguments); - logToProcessStdout("installed plugins"); - } else { - logToProcessStdout("installing " + pluginsToInstall.size() + " plugins sequentially"); - pluginsToInstall.forEach(plugin -> runOpenSearchBinScript(currentConfig.pluginTool, "install", "--batch", plugin)); - logToProcessStdout("installed plugins"); - } + logToProcessStdout("installing " + pluginsToInstall.size() + " plugins in a single transaction"); + final String[] arguments = Stream.concat(Stream.of("install", "--batch"), pluginsToInstall.stream()).toArray(String[]::new); + runOpenSearchBinScript("opensearch-plugin", arguments); + logToProcessStdout("installed plugins"); } - logToProcessStdout("Creating " + currentConfig.command + " keystore with password set to [" + keystorePassword + "]"); + logToProcessStdout("Creating opensearch keystore with password set to [" + keystorePassword + "]"); if (keystorePassword.length() > 0) { - runOpenSearchBinScriptWithInput(keystorePassword + "\n" + keystorePassword, currentConfig.keystoreTool, "create", "-p"); + runOpenSearchBinScriptWithInput(keystorePassword + "\n" + keystorePassword, "opensearch-keystore", "create", "-p"); } else { - runOpenSearchBinScript(currentConfig.keystoreTool, "-v", "create"); + runOpenSearchBinScript("opensearch-keystore", "-v", "create"); } if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) { @@ -645,7 +543,7 @@ public synchronized void start() { } } - logToProcessStdout("Starting " + currentConfig.distroName + " process"); + logToProcessStdout("Starting OpenSearch process"); startOpenSearchProcess(); } @@ -657,11 +555,11 @@ private boolean canUseSharedDistribution() { private void logToProcessStdout(String message) { try { - if (Files.exists(currentConfig.stdoutFile.getParent()) == false) { - Files.createDirectories(currentConfig.stdoutFile.getParent()); + if (Files.exists(stdoutFile.getParent()) == false) { + Files.createDirectories(stdoutFile.getParent()); } Files.write( - currentConfig.stdoutFile, + stdoutFile, ("[" + Instant.now().toString() + "] [BUILD] " + message + "\n").getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.APPEND @@ -684,7 +582,6 @@ void goToNextVersion() { } logToProcessStdout("Switch version from " + getVersion() + " to " + distributions.get(currentDistro + 1).getVersion()); currentDistro += 1; - applyConfig(); setting("node.attr.upgraded", "true"); } @@ -696,7 +593,7 @@ private void copyExtraConfigFiles() { if (Files.exists(from.toPath()) == false) { throw new TestClustersException("Can't create extra config file from " + from + " for " + this + " as it does not exist"); } - Path dst = currentConfig.configFile.getParent().resolve(destination); + Path dst = 
configFile.getParent().resolve(destination); try { Files.createDirectories(dst.getParent()); Files.copy(from.toPath(), dst, StandardCopyOption.REPLACE_EXISTING); @@ -721,7 +618,7 @@ private void copyExtraJars() { Files.copy(from.toPath(), destination, StandardCopyOption.REPLACE_EXISTING); LOGGER.info("Added extra jar {} to {}", from.getName(), destination); } catch (IOException e) { - throw new UncheckedIOException("Can't copy extra jar dependency " + from.getName() + " to " + destination.toString(), e); + throw new UncheckedIOException("Can't copy extra jar dependency " + from.getName() + " to " + destination, e); } }); } @@ -794,9 +691,7 @@ private void runOpenSearchBinScriptWithInput(String input, String tool, CharSequ ArrayList result = new ArrayList<>(); result.add("/c"); result.add("bin\\" + tool + ".bat"); - for (CharSequence arg : args) { - result.add(arg); - } + result.addAll(Arrays.asList(args)); return result; }).onUnix(() -> Arrays.asList(args)).supply()); spec.setStandardInput(byteArrayInputStream); @@ -809,7 +704,7 @@ private void runOpenSearchBinScriptWithInput(String input, String tool, CharSequ private void runKeystoreCommandWithPassword(String keystorePassword, String input, CharSequence... args) { final String actualInput = keystorePassword.length() > 0 ? keystorePassword + "\n" + input : input; - runOpenSearchBinScriptWithInput(actualInput, currentConfig.keystoreTool, args); + runOpenSearchBinScriptWithInput(actualInput, "opensearch-keystore", args); } private void runOpenSearchBinScript(String tool, CharSequence... args) { @@ -819,7 +714,7 @@ private void runOpenSearchBinScript(String tool, CharSequence... args) { private Map getOpenSearchEnvironment() { Map defaultEnv = new HashMap<>(); getRequiredJavaHome().ifPresent(javaHome -> defaultEnv.put("JAVA_HOME", javaHome)); - defaultEnv.put(currentConfig.envPathConf, currentConfig.configFile.getParent().toString()); + defaultEnv.put("OPENSEARCH_PATH_CONF", configFile.getParent().toString()); String systemPropertiesString = ""; if (systemProperties.isEmpty() == false) { systemPropertiesString = " " @@ -829,7 +724,7 @@ private Map getOpenSearchEnvironment() { // OPENSEARCH_PATH_CONF is also set as an environment variable and for a reference to ${OPENSEARCH_PATH_CONF} // to work OPENSEARCH_JAVA_OPTS, we need to make sure that OPENSEARCH_PATH_CONF before OPENSEARCH_JAVA_OPTS. 
Instead, // we replace the reference with the actual value in other environment variables - .map(p -> p.replace("${" + currentConfig.envPathConf + "}", currentConfig.configFile.getParent().toString())) + .map(p -> p.replace("${OPENSEARCH_PATH_CONF}", configFile.getParent().toString())) .collect(Collectors.joining(" ")); } String jvmArgsString = ""; @@ -844,12 +739,12 @@ private Map getOpenSearchEnvironment() { } String heapSize = System.getProperty("tests.heap.size", "512m"); defaultEnv.put( - currentConfig.envJavaOpts, + "OPENSEARCH_JAVA_OPTS", "-Xms" + heapSize + " -Xmx" + heapSize + " -ea -esa " + systemPropertiesString + " " + jvmArgsString + " " + // Support passing in additional JVM arguments System.getProperty("tests.jvm.argline", "") ); - defaultEnv.put(currentConfig.envTempDir, tmpDir.toString()); + defaultEnv.put("OPENSEARCH_TMPDIR", tmpDir.toString()); // Windows requires this as it defaults to `c:\windows` despite OPENSEARCH_TMPDIR defaultEnv.put("TMP", tmpDir.toString()); @@ -868,27 +763,20 @@ private Map getOpenSearchEnvironment() { } private java.util.Optional getRequiredJavaHome() { - // If we are testing the current version of Elasticsearch, use the configured runtime Java + // If we are testing the current version of OpenSearch, use the configured runtime Java if (getTestDistribution() == TestDistribution.INTEG_TEST || getVersion().equals(VersionProperties.getOpenSearchVersion())) { return java.util.Optional.of(BuildParams.getRuntimeJavaHome()).map(File::getAbsolutePath); - } else if (getVersion().before("7.0.0")) { - return java.util.Optional.of(bwcJdk.getJavaHomePath().toString()); } else { // otherwise use the bundled JDK return java.util.Optional.empty(); } } - @Internal - Jdk getBwcJdk() { - return getVersion().before("7.0.0") ? 
bwcJdk : null; - } - private void startOpenSearchProcess() { final ProcessBuilder processBuilder = new ProcessBuilder(); Path effectiveDistroDir = getDistroDir(); List command = OS.>conditional() - .onUnix(() -> Arrays.asList(effectiveDistroDir.resolve("./bin/" + currentConfig.command).toString())) - .onWindows(() -> Arrays.asList("cmd", "/c", effectiveDistroDir.resolve("bin\\" + currentConfig.command + ".bat").toString())) + .onUnix(() -> List.of(effectiveDistroDir.resolve("./bin/opensearch").toString())) + .onWindows(() -> Arrays.asList("cmd", "/c", effectiveDistroDir.resolve("bin\\opensearch.bat").toString())) .supply(); processBuilder.command(command); processBuilder.directory(workingDir.toFile()); @@ -898,13 +786,13 @@ private void startOpenSearchProcess() { environment.putAll(getOpenSearchEnvironment()); // don't buffer all in memory, make sure we don't block on the default pipes - processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(currentConfig.stderrFile.toFile())); - processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(currentConfig.stdoutFile.toFile())); + processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(stderrFile.toFile())); + processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(stdoutFile.toFile())); if (keystorePassword != null && keystorePassword.length() > 0) { try { - Files.write(currentConfig.stdinFile, (keystorePassword + "\n").getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE); - processBuilder.redirectInput(currentConfig.stdinFile.toFile()); + Files.write(stdinFile, (keystorePassword + "\n").getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE); + processBuilder.redirectInput(stdinFile.toFile()); } catch (IOException e) { throw new TestClustersException("Failed to set the keystore password for " + this, e); } @@ -913,7 +801,7 @@ private void startOpenSearchProcess() { try { opensearchProcess = processBuilder.start(); } catch (IOException e) { - throw new TestClustersException("Failed to start " + currentConfig.command + " process for " + this, e); + throw new TestClustersException("Failed to start opensearch process for " + this, e); } reaper.registerPid(toString(), opensearchProcess.pid()); } @@ -985,8 +873,8 @@ public synchronized void stop(boolean tailLogs) { stopProcess(opensearchProcess.toHandle(), true); reaper.unregister(toString()); if (tailLogs) { - logFileContents("Standard output of node", currentConfig.stdoutFile); - logFileContents("Standard error of node", currentConfig.stderrFile); + logFileContents("Standard output of node", stdoutFile); + logFileContents("Standard error of node", stderrFile); } opensearchProcess = null; // Clean up the ports file in case this is started again. @@ -1014,16 +902,13 @@ private void stopProcess(ProcessHandle processHandle, boolean forcibly) { return; } - // Stop all children last - if the ML processes are killed before the ES JVM then + // Stop all children last - if the ML processes are killed before the OpenSearch JVM then // they'll be recorded as having failed and won't restart when the cluster restarts. - // ES could actually be a child when there's some wrapper process like on Windows, + // OpenSearch could actually be a child when there's some wrapper process like on Windows, // and in that case the ML processes will be grandchildren of the wrapper. List children = processHandle.children().collect(Collectors.toList()); try { - logProcessInfo( - "Terminating " + currentConfig.command + " process" + (forcibly ? 
" forcibly " : "gracefully") + ":", - processHandle.info() - ); + logProcessInfo("Terminating opensearch process" + (forcibly ? " forcibly " : "gracefully") + ":", processHandle.info()); if (forcibly) { processHandle.destroyForcibly(); @@ -1043,7 +928,7 @@ private void stopProcess(ProcessHandle processHandle, boolean forcibly) { waitForProcessToExit(processHandle); if (processHandle.isAlive()) { - throw new TestClustersException("Was not able to terminate " + currentConfig.command + " process for " + this); + throw new TestClustersException("Was not able to terminate opensearch process for " + this); } } finally { children.forEach(each -> stopProcess(each, forcibly)); @@ -1051,7 +936,7 @@ private void stopProcess(ProcessHandle processHandle, boolean forcibly) { waitForProcessToExit(processHandle); if (processHandle.isAlive()) { - throw new TestClustersException("Was not able to terminate " + currentConfig.command + " process for " + this); + throw new TestClustersException("Was not able to terminate opensearch process for " + this); } } @@ -1135,7 +1020,7 @@ private void waitForProcessToExit(ProcessHandle processHandle) { try { processHandle.onExit().get(OPENSEARCH_DESTROY_TIMEOUT, OPENSEARCH_DESTROY_TIMEOUT_UNIT); } catch (InterruptedException e) { - LOGGER.info("Interrupted while waiting for {} process", currentConfig.command, e); + LOGGER.info("Interrupted while waiting for opensearch process", e); Thread.currentThread().interrupt(); } catch (ExecutionException e) { LOGGER.info("Failure while waiting for process to exist", e); @@ -1146,8 +1031,8 @@ private void waitForProcessToExit(ProcessHandle processHandle) { private void createWorkingDir() throws IOException { // Start configuration from scratch in case of a restart - fileSystemOperations.delete(d -> d.delete(currentConfig.configFile.getParent())); - Files.createDirectories(currentConfig.configFile.getParent()); + fileSystemOperations.delete(d -> d.delete(configFile.getParent())); + Files.createDirectories(configFile.getParent()); Files.createDirectories(confPathRepo); Files.createDirectories(confPathData); Files.createDirectories(confPathLogs); @@ -1250,42 +1135,27 @@ private void createConfiguration() { } baseConfig.put("node.portsfile", "true"); baseConfig.put("http.port", httpPort); - if (getVersion().onOrAfter(Version.fromString("6.7.0"))) { - baseConfig.put("transport.port", transportPort); - } else { - baseConfig.put("transport.tcp.port", transportPort); - } + baseConfig.put("transport.port", transportPort); // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space baseConfig.put("cluster.routing.allocation.disk.watermark.low", "1b"); baseConfig.put("cluster.routing.allocation.disk.watermark.high", "1b"); // increase script compilation limit since tests can rapid-fire script compilations - if (getVersion().onOrAfter(Version.fromString("7.9.0"))) { - baseConfig.put("script.disable_max_compilations_rate", "true"); - } else { - baseConfig.put("script.max_compilations_rate", "2048/1m"); - } + baseConfig.put("script.disable_max_compilations_rate", "true"); baseConfig.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); // Temporarily disable the real memory usage circuit breaker. It depends on real memory usage which we have no full control // over and the REST client will not retry on circuit breaking exceptions yet (see #31986 for details). Once the REST client // can retry on circuit breaking exceptions, we can revert again to the default configuration. 
- if (getVersion().onOrAfter("7.0.0")) { - baseConfig.put("indices.breaker.total.use_real_memory", "false"); - } + baseConfig.put("indices.breaker.total.use_real_memory", "false"); // Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master baseConfig.put("discovery.initial_state_timeout", "0s"); // TODO: Remove these once https://github.com/elastic/elasticsearch/issues/46091 is fixed - if (getVersion().onOrAfter("1.0.0")) { - baseConfig.put("logger.org.opensearch.action.support.master", "DEBUG"); - baseConfig.put("logger.org.opensearch.cluster.coordination", "DEBUG"); - } else { - baseConfig.put("logger.org.elasticsearch.action.support.master", "DEBUG"); - baseConfig.put("logger.org.elasticsearch.cluster.coordination", "DEBUG"); - } + baseConfig.put("logger.org.opensearch.action.support.master", "DEBUG"); + baseConfig.put("logger.org.opensearch.cluster.coordination", "DEBUG"); HashSet overriden = new HashSet<>(baseConfig.keySet()); overriden.retainAll(settings.keySet()); - overriden.removeAll(OVERRIDABLE_SETTINGS); + OVERRIDABLE_SETTINGS.forEach(overriden::remove); if (overriden.isEmpty() == false) { throw new IllegalArgumentException( "Testclusters does not allow the following settings to be changed:" + overriden + " for " + this @@ -1294,10 +1164,10 @@ private void createConfiguration() { // Make sure no duplicate config keys settings.keySet().stream().filter(OVERRIDABLE_SETTINGS::contains).forEach(baseConfig::remove); - final Path configFileRoot = currentConfig.configFile.getParent(); + final Path configFileRoot = configFile.getParent(); try { Files.write( - currentConfig.configFile, + configFile, Stream.concat(settings.entrySet().stream(), baseConfig.entrySet().stream()) .map(entry -> entry.getKey() + ": " + entry.getValue()) .collect(Collectors.joining("\n")) @@ -1312,17 +1182,17 @@ private void createConfiguration() { } logToProcessStdout("Copying additional config files from distro " + configFiles); for (Path file : configFiles) { - Path dest = currentConfig.configFile.getParent().resolve(file.getFileName()); + Path dest = configFile.getParent().resolve(file.getFileName()); if (Files.exists(dest) == false) { Files.copy(file, dest); } } } catch (IOException e) { - throw new UncheckedIOException("Could not write config file: " + currentConfig.configFile, e); + throw new UncheckedIOException("Could not write config file: " + configFile, e); } tweakJvmOptions(configFileRoot); - LOGGER.info("Written config file:{} for {}", currentConfig.configFile, this); + LOGGER.info("Written config file:{} for {}", configFile, this); } private void tweakJvmOptions(Path configFileRoot) { @@ -1346,18 +1216,11 @@ private void tweakJvmOptions(Path configFileRoot) { private Map jvmOptionExpansions() { Map expansions = new HashMap<>(); Version version = getVersion(); - String heapDumpOrigin = getVersion().onOrAfter("6.3.0") ? 
"-XX:HeapDumpPath=data" : "-XX:HeapDumpPath=/heap/dump/path"; + String heapDumpOrigin = "-XX:HeapDumpPath=data"; Path relativeLogPath = workingDir.relativize(confPathLogs); - expansions.put(heapDumpOrigin, "-XX:HeapDumpPath=" + relativeLogPath.toString()); - if (version.onOrAfter("6.2.0")) { - expansions.put("logs/gc.log", relativeLogPath.resolve("gc.log").toString()); - } - if (getVersion().onOrAfter("7.0.0")) { - expansions.put( - "-XX:ErrorFile=logs/hs_err_pid%p.log", - "-XX:ErrorFile=" + relativeLogPath.resolve("hs_err_pid%p.log").toString() - ); - } + expansions.put(heapDumpOrigin, "-XX:HeapDumpPath=" + relativeLogPath); + expansions.put("logs/gc.log", relativeLogPath.resolve("gc.log").toString()); + expansions.put("-XX:ErrorFile=logs/hs_err_pid%p.log", "-XX:ErrorFile=" + relativeLogPath.resolve("hs_err_pid%p.log")); return expansions; } @@ -1488,7 +1351,7 @@ void waitForAllConditions() { // Installing plugins at config time and loading them when nods start requires additional time we need to // account for ADDITIONAL_CONFIG_TIMEOUT_UNIT.toMillis( - ADDITIONAL_CONFIG_TIMEOUT * (plugins.size() + keystoreFiles.size() + keystoreSettings.size() + credentials.size()) + (long) ADDITIONAL_CONFIG_TIMEOUT * (plugins.size() + keystoreFiles.size() + keystoreSettings.size() + credentials.size()) ), TimeUnit.MILLISECONDS, this); } @@ -1546,17 +1409,17 @@ void setDataPath(Path dataPath) { @Internal Path getOpensearchStdoutFile() { - return currentConfig.stdoutFile; + return stdoutFile; } @Internal Path getOpensearchStderrFile() { - return currentConfig.stderrFile; + return stderrFile; } private static class FileEntry implements Named { - private String name; - private File file; + private final String name; + private final File file; FileEntry(String name, File file) { this.name = name; @@ -1577,8 +1440,8 @@ public File getFile() { } private static class CliEntry { - private String executable; - private CharSequence[] args; + private final String executable; + private final CharSequence[] args; CliEntry(String executable, CharSequence[] args) { this.executable = executable; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java index e5d264121b0aa..e5c413df00d0d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java @@ -31,7 +31,6 @@ package org.opensearch.gradle.testclusters; -import org.opensearch.gradle.Jdk; import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.tasks.Nested; @@ -52,9 +51,6 @@ default void useCluster(OpenSearchCluster cluster) { // Add configured distributions as task dependencies so they are built before starting the cluster cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach(distro -> dependsOn(distro.getExtracted())); - // Add legacy BWC JDK runtime as a dependency so it's downloaded before starting the cluster if necessary - cluster.getNodes().stream().map(node -> (Callable) node::getBwcJdk).forEach(this::dependsOn); - cluster.getNodes().forEach(node -> dependsOn((Callable>) node::getPluginAndModuleConfigurations)); getClusters().add(cluster); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java index 2ef14a39b6669..8735970b0d65b 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java
index 2ef14a39b6669..8735970b0d65b 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java
@@ -31,11 +31,8 @@

 package org.opensearch.gradle.testclusters;

-import org.opensearch.gradle.Architecture;
 import org.opensearch.gradle.DistributionDownloadPlugin;
-import org.opensearch.gradle.Jdk;
 import org.opensearch.gradle.JdkDownloadPlugin;
-import org.opensearch.gradle.OS;
 import org.opensearch.gradle.ReaperPlugin;
 import org.opensearch.gradle.ReaperService;
 import org.opensearch.gradle.info.BuildParams;
@@ -68,8 +65,6 @@ public class TestClustersPlugin implements Plugin<Project> {
     private static final String LIST_TASK_NAME = "listTestClusters";
     private static final String REGISTRY_SERVICE_NAME = "testClustersRegistry";
-    private static final String LEGACY_JAVA_VENDOR = "adoptopenjdk";
-    private static final String LEGACY_JAVA_VERSION = "8u242+b08";
     private static final Logger logger = Logging.getLogger(TestClustersPlugin.class);

     @Inject
@@ -95,16 +90,8 @@ public void apply(Project project) {

         ReaperService reaper = project.getRootProject().getExtensions().getByType(ReaperService.class);

-        // register legacy jdk distribution for testing pre-7.0 BWC clusters
-        Jdk bwcJdk = JdkDownloadPlugin.getContainer(project).create("bwc_jdk", jdk -> {
-            jdk.setVendor(LEGACY_JAVA_VENDOR);
-            jdk.setVersion(LEGACY_JAVA_VERSION);
-            jdk.setPlatform(OS.current().name().toLowerCase());
-            jdk.setArchitecture(Architecture.current().name().toLowerCase());
-        });
-
         // enable the DSL to describe clusters
-        NamedDomainObjectContainer<OpenSearchCluster> container = createTestClustersContainerExtension(project, reaper, bwcJdk);
+        NamedDomainObjectContainer<OpenSearchCluster> container = createTestClustersContainerExtension(project, reaper);

         // provide a task to be able to list defined clusters.
         createListClustersTask(project, container);
@@ -125,11 +112,7 @@ public void apply(Project project) {
         project.getRootProject().getPluginManager().apply(TestClustersHookPlugin.class);
     }

-    private NamedDomainObjectContainer<OpenSearchCluster> createTestClustersContainerExtension(
-        Project project,
-        ReaperService reaper,
-        Jdk bwcJdk
-    ) {
+    private NamedDomainObjectContainer<OpenSearchCluster> createTestClustersContainerExtension(Project project, ReaperService reaper) {
         // Create an extensions that allows describing clusters
         NamedDomainObjectContainer<OpenSearchCluster> container = project.container(
             OpenSearchCluster.class,
@@ -139,8 +122,7 @@ private NamedDomainObjectContainer<OpenSearchCluster> createTestClustersContaine
                 reaper,
                 new File(project.getBuildDir(), "testclusters"),
                 getFileSystemOperations(),
-                getArchiveOperations(),
-                bwcJdk
+                getArchiveOperations()
             )
         );
         project.getExtensions().add(EXTENSION_NAME, container);
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/BwcOpenSearchVersionsTests.java b/buildSrc/src/test/java/org/opensearch/gradle/BwcOpenSearchVersionsTests.java
deleted file mode 100644
index 14931c83ba29b..0000000000000
--- a/buildSrc/src/test/java/org/opensearch/gradle/BwcOpenSearchVersionsTests.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.gradle;
-
-import org.opensearch.gradle.test.GradleUnitTestCase;
-import org.junit.Rule;
-import org.junit.rules.ExpectedException;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import static java.util.Arrays.asList;
-
-/**
- * Tests to specifically verify the OpenSearch version 1.x with Legacy ES versions.
- * This supplements the tests in BwcVersionsTests.
- *
- * Currently the versioning logic doesn't work for OpenSearch 2.x as the masking
- * is only applied specifically for 1.x.
- */
-public class BwcOpenSearchVersionsTests extends GradleUnitTestCase {
-
-    private static final Map<String, List<String>> sampleVersions = new HashMap<>();
-
-    @Rule
-    public ExpectedException expectedEx = ExpectedException.none();
-
-    static {
-        sampleVersions.put("1.0.0", asList("5_6_13", "6_6_1", "6_8_15", "7_0_0", "7_9_1", "7_10_0", "7_10_1", "7_10_2", "1_0_0"));
-        sampleVersions.put("1.1.0", asList("5_6_13", "6_6_1", "6_8_15", "7_0_0", "7_9_1", "7_10_0", "7_10_1", "7_10_2", "1_0_0", "1_1_0"));
-        sampleVersions.put(
-            "2.0.0",
-            asList("5_6_13", "6_6_1", "6_8_15", "7_0_0", "7_9_1", "7_10_0", "7_10_1", "7_10_2", "1_0_0", "1_1_0", "2_0_0")
-        );
-    }
-
-    public void testWireCompatible() {
-        assertVersionsEquals(
-            asList("6.8.15", "7.0.0", "7.9.1", "7.10.0", "7.10.1", "7.10.2"),
-            getVersionCollection("1.0.0").getWireCompatible()
-        );
-        assertVersionsEquals(
-            asList("6.8.15", "7.0.0", "7.9.1", "7.10.0", "7.10.1", "7.10.2", "1.0.0"),
-            getVersionCollection("1.1.0").getWireCompatible()
-        );
-    }
-
-    public void testWireCompatibleUnreleased() {
-        assertVersionsEquals(Collections.emptyList(), getVersionCollection("1.0.0").getUnreleasedWireCompatible());
-    }
-
-    public void testIndexCompatible() {
-        assertVersionsEquals(
-            asList("6.6.1", "6.8.15", "7.0.0", "7.9.1", "7.10.0", "7.10.1", "7.10.2"),
-            getVersionCollection("1.0.0").getIndexCompatible()
-        );
-        assertVersionsEquals(
-            asList("6.6.1", "6.8.15", "7.0.0", "7.9.1", "7.10.0", "7.10.1", "7.10.2", "1.0.0"),
-            getVersionCollection("1.1.0").getIndexCompatible()
-        );
-    }
-
-    public void testIndexCompatibleUnreleased() {
-        assertVersionsEquals(Collections.emptyList(), getVersionCollection("1.0.0").getUnreleasedIndexCompatible());
-    }
-
-    public void testGetUnreleased() {
-        assertVersionsEquals(Collections.singletonList("1.0.0"), getVersionCollection("1.0.0").getUnreleased());
-    }
-
-    private String formatVersionToLine(final String version) {
-        return "    public static final Version V_" + version.replaceAll("\\.", "_") + " ";
-    }
-
-    private void assertVersionsEquals(List<String> expected, List<Version> actual) {
-        assertEquals(expected.stream().map(Version::fromString).collect(Collectors.toList()), actual);
-    }
-
-    private BwcVersions getVersionCollection(String versionString) {
-        List<String> versionMap = sampleVersions.get(versionString);
-        assertNotNull(versionMap);
-        Version version = Version.fromString(versionString);
-        assertNotNull(version);
-        return new BwcVersions(versionMap.stream().map(this::formatVersionToLine).collect(Collectors.toList()), version);
-    }
-}
public void testCustomDistributionUrlWithUrl() { project.getExtensions().getExtraProperties().set("customDistributionUrl", customUrl); DistributionDownloadPlugin plugin = new DistributionDownloadPlugin(); plugin.apply(project); - assertEquals(4, project.getRepositories().size()); + assertEquals(2, project.getRepositories().size()); assertEquals( ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-downloads")).getUrl().toString(), customUrl @@ -95,22 +95,13 @@ public void testCustomDistributionUrlWithUrl() { ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-snapshots")).getUrl().toString(), customUrl ); - assertEquals( - ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-downloads")).getUrl().toString(), - "https://artifacts-no-kpi.elastic.co" - ); - assertEquals( - ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-snapshots")).getUrl().toString(), - "https://snapshots-no-kpi.elastic.co" - ); - } public void testCustomDistributionUrlWithoutUrl() { Project project = ProjectBuilder.builder().build(); DistributionDownloadPlugin plugin = new DistributionDownloadPlugin(); plugin.apply(project); - assertEquals(5, project.getRepositories().size()); + assertEquals(3, project.getRepositories().size()); assertEquals( ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-downloads")).getUrl().toString(), "https://artifacts.opensearch.org" @@ -123,14 +114,6 @@ public void testCustomDistributionUrlWithoutUrl() { ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-snapshots")).getUrl().toString(), "https://artifacts.opensearch.org" ); - assertEquals( - ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-downloads")).getUrl().toString(), - "https://artifacts-no-kpi.elastic.co" - ); - assertEquals( - ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-snapshots")).getUrl().toString(), - "https://snapshots-no-kpi.elastic.co" - ); } public void testBadVersionFormat() { @@ -332,7 +315,8 @@ private void checkBwc( Project archiveProject = ProjectBuilder.builder().withParent(bwcProject).withName(projectName).build(); archiveProject.getConfigurations().create(config); archiveProject.getArtifacts().add(config, new File("doesnotmatter")); - createDistro(project, "distro", version.toString(), type, platform, true); + final OpenSearchDistribution distro = createDistro(project, "distro", version.toString(), type, platform, true); + distro.setArchitecture(Architecture.current()); checkPlugin(project); } diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index 7f1e9cb8d04b3..bbb7bf68e2ced 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -28,7 +28,7 @@ * under the License. 
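A note on the DistributionDownloadPluginTests change above: the expected repository counts drop by two in each case because the plugin no longer registers the elasticsearch-downloads and elasticsearch-snapshots Ivy repositories. A sketch of the surviving invariant, under the same ProjectBuilder fixture assumptions as the test itself:

import org.gradle.api.Project;
import org.gradle.testfixtures.ProjectBuilder;
import org.opensearch.gradle.DistributionDownloadPlugin;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;

// Only the OpenSearch artifact repositories remain; the Elastic mirrors
// (artifacts-no-kpi.elastic.co / snapshots-no-kpi.elastic.co) are gone.
Project project = ProjectBuilder.builder().build();
new DistributionDownloadPlugin().apply(project);
assertNotNull(project.getRepositories().findByName("opensearch-downloads"));
assertNotNull(project.getRepositories().findByName("opensearch-snapshots"));
assertNull(project.getRepositories().findByName("elasticsearch-downloads"));
assertNull(project.getRepositories().findByName("elasticsearch-snapshots"));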
*/ apply plugin: 'opensearch.build' -apply plugin: 'nebula.optional-base' +apply plugin: 'com.netflix.nebula.optional-base' apply plugin: 'opensearch.publish' dependencies { diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 374f2fe572a12..fb8bed207dbc6 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -30,7 +30,7 @@ import org.opensearch.gradle.info.BuildParams -apply plugin: 'nebula.optional-base' +apply plugin: 'com.netflix.nebula.optional-base' apply plugin: 'opensearch.publish' archivesBaseName = 'opensearch-core' diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 86414d18108a1..43a55f84b9d55 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -29,9 +29,9 @@ */ dependencies { - api 'org.jruby.joni:joni:2.1.43' + api 'org.jruby.joni:joni:2.1.44' // joni dependencies: - api 'org.jruby.jcodings:jcodings:1.0.57' + api 'org.jruby.jcodings:jcodings:1.0.58' testImplementation(project(":test:framework")) { exclude group: 'org.opensearch', module: 'opensearch-grok' diff --git a/libs/grok/licenses/jcodings-1.0.57.jar.sha1 b/libs/grok/licenses/jcodings-1.0.57.jar.sha1 deleted file mode 100644 index 1a703c2644787..0000000000000 --- a/libs/grok/licenses/jcodings-1.0.57.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -603a9ceac39cbf7f6f27fe18b2fded4714319b0a \ No newline at end of file diff --git a/libs/grok/licenses/jcodings-1.0.58.jar.sha1 b/libs/grok/licenses/jcodings-1.0.58.jar.sha1 new file mode 100644 index 0000000000000..0202d24704a50 --- /dev/null +++ b/libs/grok/licenses/jcodings-1.0.58.jar.sha1 @@ -0,0 +1 @@ +dce27159dc0382e5f7518d4f3e499fc8396357ed \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.1.43.jar.sha1 b/libs/grok/licenses/joni-2.1.43.jar.sha1 deleted file mode 100644 index ef5dfabb2b391..0000000000000 --- a/libs/grok/licenses/joni-2.1.43.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a3bf154469d5ff1d1107755904279081a5fb618 \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.1.44.jar.sha1 b/libs/grok/licenses/joni-2.1.44.jar.sha1 new file mode 100644 index 0000000000000..bff9ca56f7e8c --- /dev/null +++ b/libs/grok/licenses/joni-2.1.44.jar.sha1 @@ -0,0 +1 @@ +35746c2aee04ce459a2aa8dc2d626946c5dfb051 \ No newline at end of file diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/URLDecodeProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/URLDecodeProcessor.java index bf80c5b064703..bb236f957a587 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/URLDecodeProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/URLDecodeProcessor.java @@ -32,8 +32,8 @@ package org.opensearch.ingest.common; -import java.io.UnsupportedEncodingException; import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; import java.util.Map; /** @@ -48,11 +48,7 @@ public final class URLDecodeProcessor extends AbstractStringProcessor { } public static String apply(String value) { - try { - return URLDecoder.decode(value, "UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new IllegalArgumentException("Could not URL-decode value.", e); - } + return URLDecoder.decode(value, StandardCharsets.UTF_8); } @Override diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/URLDecodeProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/URLDecodeProcessorTests.java index 81ed3c89768b7..3d68648825594 100644 --- 
a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/URLDecodeProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/URLDecodeProcessorTests.java @@ -32,13 +32,14 @@ package org.opensearch.ingest.common; -import java.io.UnsupportedEncodingException; import java.net.URLDecoder; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; public class URLDecodeProcessorTests extends AbstractStringProcessorTestCase { @Override protected String modifyInput(String input) { - return "Hello%20G%C3%BCnter" + input; + return "Hello%20G%C3%BCnter" + urlEncode(input); } @Override @@ -48,10 +49,10 @@ protected AbstractStringProcessor newProcessor(String field, boolean ign @Override protected String expectedResult(String input) { - try { - return "Hello Günter" + URLDecoder.decode(input, "UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new IllegalArgumentException("invalid"); - } + return "Hello Günter" + URLDecoder.decode(urlEncode(input), StandardCharsets.UTF_8); + } + + private static String urlEncode(String s) { + return URLEncoder.encode(s, StandardCharsets.UTF_8); } } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 8c6f279c445b3..fb056192dcbec 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -43,7 +43,7 @@ dependencies { // geoip2 dependencies: api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") - api('com.maxmind.db:maxmind-db:2.0.0') + api('com.maxmind.db:maxmind-db:2.1.0') testImplementation 'org.elasticsearch:geolite2-databases:20191119' } diff --git a/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 deleted file mode 100644 index 32c18f89c6a29..0000000000000 --- a/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7e0fd82da0a160b7928ba214e699a7e6a74fff4 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-2.1.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-2.1.0.jar.sha1 new file mode 100644 index 0000000000000..3d9f6c443ec9f --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-db-2.1.0.jar.sha1 @@ -0,0 +1 @@ +5fb0a7c4677ba725149ed557df9d0809d1836b80 \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 3aa2bbb7dd2f6..9dbfd5d3fb822 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,9 +44,9 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.33.0' + api 'com.azure:azure-core:1.34.0' api 'com.azure:azure-storage-common:12.18.1' - api 'com.azure:azure-core-http-netty:1.12.4' + api 'com.azure:azure-core-http-netty:1.12.7' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 deleted file mode 100644 index 9077fc4ebf84b..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93f105c2e923f0ab90521cc0e6e729b9c8304ad8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.34.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.34.0.jar.sha1 new file 
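On the URLDecodeProcessor change above: URLDecoder.decode(String, Charset), available since Java 10, cannot fail with the checked UnsupportedEncodingException that the old String-based overload declared, so the try/catch wrappers disappear from both the processor and its test. A small round-trip sketch of the new code path:

import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

// With the Charset overloads the "unsupported encoding" failure mode is
// unrepresentable: StandardCharsets.UTF_8 is always present on the JVM.
String encoded = URLEncoder.encode("Hello Günter", StandardCharsets.UTF_8); // "Hello+G%C3%BCnter"
String decoded = URLDecoder.decode(encoded, StandardCharsets.UTF_8);
assert "Hello Günter".equals(decoded);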
mode 100644 index 0000000000000..df0341f5ce236 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.34.0.jar.sha1 @@ -0,0 +1 @@ +59827c9aeab1c67053fc598207781e56fb8709f6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 deleted file mode 100644 index 5cb180b20cf8b..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -70dcc08887f2d70a8f812bf00d4fa10390fab3fd \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.7.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.7.jar.sha1 new file mode 100644 index 0000000000000..0c6588c512e29 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.7.jar.sha1 @@ -0,0 +1 @@ +e7739b5c0d9c968afcb6100f15f3491143d47814 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 9528537a3dd5e..561119e9e2c30 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -68,7 +68,7 @@ dependencies { api 'com.google.protobuf:protobuf-java-util:3.20.0' api 'com.google.protobuf:protobuf-java:3.21.7' api 'com.google.code.gson:gson:2.9.0' - api 'com.google.api.grpc:proto-google-common-protos:2.8.0' + api 'com.google.api.grpc:proto-google-common-protos:2.10.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api 'com.google.cloud:google-cloud-core-http:1.93.3' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" @@ -76,7 +76,7 @@ dependencies { api 'com.google.oauth-client:google-oauth-client:1.33.3' api 'com.google.api-client:google-api-client:1.34.0' api 'com.google.http-client:google-http-client-appengine:1.41.8' - api 'com.google.http-client:google-http-client-jackson2:1.35.0' + api 'com.google.http-client:google-http-client-jackson2:1.42.3' api 'com.google.http-client:google-http-client-gson:1.41.4' api 'com.google.api:gax-httpjson:0.103.1' api 'io.grpc:grpc-context:1.46.0' diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 deleted file mode 100644 index 0342f57779315..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c1c2a08792b935f3345590783ada872f4a0997f1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 new file mode 100644 index 0000000000000..34d7d49f7b147 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 @@ -0,0 +1 @@ +789cafde696403b429026bf19071caf46d8c8934 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 new file mode 100644 index 0000000000000..bf97707836c70 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 @@ -0,0 +1 @@ +cf5ac081c05682b0eba6659dee55352fde5852e1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 deleted file mode 
100644 index 3f14d9e59c9e9..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8adcbc3c5c3b1b7af1cf1e8a25af26a516d62a4c \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 0fd28154fc2fb..e5d65c9451c1f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -69,12 +69,12 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.10' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.21.7' + api 'com.google.protobuf:protobuf-java:3.21.9' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' - api 'org.apache.commons:commons-compress:1.21' + api 'org.apache.commons:commons-compress:1.22' api 'org.apache.commons:commons-configuration2:2.8.0' api 'commons-io:commons-io:2.11.0' api 'org.apache.commons:commons-lang3:3.12.0' diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 deleted file mode 100644 index 81ac609a1aa26..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ec95b60d4e86b5c95a0e919cb172a0af98011ef \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.22.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.22.jar.sha1 new file mode 100644 index 0000000000000..9ab7216c8050a --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-compress-1.22.jar.sha1 @@ -0,0 +1 @@ +691a8b4e6cf4248c3bc72c8b719337d5cb7359fa \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 deleted file mode 100644 index faa673a23ef41..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 new file mode 100644 index 0000000000000..2e03dbe5dafd0 --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 @@ -0,0 +1 @@ +ed1240d9231044ce6ccf1978512f6e44416bb7e7 \ No newline at end of file diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index e50ca63c3da69..82aa4cd511ef1 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -38,9 +38,6 @@ apply plugin: 'opensearch.standalone-test' apply from : "$rootDir/gradle/bwc-test.gradle" for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { - if (bwcVersion.before('6.3.0')) { - continue; - } String baseName = "v${bwcVersion}" testClusters { diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index fdde0997df371..67710095d30bc 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -41,9 +41,6 @@ dependencies { } for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { - if (bwcVersion.before('6.3.0')) { - continue; - } String baseName = 
"v${bwcVersion}" String oldClusterName = "${baseName}-old" String newClusterName = "${baseName}-new" @@ -76,28 +73,20 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { systemProperty 'tests.rest.suite', 'step2' } - // Step 3 and Step 4 registered for versions for OpenSearch - // since the ES cluster would not be able to read snapshots from OpenSearch cluster in Step 3. - if (bwcVersion.after('7.10.2')) { - tasks.register("${baseName}#Step3OldClusterTest", StandaloneRestIntegTestTask) { + tasks.register("${baseName}#Step3OldClusterTest", StandaloneRestIntegTestTask) { useCluster testClusters."${oldClusterName}" dependsOn "${baseName}#Step2NewClusterTest" systemProperty 'tests.rest.suite', 'step3' } - tasks.register("${baseName}#Step4NewClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${newClusterName}" - dependsOn "${baseName}#Step3OldClusterTest" - systemProperty 'tests.rest.suite', 'step4' - } + tasks.register("${baseName}#Step4NewClusterTest", StandaloneRestIntegTestTask) { + useCluster testClusters."${newClusterName}" + dependsOn "${baseName}#Step3OldClusterTest" + systemProperty 'tests.rest.suite', 'step4' + } - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#Step4NewClusterTest") - } - } else { - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#Step2NewClusterTest") - } + tasks.register(bwcTaskName(bwcVersion)) { + dependsOn tasks.named("${baseName}#Step4NewClusterTest") } tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach { diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 27a3b07157d21..8b0dd20899862 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -38,9 +38,6 @@ apply plugin: 'opensearch.standalone-test' apply from : "$rootDir/gradle/bwc-test.gradle" for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { - if (bwcVersion.before('6.3.0')) { - continue; - } String baseName = "v${bwcVersion}" testClusters { diff --git a/server/build.gradle b/server/build.gradle index a660bc54ea1d4..9c33199f99d4d 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -31,7 +31,7 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.build' -apply plugin: 'nebula.optional-base' +apply plugin: 'com.netflix.nebula.optional-base' apply plugin: 'opensearch.publish' apply plugin: 'opensearch.internal-cluster-test' diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index 13595cae4be8c..aa0f90bc4a6d9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -229,7 +229,6 @@ public void testInvariantsAndLogsOnDecommissionedNodes() throws Exception { logger.info("--> starting decommissioning nodes in zone {}", 'a'); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "a"); - String activeNode = getNonDecommissionedNode(internalCluster().clusterService().state(), "a"); DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); decommissionRequest.setNoDelay(true); DecommissionResponse 
decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); @@ -239,6 +238,7 @@ public void testInvariantsAndLogsOnDecommissionedNodes() throws Exception { client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); String decommissionedNode = randomFrom(clusterManagerNodes.get(0), dataNodes.get(0)); + String activeNode = dataNodes.get(1); ClusterService decommissionedNodeClusterService = internalCluster().getInstance(ClusterService.class, decommissionedNode); DecommissionAttributeMetadata metadata = decommissionedNodeClusterService.state() @@ -278,7 +278,7 @@ public boolean innerMatch(LogEvent event) { ); TransportService clusterManagerTransportService = internalCluster().getInstance( TransportService.class, - internalCluster().getClusterManagerName() + internalCluster().getClusterManagerName(activeNode) ); MockTransportService decommissionedNodeTransportService = (MockTransportService) internalCluster().getInstance( TransportService.class, @@ -385,17 +385,27 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana clusterManagerNameToZone.put(clusterManagerNodes.get(2), "c"); logger.info("--> starting 4 data nodes each on zones 'a' & 'b' & 'c'"); - List nodes_in_zone_a = internalCluster().startDataOnlyNodes( - dataNodeCountPerAZ, - Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + Map> zoneToNodesMap = new HashMap<>(); + zoneToNodesMap.put( + "a", + internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ) ); - List nodes_in_zone_b = internalCluster().startDataOnlyNodes( - dataNodeCountPerAZ, - Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + zoneToNodesMap.put( + "b", + internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ) ); - List nodes_in_zone_c = internalCluster().startDataOnlyNodes( - dataNodeCountPerAZ, - Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + zoneToNodesMap.put( + "c", + internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ) ); ensureStableCluster(15); ClusterHealthResponse health = client().admin() @@ -420,7 +430,20 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana tempZones.remove(originalClusterManagerZone); zoneToDecommission = randomFrom(tempZones); } - String activeNode = getNonDecommissionedNode(internalCluster().clusterService().state(), zoneToDecommission); + String activeNode; + switch (zoneToDecommission) { + case "a": + activeNode = randomFrom(randomFrom(zoneToNodesMap.get("b")), randomFrom(zoneToNodesMap.get("c"))); + break; + case "b": + activeNode = randomFrom(randomFrom(zoneToNodesMap.get("a")), randomFrom(zoneToNodesMap.get("c"))); + break; + case "c": + activeNode = randomFrom(randomFrom(zoneToNodesMap.get("a")), randomFrom(zoneToNodesMap.get("b"))); + break; + default: + throw new IllegalStateException("unexpected zone decommissioned"); + } logger.info("--> setting shard routing weights for weighted round robin"); Map weights = new HashMap<>(Map.of("a", 1.0, "b", 1.0, "c", 1.0)); @@ -631,8 +654,8 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx assertTrue(weightedRoutingResponse.isAcknowledged()); logger.info("--> starting decommissioning nodes in zone {}", 'c'); + 
String activeNode = randomFrom(dataNodes.get(0), dataNodes.get(1)); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); - String activeNode = getNonDecommissionedNode(internalCluster().clusterService().state(), "c"); // Set the timeout to 0 to do immediate Decommission DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); decommissionRequest.setNoDelay(true); @@ -860,16 +883,6 @@ public void testDecommissionFailedWithOnlyOneAttributeValue() throws Exception { ensureStableCluster(6, TimeValue.timeValueMinutes(2)); } - private String getNonDecommissionedNode(ClusterState clusterState, String decommissionedZone) { - List allNodes = new ArrayList<>(); - for (DiscoveryNode node : clusterState.nodes()) { - if (node.getAttributes().get("zone").equals(decommissionedZone) == false) { - allNodes.add(node.getName()); - } - } - return randomFrom(allNodes); - } - private static class WaitForFailedDecommissionState implements ClusterStateObserver.Listener { final CountDownLatch doneLatch; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java new file mode 100644 index 0000000000000..f8629e2c88b07 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -0,0 +1,313 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure; + +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.backpressure.settings.NodeDuressSettings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; +import org.opensearch.search.backpressure.trackers.CpuUsageTracker; +import org.opensearch.search.backpressure.trackers.ElapsedTimeTracker; +import org.opensearch.search.backpressure.trackers.HeapUsageTracker; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancelledException; +import org.opensearch.tasks.TaskId; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static 
org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) +public class SearchBackpressureIT extends OpenSearchIntegTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(10, TimeUnit.SECONDS); + + @Override + protected Collection> nodePlugins() { + final List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(TestPlugin.class); + return plugins; + } + + @Before + public final void setupNodeSettings() { + Settings request = Settings.builder() + .put(NodeDuressSettings.SETTING_CPU_THRESHOLD.getKey(), 0.0) + .put(NodeDuressSettings.SETTING_HEAP_THRESHOLD.getKey(), 0.0) + .put(NodeDuressSettings.SETTING_NUM_SUCCESSIVE_BREACHES.getKey(), 1) + .put(SearchShardTaskSettings.SETTING_TOTAL_HEAP_PERCENT_THRESHOLD.getKey(), 0.0) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + } + + @After + public final void cleanupNodeSettings() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testSearchShardTaskCancellationWithHighElapsedTime() throws InterruptedException { + Settings request = Settings.builder() + .put(SearchBackpressureSettings.SETTING_MODE.getKey(), "enforced") + .put(ElapsedTimeTracker.SETTING_ELAPSED_TIME_MILLIS_THRESHOLD.getKey(), 1000) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGH_ELAPSED_TIME), listener); + assertTrue(listener.latch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + + Exception caughtException = listener.getException(); + assertNotNull("SearchShardTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + MatcherAssert.assertThat(caughtException.getMessage(), containsString("elapsed time exceeded")); + } + + public void testSearchShardTaskCancellationWithHighCpu() throws InterruptedException { + Settings request = Settings.builder() + .put(SearchBackpressureSettings.SETTING_MODE.getKey(), "enforced") + .put(CpuUsageTracker.SETTING_CPU_TIME_MILLIS_THRESHOLD.getKey(), 1000) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGH_CPU), listener); + assertTrue(listener.latch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + + Exception caughtException = listener.getException(); + assertNotNull("SearchShardTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + MatcherAssert.assertThat(caughtException.getMessage(), containsString("cpu usage exceeded")); + } + + public void testSearchShardTaskCancellationWithHighHeapUsage() throws InterruptedException { + // Before SearchBackpressureService cancels a task based on its heap usage, we need to build up the heap moving average + // To 
build up the heap moving average, we need to hit the same node with multiple requests and then hit the same node with a + // request having higher heap usage + String node = randomFrom(internalCluster().getNodeNames()); + final int MOVING_AVERAGE_WINDOW_SIZE = 10; + Settings request = Settings.builder() + .put(SearchBackpressureSettings.SETTING_MODE.getKey(), "enforced") + .put(HeapUsageTracker.SETTING_HEAP_PERCENT_THRESHOLD.getKey(), 0.0) + .put(HeapUsageTracker.SETTING_HEAP_VARIANCE_THRESHOLD.getKey(), 1.0) + .put(HeapUsageTracker.SETTING_HEAP_MOVING_AVERAGE_WINDOW_SIZE.getKey(), MOVING_AVERAGE_WINDOW_SIZE) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + for (int i = 0; i < MOVING_AVERAGE_WINDOW_SIZE; i++) { + client(node).execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGH_HEAP), listener); + } + + listener = new ExceptionCatchingListener(); + client(node).execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGHER_HEAP), listener); + assertTrue(listener.latch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + + Exception caughtException = listener.getException(); + assertNotNull("SearchShardTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + MatcherAssert.assertThat(caughtException.getMessage(), containsString("heap usage exceeded")); + } + + public void testSearchCancellationWithBackpressureDisabled() throws InterruptedException { + Settings request = Settings.builder().put(SearchBackpressureSettings.SETTING_MODE.getKey(), "monitor_only").build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGH_ELAPSED_TIME), listener); + // waiting for the TIMEOUT * 3 time for the request to complete and the latch to countdown. 
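+ // In monitor_only mode the SearchBackpressureService still tracks task
+ // resource usage but never cancels, so the request is expected to run to
+ // completion (after its full busy-wait timeout) and count the latch down.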
+ assertTrue( + "SearchShardTask should have been completed by now and countdown the latch", + listener.latch.await(TIMEOUT.getSeconds() * 3, TimeUnit.SECONDS) + ); + + Exception caughtException = listener.getException(); + assertNull("SearchShardTask shouldn't have cancelled for monitor_only mode", caughtException); + } + + private static class ExceptionCatchingListener implements ActionListener { + private final CountDownLatch latch; + private Exception exception = null; + + public ExceptionCatchingListener() { + this.latch = new CountDownLatch(1); + } + + @Override + public void onResponse(TestResponse r) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + this.exception = e; + latch.countDown(); + } + + private Exception getException() { + return exception; + } + } + + enum RequestType { + HIGH_CPU, + HIGH_HEAP, + HIGHER_HEAP, + HIGH_ELAPSED_TIME; + } + + public static class TestRequest extends ActionRequest { + private final RequestType type; + + public TestRequest(RequestType type) { + this.type = type; + } + + public TestRequest(StreamInput in) throws IOException { + super(in); + this.type = in.readEnum(RequestType.class); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new SearchShardTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeEnum(type); + } + + public RequestType getType() { + return this.type; + } + } + + public static class TestResponse extends ActionResponse { + public TestResponse() {} + + public TestResponse(StreamInput in) {} + + @Override + public void writeTo(StreamOutput out) throws IOException {} + } + + public static class TestTransportAction extends HandledTransportAction { + public static final ActionType ACTION = new ActionType<>("internal::test_action", TestResponse::new); + private final ThreadPool threadPool; + + @Inject + public TestTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) { + super(ACTION.name(), transportService, actionFilters, TestRequest::new); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, TestRequest request, ActionListener listener) { + threadPool.executor(ThreadPool.Names.SEARCH).execute(() -> { + try { + SearchShardTask searchShardTask = (SearchShardTask) task; + long startTime = System.nanoTime(); + + // Doing a busy-wait until task cancellation or timeout. + // We are running HIGH_HEAP requests to build up heap moving average and not expect it to get cancelled. 
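+ // HIGH_HEAP requests therefore exit after a single doWork() pass, recording
+ // one heap-usage sample each; the other request types keep spinning until
+ // they are cancelled or the overall timeout expires.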
+ do { + doWork(request); + } while (request.type != RequestType.HIGH_HEAP + && searchShardTask.isCancelled() == false + && (System.nanoTime() - startTime) < TIMEOUT.getNanos()); + + if (searchShardTask.isCancelled()) { + throw new TaskCancelledException(searchShardTask.getReasonCancelled()); + } else { + listener.onResponse(new TestResponse()); + } + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private void doWork(TestRequest request) throws InterruptedException { + switch (request.getType()) { + case HIGH_CPU: + long i = 0, j = 1, k = 1, iterations = 1000; + do { + j += i; + k *= j; + i++; + } while (i < iterations); + break; + case HIGH_HEAP: + Byte[] bytes = new Byte[100000]; + int[] ints = new int[1000]; + break; + case HIGHER_HEAP: + Byte[] more_bytes = new Byte[1000000]; + int[] more_ints = new int[10000]; + break; + case HIGH_ELAPSED_TIME: + Thread.sleep(100); + break; + } + } + } + + public static class TestPlugin extends Plugin implements ActionPlugin { + @Override + public List> getActions() { + return Collections.singletonList(new ActionHandler<>(TestTransportAction.ACTION, TestTransportAction.class)); + } + + @Override + public List> getClientActions() { + return Collections.singletonList(TestTransportAction.ACTION); + } + } +} diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index bb1186575cd01..4e667d0a9f3a5 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -655,8 +655,8 @@ public static OpenSearchException[] guessRootCauses(Throwable t) { * parsing exception because that is generally the most interesting * exception to return to the user. If that exception is caused by * an OpenSearchException we'd like to keep unwrapping because - * ElasticserachExceptions tend to contain useful information for - * the user. + * OpenSearchException instances tend to contain useful information + * for the user. */ Throwable cause = ex.getCause(); if (cause != null) { diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index b48384e9439ec..b9117589f0fe4 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -67,25 +67,25 @@ public class Version implements Comparable, ToXContentFragment { * All listed versions MUST be released versions, except the last major, the last minor and the last revison. ONLY those are required * as unreleased versions. * - * Example: assume the last release is 7.3.0 - * The unreleased last major is the next major release, e.g. _8_.0.0 - * The unreleased last minor is the current major with a upped minor: 7._4_.0 - * The unreleased revision is the very release with a upped revision 7.3._1_ + * Example: assume the last release is 2.4.0 + * The unreleased last major is the next major release, e.g. 
_3_.0.0 + * The unreleased last minor is the current major with a upped minor: 2._5_.0 + * The unreleased revision is the very release with a upped revision 2.4._1_ */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); + // RELEASED public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_2_0_2 = new Version(2000299, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); - public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1); + + // UNRELEASED + public static final Version V_2_4_1 = new Version(2040199, org.apache.lucene.util.Version.LUCENE_9_4_1); public static final Version V_2_5_0 = new Version(2050099, org.apache.lucene.util.Version.LUCENE_9_4_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version CURRENT = V_3_0_0; diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java index 212450515b57e..af0408453e652 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java @@ -53,6 +53,7 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.function.Consumer; import java.util.function.Function; @@ -78,6 +79,8 @@ public final class BulkRequestParser { private static final ParseField IF_PRIMARY_TERM = new ParseField("if_primary_term"); private static final ParseField REQUIRE_ALIAS = new ParseField(DocWriteRequest.REQUIRE_ALIAS); + private static final Set VALID_ACTIONS = Set.of("create", "delete", "index", "update"); + private static int findNextMarker(byte marker, int from, BytesReference data) { final int res = data.indexOf(marker, from); if (res != -1) { @@ -177,6 +180,15 @@ public void parse( ); } String action = parser.currentName(); + if (action == null || VALID_ACTIONS.contains(action) == false) { + throw new IllegalArgumentException( + "Malformed action/metadata line [" + + line + + "], expected one of [create, delete, index, update] but found [" + + action + + "]" + ); + } String index = defaultIndex; String id = null; diff --git a/server/src/main/java/org/opensearch/common/Classes.java b/server/src/main/java/org/opensearch/common/Classes.java index 1b297639aff6a..1fb7fde5f963b 100644 --- a/server/src/main/java/org/opensearch/common/Classes.java +++ b/server/src/main/java/org/opensearch/common/Classes.java @@ 
-41,25 +41,6 @@ */ public class Classes { - /** - * The package separator character '.' - */ - private static final char PACKAGE_SEPARATOR = '.'; - - /** - * Determine the name of the package of the given class: - * e.g. "java.lang" for the java.lang.String class. - * - * @param clazz the class - * @return the package name, or the empty String if the class - * is defined in the default package - */ - public static String getPackageName(Class clazz) { - String className = clazz.getName(); - int lastDotIndex = className.lastIndexOf(PACKAGE_SEPARATOR); - return (lastDotIndex != -1 ? className.substring(0, lastDotIndex) : ""); - } - public static boolean isInnerClass(Class clazz) { return !Modifier.isStatic(clazz.getModifiers()) && clazz.getEnclosingClass() != null; } diff --git a/server/src/main/java/org/opensearch/common/LegacyTimeBasedUUIDGenerator.java b/server/src/main/java/org/opensearch/common/LegacyTimeBasedUUIDGenerator.java deleted file mode 100644 index 1e2d9b87281d6..0000000000000 --- a/server/src/main/java/org/opensearch/common/LegacyTimeBasedUUIDGenerator.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common; - -import java.util.Base64; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * These are essentially flake ids, but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. - * For more information about flake ids, check out - * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ - * - * @opensearch.internal - */ - -class LegacyTimeBasedUUIDGenerator implements UUIDGenerator { - - // We only use bottom 3 bytes for the sequence number. Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips - // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - private final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); - - // Used to ensure clock moves forward: - private long lastTimestamp; - - private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); - - static { - assert SECURE_MUNGED_ADDRESS.length == 6; - } - - /** Puts the lower numberOfLongBytes from l into the array, starting index pos. 
*/ - private static void putLong(byte[] array, long l, int pos, int numberOfLongBytes) { - for (int i = 0; i < numberOfLongBytes; ++i) { - array[pos + numberOfLongBytes - i - 1] = (byte) (l >>> (i * 8)); - } - } - - @Override - public String getBase64UUID() { - final int sequenceId = sequenceNumber.incrementAndGet() & 0xffffff; - long timestamp = System.currentTimeMillis(); - - synchronized (this) { - // Don't let timestamp go backwards, at least "on our watch" (while this JVM is running). We are still vulnerable if we are - // shut down, clock goes backwards, and we restart... for this we randomize the sequenceNumber on init to decrease chance of - // collision: - timestamp = Math.max(lastTimestamp, timestamp); - - if (sequenceId == 0) { - // Always force the clock to increment whenever sequence number is 0, in case we have a long time-slip backwards: - timestamp++; - } - - lastTimestamp = timestamp; - } - - final byte[] uuidBytes = new byte[15]; - - // Only use lower 6 bytes of the timestamp (this will suffice beyond the year 10000): - putLong(uuidBytes, timestamp, 0, 6); - - // MAC address adds 6 bytes: - System.arraycopy(SECURE_MUNGED_ADDRESS, 0, uuidBytes, 6, SECURE_MUNGED_ADDRESS.length); - - // Sequence number adds 3 bytes: - putLong(uuidBytes, sequenceId, 12, 3); - - assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length; - - return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes); - } -} diff --git a/server/src/main/java/org/opensearch/common/Numbers.java b/server/src/main/java/org/opensearch/common/Numbers.java index 7a87cd58b0e29..dbcde890e8fe2 100644 --- a/server/src/main/java/org/opensearch/common/Numbers.java +++ b/server/src/main/java/org/opensearch/common/Numbers.java @@ -57,28 +57,6 @@ public static long bytesToLong(BytesRef bytes) { return (((long) high) << 32) | (low & 0x0ffffffffL); } - public static byte[] intToBytes(int val) { - byte[] arr = new byte[4]; - arr[0] = (byte) (val >>> 24); - arr[1] = (byte) (val >>> 16); - arr[2] = (byte) (val >>> 8); - arr[3] = (byte) (val); - return arr; - } - - /** - * Converts an int to a byte array. - * - * @param val The int to convert to a byte array - * @return The byte array converted - */ - public static byte[] shortToBytes(int val) { - byte[] arr = new byte[2]; - arr[0] = (byte) (val >>> 8); - arr[1] = (byte) (val); - return arr; - } - /** * Converts a long to a byte array. * @@ -98,16 +76,6 @@ public static byte[] longToBytes(long val) { return arr; } - /** - * Converts a double to a byte array. - * - * @param val The double to convert to a byte array - * @return The byte array converted - */ - public static byte[] doubleToBytes(double val) { - return longToBytes(Double.doubleToRawLongBits(val)); - } - /** Returns true if value is neither NaN nor infinite. 
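An aside on the Numbers cleanup above: intToBytes, shortToBytes, and doubleToBytes are removed without any call-site changes elsewhere in this diff, which suggests they were dead code. Should a caller ever need the same big-endian encodings, the JDK covers them directly; a sketch under that assumption:

import java.nio.ByteBuffer;

// ByteBuffer writes big-endian by default, matching the byte order the
// removed helpers produced by hand with shifts and masks.
byte[] intBytes = ByteBuffer.allocate(Integer.BYTES).putInt(42).array();
byte[] doubleBytes = ByteBuffer.allocate(Double.BYTES).putDouble(6.02e23).array();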
*/ public static boolean isValidDouble(double value) { if (Double.isNaN(value) || Double.isInfinite(value)) { diff --git a/server/src/main/java/org/opensearch/common/RandomBasedUUIDGenerator.java b/server/src/main/java/org/opensearch/common/RandomBasedUUIDGenerator.java index fdc53d8335c2f..f83ef930688f8 100644 --- a/server/src/main/java/org/opensearch/common/RandomBasedUUIDGenerator.java +++ b/server/src/main/java/org/opensearch/common/RandomBasedUUIDGenerator.java @@ -32,9 +32,6 @@ package org.opensearch.common; -import org.opensearch.common.settings.SecureString; - -import java.util.Arrays; import java.util.Base64; import java.util.Random; @@ -54,27 +51,6 @@ public String getBase64UUID() { return getBase64UUID(SecureRandomHolder.INSTANCE); } - /** - * Returns a Base64 encoded {@link SecureString} of a Version 4.0 compatible UUID - * as defined here: http://www.ietf.org/rfc/rfc4122.txt - */ - public SecureString getBase64UUIDSecureString() { - byte[] uuidBytes = null; - byte[] encodedBytes = null; - try { - uuidBytes = getUUIDBytes(SecureRandomHolder.INSTANCE); - encodedBytes = Base64.getUrlEncoder().withoutPadding().encode(uuidBytes); - return new SecureString(CharArrays.utf8BytesToChars(encodedBytes)); - } finally { - if (uuidBytes != null) { - Arrays.fill(uuidBytes, (byte) 0); - } - if (encodedBytes != null) { - Arrays.fill(encodedBytes, (byte) 0); - } - } - } - /** * Returns a Base64 encoded version of a Version 4.0 compatible UUID * randomly initialized by the given {@link java.util.Random} instance diff --git a/server/src/main/java/org/opensearch/common/Strings.java b/server/src/main/java/org/opensearch/common/Strings.java index 68b22589de76e..7ec053522c5a6 100644 --- a/server/src/main/java/org/opensearch/common/Strings.java +++ b/server/src/main/java/org/opensearch/common/Strings.java @@ -80,67 +80,6 @@ public static void spaceify(int spaces, String from, StringBuilder to) throws Ex } } - /** - * Splits a backslash escaped string on the separator. - *

- * Current backslash escaping supported:
- * \n \t \r \b \f are escaped the same as a Java String
- *
Other characters following a backslash are produced verbatim (\c => c) - * - * @param s the string to split - * @param separator the separator to split on - * @param decode decode backslash escaping - */ - public static List splitSmart(String s, String separator, boolean decode) { - ArrayList lst = new ArrayList<>(2); - StringBuilder sb = new StringBuilder(); - int pos = 0, end = s.length(); - while (pos < end) { - if (s.startsWith(separator, pos)) { - if (sb.length() > 0) { - lst.add(sb.toString()); - sb = new StringBuilder(); - } - pos += separator.length(); - continue; - } - - char ch = s.charAt(pos++); - if (ch == '\\') { - if (!decode) sb.append(ch); - if (pos >= end) break; // ERROR, or let it go? - ch = s.charAt(pos++); - if (decode) { - switch (ch) { - case 'n': - ch = '\n'; - break; - case 't': - ch = '\t'; - break; - case 'r': - ch = '\r'; - break; - case 'b': - ch = '\b'; - break; - case 'f': - ch = '\f'; - break; - } - } - } - - sb.append(ch); - } - - if (sb.length() > 0) { - lst.add(sb.toString()); - } - - return lst; - } - // --------------------------------------------------------------------- // General convenience methods for working with Strings // --------------------------------------------------------------------- @@ -303,7 +242,7 @@ public static String replace(String inString, String oldPattern, String newPatte // the index of an occurrence we've found, or -1 int patLen = oldPattern.length(); while (index >= 0) { - sb.append(inString.substring(pos, index)); + sb.append(inString, pos, index); sb.append(newPattern); pos = index + patLen; index = inString.indexOf(oldPattern, pos); @@ -875,10 +814,6 @@ public static boolean isNullOrEmpty(@Nullable String s) { return s == null || s.isEmpty(); } - public static String coalesceToEmpty(@Nullable String s) { - return s == null ? "" : s; - } - public static String padStart(String s, int minimumLength, char c) { if (s == null) { throw new NullPointerException("s"); diff --git a/server/src/main/java/org/opensearch/common/UUIDs.java b/server/src/main/java/org/opensearch/common/UUIDs.java index a04a10430254f..c7d14878e8bd4 100644 --- a/server/src/main/java/org/opensearch/common/UUIDs.java +++ b/server/src/main/java/org/opensearch/common/UUIDs.java @@ -32,8 +32,6 @@ package org.opensearch.common; -import org.opensearch.common.settings.SecureString; - import java.util.Random; /** @@ -44,7 +42,6 @@ public class UUIDs { private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); - private static final UUIDGenerator LEGACY_TIME_UUID_GENERATOR = new LegacyTimeBasedUUIDGenerator(); private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); /** Generates a time-based UUID (similar to Flake IDs), which is preferred when generating an ID to be indexed into a Lucene index as @@ -53,11 +50,6 @@ public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } - /** Legacy implementation of {@link #base64UUID()}, for pre 6.0 indices. 
*/ - public static String legacyBase64UUID() { - return LEGACY_TIME_UUID_GENERATOR.getBase64UUID(); - } - /** Returns a Base64 encoded version of a Version 4.0 compatible UUID as defined here: http://www.ietf.org/rfc/rfc4122.txt, using the * provided {@code Random} instance */ public static String randomBase64UUID(Random random) { @@ -70,9 +62,4 @@ public static String randomBase64UUID() { return RANDOM_UUID_GENERATOR.getBase64UUID(); } - /** Returns a Base64 encoded {@link SecureString} of a Version 4.0 compatible UUID as defined here: http://www.ietf.org/rfc/rfc4122.txt, - * using a private {@code SecureRandom} instance */ - public static SecureString randomBase64UUIDSecureString() { - return RANDOM_UUID_GENERATOR.getBase64UUIDSecureString(); - } } diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 7b69dff020bc4..66a18ee0bddfb 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -32,20 +32,14 @@ package org.opensearch.common.lucene; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.NumericDocValuesField; -import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; import org.apache.lucene.index.FilterCodecReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.FilterLeafReader; @@ -55,21 +49,12 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.LeafMetaData; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.PointValues; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.index.StoredFieldVisitor; -import org.apache.lucene.index.Terms; -import org.apache.lucene.index.VectorValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FieldDoc; @@ -142,18 +127,6 @@ public class Lucene { private Lucene() {} - public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) { - if (version == null) { - return defaultVersion; - } - try { - return Version.parse(version); - } catch (ParseException e) { - logger.warn(() -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e); - return defaultVersion; - } - } - /** * Reads the segments infos, failing if it fails to load */ @@ -697,34 +670,6 @@ 
public static boolean indexExists(final Directory directory) throws IOException {
         return DirectoryReader.indexExists(directory);
     }
 
-    /**
-     * Wait for an index to exist for up to {@code timeLimitMillis}. Returns
-     * true if the index eventually exists, false if not.
-     *
-     * Will retry the directory every second for at least {@code timeLimitMillis}
-     */
-    public static boolean waitForIndex(final Directory directory, final long timeLimitMillis) throws IOException {
-        final long DELAY = 1000;
-        long waited = 0;
-        try {
-            while (true) {
-                if (waited >= timeLimitMillis) {
-                    break;
-                }
-                if (indexExists(directory)) {
-                    return true;
-                }
-                Thread.sleep(DELAY);
-                waited += DELAY;
-            }
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            return false;
-        }
-        // one more try after all retries
-        return indexExists(directory);
-    }
-
     /**
      * Returns {@code true} iff the given exception or
      * one of its causes is an instance of {@link CorruptIndexException},
@@ -1024,92 +969,4 @@ public static NumericDocValuesField newSoftDeletesField() {
         return new NumericDocValuesField(SOFT_DELETES_FIELD, 1);
     }
 
-    /**
-     * Returns an empty leaf reader with the given max docs. The reader will be fully deleted.
-     */
-    public static LeafReader emptyReader(final int maxDoc) {
-        return new LeafReader() {
-            final Bits liveDocs = new Bits.MatchNoBits(maxDoc);
-
-            public Terms terms(String field) {
-                return null;
-            }
-
-            public NumericDocValues getNumericDocValues(String field) {
-                return null;
-            }
-
-            public BinaryDocValues getBinaryDocValues(String field) {
-                return null;
-            }
-
-            public SortedDocValues getSortedDocValues(String field) {
-                return null;
-            }
-
-            public SortedNumericDocValues getSortedNumericDocValues(String field) {
-                return null;
-            }
-
-            public SortedSetDocValues getSortedSetDocValues(String field) {
-                return null;
-            }
-
-            public NumericDocValues getNormValues(String field) {
-                return null;
-            }
-
-            public FieldInfos getFieldInfos() {
-                return new FieldInfos(new FieldInfo[0]);
-            }
-
-            public Bits getLiveDocs() {
-                return this.liveDocs;
-            }
-
-            public PointValues getPointValues(String fieldName) {
-                return null;
-            }
-
-            public void checkIntegrity() {}
-
-            public Fields getTermVectors(int docID) {
-                return null;
-            }
-
-            public int numDocs() {
-                return 0;
-            }
-
-            public int maxDoc() {
-                return maxDoc;
-            }
-
-            public void document(int docID, StoredFieldVisitor visitor) {}
-
-            protected void doClose() {}
-
-            public LeafMetaData getMetaData() {
-                return new LeafMetaData(Version.LATEST.major, Version.LATEST, null);
-            }
-
-            public CacheHelper getCoreCacheHelper() {
-                return null;
-            }
-
-            public CacheHelper getReaderCacheHelper() {
-                return null;
-            }
-
-            @Override
-            public VectorValues getVectorValues(String field) throws IOException {
-                return null;
-            }
-
-            @Override
-            public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException {
-                return null;
-            }
-        };
-    }
 }
diff --git a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java
index a893fcecf5b88..6986bd8504f84 100644
--- a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java
+++ b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java
@@ -323,10 +323,6 @@ private boolean termArraysEquals(List<Term[]> termArrays1, List<Term[]> termArrays2) {
         return true;
     }
 
-    public String getField() {
-        return field;
-    }
-
     @Override
     public void visit(QueryVisitor visitor) {
         visitor.visitLeaf(this);
diff --git a/server/src/main/java/org/opensearch/common/lucene/search/Queries.java b/server/src/main/java/org/opensearch/common/lucene/search/Queries.java
index 8b64a45b9db25..125eab9512be8 100644
--- a/server/src/main/java/org/opensearch/common/lucene/search/Queries.java
+++ b/server/src/main/java/org/opensearch/common/lucene/search/Queries.java
@@ -87,10 +87,6 @@ public static Query newLenientFieldQuery(String field, RuntimeException e) {
         return Queries.newMatchNoDocsQuery("failed [" + field + "] query, caused by " + message);
     }
 
-    public static Query newNestedFilter() {
-        return not(newNonNestedFilter());
-    }
-
     /**
      * Creates a new non-nested docs query
      */
diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/WeightFactorFunction.java b/server/src/main/java/org/opensearch/common/lucene/search/function/WeightFactorFunction.java
index c439b57de41cd..625833618b464 100644
--- a/server/src/main/java/org/opensearch/common/lucene/search/function/WeightFactorFunction.java
+++ b/server/src/main/java/org/opensearch/common/lucene/search/function/WeightFactorFunction.java
@@ -73,10 +73,6 @@ public WeightFactorFunction(float weight) {
         this(weight, null, null);
     }
 
-    public WeightFactorFunction(float weight, @Nullable String functionName) {
-        this(weight, null, functionName);
-    }
-
     @Override
     public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException {
         final LeafScoreFunction leafFunction = scoreFunction.getLeafScoreFunction(ctx);
diff --git a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java
index 5feb994171b65..f299da0c1ac1e 100644
--- a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java
+++ b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java
@@ -36,6 +36,7 @@
 import org.opensearch.common.io.stream.FilterStreamInput;
 import org.opensearch.common.io.stream.StreamInput;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;
@@ -117,7 +118,11 @@ public void reset() throws IOException {
 
     @Override
     public int read() throws IOException {
-        return readByte() & 0xFF;
+        try {
+            return readByte() & 0xFF;
+        } catch (EOFException e) {
+            return -1;
+        }
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/index/translog/Checkpoint.java b/server/src/main/java/org/opensearch/index/translog/Checkpoint.java
index ade28791b2e27..8df574ed8374f 100644
--- a/server/src/main/java/org/opensearch/index/translog/Checkpoint.java
+++ b/server/src/main/java/org/opensearch/index/translog/Checkpoint.java
@@ -59,7 +59,7 @@
  *
  * @opensearch.internal
  */
-final class Checkpoint {
+public final class Checkpoint {
 
     final long offset;
     final int numOps;
@@ -262,6 +262,14 @@ public synchronized byte[] toByteArray() {
         return byteOutputStream.toByteArray();
     }
 
+    public long getMinTranslogGeneration() {
+        return minTranslogGeneration;
+    }
+
+    public long getGeneration() {
+        return generation;
+    }
+
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
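The `read()` change above brings `BufferedChecksumStreamInput` in line with the `java.io.InputStream` contract, where a single-byte read reports end-of-stream as `-1` instead of letting an `EOFException` escape. A minimal, self-contained sketch of the drain loop that contract enables (plain JDK types only):

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReadToEof {
    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[] { 1, 2, 3 });
        int b;
        // Loops like this depend on read() returning -1 at end-of-stream; a
        // stream that throws EOFException instead would abort the drain.
        while ((b = in.read()) != -1) {
            System.out.println(b);
        }
    }
}
```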
diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogReader.java b/server/src/main/java/org/opensearch/index/translog/TranslogReader.java
index 9d22fe0a498eb..205229949da77 100644
--- a/server/src/main/java/org/opensearch/index/translog/TranslogReader.java
+++ b/server/src/main/java/org/opensearch/index/translog/TranslogReader.java
@@ -138,7 +138,7 @@ public int totalOperations() {
     }
 
     @Override
-    final Checkpoint getCheckpoint() {
+    public final Checkpoint getCheckpoint() {
         return checkpoint;
     }
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java
new file mode 100644
index 0000000000000..36d9d71217837
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ActionRunnable;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.blobstore.BlobStore;
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.concurrent.ExecutorService;
+
+/**
+ * Service that handles remote transfer of translog and checkpoint files
+ *
+ * @opensearch.internal
+ */
+public class BlobStoreTransferService implements TransferService {
+
+    private final BlobStore blobStore;
+    private final ExecutorService executorService;
+
+    private static final Logger logger = LogManager.getLogger(BlobStoreTransferService.class);
+
+    public BlobStoreTransferService(BlobStore blobStore, ExecutorService executorService) {
+        this.blobStore = blobStore;
+        this.executorService = executorService;
+    }
+
+    @Override
+    public void uploadBlobAsync(
+        final TransferFileSnapshot fileSnapshot,
+        Iterable<String> remoteTransferPath,
+        ActionListener<TransferFileSnapshot> listener
+    ) {
+        assert remoteTransferPath instanceof BlobPath;
+        BlobPath blobPath = (BlobPath) remoteTransferPath;
+        executorService.execute(ActionRunnable.wrap(listener, l -> {
+            try (InputStream inputStream = fileSnapshot.inputStream()) {
+                blobStore.blobContainer(blobPath)
+                    .writeBlobAtomic(fileSnapshot.getName(), inputStream, fileSnapshot.getContentLength(), true);
+                l.onResponse(fileSnapshot);
+            } catch (Exception e) {
+                logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), e);
+                l.onFailure(new FileTransferException(fileSnapshot, e));
+            }
+        }));
+    }
+
+    @Override
+    public void uploadBlob(final TransferFileSnapshot fileSnapshot, Iterable<String> remoteTransferPath) throws IOException {
+        assert remoteTransferPath instanceof BlobPath;
+        BlobPath blobPath = (BlobPath) remoteTransferPath;
+        try (InputStream inputStream = fileSnapshot.inputStream()) {
+            blobStore.blobContainer(blobPath).writeBlobAtomic(fileSnapshot.getName(), inputStream, fileSnapshot.getContentLength(), true);
+        }
+    }
+}
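A hedged sketch of how the new service might be driven by a caller; the blob store, executor, base path name, and primary term are assumptions supplied by the caller rather than anything this change prescribes:

```java
import java.nio.file.Path;
import java.util.concurrent.ExecutorService;

import org.opensearch.action.ActionListener;
import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.common.blobstore.BlobStore;
import org.opensearch.index.translog.transfer.BlobStoreTransferService;
import org.opensearch.index.translog.transfer.TransferService;
import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;

class TransferServiceUsage {
    // Uploads one file asynchronously; success or failure arrives on the listener.
    static void upload(BlobStore blobStore, ExecutorService executor, Path file, long primaryTerm) throws Exception {
        TransferService service = new BlobStoreTransferService(blobStore, executor);
        TransferFileSnapshot snapshot = new TransferFileSnapshot(file, primaryTerm);
        service.uploadBlobAsync(
            snapshot,
            new BlobPath().add("base_path"), // illustrative path
            ActionListener.wrap(
                uploaded -> System.out.println("uploaded " + uploaded.getName()),
                e -> System.err.println("upload failed: " + e)
            )
        );
    }
}
```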
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java
new file mode 100644
index 0000000000000..e8c06e3d251c7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java
@@ -0,0 +1,224 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.common.Nullable;
+import org.opensearch.common.io.stream.BytesStreamInput;
+import org.opensearch.common.io.stream.InputStreamStreamInput;
+import org.opensearch.core.internal.io.IOUtils;
+import org.opensearch.index.translog.BufferedChecksumStreamInput;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.channels.Channels;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Arrays;
+import java.util.Objects;
+
+/**
+ * Snapshot of a single file that gets transferred
+ *
+ * @opensearch.internal
+ */
+public class FileSnapshot implements Closeable {
+
+    private final String name;
+    @Nullable
+    private final FileChannel fileChannel;
+    @Nullable
+    private Path path;
+    @Nullable
+    private byte[] content;
+
+    private FileSnapshot(Path path) throws IOException {
+        Objects.requireNonNull(path);
+        this.name = path.getFileName().toString();
+        this.path = path;
+        this.fileChannel = FileChannel.open(path, StandardOpenOption.READ);
+    }
+
+    private FileSnapshot(String name, byte[] content) {
+        Objects.requireNonNull(name);
+        this.name = name;
+        this.content = content;
+        this.fileChannel = null;
+    }
+
+    public Path getPath() {
+        return path;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public long getContentLength() throws IOException {
+        return fileChannel == null ? content.length : fileChannel.size();
+    }
+
+    public InputStream inputStream() throws IOException {
+        return fileChannel != null
+            ? new BufferedChecksumStreamInput(
+                new InputStreamStreamInput(Channels.newInputStream(fileChannel), fileChannel.size()),
+                path.toString()
+            )
+            : new BufferedChecksumStreamInput(new BytesStreamInput(content), name);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(name, Arrays.hashCode(content), path);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        FileSnapshot other = (FileSnapshot) o;
+        return Objects.equals(this.name, other.name)
+            && Arrays.equals(this.content, other.content)
+            && Objects.equals(this.path, other.path);
+    }
+
+    @Override
+    public String toString() {
+        return new StringBuilder("FileInfo [").append(" name = ")
+            .append(name)
+            .append(", path = ")
+            .append(path.toUri())
+            .append("]")
+            .toString();
+    }
+
+    @Override
+    public void close() throws IOException {
+        IOUtils.close(fileChannel);
+    }
+
+    /**
+     * Snapshot of a single file with primary term that gets transferred
+     *
+     * @opensearch.internal
+     */
+    public static class TransferFileSnapshot extends FileSnapshot {
+
+        private final long primaryTerm;
+
+        public TransferFileSnapshot(Path path, long primaryTerm) throws IOException {
+            super(path);
+            this.primaryTerm = primaryTerm;
+        }
+
+        public TransferFileSnapshot(String name, byte[] content, long primaryTerm) throws IOException {
+            super(name, content);
+            this.primaryTerm = primaryTerm;
+        }
+
+        public long getPrimaryTerm() {
+            return primaryTerm;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(primaryTerm, super.hashCode());
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (super.equals(o)) {
+                if (this == o) return true;
+                if (getClass() != o.getClass()) return false;
+                TransferFileSnapshot other = (TransferFileSnapshot) o;
+                return Objects.equals(this.primaryTerm, other.primaryTerm);
+            }
+            return false;
+        }
+    }
+
+    /**
+     * Snapshot of a single .tlog file that gets transferred
+     *
+     * @opensearch.internal
+     */
+    public static final class TranslogFileSnapshot extends TransferFileSnapshot {
+
+        private final long generation;
+
+        public TranslogFileSnapshot(long primaryTerm, long generation, Path path) throws IOException {
+            super(path, primaryTerm);
+            this.generation = generation;
+        }
+
+        public long getGeneration() {
+            return generation;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(generation, super.hashCode());
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (super.equals(o)) {
+                if (this == o) return true;
+                if (getClass() != o.getClass()) return false;
+                TranslogFileSnapshot other = (TranslogFileSnapshot) o;
+                return Objects.equals(this.generation, other.generation);
+            }
+            return false;
+        }
+    }
+
+    /**
+     * Snapshot of a single .ckp file that gets transferred
+     *
+     * @opensearch.internal
+     */
+    public static final class CheckpointFileSnapshot extends TransferFileSnapshot {
+
+        private final long generation;
+
+        private final long minTranslogGeneration;
+
+        public CheckpointFileSnapshot(long primaryTerm, long generation, long minTranslogGeneration, Path path) throws IOException {
+            super(path, primaryTerm);
+            this.minTranslogGeneration = minTranslogGeneration;
+            this.generation = generation;
+        }
+
+        public long getGeneration() {
+            return generation;
+        }
+
+        public long getMinTranslogGeneration() {
+            return minTranslogGeneration;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(generation, minTranslogGeneration, super.hashCode());
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (super.equals(o)) {
+                if (this == o) return true;
+                if (getClass() != o.getClass()) return false;
+                CheckpointFileSnapshot other = (CheckpointFileSnapshot) o;
+                return Objects.equals(this.minTranslogGeneration, other.minTranslogGeneration)
+                    && Objects.equals(this.generation, other.generation);
+            }
+            return false;
+        }
+    }
+}
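The three snapshot flavors mirror what one translog generation leaves on disk: a `translog-N.tlog` file and its `translog-N.ckp` checkpoint, both stamped with the primary term. A small sketch (file names and terms are illustrative, and the constructors open the files, so the paths must exist):

```java
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot;
import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;

class SnapshotPairSketch {
    static void describe(long primaryTerm, long generation) throws IOException {
        Path tlog = Paths.get("translog-" + generation + ".tlog");
        Path ckp = Paths.get("translog-" + generation + ".ckp");
        // FileSnapshot is Closeable because it holds an open FileChannel.
        try (
            TranslogFileSnapshot t = new TranslogFileSnapshot(primaryTerm, generation, tlog);
            CheckpointFileSnapshot c = new CheckpointFileSnapshot(primaryTerm, generation, generation, ckp)
        ) {
            System.out.println(t.getName() + " travels with " + c.getName());
        }
    }
}
```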
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferException.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferException.java
new file mode 100644
index 0000000000000..89a4135d2409b
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferException.java
@@ -0,0 +1,30 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+
+/**
+ * Exception when a single file transfer encounters a failure
+ *
+ * @opensearch.internal
+ */
+public class FileTransferException extends RuntimeException {
+
+    private final TransferFileSnapshot fileSnapshot;
+
+    public FileTransferException(TransferFileSnapshot fileSnapshot, Throwable cause) {
+        super(cause);
+        this.fileSnapshot = fileSnapshot;
+    }
+
+    public TransferFileSnapshot getFileSnapshot() {
+        return fileSnapshot;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java
new file mode 100644
index 0000000000000..ed6c185352833
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.action.ActionListener;
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+
+import java.io.IOException;
+
+/**
+ * Interface for the translog transfer service responsible for interacting with a remote store
+ *
+ * @opensearch.internal
+ */
+public interface TransferService {
+
+    /**
+     * Uploads the {@link TransferFileSnapshot} async, once the upload is complete the callback is invoked
+     * @param fileSnapshot the file snapshot to upload
+     * @param remotePath the remote path where upload should be made
+     * @param listener the callback to be invoked once upload completes successfully/fails
+     */
+    void uploadBlobAsync(
+        final TransferFileSnapshot fileSnapshot,
+        Iterable<String> remotePath,
+        ActionListener<TransferFileSnapshot> listener
+    );
+
+    /**
+     * Uploads the {@link TransferFileSnapshot} blob
+     * @param fileSnapshot the file snapshot to upload
+     * @param remotePath the remote path where upload should be made
+     * @throws IOException the exception while transferring the data
+     */
+    void uploadBlob(final TransferFileSnapshot fileSnapshot, Iterable<String> remotePath) throws IOException;
+
+}
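Because the interface is small, test doubles are cheap. A synchronous in-memory fake (my own sketch, not part of this change) that satisfies both methods:

```java
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.opensearch.action.ActionListener;
import org.opensearch.index.translog.transfer.TransferService;
import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;

// Records uploads by name instead of writing to a blob store.
class InMemoryTransferService implements TransferService {

    final Map<String, Long> uploaded = new ConcurrentHashMap<>();

    @Override
    public void uploadBlobAsync(
        final TransferFileSnapshot fileSnapshot,
        Iterable<String> remotePath,
        ActionListener<TransferFileSnapshot> listener
    ) {
        try {
            uploadBlob(fileSnapshot, remotePath);
            listener.onResponse(fileSnapshot);
        } catch (IOException e) {
            listener.onFailure(e);
        }
    }

    @Override
    public void uploadBlob(final TransferFileSnapshot fileSnapshot, Iterable<String> remotePath) throws IOException {
        uploaded.put(fileSnapshot.getName(), fileSnapshot.getContentLength());
    }
}
```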
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java
new file mode 100644
index 0000000000000..b4c1c97f04a7d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot;
+import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+
+import java.util.Set;
+
+/**
+ * The snapshot of the generational translog and checkpoint files and its corresponding metadata that is transferred
+ * to the {@link TransferService}
+ *
+ * @opensearch.internal
+ */
+public interface TransferSnapshot {
+
+    /**
+     * The snapshot of the checkpoint generational files
+     * @return the set of {@link CheckpointFileSnapshot}
+     */
+    Set<CheckpointFileSnapshot> getCheckpointFileSnapshots();
+
+    /**
+     * The snapshot of the translog generational files
+     * @return the set of {@link TranslogFileSnapshot}
+     */
+    Set<TranslogFileSnapshot> getTranslogFileSnapshots();
+
+    /**
+     * The translog transfer metadata of this {@link TransferSnapshot}
+     * @return the translog transfer metadata
+     */
+    TranslogTransferMetadata getTranslogTransferMetadata();
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java
new file mode 100644
index 0000000000000..30b81627614b7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java
@@ -0,0 +1,148 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.common.collect.Tuple;
+import org.opensearch.index.translog.TranslogReader;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+import static org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot;
+
+/**
+ * Implementation for a {@link TransferSnapshot} which builds the snapshot from the translog and checkpoint files present on the local disk
+ *
+ * @opensearch.internal
+ */
+public class TranslogCheckpointTransferSnapshot implements TransferSnapshot {
+
+    private final Set<Tuple<TranslogFileSnapshot, CheckpointFileSnapshot>> translogCheckpointFileInfoTupleSet;
+    private final int size;
+    private final long generation;
+    private final long primaryTerm;
+    private long minTranslogGeneration;
+
+    TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size) {
+        translogCheckpointFileInfoTupleSet = new HashSet<>(size);
+        this.size = size;
+        this.generation = generation;
+        this.primaryTerm = primaryTerm;
+    }
+
+    private void add(TranslogFileSnapshot translogFileSnapshot, CheckpointFileSnapshot checkPointFileSnapshot) {
+        translogCheckpointFileInfoTupleSet.add(Tuple.tuple(translogFileSnapshot, checkPointFileSnapshot));
+        assert translogFileSnapshot.getGeneration() == checkPointFileSnapshot.getGeneration();
+    }
+
+    private void setMinTranslogGeneration(long minTranslogGeneration) {
+        this.minTranslogGeneration = minTranslogGeneration;
+    }
+
+    @Override
+    public Set<TranslogFileSnapshot> getTranslogFileSnapshots() {
+        return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v1).collect(Collectors.toSet());
+    }
+
+    @Override
+    public TranslogTransferMetadata getTranslogTransferMetadata() {
+        return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, translogCheckpointFileInfoTupleSet.size() * 2);
+    }
+
+    @Override
+    public Set<CheckpointFileSnapshot> getCheckpointFileSnapshots() {
+        return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v2).collect(Collectors.toSet());
+    }
+
+    @Override
+    public String toString() {
+        return new StringBuilder("TranslogTransferSnapshot [").append(" primary term = ")
+            .append(primaryTerm)
+            .append(", generation = ")
+            .append(generation)
+            .append(" ]")
+            .toString();
+    }
+
+    /**
+     * Builder for {@link TranslogCheckpointTransferSnapshot}
+     */
+    public static class Builder {
+        private final long primaryTerm;
+        private final long generation;
+        private final List<TranslogReader> readers;
+        private final Function<Long, String> checkpointGenFileNameMapper;
+        private final Path location;
+
+        public Builder(
+            long primaryTerm,
+            long generation,
+            Path location,
+            List<TranslogReader> readers,
+            Function<Long, String> checkpointGenFileNameMapper
+        ) {
+            this.primaryTerm = primaryTerm;
+            this.generation = generation;
+            this.readers = readers;
+            this.checkpointGenFileNameMapper = checkpointGenFileNameMapper;
+            this.location = location;
+        }
+
+        public TranslogCheckpointTransferSnapshot build() throws IOException {
+            final List<Long> generations = new LinkedList<>();
+            long highestGeneration = Long.MIN_VALUE;
+            long highestGenPrimaryTerm = Long.MIN_VALUE;
+            long lowestGeneration = Long.MAX_VALUE;
+            long highestGenMinTranslogGeneration = Long.MIN_VALUE;
+            TranslogCheckpointTransferSnapshot translogTransferSnapshot = new TranslogCheckpointTransferSnapshot(
+                primaryTerm,
+                generation,
+                readers.size()
+            );
+            for (TranslogReader reader : readers) {
+                final long readerGeneration = reader.getGeneration();
+                final long readerPrimaryTerm = reader.getPrimaryTerm();
+                final long minTranslogGeneration = reader.getCheckpoint().getMinTranslogGeneration();
+                final long checkpointGeneration = reader.getCheckpoint().getGeneration();
+                Path translogPath = reader.path();
+                Path checkpointPath = location.resolve(checkpointGenFileNameMapper.apply(readerGeneration));
+                generations.add(readerGeneration);
+                translogTransferSnapshot.add(
+                    new TranslogFileSnapshot(readerPrimaryTerm, readerGeneration, translogPath),
+                    new CheckpointFileSnapshot(readerPrimaryTerm, checkpointGeneration, minTranslogGeneration, checkpointPath)
+                );
+                if (readerGeneration > highestGeneration) {
+                    highestGeneration = readerGeneration;
+                    highestGenMinTranslogGeneration = minTranslogGeneration;
+                    highestGenPrimaryTerm = readerPrimaryTerm;
+                }
+                lowestGeneration = Math.min(lowestGeneration, readerGeneration);
+            }
+            translogTransferSnapshot.setMinTranslogGeneration(highestGenMinTranslogGeneration);
+
+            assert this.primaryTerm == highestGenPrimaryTerm : "inconsistent primary term";
+            assert this.generation == highestGeneration : "inconsistent generation";
+            assert LongStream.iterate(lowestGeneration, i -> i + 1)
+                .limit(highestGeneration - lowestGeneration + 1)
+                .boxed()
+                .collect(Collectors.toList())
+                .equals(generations.stream().sorted().collect(Collectors.toList())) : "generation gaps found";
+            return translogTransferSnapshot;
+        }
+    }
+}
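A sketch of driving the builder, assuming the caller already holds the open `TranslogReader`s and the translog location, and that checkpoint files follow `translog-N.ckp` naming (an assumption of this example, supplied via the mapper):

```java
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;

import org.opensearch.index.translog.TranslogReader;
import org.opensearch.index.translog.transfer.TransferSnapshot;
import org.opensearch.index.translog.transfer.TranslogCheckpointTransferSnapshot;

class SnapshotBuilderSketch {
    static TransferSnapshot snapshotOf(long primaryTerm, long generation, Path location, List<TranslogReader> readers)
        throws IOException {
        // The mapper turns a generation number into its checkpoint file name.
        return new TranslogCheckpointTransferSnapshot.Builder(
            primaryTerm,
            generation,
            location,
            readers,
            gen -> "translog-" + gen + ".ckp"
        ).build();
    }
}
```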
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
new file mode 100644
index 0000000000000..02ebab8ed6826
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -0,0 +1,145 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.LatchedActionListener;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.index.translog.transfer.listener.FileTransferListener;
+import org.opensearch.index.translog.transfer.listener.TranslogTransferListener;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.UnaryOperator;
+import java.util.stream.Collectors;
+
+import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+
+/**
+ * The class responsible for orchestrating the transfer of a {@link TransferSnapshot} via a {@link TransferService}
+ *
+ * @opensearch.internal
+ */
+public class TranslogTransferManager {
+
+    private final TransferService transferService;
+    private final BlobPath remoteBaseTransferPath;
+    private final FileTransferListener fileTransferListener;
+    private final UnaryOperator<Set<TransferFileSnapshot>> exclusionFilter;
+
+    private static final long TRANSFER_TIMEOUT_IN_MILLIS = 30000;
+
+    private static final Logger logger = LogManager.getLogger(TranslogTransferManager.class);
+
+    public TranslogTransferManager(
+        TransferService transferService,
+        BlobPath remoteBaseTransferPath,
+        FileTransferListener fileTransferListener,
+        UnaryOperator<Set<TransferFileSnapshot>> exclusionFilter
+    ) {
+        this.transferService = transferService;
+        this.remoteBaseTransferPath = remoteBaseTransferPath;
+        this.fileTransferListener = fileTransferListener;
+        this.exclusionFilter = exclusionFilter;
+    }
+
+    public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTransferListener translogTransferListener)
+        throws IOException {
+        List<Exception> exceptionList = new ArrayList<>(transferSnapshot.getTranslogTransferMetadata().getCount());
+        Set<TransferFileSnapshot> toUpload = new HashSet<>(transferSnapshot.getTranslogTransferMetadata().getCount());
+        try {
+            toUpload.addAll(exclusionFilter.apply(transferSnapshot.getTranslogFileSnapshots()));
+            toUpload.addAll(exclusionFilter.apply(transferSnapshot.getCheckpointFileSnapshots()));
+            final CountDownLatch latch = new CountDownLatch(toUpload.size());
+            LatchedActionListener<TransferFileSnapshot> latchedActionListener = new LatchedActionListener<>(
+                ActionListener.wrap(fileTransferListener::onSuccess, ex -> {
+                    assert ex instanceof FileTransferException;
+                    logger.error(
+                        () -> new ParameterizedMessage(
+                            "Exception during transfer for file {}",
+                            ((FileTransferException) ex).getFileSnapshot().getName()
+                        ),
+                        ex
+                    );
+                    FileTransferException e = (FileTransferException) ex;
+                    fileTransferListener.onFailure(e.getFileSnapshot(), ex);
+                    exceptionList.add(ex);
+                }),
+                latch
+            );
+            toUpload.forEach(
+                fileSnapshot -> transferService.uploadBlobAsync(
+                    fileSnapshot,
+                    remoteBaseTransferPath.add(String.valueOf(fileSnapshot.getPrimaryTerm())),
+                    latchedActionListener
+                )
+            );
+            try {
+                if (latch.await(TRANSFER_TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS) == false) {
+                    Exception ex = new TimeoutException("Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete");
+                    exceptionList.forEach(ex::addSuppressed);
+                    throw ex;
+                }
+            } catch (InterruptedException ex) {
+                exceptionList.forEach(ex::addSuppressed);
+                Thread.currentThread().interrupt();
+                throw ex;
+            }
+            if (exceptionList.isEmpty()) {
+                final TransferFileSnapshot transferFileSnapshot = prepareMetadata(transferSnapshot);
+                transferService.uploadBlob(
+                    transferFileSnapshot,
+                    remoteBaseTransferPath.add(String.valueOf(transferFileSnapshot.getPrimaryTerm()))
+                );
+                translogTransferListener.onUploadComplete(transferSnapshot);
+                return true;
+            } else {
+                Exception ex = new RuntimeException("Failed to upload some files during transfer");
+                exceptionList.forEach(ex::addSuppressed);
+                throw ex;
+            }
+        } catch (Exception ex) {
+            logger.error(() -> new ParameterizedMessage("Transfer failed for snapshot {}", transferSnapshot), ex);
+            translogTransferListener.onUploadFailed(transferSnapshot, ex);
+            return false;
+        }
+    }
+
+    private TransferFileSnapshot prepareMetadata(TransferSnapshot transferSnapshot) throws IOException {
+        Map<String, String> generationPrimaryTermMap = transferSnapshot.getTranslogFileSnapshots().stream().map(s -> {
+            assert s instanceof TranslogFileSnapshot;
+            return (TranslogFileSnapshot) s;
+        })
+            .collect(
+                Collectors.toMap(
+                    snapshot -> String.valueOf(snapshot.getGeneration()),
+                    snapshot -> String.valueOf(snapshot.getPrimaryTerm())
+                )
+            );
+        TranslogTransferMetadata translogTransferMetadata = transferSnapshot.getTranslogTransferMetadata();
+        translogTransferMetadata.setGenerationToPrimaryTermMapper(new HashMap<>(generationPrimaryTermMap));
+        return new TransferFileSnapshot(
+            translogTransferMetadata.getFileName(),
+            translogTransferMetadata.createMetadataBytes(),
+            translogTransferMetadata.getPrimaryTerm()
+        );
+    }
+}
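Putting the new pieces together, a hedged end-to-end sketch; the listeners only log, and the identity exclusion filter uploads every file in the snapshot:

```java
import java.io.IOException;
import java.util.concurrent.ExecutorService;

import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.common.blobstore.BlobStore;
import org.opensearch.index.translog.transfer.BlobStoreTransferService;
import org.opensearch.index.translog.transfer.TransferSnapshot;
import org.opensearch.index.translog.transfer.TranslogTransferManager;
import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
import org.opensearch.index.translog.transfer.listener.FileTransferListener;
import org.opensearch.index.translog.transfer.listener.TranslogTransferListener;

class TransferWiringSketch {
    static boolean transfer(BlobStore blobStore, ExecutorService executor, TransferSnapshot snapshot) throws IOException {
        TranslogTransferManager manager = new TranslogTransferManager(
            new BlobStoreTransferService(blobStore, executor),
            new BlobPath().add("base_path"), // illustrative base path
            new FileTransferListener() {
                @Override
                public void onSuccess(TransferFileSnapshot fileSnapshot) {
                    System.out.println("uploaded " + fileSnapshot.getName());
                }

                @Override
                public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) {
                    System.err.println("failed " + fileSnapshot.getName());
                }
            },
            files -> files // no exclusions
        );
        return manager.transferSnapshot(snapshot, new TranslogTransferListener() {
            @Override
            public void onUploadComplete(TransferSnapshot transferSnapshot) {
                System.out.println("snapshot upload complete");
            }

            @Override
            public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) {
                System.err.println("snapshot upload failed: " + ex);
            }
        });
    }
}
```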
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java
new file mode 100644
index 0000000000000..0aae773f593fd
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java
@@ -0,0 +1,127 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.store.OutputStreamIndexOutput;
+import org.apache.lucene.util.SetOnce;
+import org.opensearch.common.bytes.BytesReference;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * The metadata associated with every transfer {@link TransferSnapshot}. The metadata is uploaded at the end of the
+ * translog and generational checkpoint uploads to mark the latest generation and the translog/checkpoint files that are
+ * still referenced by the last checkpoint.
+ *
+ * @opensearch.internal
+ */
+public class TranslogTransferMetadata {
+
+    private final long primaryTerm;
+
+    private final long generation;
+
+    private final long minTranslogGeneration;
+
+    private final long timeStamp;
+
+    private final int count;
+
+    private final SetOnce<Map<String, String>> generationToPrimaryTermMapper = new SetOnce<>();
+
+    private static final String METADATA_SEPARATOR = "__";
+
+    private static final int BUFFER_SIZE = 4096;
+
+    private static final int CURRENT_VERSION = 1;
+
+    private static final String METADATA_CODEC = "md";
+
+    public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) {
+        this.primaryTerm = primaryTerm;
+        this.generation = generation;
+        this.minTranslogGeneration = minTranslogGeneration;
+        this.timeStamp = System.currentTimeMillis();
+        this.count = count;
+    }
+
+    public long getPrimaryTerm() {
+        return primaryTerm;
+    }
+
+    public long getGeneration() {
+        return generation;
+    }
+
+    public long getMinTranslogGeneration() {
+        return minTranslogGeneration;
+    }
+
+    public int getCount() {
+        return count;
+    }
+
+    public void setGenerationToPrimaryTermMapper(Map<String, String> generationToPrimaryTermMap) {
+        generationToPrimaryTermMapper.set(generationToPrimaryTermMap);
+    }
+
+    public String getFileName() {
+        return String.join(
+            METADATA_SEPARATOR,
+            Arrays.asList(String.valueOf(primaryTerm), String.valueOf(generation), String.valueOf(timeStamp))
+        );
+    }
+
+    public byte[] createMetadataBytes() throws IOException {
+        try (BytesStreamOutput output = new BytesStreamOutput()) {
+            try (
+                OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(
+                    "translog transfer metadata " + primaryTerm,
+                    getFileName(),
+                    output,
+                    BUFFER_SIZE
+                )
+            ) {
+                CodecUtil.writeHeader(indexOutput, METADATA_CODEC, CURRENT_VERSION);
+                write(indexOutput);
+                CodecUtil.writeFooter(indexOutput);
+            }
+            return BytesReference.toBytes(output.bytes());
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(primaryTerm, generation, timeStamp);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        TranslogTransferMetadata other = (TranslogTransferMetadata) o;
+        return Objects.equals(this.primaryTerm, other.primaryTerm)
+            && Objects.equals(this.generation, other.generation)
+            && Objects.equals(this.timeStamp, other.timeStamp);
+    }
+
+    private void write(DataOutput out) throws IOException {
+        out.writeLong(primaryTerm);
+        out.writeLong(generation);
+        out.writeLong(minTranslogGeneration);
+        out.writeLong(timeStamp);
+        out.writeMapOfStrings(generationToPrimaryTermMapper.get());
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/FileTransferListener.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/FileTransferListener.java
new file mode 100644
index 0000000000000..939b56f109a36
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/FileTransferListener.java
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.index.translog.transfer.listener; + +import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; + +/** + * The listener to be invoked on the completion or failure of a {@link TransferFileSnapshot} + * + * @opensearch.internal + */ +public interface FileTransferListener { + + /** + * Invoked when the transfer of a single {@link TransferFileSnapshot} succeeds + * @param fileSnapshot the corresponding file snapshot + */ + void onSuccess(TransferFileSnapshot fileSnapshot); + + /** + * Invoked when the transfer of a single {@link TransferFileSnapshot} fails + * @param fileSnapshot the corresponding file snapshot + * @param e the exception while processing the {@link TransferFileSnapshot} + */ + void onFailure(TransferFileSnapshot fileSnapshot, Exception e); +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java new file mode 100644 index 0000000000000..c09fd8798e505 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer.listener; + +import org.opensearch.index.translog.transfer.TransferSnapshot; + +import java.io.IOException; + +/** + * The listener to be invoked on the completion or failure of a {@link TransferSnapshot} + * + * @opensearch.internal + */ +public interface TranslogTransferListener { + + /** + * Invoked when the transfer of {@link TransferSnapshot} succeeds + * @param transferSnapshot the transfer snapshot + * @throws IOException the exception during the transfer of data + */ + void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException; + + /** + * Invoked when the transfer of {@link TransferSnapshot} fails + * @param transferSnapshot the transfer snapshot + * @param ex the exception while processing the {@link TransferSnapshot} + * @throws IOException the exception during the transfer of data + */ + void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/package-info.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/package-info.java new file mode 100644 index 0000000000000..edb7f453515b1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+/** Listeners invoked on translog transfer events */
+package org.opensearch.index.translog.transfer.listener;
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/package-info.java b/server/src/main/java/org/opensearch/index/translog/transfer/package-info.java
new file mode 100644
index 0000000000000..2ac96b01b0673
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Core classes responsible for handling all translog transfer operations */
+package org.opensearch.index.translog.transfer;
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
index d3da77112408b..32a0b3723f7ae 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
@@ -234,4 +234,30 @@ public void testParseDeduplicatesParameterStrings() throws IOException {
         assertSame(first.getPipeline(), second.getPipeline());
         assertSame(first.routing(), second.routing());
     }
+
+    public void testFailOnUnsupportedAction() {
+        BytesArray request = new BytesArray("{ \"baz\":{ \"_id\": \"bar\" } }\n{}\n");
+        BulkRequestParser parser = new BulkRequestParser();
+
+        IllegalArgumentException ex = expectThrows(
+            IllegalArgumentException.class,
+            () -> parser.parse(
+                request,
+                "foo",
+                null,
+                null,
+                null,
+                true,
+                false,
+                XContentType.JSON,
+                req -> fail(),
+                req -> fail(),
+                req -> fail()
+            )
+        );
+        assertEquals(
+            "Malformed action/metadata line [1], expected one of [create, delete, index, update] but found [baz]",
+            ex.getMessage()
+        );
+    }
 }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
index 87cab4a006a63..014f2d237a306 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
@@ -901,7 +901,10 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
         try {
             ClusterState state = clusterStateForWeightedRouting(indexNames, numShards, numReplicas);
 
-            Settings setting = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build();
+            Settings setting = Settings.builder()
+                .put("cluster.routing.allocation.awareness.attributes", "zone")
+                .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c")
+                .build();
 
             threadPool = new TestThreadPool("testThatOnlyNodesSupport");
             clusterService = ClusterServiceUtils.createClusterService(threadPool);
@@ -932,8 +935,9 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
             );
 
             for (ShardIterator it : groupIterator) {
-                List<ShardRouting> shardRoutings = Collections.singletonList(it.nextOrNull());
-                for (ShardRouting shardRouting : shardRoutings) {
+                while (it.remaining() > 0) {
+                    ShardRouting shardRouting = it.nextOrNull();
+                    assertNotNull(shardRouting);
                     selectedNodes.add(shardRouting.currentNodeId());
                 }
             }
@@ -950,9 +954,8 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
             assertFalse(weighAwayNodesInUndefinedZone);
             selectedNodes = new HashSet<>();
-            setting = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build();
 
-            // Updating weighted round robin weights in cluster state
+            // Updating weighted round-robin weights in cluster state
             weights = Map.of("a", 0.0, "b", 1.0);
             state = setWeightedRoutingWeights(state, weights);
@@ -964,11 +967,13 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
             groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
 
             for (ShardIterator it : groupIterator) {
-                List<ShardRouting> shardRoutings = Collections.singletonList(it.nextOrNull());
-                for (ShardRouting shardRouting : shardRoutings) {
+                while (it.remaining() > 0) {
+                    ShardRouting shardRouting = it.nextOrNull();
+                    assertNotNull(shardRouting);
                     selectedNodes.add(shardRouting.currentNodeId());
                 }
             }
+
             // tests that no shards are assigned to zone with weight zero
             // tests shards are assigned to nodes in zone c
             weighAwayNodesInUndefinedZone = true;
diff --git a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java
index 0edcd55cc35c3..2b54455f589fd 100644
--- a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java
+++ b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java
@@ -92,62 +92,12 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.hamcrest.Matchers.equalTo;
 
 public class LuceneTests extends OpenSearchTestCase {
     private static final NamedWriteableRegistry EMPTY_REGISTRY = new NamedWriteableRegistry(Collections.emptyList());
 
-    public void testWaitForIndex() throws Exception {
-        final MockDirectoryWrapper dir = newMockDirectory();
-
-        final AtomicBoolean succeeded = new AtomicBoolean(false);
-        final CountDownLatch latch = new CountDownLatch(1);
-
-        // Create a shadow Engine, which will freak out because there is no
-        // index yet
-        Thread t = new Thread(new Runnable() {
-            @Override
-            public void run() {
-                try {
-                    latch.await();
-                    if (Lucene.waitForIndex(dir, 5000)) {
-                        succeeded.set(true);
-                    } else {
-                        fail("index should have eventually existed!");
-                    }
-                } catch (InterruptedException e) {
-                    // ignore interruptions
-                } catch (Exception e) {
-                    fail("should have been able to create the engine! " + e.getMessage());
-                }
-            }
-        });
-        t.start();
-
-        // count down latch
-        // now shadow engine should try to be created
-        latch.countDown();
-
-        IndexWriterConfig iwc = newIndexWriterConfig();
-        iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
-        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
-        iwc.setMaxBufferedDocs(2);
-        IndexWriter writer = new IndexWriter(dir, iwc);
-        Document doc = new Document();
-        doc.add(new TextField("id", "1", random().nextBoolean() ?
Field.Store.YES : Field.Store.NO)); - writer.addDocument(doc); - writer.commit(); - - t.join(); - - writer.close(); - dir.close(); - assertTrue("index should have eventually existed", succeeded.get()); - } - public void testCleanIndex() throws IOException { MockDirectoryWrapper dir = newMockDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(); diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java new file mode 100644 index 0000000000000..adca47bf64c64 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java @@ -0,0 +1,123 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.BlobStoreTestUtil; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +public class BlobStoreTransferServiceTests extends OpenSearchTestCase { + + private ExecutorService executorService; + + private BlobStoreRepository repository; + + @Override + public void setUp() throws Exception { + super.setUp(); + repository = createRepository(); + executorService = Executors.newFixedThreadPool(1); + } + + public void testUploadBlob() throws IOException { + Path testFile = createTempFile(); + Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND); + FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot(testFile, randomNonNegativeLong()); + TransferService transferService = new BlobStoreTransferService(repository.blobStore(), executorService); + transferService.uploadBlob(transferFileSnapshot, repository.basePath()); + } + + public void testUploadBlobAsync() throws IOException, InterruptedException { + Path testFile = createTempFile(); + Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND); + AtomicBoolean succeeded = new AtomicBoolean(false); + FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot(testFile, randomNonNegativeLong()); + CountDownLatch latch = new CountDownLatch(1); + TransferService transferService = new BlobStoreTransferService(repository.blobStore(), executorService); + transferService.uploadBlobAsync(transferFileSnapshot, repository.basePath(), new 
LatchedActionListener<>(new ActionListener<>() {
+            @Override
+            public void onResponse(FileSnapshot.TransferFileSnapshot fileSnapshot) {
+                assertTrue(succeeded.compareAndSet(false, true));
+                assertEquals(transferFileSnapshot.getPrimaryTerm(), fileSnapshot.getPrimaryTerm());
+                assertEquals(transferFileSnapshot.getName(), fileSnapshot.getName());
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new AssertionError("Failed to perform uploadBlobAsync", e);
+            }
+        }, latch));
+        assertTrue(latch.await(1000, TimeUnit.MILLISECONDS));
+        assertTrue(succeeded.get());
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        repository.stop();
+        executorService.shutdown();
+        executorService.awaitTermination(1000, TimeUnit.MILLISECONDS);
+    }
+
+    /** Create a {@link Repository} with a random name **/
+    private BlobStoreRepository createRepository() {
+        Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build();
+        RepositoryMetadata repositoryMetadata = new RepositoryMetadata(randomAlphaOfLength(10), FsRepository.TYPE, settings);
+        final ClusterService clusterService = BlobStoreTestUtil.mockClusterService(repositoryMetadata);
+        final FsRepository repository = new FsRepository(
+            repositoryMetadata,
+            createEnvironment(),
+            xContentRegistry(),
+            clusterService,
+            new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))
+        ) {
+            @Override
+            protected void assertSnapshotOrGenericThread() {
+                // eliminate thread name check as we create repo manually
+            }
+        };
+        clusterService.addStateApplier(event -> repository.updateState(event.state()));
+        // Apply state once to initialize repo properly like RepositoriesService would
+        repository.updateState(clusterService.state());
+        repository.start();
+        return repository;
+    }
+
+    /** Create a {@link Environment} with random path.home and path.repo **/
+    private Environment createEnvironment() {
+        Path home = createTempDir();
+        return TestEnvironment.newEnvironment(
+            Settings.builder()
+                .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath())
+                .put(Environment.PATH_REPO_SETTING.getKey(), home.resolve("repo").toAbsolutePath())
+                .build()
+        );
+    }
+}
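The async test above gets determinism from a real single-thread pool plus a latch; an alternative (a suggestion, not what the test does) is a same-thread executor, which makes the listener fire before `uploadBlobAsync` returns:

```java
import java.util.Collections;
import java.util.List;
import java.util.concurrent.AbstractExecutorService;
import java.util.concurrent.TimeUnit;

// Runs every submitted task on the calling thread; useful for deterministic unit tests.
class DirectExecutorService extends AbstractExecutorService {
    @Override
    public void execute(Runnable command) {
        command.run();
    }

    @Override
    public void shutdown() {}

    @Override
    public List<Runnable> shutdownNow() {
        return Collections.emptyList();
    }

    @Override
    public boolean isShutdown() {
        return true;
    }

    @Override
    public boolean isTerminated() {
        return true;
    }

    @Override
    public boolean awaitTermination(long timeout, TimeUnit unit) {
        return true;
    }
}
```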
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
new file mode 100644
index 0000000000000..60b7029f18fa6
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
@@ -0,0 +1,150 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.mockito.Mockito;
+import org.opensearch.action.ActionListener;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.index.translog.Translog;
+import org.opensearch.index.translog.transfer.listener.FileTransferListener;
+import org.opensearch.index.translog.transfer.listener.TranslogTransferListener;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot;
+import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+
+@LuceneTestCase.SuppressFileSystems("*")
+public class TranslogTransferManagerTests extends OpenSearchTestCase {
+
+    private TransferService transferService;
+    private BlobPath remoteBaseTransferPath;
+    private long primaryTerm;
+    private long generation;
+    private long minTranslogGeneration;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        primaryTerm = randomNonNegativeLong();
+        generation = randomNonNegativeLong();
+        minTranslogGeneration = randomLongBetween(0, generation);
+        remoteBaseTransferPath = new BlobPath().add("base_path");
+        transferService = mock(TransferService.class);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testTransferSnapshot() throws IOException {
+        AtomicInteger fileTransferSucceeded = new AtomicInteger();
+        AtomicInteger fileTransferFailed = new AtomicInteger();
+        AtomicInteger translogTransferSucceeded = new AtomicInteger();
+        AtomicInteger translogTransferFailed = new AtomicInteger();
+
+        doNothing().when(transferService)
+            .uploadBlob(any(TransferFileSnapshot.class), Mockito.eq(remoteBaseTransferPath.add(String.valueOf(primaryTerm))));
+        doAnswer(invocationOnMock -> {
+            ActionListener<TransferFileSnapshot> listener = (ActionListener<TransferFileSnapshot>) invocationOnMock.getArguments()[2];
+            listener.onResponse((TransferFileSnapshot) invocationOnMock.getArguments()[0]);
+            return null;
+        }).when(transferService).uploadBlobAsync(any(TransferFileSnapshot.class), any(BlobPath.class), any(ActionListener.class));
+
+        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
+            transferService,
+            remoteBaseTransferPath,
+            new FileTransferListener() {
+                @Override
+                public void onSuccess(TransferFileSnapshot fileSnapshot) {
+                    fileTransferSucceeded.incrementAndGet();
+                }
+
+                @Override
+                public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) {
+                    fileTransferFailed.incrementAndGet();
+                }
+            },
+            r -> r
+        );
+
+        assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() {
+            @Override
+            public void onUploadComplete(TransferSnapshot transferSnapshot) {
+                translogTransferSucceeded.incrementAndGet();
+            }
+
+            @Override
+            public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) {
+                translogTransferFailed.incrementAndGet();
+            }
+        }));
+        assertEquals(4, fileTransferSucceeded.get());
+        assertEquals(0, fileTransferFailed.get());
+        assertEquals(1, translogTransferSucceeded.get());
+        assertEquals(0, translogTransferFailed.get());
+    }
+
+    private TransferSnapshot createTransferSnapshot() {
+        return new TransferSnapshot() {
+            @Override
+            public Set<CheckpointFileSnapshot> getCheckpointFileSnapshots() {
+                try {
+                    return Set.of(
+                        new CheckpointFileSnapshot(
+                            primaryTerm,
+                            generation,
+                            minTranslogGeneration,
+                            createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.CHECKPOINT_SUFFIX)
+                        ),
+                        new CheckpointFileSnapshot(
+                            primaryTerm,
+                            generation,
+                            minTranslogGeneration,
+                            createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.CHECKPOINT_SUFFIX)
+                        )
+                    );
+                } catch (IOException e) {
+                    throw new AssertionError("Failed to create temp file", e);
+                }
+            }
+
+            @Override
+            public Set<TranslogFileSnapshot> getTranslogFileSnapshots() {
+                try {
+                    return Set.of(
+                        new TranslogFileSnapshot(
+                            primaryTerm,
+                            generation,
+                            createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.TRANSLOG_FILE_SUFFIX)
+                        ),
+                        new TranslogFileSnapshot(
+                            primaryTerm,
+                            generation - 1,
+                            createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.TRANSLOG_FILE_SUFFIX)
+                        )
+                    );
+                } catch (IOException e) {
+                    throw new AssertionError("Failed to create temp file", e);
+                }
+            }
+
+            @Override
+            public TranslogTransferMetadata getTranslogTransferMetadata() {
+                return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5));
+            }
+        };
+    }
+}
diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java
index 94fb6cded637d..050965b37c068 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java
@@ -101,7 +101,6 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 import java.util.function.Predicate;
@@ -116,60 +115,56 @@
  *
  */
 public class AggregationsTests extends OpenSearchTestCase {
-    private static final List<InternalAggregationTestCase<?>> aggsTests = getAggsTests();
-
-    private static List<InternalAggregationTestCase<?>> getAggsTests() {
-        List<InternalAggregationTestCase<?>> aggsTests = new ArrayList<>();
-        aggsTests.add(new InternalCardinalityTests());
-        aggsTests.add(new InternalTDigestPercentilesTests());
-        aggsTests.add(new InternalTDigestPercentilesRanksTests());
-        aggsTests.add(new InternalHDRPercentilesTests());
-        aggsTests.add(new InternalHDRPercentilesRanksTests());
-        aggsTests.add(new InternalPercentilesBucketTests());
-        aggsTests.add(new InternalMinTests());
-        aggsTests.add(new InternalMaxTests());
-        aggsTests.add(new InternalAvgTests());
-        aggsTests.add(new InternalWeightedAvgTests());
-        aggsTests.add(new InternalSumTests());
-        aggsTests.add(new InternalValueCountTests());
-        aggsTests.add(new InternalSimpleValueTests());
-        aggsTests.add(new InternalDerivativeTests());
-        aggsTests.add(new InternalBucketMetricValueTests());
-        aggsTests.add(new InternalStatsTests());
-        aggsTests.add(new InternalStatsBucketTests());
-        aggsTests.add(new InternalExtendedStatsTests());
-        aggsTests.add(new InternalExtendedStatsBucketTests());
-        aggsTests.add(new InternalGeoCentroidTests());
-        aggsTests.add(new InternalHistogramTests());
-        aggsTests.add(new InternalDateHistogramTests());
-        aggsTests.add(new InternalAutoDateHistogramTests());
-        aggsTests.add(new InternalVariableWidthHistogramTests());
-        aggsTests.add(new LongTermsTests());
-        aggsTests.add(new DoubleTermsTests());
-        aggsTests.add(new StringTermsTests());
-        aggsTests.add(new LongRareTermsTests());
-        aggsTests.add(new StringRareTermsTests());
-        aggsTests.add(new InternalMissingTests());
+    private static final List<InternalAggregationTestCase<?>> aggsTests = List.of(
+        new InternalCardinalityTests(),
+        new InternalTDigestPercentilesTests(),
+        new InternalTDigestPercentilesRanksTests(),
+        new InternalHDRPercentilesTests(),
+        new InternalHDRPercentilesRanksTests(),
+        new InternalPercentilesBucketTests(),
+        new InternalMinTests(),
+        new InternalMaxTests(),
+        new InternalAvgTests(),
+        new InternalWeightedAvgTests(),
+        new InternalSumTests(),
+        new InternalValueCountTests(),
+        new InternalSimpleValueTests(),
+        new InternalDerivativeTests(),
+        new InternalBucketMetricValueTests(),
+        new InternalStatsTests(),
+        new InternalStatsBucketTests(),
+        new InternalExtendedStatsTests(),
+        new InternalExtendedStatsBucketTests(),
+        new InternalGeoCentroidTests(),
+        new InternalHistogramTests(),
+        new InternalDateHistogramTests(),
+        new InternalAutoDateHistogramTests(),
+        new InternalVariableWidthHistogramTests(),
+        new LongTermsTests(),
+        new DoubleTermsTests(),
+        new StringTermsTests(),
+        new LongRareTermsTests(),
+        new StringRareTermsTests(),
+        new InternalMissingTests(),
+        new InternalNestedTests(),
+        new InternalReverseNestedTests(),
+        new InternalGlobalTests(),
+        new InternalFilterTests(),
+        new InternalSamplerTests(),
+        new InternalRangeTests(),
+        new InternalDateRangeTests(),
+        new InternalGeoDistanceTests(),
+        new InternalFiltersTests(),
+        new InternalAdjacencyMatrixTests(),
+        new SignificantLongTermsTests(),
+        new SignificantStringTermsTests(),
+        new InternalScriptedMetricTests(),
+        new InternalBinaryRangeTests(),
+        new InternalTopHitsTests(),
+        new InternalCompositeTests(),
+        new InternalMedianAbsoluteDeviationTests(),
+        new InternalMultiTermsTests()
+    );
 
     @Override
     protected NamedXContentRegistry xContentRegistry() {
@@ -226,7 +222,7 @@ public void testFromXContentWithRandomFields() throws IOException {
     private void parseAndAssert(boolean addRandomFields) throws IOException {
         XContentType xContentType = randomFrom(XContentType.values());
         final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true"));
-        Aggregations aggregations = createTestInstance();
+        Aggregations aggregations = createTestInstance(1, 0, 3);
         BytesReference originalBytes = toShuffledXContent(aggregations, xContentType, params, randomBoolean());
         BytesReference mutated;
         if (addRandomFields) {
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 8307443defa3a..66d74afad5bc8 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -38,11 +38,11 @@ dependencies {
     exclude module: 'jettison'
   }
   api "org.codehaus.jettison:jettison:${versions.jettison}"
-  api "org.apache.commons:commons-compress:1.21"
+  api "org.apache.commons:commons-compress:1.22"
   api "commons-codec:commons-codec:${versions.commonscodec}"
   api "org.apache.logging.log4j:log4j-core:${versions.log4j}"
   api "io.netty:netty-all:${versions.netty}"
-  api 'com.google.code.gson:gson:2.9.1'
+  api 'com.google.code.gson:gson:2.10'
   api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}"
   api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}"
   api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
diff --git a/test/fixtures/minio-fixture/Dockerfile b/test/fixtures/minio-fixture/Dockerfile
index b56440c0d44a9..81655aa545afd 100644
--- a/test/fixtures/minio-fixture/Dockerfile
+++ b/test/fixtures/minio-fixture/Dockerfile
@@ -1,4 +1,4 @@
-FROM minio/minio:RELEASE.2022-06-25T15-50-16Z
+FROM minio/minio:RELEASE.2022-11-17T23-20-09Z
 
 ARG bucket
 ARG accessKey
diff --git a/test/fixtures/minio-fixture/docker-compose.yml b/test/fixtures/minio-fixture/docker-compose.yml
index 4c0245772ed4c..e4d2faab9a657 100644
--- a/test/fixtures/minio-fixture/docker-compose.yml
+++ b/test/fixtures/minio-fixture/docker-compose.yml
@@ -14,6 +14,14 @@ services:
         soft: 4096
     ports:
       - "9000"
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
+      interval: 30s
+      timeout: 20s
+      retries: 3
+    volumes:
+      - type: tmpfs
+        target: /minio/data
     command: ["server", "--console-address", ":9001", "/minio/data"]
   minio-fixture-other:
     build:
@@ -29,6 +37,14 @@
         soft: 4096
     ports:
      - "9000"
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
+      interval: 30s
+      timeout: 20s
+      retries: 3
+    volumes:
+      - type: tmpfs
+        target: /minio/data
     command: ["server", "--console-address", ":9001", "/minio/data"]
   minio-fixture-for-snapshot-tool:
     build:
@@ -44,4 +60,12 @@
         soft: 4096
     ports:
       - "9000"
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
+      interval: 30s
+      timeout: 20s
+      retries: 3
+    volumes:
+      - type: tmpfs
+        target: /minio/data
     command: ["server", "--console-address", ":9001", "/minio/data"]
diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
index f874ab44d9d3b..11b3ce1dd05d4 100644
--- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
@@ -1301,8 +1301,10 @@ public void onReplicationFailure(
                 }
             );
             ids.add(target);
-            countDownLatch.await(1, TimeUnit.SECONDS);
         }
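+        // Wait once for every replica's listener to fire instead of 1s per target inside the loop.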
"org.codehaus.jettison:jettison:${versions.jettison}" - api "org.apache.commons:commons-compress:1.21" + api "org.apache.commons:commons-compress:1.22" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" - api 'com.google.code.gson:gson:2.9.1' + api 'com.google.code.gson:gson:2.10' api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" diff --git a/test/fixtures/minio-fixture/Dockerfile b/test/fixtures/minio-fixture/Dockerfile index b56440c0d44a9..81655aa545afd 100644 --- a/test/fixtures/minio-fixture/Dockerfile +++ b/test/fixtures/minio-fixture/Dockerfile @@ -1,4 +1,4 @@ -FROM minio/minio:RELEASE.2022-06-25T15-50-16Z +FROM minio/minio:RELEASE.2022-11-17T23-20-09Z ARG bucket ARG accessKey diff --git a/test/fixtures/minio-fixture/docker-compose.yml b/test/fixtures/minio-fixture/docker-compose.yml index 4c0245772ed4c..e4d2faab9a657 100644 --- a/test/fixtures/minio-fixture/docker-compose.yml +++ b/test/fixtures/minio-fixture/docker-compose.yml @@ -14,6 +14,14 @@ services: soft: 4096 ports: - "9000" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + interval: 30s + timeout: 20s + retries: 3 + volumes: + - type: tmpfs + target: /minio/data command: ["server", "--console-address", ":9001", "/minio/data"] minio-fixture-other: build: @@ -29,6 +37,14 @@ services: soft: 4096 ports: - "9000" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + interval: 30s + timeout: 20s + retries: 3 + volumes: + - type: tmpfs + target: /minio/data command: ["server", "--console-address", ":9001", "/minio/data"] minio-fixture-for-snapshot-tool: build: @@ -44,4 +60,12 @@ services: soft: 4096 ports: - "9000" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + interval: 30s + timeout: 20s + retries: 3 + volumes: + - type: tmpfs + target: /minio/data command: ["server", "--console-address", ":9001", "/minio/data"] diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index f874ab44d9d3b..11b3ce1dd05d4 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -1301,8 +1301,9 @@ public void onReplicationFailure( } ); ids.add(target); - countDownLatch.await(1, TimeUnit.SECONDS); } + countDownLatch.await(30, TimeUnit.SECONDS); + assertEquals("Replication should complete successfully", 0, countDownLatch.getCount()); return ids; }